Update vendor, go.mod and go.sum for k8s bump to v0.18.3

Signed-off-by: Billy McFall <22157057+Billy99@users.noreply.github.com>
Billy McFall
2020-10-23 10:52:52 -04:00
parent c8739f64b9
commit 36b5edff29
892 changed files with 147015 additions and 61162 deletions


@@ -28,10 +28,15 @@ go get -u github.com/evanphx/json-patch
# Configuration
There is a single global configuration variable `jsonpatch.SupportNegativeIndices`. This
defaults to `true` and enables the non-standard practice of allowing negative indices
to mean indices starting at the end of an array. This functionality can be disabled
by setting `jsonpatch.SupportNegativeIndices = false`.
* There is a global configuration variable `jsonpatch.SupportNegativeIndices`.
This defaults to `true` and enables the non-standard practice of allowing
negative indices to mean indices starting at the end of an array. This
functionality can be disabled by setting `jsonpatch.SupportNegativeIndices =
false`.
* There is a global configuration variable `jsonpatch.AccumulatedCopySizeLimit`,
which limits the total size increase in bytes caused by "copy" operations in a
patch. It defaults to 0, which means there is no limit.
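
As a quick illustration of the two configuration variables described above, a minimal consumer might look like this (the document and patch contents are made up for the example):

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	// Keep the (default) non-standard negative-index behaviour enabled and cap
	// the total growth from "copy" operations at 1 MiB; 0 would mean no limit.
	jsonpatch.SupportNegativeIndices = true
	jsonpatch.AccumulatedCopySizeLimit = 1 << 20

	doc := []byte(`{"items": [1, 2, 3]}`)
	patch, err := jsonpatch.DecodePatch([]byte(`[{"op": "add", "path": "/items/-", "value": 4}]`))
	if err != nil {
		panic(err)
	}

	out, err := patch.Apply(doc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // e.g. {"items":[1,2,3,4]}
}
```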
## Create and apply a merge patch
Given both an original JSON document and a modified JSON document, you can create

vendor/github.com/evanphx/json-patch/errors.go generated vendored Normal file

@@ -0,0 +1,38 @@
package jsonpatch
import "fmt"
// AccumulatedCopySizeError is an error type returned when the accumulated size
// increase caused by copy operations in a patch operation has exceeded the
// limit.
type AccumulatedCopySizeError struct {
limit int64
accumulated int64
}
// NewAccumulatedCopySizeError returns an AccumulatedCopySizeError.
func NewAccumulatedCopySizeError(l, a int64) *AccumulatedCopySizeError {
return &AccumulatedCopySizeError{limit: l, accumulated: a}
}
// Error implements the error interface.
func (a *AccumulatedCopySizeError) Error() string {
return fmt.Sprintf("Unable to complete the copy, the accumulated size increase of copy is %d, exceeding the limit %d", a.accumulated, a.limit)
}
// ArraySizeError is an error type returned when the array size has exceeded
// the limit.
type ArraySizeError struct {
limit int
size int
}
// NewArraySizeError returns an ArraySizeError.
func NewArraySizeError(l, s int) *ArraySizeError {
return &ArraySizeError{limit: l, size: s}
}
// Error implements the error interface.
func (a *ArraySizeError) Error() string {
return fmt.Sprintf("Unable to create array of size %d, limit is %d", a.size, a.limit)
}
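
A minimal sketch of how a caller might distinguish these error types after a failed apply; `applyWithLimits` is a hypothetical helper, and the document and patch here are illustrative:

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

// applyWithLimits is a hypothetical helper that reports which limit, if any,
// caused a patch application to fail.
func applyWithLimits(doc, rawPatch []byte) ([]byte, error) {
	patch, err := jsonpatch.DecodePatch(rawPatch)
	if err != nil {
		return nil, err
	}

	out, err := patch.Apply(doc)
	if err != nil {
		switch e := err.(type) {
		case *jsonpatch.AccumulatedCopySizeError:
			// Total growth from "copy" operations exceeded the configured limit.
			return nil, fmt.Errorf("copy size limit exceeded: %w", e)
		case *jsonpatch.ArraySizeError:
			// A resulting array exceeded the configured size limit.
			return nil, fmt.Errorf("array size limit exceeded: %w", e)
		default:
			return nil, err
		}
	}
	return out, nil
}

func main() {
	doc := []byte(`{"a": "x"}`)
	rawPatch := []byte(`[{"op": "copy", "from": "/a", "path": "/b"}]`)
	out, err := applyWithLimits(doc, rawPatch)
	fmt.Println(string(out), err)
}
```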


@@ -14,7 +14,15 @@ const (
eAry
)
var SupportNegativeIndices bool = true
var (
// SupportNegativeIndices decides whether to support non-standard practice of
// allowing negative indices to mean indices starting at the end of an array.
// Defaults to true.
SupportNegativeIndices bool = true
// AccumulatedCopySizeLimit limits the total size increase in bytes caused by
// "copy" operations in a patch.
AccumulatedCopySizeLimit int64 = 0
)
type lazyNode struct {
raw *json.RawMessage
@@ -63,6 +71,20 @@ func (n *lazyNode) UnmarshalJSON(data []byte) error {
return nil
}
func deepCopy(src *lazyNode) (*lazyNode, int, error) {
if src == nil {
return nil, 0, nil
}
a, err := src.MarshalJSON()
if err != nil {
return nil, 0, err
}
sz := len(a)
ra := make(json.RawMessage, sz)
copy(ra, a)
return newLazyNode(&ra), sz, nil
}
func (n *lazyNode) intoDoc() (*partialDoc, error) {
if n.which == eDoc {
return &n.doc, nil
@@ -344,35 +366,14 @@ func (d *partialDoc) remove(key string) error {
return nil
}
// set should only be used to implement the "replace" operation, so "key" must
// be an already existing index in "d".
func (d *partialArray) set(key string, val *lazyNode) error {
if key == "-" {
*d = append(*d, val)
return nil
}
idx, err := strconv.Atoi(key)
if err != nil {
return err
}
sz := len(*d)
if idx+1 > sz {
sz = idx + 1
}
ary := make([]*lazyNode, sz)
cur := *d
copy(ary, cur)
if idx >= len(ary) {
return fmt.Errorf("Unable to access invalid index: %d", idx)
}
ary[idx] = val
*d = ary
(*d)[idx] = val
return nil
}
@@ -387,7 +388,9 @@ func (d *partialArray) add(key string, val *lazyNode) error {
return err
}
ary := make([]*lazyNode, len(*d)+1)
sz := len(*d) + 1
ary := make([]*lazyNode, sz)
cur := *d
@@ -527,7 +530,7 @@ func (p Patch) move(doc *container, op operation) error {
return fmt.Errorf("jsonpatch move operation does not apply: doc is missing destination path: %s", path)
}
return con.set(key, val)
return con.add(key, val)
}
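
To illustrate why `move` now uses `add` for its destination: RFC 6902 defines `move` as a remove at `from` followed by an add at `path`, so moving an element within an array should insert it rather than overwrite the element already at the destination. A small sketch with made-up data:

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	doc := []byte(`{"foo": [1, 2, 3, 4]}`)

	// Remove the first element, then insert it at index 2 of the remaining
	// array; the element already at that index is shifted, not replaced.
	patch, err := jsonpatch.DecodePatch([]byte(
		`[{"op": "move", "from": "/foo/0", "path": "/foo/2"}]`))
	if err != nil {
		panic(err)
	}

	out, err := patch.Apply(doc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // expected shape: {"foo":[2,3,1,4]}
}
```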
func (p Patch) test(doc *container, op operation) error {
@@ -561,7 +564,7 @@ func (p Patch) test(doc *container, op operation) error {
return fmt.Errorf("Testing value %s failed", path)
}
func (p Patch) copy(doc *container, op operation) error {
func (p Patch) copy(doc *container, op operation, accumulatedCopySize *int64) error {
from := op.from()
con, key := findObject(doc, from)
@@ -583,7 +586,16 @@ func (p Patch) copy(doc *container, op operation) error {
return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing destination path: %s", path)
}
return con.set(key, val)
valCopy, sz, err := deepCopy(val)
if err != nil {
return err
}
(*accumulatedCopySize) += int64(sz)
if AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > AccumulatedCopySizeLimit {
return NewAccumulatedCopySizeError(AccumulatedCopySizeLimit, *accumulatedCopySize)
}
return con.add(key, valCopy)
}
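
A sketch of the new limit in action, assuming a deliberately tiny `AccumulatedCopySizeLimit` so that a single `copy` exceeds it (data is illustrative):

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	// Tiny limit: the copied value below is larger than 8 bytes once marshaled.
	jsonpatch.AccumulatedCopySizeLimit = 8

	doc := []byte(`{"blob": "0123456789abcdef"}`)
	patch, err := jsonpatch.DecodePatch([]byte(
		`[{"op": "copy", "from": "/blob", "path": "/blob2"}]`))
	if err != nil {
		panic(err)
	}

	_, err = patch.Apply(doc)
	fmt.Println(err) // expected: an *AccumulatedCopySizeError from the copy above
}
```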
// Equal indicates if 2 JSON documents have the same structural equality.
@@ -636,6 +648,8 @@ func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) {
err = nil
var accumulatedCopySize int64
for _, op := range p {
switch op.kind() {
case "add":
@@ -649,7 +663,7 @@ func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) {
case "test":
err = p.test(&pd, op)
case "copy":
err = p.copy(&pd, op)
err = p.copy(&pd, op, &accumulatedCopySize)
default:
err = fmt.Errorf("Unexpected kind: %s", op.kind())
}