Update vendor

This commit is contained in:
parent c7c1a99e8e
commit b7649db53a
2 vendor/modules.txt (vendored)

@@ -1761,7 +1761,7 @@ sigs.k8s.io/kustomize/pkg/transformers
 sigs.k8s.io/kustomize/pkg/transformers/config
 sigs.k8s.io/kustomize/pkg/transformers/config/defaultconfig
 sigs.k8s.io/kustomize/pkg/types
-# sigs.k8s.io/structured-merge-diff v0.0.0-20190719182312-e94e05bfbbe3 => sigs.k8s.io/structured-merge-diff v0.0.0-20190719182312-e94e05bfbbe3
+# sigs.k8s.io/structured-merge-diff v0.0.0-20190724202554-0c1d754dd648 => sigs.k8s.io/structured-merge-diff v0.0.0-20190724202554-0c1d754dd648
 sigs.k8s.io/structured-merge-diff/fieldpath
 sigs.k8s.io/structured-merge-diff/merge
 sigs.k8s.io/structured-merge-diff/schema
7 vendor/sigs.k8s.io/structured-merge-diff/fieldpath/BUILD (generated, vendored)

@@ -8,12 +8,17 @@ go_library(
         "fromvalue.go",
         "managers.go",
         "path.go",
+        "serialize.go",
+        "serialize-pe.go",
         "set.go",
     ],
     importmap = "k8s.io/kubernetes/vendor/sigs.k8s.io/structured-merge-diff/fieldpath",
     importpath = "sigs.k8s.io/structured-merge-diff/fieldpath",
     visibility = ["//visibility:public"],
-    deps = ["//vendor/sigs.k8s.io/structured-merge-diff/value:go_default_library"],
+    deps = [
+        "//vendor/github.com/json-iterator/go:go_default_library",
+        "//vendor/sigs.k8s.io/structured-merge-diff/value:go_default_library",
+    ],
 )
 
 filegroup(
155 vendor/sigs.k8s.io/structured-merge-diff/fieldpath/serialize-pe.go (generated, vendored, new file)

@@ -0,0 +1,155 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package fieldpath

import (
	"errors"
	"fmt"
	"io"
	"strconv"
	"strings"

	jsoniter "github.com/json-iterator/go"
	"sigs.k8s.io/structured-merge-diff/value"
)

var ErrUnknownPathElementType = errors.New("unknown path element type")

const (
	// Field indicates that the content of this path element is a field's name
	peField = "f"

	// Value indicates that the content of this path element is a field's value
	peValue = "v"

	// Index indicates that the content of this path element is an index in an array
	peIndex = "i"

	// Key indicates that the content of this path element is a key value map
	peKey = "k"

	// Separator separates the type of a path element from the contents
	peSeparator = ":"
)

var (
	peFieldSepBytes = []byte(peField + peSeparator)
	peValueSepBytes = []byte(peValue + peSeparator)
	peIndexSepBytes = []byte(peIndex + peSeparator)
	peKeySepBytes   = []byte(peKey + peSeparator)
	peSepBytes      = []byte(peSeparator)
)

// DeserializePathElement parses a serialized path element
func DeserializePathElement(s string) (PathElement, error) {
	b := []byte(s)
	if len(b) < 2 {
		return PathElement{}, errors.New("key must be 2 characters long:")
	}
	typeSep, b := b[:2], b[2:]
	if typeSep[1] != peSepBytes[0] {
		return PathElement{}, fmt.Errorf("missing colon: %v", s)
	}
	switch typeSep[0] {
	case peFieldSepBytes[0]:
		// Slice s rather than convert b, to save on
		// allocations.
		str := s[2:]
		return PathElement{
			FieldName: &str,
		}, nil
	case peValueSepBytes[0]:
		iter := readPool.BorrowIterator(b)
		defer readPool.ReturnIterator(iter)
		v, err := value.ReadJSONIter(iter)
		if err != nil {
			return PathElement{}, err
		}
		return PathElement{Value: &v}, nil
	case peKeySepBytes[0]:
		iter := readPool.BorrowIterator(b)
		defer readPool.ReturnIterator(iter)
		v, err := value.ReadJSONIter(iter)
		if err != nil {
			return PathElement{}, err
		}
		if v.MapValue == nil {
			return PathElement{}, fmt.Errorf("expected key value pairs but got %#v", v)
		}
		return PathElement{Key: v.MapValue}, nil
	case peIndexSepBytes[0]:
		i, err := strconv.Atoi(s[2:])
		if err != nil {
			return PathElement{}, err
		}
		return PathElement{
			Index: &i,
		}, nil
	default:
		return PathElement{}, ErrUnknownPathElementType
	}
}

var (
	readPool  = jsoniter.NewIterator(jsoniter.ConfigCompatibleWithStandardLibrary).Pool()
	writePool = jsoniter.NewStream(jsoniter.ConfigCompatibleWithStandardLibrary, nil, 1024).Pool()
)

// SerializePathElement serializes a path element
func SerializePathElement(pe PathElement) (string, error) {
	buf := strings.Builder{}
	err := serializePathElementToWriter(&buf, pe)
	return buf.String(), err
}

func serializePathElementToWriter(w io.Writer, pe PathElement) error {
	stream := writePool.BorrowStream(w)
	defer writePool.ReturnStream(stream)
	switch {
	case pe.FieldName != nil:
		if _, err := stream.Write(peFieldSepBytes); err != nil {
			return err
		}
		stream.WriteRaw(*pe.FieldName)
	case pe.Key != nil:
		if _, err := stream.Write(peKeySepBytes); err != nil {
			return err
		}
		v := value.Value{MapValue: pe.Key}
		v.WriteJSONStream(stream)
	case pe.Value != nil:
		if _, err := stream.Write(peValueSepBytes); err != nil {
			return err
		}
		pe.Value.WriteJSONStream(stream)
	case pe.Index != nil:
		if _, err := stream.Write(peIndexSepBytes); err != nil {
			return err
		}
		stream.WriteInt(*pe.Index)
	default:
		return errors.New("invalid PathElement")
	}
	b := stream.Buffer()
	err := stream.Flush()
	// Help jsoniter manage its buffers--without this, the next
	// use of the stream is likely to require an allocation. Look
	// at the jsoniter stream code to understand why. They were probably
	// optimizing for folks using the buffer directly.
	stream.SetBuffer(b[:0])
	return err
}
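The serialized form is a one-letter type prefix ("f", "v", "i", or "k") followed by ":" and the content. Below is a minimal round-trip sketch, not part of this commit; the expected "f:spec" output is inferred from the constants above.

// Illustrative sketch, not part of this commit: round-tripping a field-name
// path element through the new serializer.
package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/fieldpath"
)

func main() {
	name := "spec"
	pe := fieldpath.PathElement{FieldName: &name}

	s, err := fieldpath.SerializePathElement(pe)
	if err != nil {
		panic(err)
	}
	fmt.Println(s) // expected: f:spec

	back, err := fieldpath.DeserializePathElement(s)
	if err != nil {
		panic(err)
	}
	fmt.Println(*back.FieldName) // expected: spec
}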
237 vendor/sigs.k8s.io/structured-merge-diff/fieldpath/serialize.go (generated, vendored, new file)

@@ -0,0 +1,237 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package fieldpath

import (
	"bytes"
	"io"
	"unsafe"

	jsoniter "github.com/json-iterator/go"
)

func (s *Set) ToJSON() ([]byte, error) {
	buf := bytes.Buffer{}
	err := s.ToJSONStream(&buf)
	if err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

func (s *Set) ToJSONStream(w io.Writer) error {
	stream := writePool.BorrowStream(w)
	defer writePool.ReturnStream(stream)

	var r reusableBuilder

	stream.WriteObjectStart()
	err := s.emitContents_v1(false, stream, &r)
	if err != nil {
		return err
	}
	stream.WriteObjectEnd()
	return stream.Flush()
}

func manageMemory(stream *jsoniter.Stream) error {
	// Help jsoniter manage its buffers--without this, it does a bunch of
	// alloctaions that are not necessary. They were probably optimizing
	// for folks using the buffer directly.
	b := stream.Buffer()
	if len(b) > 4096 || cap(b)-len(b) < 2048 {
		if err := stream.Flush(); err != nil {
			return err
		}
		stream.SetBuffer(b[:0])
	}
	return nil
}

type reusableBuilder struct {
	bytes.Buffer
}

func (r *reusableBuilder) unsafeString() string {
	b := r.Bytes()
	return *(*string)(unsafe.Pointer(&b))
}

func (r *reusableBuilder) reset() *bytes.Buffer {
	r.Reset()
	return &r.Buffer
}

func (s *Set) emitContents_v1(includeSelf bool, stream *jsoniter.Stream, r *reusableBuilder) error {
	mi, ci := 0, 0
	first := true
	preWrite := func() {
		if first {
			first = false
			return
		}
		stream.WriteMore()
	}

	for mi < len(s.Members.members) && ci < len(s.Children.members) {
		mpe := s.Members.members[mi]
		cpe := s.Children.members[ci].pathElement

		if mpe.Less(cpe) {
			preWrite()
			if err := serializePathElementToWriter(r.reset(), mpe); err != nil {
				return err
			}
			stream.WriteObjectField(r.unsafeString())
			stream.WriteEmptyObject()
			mi++
		} else if cpe.Less(mpe) {
			preWrite()
			if err := serializePathElementToWriter(r.reset(), cpe); err != nil {
				return err
			}
			stream.WriteObjectField(r.unsafeString())
			stream.WriteObjectStart()
			if err := s.Children.members[ci].set.emitContents_v1(false, stream, r); err != nil {
				return err
			}
			stream.WriteObjectEnd()
			ci++
		} else {
			preWrite()
			if err := serializePathElementToWriter(r.reset(), cpe); err != nil {
				return err
			}
			stream.WriteObjectField(r.unsafeString())
			stream.WriteObjectStart()
			if err := s.Children.members[ci].set.emitContents_v1(true, stream, r); err != nil {
				return err
			}
			stream.WriteObjectEnd()
			mi++
			ci++
		}
	}

	for mi < len(s.Members.members) {
		mpe := s.Members.members[mi]

		preWrite()
		if err := serializePathElementToWriter(r.reset(), mpe); err != nil {
			return err
		}
		stream.WriteObjectField(r.unsafeString())
		stream.WriteEmptyObject()
		mi++
	}

	for ci < len(s.Children.members) {
		cpe := s.Children.members[ci].pathElement

		preWrite()
		if err := serializePathElementToWriter(r.reset(), cpe); err != nil {
			return err
		}
		stream.WriteObjectField(r.unsafeString())
		stream.WriteObjectStart()
		if err := s.Children.members[ci].set.emitContents_v1(false, stream, r); err != nil {
			return err
		}
		stream.WriteObjectEnd()
		ci++
	}

	if includeSelf && !first {
		preWrite()
		stream.WriteObjectField(".")
		stream.WriteEmptyObject()
	}
	return manageMemory(stream)
}

// FromJSON clears s and reads a JSON formatted set structure.
func (s *Set) FromJSON(r io.Reader) error {
	// The iterator pool is completely useless for memory management, grrr.
	iter := jsoniter.Parse(jsoniter.ConfigCompatibleWithStandardLibrary, r, 4096)

	found, _ := readIter_v1(iter)
	if found == nil {
		*s = Set{}
	} else {
		*s = *found
	}
	return iter.Error
}

// returns true if this subtree is also (or only) a member of parent; s is nil
// if there are no further children.
func readIter_v1(iter *jsoniter.Iterator) (children *Set, isMember bool) {
	iter.ReadMapCB(func(iter *jsoniter.Iterator, key string) bool {
		if key == "." {
			isMember = true
			iter.Skip()
			return true
		}
		pe, err := DeserializePathElement(key)
		if err == ErrUnknownPathElementType {
			// Ignore these-- a future version maybe knows what
			// they are. We drop these completely rather than try
			// to preserve things we don't understand.
			iter.Skip()
			return true
		} else if err != nil {
			iter.ReportError("parsing key as path element", err.Error())
			iter.Skip()
			return true
		}
		grandchildren, childIsMember := readIter_v1(iter)
		if childIsMember {
			if children == nil {
				children = &Set{}
			}
			m := &children.Members.members
			// Since we expect that most of the time these will have been
			// serialized in the right order, we just verify that and append.
			appendOK := len(*m) == 0 || (*m)[len(*m)-1].Less(pe)
			if appendOK {
				*m = append(*m, pe)
			} else {
				children.Members.Insert(pe)
			}
		}
		if grandchildren != nil {
			if children == nil {
				children = &Set{}
			}
			// Since we expect that most of the time these will have been
			// serialized in the right order, we just verify that and append.
			m := &children.Children.members
			appendOK := len(*m) == 0 || (*m)[len(*m)-1].pathElement.Less(pe)
			if appendOK {
				*m = append(*m, setNode{pe, grandchildren})
			} else {
				*children.Children.Descend(pe) = *grandchildren
			}
		}
		return true
	})
	if children == nil {
		isMember = true
	}

	return children, isMember
}
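Together with serialize-pe.go, this gives Set a compact JSON form: object keys are serialized path elements, leaf members map to empty objects, and a "." key marks a node that is both a member and a parent. A round-trip sketch follows; it is not part of this commit and the example document is hypothetical.

// Illustrative sketch, not part of this commit: parsing and re-emitting a
// hypothetical field set. "f:spec" is a leaf member; "f:metadata" owns a
// child set.
package main

import (
	"bytes"
	"fmt"

	"sigs.k8s.io/structured-merge-diff/fieldpath"
)

func main() {
	doc := []byte(`{"f:metadata":{"f:labels":{"f:app":{}}},"f:spec":{}}`)

	var s fieldpath.Set
	if err := s.FromJSON(bytes.NewReader(doc)); err != nil {
		panic(err)
	}

	out, err := s.ToJSON()
	if err != nil {
		panic(err)
	}
	// Keys come back in path-element sort order.
	fmt.Println(string(out))
}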
10 vendor/sigs.k8s.io/structured-merge-diff/fieldpath/set.go (generated, vendored)

@@ -173,9 +173,17 @@ type setNode struct {
 
 // SetNodeMap is a map of PathElement to subset.
 type SetNodeMap struct {
-	members []setNode
+	members sortedSetNode
 }
 
+type sortedSetNode []setNode
+
+// Implement the sort interface; this would permit bulk creation, which would
+// be faster than doing it one at a time via Insert.
+func (s sortedSetNode) Len() int           { return len(s) }
+func (s sortedSetNode) Less(i, j int) bool { return s[i].pathElement.Less(s[j].pathElement) }
+func (s sortedSetNode) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
 // Descend adds pe to the set if necessary, returning the associated subset.
 func (s *SetNodeMap) Descend(pe PathElement) *Set {
 	loc := sort.Search(len(s.members), func(i int) bool {
6 vendor/sigs.k8s.io/structured-merge-diff/value/BUILD (generated, vendored)

@@ -4,13 +4,17 @@ go_library(
     name = "go_default_library",
     srcs = [
         "doc.go",
+        "fastjson.go",
         "unstructured.go",
         "value.go",
     ],
     importmap = "k8s.io/kubernetes/vendor/sigs.k8s.io/structured-merge-diff/value",
     importpath = "sigs.k8s.io/structured-merge-diff/value",
     visibility = ["//visibility:public"],
-    deps = ["//vendor/gopkg.in/yaml.v2:go_default_library"],
+    deps = [
+        "//vendor/github.com/json-iterator/go:go_default_library",
+        "//vendor/gopkg.in/yaml.v2:go_default_library",
+    ],
 )
 
 filegroup(
149 vendor/sigs.k8s.io/structured-merge-diff/value/fastjson.go (generated, vendored, new file)

@@ -0,0 +1,149 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package value

import (
	"bytes"
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

var (
	readPool  = jsoniter.NewIterator(jsoniter.ConfigCompatibleWithStandardLibrary).Pool()
	writePool = jsoniter.NewStream(jsoniter.ConfigCompatibleWithStandardLibrary, nil, 1024).Pool()
)

// FromJSONFast is a helper function for reading a JSON document
func FromJSONFast(input []byte) (Value, error) {
	iter := readPool.BorrowIterator(input)
	defer readPool.ReturnIterator(iter)
	return ReadJSONIter(iter)
}

func ReadJSONIter(iter *jsoniter.Iterator) (Value, error) {
	next := iter.WhatIsNext()
	switch next {
	case jsoniter.InvalidValue:
		iter.ReportError("reading an object", "got invalid token")
		return Value{}, iter.Error
	case jsoniter.StringValue:
		str := String(iter.ReadString())
		return Value{StringValue: &str}, nil
	case jsoniter.NumberValue:
		number := iter.ReadNumber()
		isFloat := false
		for _, c := range number {
			if c == 'e' || c == 'E' || c == '.' {
				isFloat = true
				break
			}
		}
		if isFloat {
			f, err := number.Float64()
			if err != nil {
				iter.ReportError("parsing as float", err.Error())
				return Value{}, err
			}
			return Value{FloatValue: (*Float)(&f)}, nil
		}
		i, err := number.Int64()
		if err != nil {
			iter.ReportError("parsing as float", err.Error())
			return Value{}, err
		}
		return Value{IntValue: (*Int)(&i)}, nil
	case jsoniter.NilValue:
		iter.ReadNil()
		return Value{Null: true}, nil
	case jsoniter.BoolValue:
		b := Boolean(iter.ReadBool())
		return Value{BooleanValue: &b}, nil
	case jsoniter.ArrayValue:
		list := &List{}
		iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
			v, err := ReadJSONIter(iter)
			if err != nil {
				iter.Error = err
				return false
			}
			list.Items = append(list.Items, v)
			return true
		})
		return Value{ListValue: list}, iter.Error
	case jsoniter.ObjectValue:
		m := &Map{}
		iter.ReadObjectCB(func(iter *jsoniter.Iterator, key string) bool {
			v, err := ReadJSONIter(iter)
			if err != nil {
				iter.Error = err
				return false
			}
			m.Items = append(m.Items, Field{Name: key, Value: v})
			return true
		})
		return Value{MapValue: m}, iter.Error
	default:
		return Value{}, fmt.Errorf("unexpected object type %v", next)
	}
}

// ToJSONFast is a helper function for producing a JSon document.
func (v *Value) ToJSONFast() ([]byte, error) {
	buf := bytes.Buffer{}
	stream := writePool.BorrowStream(&buf)
	defer writePool.ReturnStream(stream)
	v.WriteJSONStream(stream)
	err := stream.Flush()
	return buf.Bytes(), err
}

func (v *Value) WriteJSONStream(stream *jsoniter.Stream) {
	switch {
	case v.Null:
		stream.WriteNil()
	case v.FloatValue != nil:
		stream.WriteFloat64(float64(*v.FloatValue))
	case v.IntValue != nil:
		stream.WriteInt64(int64(*v.IntValue))
	case v.BooleanValue != nil:
		stream.WriteBool(bool(*v.BooleanValue))
	case v.StringValue != nil:
		stream.WriteString(string(*v.StringValue))
	case v.ListValue != nil:
		stream.WriteArrayStart()
		for i := range v.ListValue.Items {
			if i > 0 {
				stream.WriteMore()
			}
			v.ListValue.Items[i].WriteJSONStream(stream)
		}
		stream.WriteArrayEnd()
	case v.MapValue != nil:
		stream.WriteObjectStart()
		for i := range v.MapValue.Items {
			if i > 0 {
				stream.WriteMore()
			}
			stream.WriteObjectField(v.MapValue.Items[i].Name)
			v.MapValue.Items[i].Value.WriteJSONStream(stream)
		}
		stream.WriteObjectEnd()
	default:
		stream.Write([]byte("invalid_value"))
	}
}
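A small usage sketch for the new fast JSON helpers, not part of this commit; it assumes only FromJSONFast and ToJSONFast as defined above.

// Illustrative sketch, not part of this commit: decoding arbitrary JSON into
// a value.Value via the pooled jsoniter fast path and encoding it back.
package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/value"
)

func main() {
	v, err := value.FromJSONFast([]byte(`{"replicas":3,"paused":false}`))
	if err != nil {
		panic(err)
	}

	out, err := v.ToJSONFast()
	if err != nil {
		panic(err)
	}
	// Map items keep their original order, so this prints the input document.
	fmt.Println(string(out))
}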
53 vendor/sigs.k8s.io/structured-merge-diff/value/value.go (generated, vendored)

@@ -173,7 +173,7 @@ type Map struct {
 	order []int
 }
 
-func (m *Map) computeOrder() {
+func (m *Map) computeOrder() []int {
 	if len(m.order) != len(m.Items) {
 		m.order = make([]int, len(m.Items))
 		for i := range m.order {
@@ -183,28 +183,67 @@ func (m *Map) computeOrder() {
 			return m.Items[m.order[i]].Name < m.Items[m.order[j]].Name
 		})
 	}
+	return m.order
 }
 
 // Less compares two maps lexically.
 func (m *Map) Less(rhs *Map) bool {
-	m.computeOrder()
-	rhs.computeOrder()
+	var noAllocL, noAllocR [2]int
+	var morder, rorder []int
+
+	// For very short maps (<2 elements) this permits us to avoid
+	// allocating the order array. We could make this accomodate larger
+	// maps, but 2 items should be enough to cover most path element
+	// comparisons, and at some point there will be diminishing returns.
+	// This has a large effect on the path element deserialization test,
+	// because everything is sorted / compared, but only once.
+	switch len(m.Items) {
+	case 0:
+		morder = noAllocL[0:0]
+	case 1:
+		morder = noAllocL[0:1]
+	case 2:
+		morder = noAllocL[0:2]
+		if m.Items[0].Name > m.Items[1].Name {
+			morder[0] = 1
+		} else {
+			morder[1] = 1
+		}
+	default:
+		morder = m.computeOrder()
+	}
+
+	switch len(rhs.Items) {
+	case 0:
+		rorder = noAllocR[0:0]
+	case 1:
+		rorder = noAllocR[0:1]
+	case 2:
+		rorder = noAllocR[0:2]
+		if rhs.Items[0].Name > rhs.Items[1].Name {
+			rorder[0] = 1
+		} else {
+			rorder[1] = 1
+		}
+	default:
+		rorder = rhs.computeOrder()
+	}
 
 	i := 0
 	for {
-		if i >= len(m.order) && i >= len(rhs.order) {
+		if i >= len(morder) && i >= len(rorder) {
 			// Maps are the same length and all items are equal.
 			return false
 		}
-		if i >= len(m.order) {
+		if i >= len(morder) {
 			// LHS is shorter.
 			return true
 		}
-		if i >= len(rhs.order) {
+		if i >= len(rorder) {
 			// RHS is shorter.
 			return false
 		}
-		fa, fb := &m.Items[m.order[i]], &rhs.Items[rhs.order[i]]
+		fa, fb := &m.Items[morder[i]], &rhs.Items[rorder[i]]
 		if fa.Name != fb.Name {
 			// the map having the field name that sorts lexically less is "less"
 			return fa.Name < fb.Name
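The noAllocL/noAllocR arrays let Map.Less order maps of up to two items without heap allocation. The following standalone sketch of the same pattern is hypothetical and not part of this commit.

// Illustrative sketch, not part of this commit: the fixed-size order array
// trick used by Map.Less above, as a hypothetical standalone helper. The two
// local [2]int arrays stay on the stack, so comparing two-item inputs needs
// no heap allocation.
package main

import "fmt"

// lessSorted reports whether the names in a, viewed in sorted order, compare
// lexically less than the names in b, also viewed in sorted order.
func lessSorted(a, b [2]string) bool {
	var aorder, border [2]int
	if a[0] > a[1] {
		aorder[0] = 1 // a[1] sorts first
	} else {
		aorder[1] = 1
	}
	if b[0] > b[1] {
		border[0] = 1
	} else {
		border[1] = 1
	}
	for i := 0; i < 2; i++ {
		if a[aorder[i]] != b[border[i]] {
			return a[aorder[i]] < b[border[i]]
		}
	}
	return false // equal
}

func main() {
	// Sorted views: {"app", "name"} vs {"app", "zone"}; "name" < "zone".
	fmt.Println(lessSorted([2]string{"name", "app"}, [2]string{"app", "zone"})) // true
}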