diff --git a/apis/management.cattle.io/v3/schema/schema.go b/apis/management.cattle.io/v3/schema/schema.go index ce944f67..266bd74f 100644 --- a/apis/management.cattle.io/v3/schema/schema.go +++ b/apis/management.cattle.io/v3/schema/schema.go @@ -219,6 +219,7 @@ func nodeTypes(schemas *types.Schemas) *types.Schemas { AddMapperForType(&Version, v3.NodeStatus{}, &m.Drop{Field: "nodeTemplateSpec"}, &m.Embed{Field: "internalNodeStatus"}, + &m.Drop{Field: "config"}, &m.SliceMerge{From: []string{"conditions", "nodeConditions"}, To: "conditions"}). AddMapperForType(&Version, v3.Node{}, &m.Embed{Field: "status"}, diff --git a/client/management/v3/zz_generated_node.go b/client/management/v3/zz_generated_node.go index e82bc18c..6511bd04 100644 --- a/client/management/v3/zz_generated_node.go +++ b/client/management/v3/zz_generated_node.go @@ -11,7 +11,6 @@ const ( NodeFieldCapacity = "capacity" NodeFieldClusterID = "clusterId" NodeFieldConditions = "conditions" - NodeFieldConfig = "config" NodeFieldControlPlane = "controlPlane" NodeFieldCreated = "created" NodeFieldCreatorID = "creatorId" @@ -58,7 +57,6 @@ type Node struct { Capacity map[string]string `json:"capacity,omitempty" yaml:"capacity,omitempty"` ClusterID string `json:"clusterId,omitempty" yaml:"clusterId,omitempty"` Conditions []NodeCondition `json:"conditions,omitempty" yaml:"conditions,omitempty"` - Config *NodeConfigStatus `json:"config,omitempty" yaml:"config,omitempty"` ControlPlane bool `json:"controlPlane,omitempty" yaml:"controlPlane,omitempty"` Created string `json:"created,omitempty" yaml:"created,omitempty"` CreatorID string `json:"creatorId,omitempty" yaml:"creatorId,omitempty"` diff --git a/client/management/v3/zz_generated_node_status.go b/client/management/v3/zz_generated_node_status.go index 7af2df5b..3fbc7da0 100644 --- a/client/management/v3/zz_generated_node_status.go +++ b/client/management/v3/zz_generated_node_status.go @@ -5,7 +5,6 @@ const ( NodeStatusFieldAllocatable = "allocatable" NodeStatusFieldCapacity = "capacity" NodeStatusFieldConditions = "conditions" - NodeStatusFieldConfig = "config" NodeStatusFieldDockerInfo = "dockerInfo" NodeStatusFieldExternalIPAddress = "externalIpAddress" NodeStatusFieldHostname = "hostname" @@ -26,7 +25,6 @@ type NodeStatus struct { Allocatable map[string]string `json:"allocatable,omitempty" yaml:"allocatable,omitempty"` Capacity map[string]string `json:"capacity,omitempty" yaml:"capacity,omitempty"` Conditions []NodeCondition `json:"conditions,omitempty" yaml:"conditions,omitempty"` - Config *NodeConfigStatus `json:"config,omitempty" yaml:"config,omitempty"` DockerInfo *DockerInfo `json:"dockerInfo,omitempty" yaml:"dockerInfo,omitempty"` ExternalIPAddress string `json:"externalIpAddress,omitempty" yaml:"externalIpAddress,omitempty"` Hostname string `json:"hostname,omitempty" yaml:"hostname,omitempty"` diff --git a/vendor.conf b/vendor.conf index f5fad8ee..5ec3051c 100644 --- a/vendor.conf +++ b/vendor.conf @@ -2,4 +2,4 @@ github.com/rancher/types github.com/pkg/errors v0.8.0 -github.com/rancher/norman 4c3df5a3de57701e5ff57e13b45a3b219b5d9d33 transitive=true +github.com/rancher/norman dfeffc8a3f6840d6ea8ed4c63a9381dbbf9edb50 transitive=true diff --git a/vendor/github.com/golang/groupcache/.gitignore b/vendor/github.com/golang/groupcache/.gitignore deleted file mode 100644 index b25c15b8..00000000 --- a/vendor/github.com/golang/groupcache/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*~ diff --git a/vendor/github.com/golang/groupcache/LICENSE b/vendor/github.com/golang/groupcache/LICENSE 
deleted file mode 100644 index 37ec93a1..00000000 --- a/vendor/github.com/golang/groupcache/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. 
- -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. 
- -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. - -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/golang/groupcache/README.md b/vendor/github.com/golang/groupcache/README.md deleted file mode 100644 index 70c29da1..00000000 --- a/vendor/github.com/golang/groupcache/README.md +++ /dev/null @@ -1,73 +0,0 @@ -# groupcache - -## Summary - -groupcache is a caching and cache-filling library, intended as a -replacement for memcached in many cases. - -For API docs and examples, see http://godoc.org/github.com/golang/groupcache - -## Comparison to memcached - -### **Like memcached**, groupcache: - - * shards by key to select which peer is responsible for that key - -### **Unlike memcached**, groupcache: - - * does not require running a separate set of servers, thus massively - reducing deployment/configuration pain. groupcache is a client - library as well as a server. It connects to its own peers. - - * comes with a cache filling mechanism. Whereas memcached just says - "Sorry, cache miss", often resulting in a thundering herd of - database (or whatever) loads from an unbounded number of clients - (which has resulted in several fun outages), groupcache coordinates - cache fills such that only one load in one process of an entire - replicated set of processes populates the cache, then multiplexes - the loaded value to all callers. - - * does not support versioned values. If key "foo" is value "bar", - key "foo" must always be "bar". There are neither cache expiration - times, nor explicit cache evictions. Thus there is also no CAS, - nor Increment/Decrement. This also means that groupcache.... - - * ... supports automatic mirroring of super-hot items to multiple - processes. This prevents memcached hot spotting where a machine's - CPU and/or NIC are overloaded by very popular keys/values. - - * is currently only available for Go. It's very unlikely that I - (bradfitz@) will port the code to any other language. - -## Loading process - -In a nutshell, a groupcache lookup of **Get("foo")** looks like: - -(On machine #5 of a set of N machines running the same code) - - 1. Is the value of "foo" in local memory because it's super hot? If so, use it. - - 2. Is the value of "foo" in local memory because peer #5 (the current - peer) is the owner of it? If so, use it. - - 3. Amongst all the peers in my set of N, am I the owner of the key - "foo"? (e.g. does it consistent hash to 5?) If so, load it. If - other callers come in, via the same process or via RPC requests - from peers, they block waiting for the load to finish and get the - same answer. If not, RPC to the peer that's the owner and get - the answer. If the RPC fails, just load it locally (still with - local dup suppression). - -## Users - -groupcache is in production use by dl.google.com (its original user), -parts of Blogger, parts of Google Code, parts of Google Fiber, parts -of Google production monitoring systems, etc. - -## Presentations - -See http://talks.golang.org/2013/oscon-dl.slide - -## Help - -Use the golang-nuts mailing list for any discussion or questions. 
diff --git a/vendor/github.com/golang/groupcache/lru/lru.go b/vendor/github.com/golang/groupcache/lru/lru.go deleted file mode 100644 index cdfe2991..00000000 --- a/vendor/github.com/golang/groupcache/lru/lru.go +++ /dev/null @@ -1,121 +0,0 @@ -/* -Copyright 2013 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package lru implements an LRU cache. -package lru - -import "container/list" - -// Cache is an LRU cache. It is not safe for concurrent access. -type Cache struct { - // MaxEntries is the maximum number of cache entries before - // an item is evicted. Zero means no limit. - MaxEntries int - - // OnEvicted optionally specificies a callback function to be - // executed when an entry is purged from the cache. - OnEvicted func(key Key, value interface{}) - - ll *list.List - cache map[interface{}]*list.Element -} - -// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators -type Key interface{} - -type entry struct { - key Key - value interface{} -} - -// New creates a new Cache. -// If maxEntries is zero, the cache has no limit and it's assumed -// that eviction is done by the caller. -func New(maxEntries int) *Cache { - return &Cache{ - MaxEntries: maxEntries, - ll: list.New(), - cache: make(map[interface{}]*list.Element), - } -} - -// Add adds a value to the cache. -func (c *Cache) Add(key Key, value interface{}) { - if c.cache == nil { - c.cache = make(map[interface{}]*list.Element) - c.ll = list.New() - } - if ee, ok := c.cache[key]; ok { - c.ll.MoveToFront(ee) - ee.Value.(*entry).value = value - return - } - ele := c.ll.PushFront(&entry{key, value}) - c.cache[key] = ele - if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries { - c.RemoveOldest() - } -} - -// Get looks up a key's value from the cache. -func (c *Cache) Get(key Key) (value interface{}, ok bool) { - if c.cache == nil { - return - } - if ele, hit := c.cache[key]; hit { - c.ll.MoveToFront(ele) - return ele.Value.(*entry).value, true - } - return -} - -// Remove removes the provided key from the cache. -func (c *Cache) Remove(key Key) { - if c.cache == nil { - return - } - if ele, hit := c.cache[key]; hit { - c.removeElement(ele) - } -} - -// RemoveOldest removes the oldest item from the cache. -func (c *Cache) RemoveOldest() { - if c.cache == nil { - return - } - ele := c.ll.Back() - if ele != nil { - c.removeElement(ele) - } -} - -func (c *Cache) removeElement(e *list.Element) { - c.ll.Remove(e) - kv := e.Value.(*entry) - delete(c.cache, kv.key) - if c.OnEvicted != nil { - c.OnEvicted(kv.key, kv.value) - } -} - -// Len returns the number of items in the cache. 
-func (c *Cache) Len() int { - if c.cache == nil { - return 0 - } - return c.ll.Len() -} diff --git a/vendor/github.com/rancher/norman/vendor.conf b/vendor/github.com/rancher/norman/vendor.conf index dc59fb25..bb7d6b63 100644 --- a/vendor/github.com/rancher/norman/vendor.conf +++ b/vendor/github.com/rancher/norman/vendor.conf @@ -1,7 +1,7 @@ # package github.com/rancher/norman -k8s.io/kubernetes v1.12.1-lite5 https://github.com/ibuildthecloud/k3s.git transitive=true,staging=true +k8s.io/kubernetes v1.12.1-lite7 https://github.com/ibuildthecloud/k3s.git transitive=true,staging=true github.com/maruel/panicparse c0182c169410cfa80c7e8f046dad208eaef91338 bitbucket.org/ww/goautoneg a547fc61f48d567d5b4ec6f8aee5573d8efce11d https://github.com/rancher/goautoneg.git golang.org/x/sync fd80eb99c8f653c847d294a001bdf2a3a6f768f5 diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/BUILD b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/BUILD deleted file mode 100644 index 1ed8649a..00000000 --- a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/BUILD +++ /dev/null @@ -1,40 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_test( - name = "go_default_test", - srcs = ["util_test.go"], - embed = [":go_default_library"], -) - -go_library( - name = "go_default_library", - srcs = [ - "errors.go", - "util.go", - ], - importmap = "k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/mergepatch", - importpath = "k8s.io/apimachinery/pkg/util/mergepatch", - deps = [ - "//vendor/github.com/davecgh/go-spew/spew:go_default_library", - "//vendor/github.com/ghodss/yaml:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS deleted file mode 100644 index 8e8d9fce..00000000 --- a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS +++ /dev/null @@ -1,5 +0,0 @@ -approvers: -- pwittrock -reviewers: -- mengqiy -- apelisse diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go deleted file mode 100644 index 16501d5a..00000000 --- a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go +++ /dev/null @@ -1,102 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package mergepatch - -import ( - "errors" - "fmt" - "reflect" -) - -var ( - ErrBadJSONDoc = errors.New("invalid JSON document") - ErrNoListOfLists = errors.New("lists of lists are not supported") - ErrBadPatchFormatForPrimitiveList = errors.New("invalid patch format of primitive list") - ErrBadPatchFormatForRetainKeys = errors.New("invalid patch format of retainKeys") - ErrBadPatchFormatForSetElementOrderList = errors.New("invalid patch format of setElementOrder list") - ErrPatchContentNotMatchRetainKeys = errors.New("patch content doesn't match retainKeys list") - ErrUnsupportedStrategicMergePatchFormat = errors.New("strategic merge patch format is not supported") -) - -func ErrNoMergeKey(m map[string]interface{}, k string) error { - return fmt.Errorf("map: %v does not contain declared merge key: %s", m, k) -} - -func ErrBadArgType(expected, actual interface{}) error { - return fmt.Errorf("expected a %s, but received a %s", - reflect.TypeOf(expected), - reflect.TypeOf(actual)) -} - -func ErrBadArgKind(expected, actual interface{}) error { - var expectedKindString, actualKindString string - if expected == nil { - expectedKindString = "nil" - } else { - expectedKindString = reflect.TypeOf(expected).Kind().String() - } - if actual == nil { - actualKindString = "nil" - } else { - actualKindString = reflect.TypeOf(actual).Kind().String() - } - return fmt.Errorf("expected a %s, but received a %s", expectedKindString, actualKindString) -} - -func ErrBadPatchType(t interface{}, m map[string]interface{}) error { - return fmt.Errorf("unknown patch type: %s in map: %v", t, m) -} - -// IsPreconditionFailed returns true if the provided error indicates -// a precondition failed. -func IsPreconditionFailed(err error) bool { - _, ok := err.(ErrPreconditionFailed) - return ok -} - -type ErrPreconditionFailed struct { - message string -} - -func NewErrPreconditionFailed(target map[string]interface{}) ErrPreconditionFailed { - s := fmt.Sprintf("precondition failed for: %v", target) - return ErrPreconditionFailed{s} -} - -func (err ErrPreconditionFailed) Error() string { - return err.message -} - -type ErrConflict struct { - message string -} - -func NewErrConflict(patch, current string) ErrConflict { - s := fmt.Sprintf("patch:\n%s\nconflicts with changes made from original to current:\n%s\n", patch, current) - return ErrConflict{s} -} - -func (err ErrConflict) Error() string { - return err.message -} - -// IsConflict returns true if the provided error indicates -// a conflict between the patch and the current configuration. -func IsConflict(err error) bool { - _, ok := err.(ErrConflict) - return ok -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go deleted file mode 100644 index d09a939b..00000000 --- a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go +++ /dev/null @@ -1,133 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package mergepatch - -import ( - "fmt" - "reflect" - - "github.com/davecgh/go-spew/spew" - "github.com/ghodss/yaml" -) - -// PreconditionFunc asserts that an incompatible change is not present within a patch. -type PreconditionFunc func(interface{}) bool - -// RequireKeyUnchanged returns a precondition function that fails if the provided key -// is present in the patch (indicating that its value has changed). -func RequireKeyUnchanged(key string) PreconditionFunc { - return func(patch interface{}) bool { - patchMap, ok := patch.(map[string]interface{}) - if !ok { - return true - } - - // The presence of key means that its value has been changed, so the test fails. - _, ok = patchMap[key] - return !ok - } -} - -// RequireMetadataKeyUnchanged creates a precondition function that fails -// if the metadata.key is present in the patch (indicating its value -// has changed). -func RequireMetadataKeyUnchanged(key string) PreconditionFunc { - return func(patch interface{}) bool { - patchMap, ok := patch.(map[string]interface{}) - if !ok { - return true - } - patchMap1, ok := patchMap["metadata"] - if !ok { - return true - } - patchMap2, ok := patchMap1.(map[string]interface{}) - if !ok { - return true - } - _, ok = patchMap2[key] - return !ok - } -} - -func ToYAMLOrError(v interface{}) string { - y, err := toYAML(v) - if err != nil { - return err.Error() - } - - return y -} - -func toYAML(v interface{}) (string, error) { - y, err := yaml.Marshal(v) - if err != nil { - return "", fmt.Errorf("yaml marshal failed:%v\n%v\n", err, spew.Sdump(v)) - } - - return string(y), nil -} - -// HasConflicts returns true if the left and right JSON interface objects overlap with -// different values in any key. All keys are required to be strings. Since patches of the -// same Type have congruent keys, this is valid for multiple patch types. This method -// supports JSON merge patch semantics. -// -// NOTE: Numbers with different types (e.g. int(0) vs int64(0)) will be detected as conflicts. -// Make sure the unmarshaling of left and right are consistent (e.g. use the same library). 
-func HasConflicts(left, right interface{}) (bool, error) { - switch typedLeft := left.(type) { - case map[string]interface{}: - switch typedRight := right.(type) { - case map[string]interface{}: - for key, leftValue := range typedLeft { - rightValue, ok := typedRight[key] - if !ok { - continue - } - if conflict, err := HasConflicts(leftValue, rightValue); err != nil || conflict { - return conflict, err - } - } - - return false, nil - default: - return true, nil - } - case []interface{}: - switch typedRight := right.(type) { - case []interface{}: - if len(typedLeft) != len(typedRight) { - return true, nil - } - - for i := range typedLeft { - if conflict, err := HasConflicts(typedLeft[i], typedRight[i]); err != nil || conflict { - return conflict, err - } - } - - return false, nil - default: - return true, nil - } - case string, float64, bool, int64, nil: - return !reflect.DeepEqual(left, right), nil - default: - return true, fmt.Errorf("unknown type: %v", reflect.TypeOf(left)) - } -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/BUILD b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/BUILD deleted file mode 100644 index e0a947ec..00000000 --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/BUILD +++ /dev/null @@ -1,61 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_test( - name = "go_default_test", - srcs = ["patch_test.go"], - data = [ - "testdata/swagger-merge-item.json", - "testdata/swagger-precision-item.json", - ], - embed = [":go_default_library"], - deps = [ - "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/json:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/mergepatch:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/testing:go_default_library", - "//vendor/github.com/davecgh/go-spew/spew:go_default_library", - "//vendor/github.com/ghodss/yaml:go_default_library", - ], -) - -go_library( - name = "go_default_library", - srcs = [ - "errors.go", - "meta.go", - "patch.go", - "types.go", - ], - importmap = "k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/strategicpatch", - importpath = "k8s.io/apimachinery/pkg/util/strategicpatch", - deps = [ - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/json:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/mergepatch:go_default_library", - "//staging/src/k8s.io/apimachinery/third_party/forked/golang/json:go_default_library", - "//vendor/k8s.io/kube-openapi/pkg/util/proto:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/testing:all-srcs", - ], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS deleted file mode 100644 index dbbe0de4..00000000 --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -approvers: -- pwittrock -- mengqiy -reviewers: -- mengqiy -- apelisse diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go 
b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go deleted file mode 100644 index ab66d045..00000000 --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package strategicpatch - -import ( - "fmt" -) - -type LookupPatchMetaError struct { - Path string - Err error -} - -func (e LookupPatchMetaError) Error() string { - return fmt.Sprintf("LookupPatchMetaError(%s): %v", e.Path, e.Err) -} - -type FieldNotFoundError struct { - Path string - Field string -} - -func (e FieldNotFoundError) Error() string { - return fmt.Sprintf("unable to find api field %q in %s", e.Field, e.Path) -} - -type InvalidTypeError struct { - Path string - Expected string - Actual string -} - -func (e InvalidTypeError) Error() string { - return fmt.Sprintf("invalid type for %s: got %q, expected %q", e.Path, e.Actual, e.Expected) -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go deleted file mode 100644 index 1b9d0d3f..00000000 --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go +++ /dev/null @@ -1,148 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package strategicpatch - -import ( - "errors" - "fmt" - "reflect" - - "k8s.io/apimachinery/pkg/util/mergepatch" - forkedjson "k8s.io/apimachinery/third_party/forked/golang/json" -) - -type PatchMeta struct { - patchStrategies []string - patchMergeKey string -} - -func (pm PatchMeta) GetPatchStrategies() []string { - if pm.patchStrategies == nil { - return []string{} - } - return pm.patchStrategies -} - -func (pm PatchMeta) SetPatchStrategies(ps []string) { - pm.patchStrategies = ps -} - -func (pm PatchMeta) GetPatchMergeKey() string { - return pm.patchMergeKey -} - -func (pm PatchMeta) SetPatchMergeKey(pmk string) { - pm.patchMergeKey = pmk -} - -type LookupPatchMeta interface { - // LookupPatchMetadataForStruct gets subschema and the patch metadata (e.g. patch strategy and merge key) for map. - LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) - // LookupPatchMetadataForSlice get subschema and the patch metadata for slice. 
- LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) - // Get the type name of the field - Name() string -} - -type PatchMetaFromStruct struct { - T reflect.Type -} - -func NewPatchMetaFromStruct(dataStruct interface{}) (PatchMetaFromStruct, error) { - t, err := getTagStructType(dataStruct) - return PatchMetaFromStruct{T: t}, err -} - -var _ LookupPatchMeta = PatchMetaFromStruct{} - -func (s PatchMetaFromStruct) LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) { - fieldType, fieldPatchStrategies, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadataForStruct(s.T, key) - if err != nil { - return nil, PatchMeta{}, err - } - - return PatchMetaFromStruct{T: fieldType}, - PatchMeta{ - patchStrategies: fieldPatchStrategies, - patchMergeKey: fieldPatchMergeKey, - }, nil -} - -func (s PatchMetaFromStruct) LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) { - subschema, patchMeta, err := s.LookupPatchMetadataForStruct(key) - if err != nil { - return nil, PatchMeta{}, err - } - elemPatchMetaFromStruct := subschema.(PatchMetaFromStruct) - t := elemPatchMetaFromStruct.T - - var elemType reflect.Type - switch t.Kind() { - // If t is an array or a slice, get the element type. - // If element is still an array or a slice, return an error. - // Otherwise, return element type. - case reflect.Array, reflect.Slice: - elemType = t.Elem() - if elemType.Kind() == reflect.Array || elemType.Kind() == reflect.Slice { - return nil, PatchMeta{}, errors.New("unexpected slice of slice") - } - // If t is an pointer, get the underlying element. - // If the underlying element is neither an array nor a slice, the pointer is pointing to a slice, - // e.g. https://github.com/kubernetes/kubernetes/blob/bc22e206c79282487ea0bf5696d5ccec7e839a76/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch_test.go#L2782-L2822 - // If the underlying element is either an array or a slice, return its element type. - case reflect.Ptr: - t = t.Elem() - if t.Kind() == reflect.Array || t.Kind() == reflect.Slice { - t = t.Elem() - } - elemType = t - default: - return nil, PatchMeta{}, fmt.Errorf("expected slice or array type, but got: %s", s.T.Kind().String()) - } - - return PatchMetaFromStruct{T: elemType}, patchMeta, nil -} - -func (s PatchMetaFromStruct) Name() string { - return s.T.Kind().String() -} - -func getTagStructType(dataStruct interface{}) (reflect.Type, error) { - if dataStruct == nil { - return nil, mergepatch.ErrBadArgKind(struct{}{}, nil) - } - - t := reflect.TypeOf(dataStruct) - // Get the underlying type for pointers - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - - if t.Kind() != reflect.Struct { - return nil, mergepatch.ErrBadArgKind(struct{}{}, dataStruct) - } - - return t, nil -} - -func GetTagStructTypeOrDie(dataStruct interface{}) reflect.Type { - t, err := getTagStructType(dataStruct) - if err != nil { - panic(err) - } - return t -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go deleted file mode 100644 index ddf99817..00000000 --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go +++ /dev/null @@ -1,2174 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package strategicpatch - -import ( - "fmt" - "reflect" - "sort" - "strings" - - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/util/json" - "k8s.io/apimachinery/pkg/util/mergepatch" -) - -// An alternate implementation of JSON Merge Patch -// (https://tools.ietf.org/html/rfc7386) which supports the ability to annotate -// certain fields with metadata that indicates whether the elements of JSON -// lists should be merged or replaced. -// -// For more information, see the PATCH section of docs/devel/api-conventions.md. -// -// Some of the content of this package was borrowed with minor adaptations from -// evanphx/json-patch and openshift/origin. - -const ( - directiveMarker = "$patch" - deleteDirective = "delete" - replaceDirective = "replace" - mergeDirective = "merge" - - retainKeysStrategy = "retainKeys" - - deleteFromPrimitiveListDirectivePrefix = "$deleteFromPrimitiveList" - retainKeysDirective = "$" + retainKeysStrategy - setElementOrderDirectivePrefix = "$setElementOrder" -) - -// JSONMap is a representations of JSON object encoded as map[string]interface{} -// where the children can be either map[string]interface{}, []interface{} or -// primitive type). -// Operating on JSONMap representation is much faster as it doesn't require any -// json marshaling and/or unmarshaling operations. -type JSONMap map[string]interface{} - -type DiffOptions struct { - // SetElementOrder determines whether we generate the $setElementOrder parallel list. - SetElementOrder bool - // IgnoreChangesAndAdditions indicates if we keep the changes and additions in the patch. - IgnoreChangesAndAdditions bool - // IgnoreDeletions indicates if we keep the deletions in the patch. - IgnoreDeletions bool - // We introduce a new value retainKeys for patchStrategy. - // It indicates that all fields needing to be preserved must be - // present in the `retainKeys` list. - // And the fields that are present will be merged with live object. - // All the missing fields will be cleared when patching. - BuildRetainKeysDirective bool -} - -type MergeOptions struct { - // MergeParallelList indicates if we are merging the parallel list. - // We don't merge parallel list when calling mergeMap() in CreateThreeWayMergePatch() - // which is called client-side. - // We merge parallel list iff when calling mergeMap() in StrategicMergeMapPatch() - // which is called server-side - MergeParallelList bool - // IgnoreUnmatchedNulls indicates if we should process the unmatched nulls. - IgnoreUnmatchedNulls bool -} - -// The following code is adapted from github.com/openshift/origin/pkg/util/jsonmerge. -// Instead of defining a Delta that holds an original, a patch and a set of preconditions, -// the reconcile method accepts a set of preconditions as an argument. - -// CreateTwoWayMergePatch creates a patch that can be passed to StrategicMergePatch from an original -// document and a modified document, which are passed to the method as json encoded content. 
It will -// return a patch that yields the modified document when applied to the original document, or an error -// if either of the two documents is invalid. -func CreateTwoWayMergePatch(original, modified []byte, dataStruct interface{}, fns ...mergepatch.PreconditionFunc) ([]byte, error) { - schema, err := NewPatchMetaFromStruct(dataStruct) - if err != nil { - return nil, err - } - - return CreateTwoWayMergePatchUsingLookupPatchMeta(original, modified, schema, fns...) -} - -func CreateTwoWayMergePatchUsingLookupPatchMeta( - original, modified []byte, schema LookupPatchMeta, fns ...mergepatch.PreconditionFunc) ([]byte, error) { - originalMap := map[string]interface{}{} - if len(original) > 0 { - if err := json.Unmarshal(original, &originalMap); err != nil { - return nil, mergepatch.ErrBadJSONDoc - } - } - - modifiedMap := map[string]interface{}{} - if len(modified) > 0 { - if err := json.Unmarshal(modified, &modifiedMap); err != nil { - return nil, mergepatch.ErrBadJSONDoc - } - } - - patchMap, err := CreateTwoWayMergeMapPatchUsingLookupPatchMeta(originalMap, modifiedMap, schema, fns...) - if err != nil { - return nil, err - } - - return json.Marshal(patchMap) -} - -// CreateTwoWayMergeMapPatch creates a patch from an original and modified JSON objects, -// encoded JSONMap. -// The serialized version of the map can then be passed to StrategicMergeMapPatch. -func CreateTwoWayMergeMapPatch(original, modified JSONMap, dataStruct interface{}, fns ...mergepatch.PreconditionFunc) (JSONMap, error) { - schema, err := NewPatchMetaFromStruct(dataStruct) - if err != nil { - return nil, err - } - - return CreateTwoWayMergeMapPatchUsingLookupPatchMeta(original, modified, schema, fns...) -} - -func CreateTwoWayMergeMapPatchUsingLookupPatchMeta(original, modified JSONMap, schema LookupPatchMeta, fns ...mergepatch.PreconditionFunc) (JSONMap, error) { - diffOptions := DiffOptions{ - SetElementOrder: true, - } - patchMap, err := diffMaps(original, modified, schema, diffOptions) - if err != nil { - return nil, err - } - - // Apply the preconditions to the patch, and return an error if any of them fail. - for _, fn := range fns { - if !fn(patchMap) { - return nil, mergepatch.NewErrPreconditionFailed(patchMap) - } - } - - return patchMap, nil -} - -// Returns a (recursive) strategic merge patch that yields modified when applied to original. 
-// Including: -// - Adding fields to the patch present in modified, missing from original -// - Setting fields to the patch present in modified and original with different values -// - Delete fields present in original, missing from modified through -// - IFF map field - set to nil in patch -// - IFF list of maps && merge strategy - use deleteDirective for the elements -// - IFF list of primitives && merge strategy - use parallel deletion list -// - IFF list of maps or primitives with replace strategy (default) - set patch value to the value in modified -// - Build $retainKeys directive for fields with retainKeys patch strategy -func diffMaps(original, modified map[string]interface{}, schema LookupPatchMeta, diffOptions DiffOptions) (map[string]interface{}, error) { - patch := map[string]interface{}{} - - // This will be used to build the $retainKeys directive sent in the patch - retainKeysList := make([]interface{}, 0, len(modified)) - - // Compare each value in the modified map against the value in the original map - for key, modifiedValue := range modified { - // Get the underlying type for pointers - if diffOptions.BuildRetainKeysDirective && modifiedValue != nil { - retainKeysList = append(retainKeysList, key) - } - - originalValue, ok := original[key] - if !ok { - // Key was added, so add to patch - if !diffOptions.IgnoreChangesAndAdditions { - patch[key] = modifiedValue - } - continue - } - - // The patch may have a patch directive - // TODO: figure out if we need this. This shouldn't be needed by apply. When would the original map have patch directives in it? - foundDirectiveMarker, err := handleDirectiveMarker(key, originalValue, modifiedValue, patch) - if err != nil { - return nil, err - } - if foundDirectiveMarker { - continue - } - - if reflect.TypeOf(originalValue) != reflect.TypeOf(modifiedValue) { - // Types have changed, so add to patch - if !diffOptions.IgnoreChangesAndAdditions { - patch[key] = modifiedValue - } - continue - } - - // Types are the same, so compare values - switch originalValueTyped := originalValue.(type) { - case map[string]interface{}: - modifiedValueTyped := modifiedValue.(map[string]interface{}) - err = handleMapDiff(key, originalValueTyped, modifiedValueTyped, patch, schema, diffOptions) - case []interface{}: - modifiedValueTyped := modifiedValue.([]interface{}) - err = handleSliceDiff(key, originalValueTyped, modifiedValueTyped, patch, schema, diffOptions) - default: - replacePatchFieldIfNotEqual(key, originalValue, modifiedValue, patch, diffOptions) - } - if err != nil { - return nil, err - } - } - - updatePatchIfMissing(original, modified, patch, diffOptions) - // Insert the retainKeysList iff there are values present in the retainKeysList and - // either of the following is true: - // - the patch is not empty - // - there are additional field in original that need to be cleared - if len(retainKeysList) > 0 && - (len(patch) > 0 || hasAdditionalNewField(original, modified)) { - patch[retainKeysDirective] = sortScalars(retainKeysList) - } - return patch, nil -} - -// handleDirectiveMarker handles how to diff directive marker between 2 objects -func handleDirectiveMarker(key string, originalValue, modifiedValue interface{}, patch map[string]interface{}) (bool, error) { - if key == directiveMarker { - originalString, ok := originalValue.(string) - if !ok { - return false, fmt.Errorf("invalid value for special key: %s", directiveMarker) - } - modifiedString, ok := modifiedValue.(string) - if !ok { - return false, fmt.Errorf("invalid value for special 
key: %s", directiveMarker) - } - if modifiedString != originalString { - patch[directiveMarker] = modifiedValue - } - return true, nil - } - return false, nil -} - -// handleMapDiff diff between 2 maps `originalValueTyped` and `modifiedValue`, -// puts the diff in the `patch` associated with `key` -// key is the key associated with originalValue and modifiedValue. -// originalValue, modifiedValue are the old and new value respectively.They are both maps -// patch is the patch map that contains key and the updated value, and it is the parent of originalValue, modifiedValue -// diffOptions contains multiple options to control how we do the diff. -func handleMapDiff(key string, originalValue, modifiedValue, patch map[string]interface{}, - schema LookupPatchMeta, diffOptions DiffOptions) error { - subschema, patchMeta, err := schema.LookupPatchMetadataForStruct(key) - - if err != nil { - // We couldn't look up metadata for the field - // If the values are identical, this doesn't matter, no patch is needed - if reflect.DeepEqual(originalValue, modifiedValue) { - return nil - } - // Otherwise, return the error - return err - } - retainKeys, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) - if err != nil { - return err - } - diffOptions.BuildRetainKeysDirective = retainKeys - switch patchStrategy { - // The patch strategic from metadata tells us to replace the entire object instead of diffing it - case replaceDirective: - if !diffOptions.IgnoreChangesAndAdditions { - patch[key] = modifiedValue - } - default: - patchValue, err := diffMaps(originalValue, modifiedValue, subschema, diffOptions) - if err != nil { - return err - } - // Maps were not identical, use provided patch value - if len(patchValue) > 0 { - patch[key] = patchValue - } - } - return nil -} - -// handleSliceDiff diff between 2 slices `originalValueTyped` and `modifiedValue`, -// puts the diff in the `patch` associated with `key` -// key is the key associated with originalValue and modifiedValue. -// originalValue, modifiedValue are the old and new value respectively.They are both slices -// patch is the patch map that contains key and the updated value, and it is the parent of originalValue, modifiedValue -// diffOptions contains multiple options to control how we do the diff. 
-func handleSliceDiff(key string, originalValue, modifiedValue []interface{}, patch map[string]interface{}, - schema LookupPatchMeta, diffOptions DiffOptions) error { - subschema, patchMeta, err := schema.LookupPatchMetadataForSlice(key) - if err != nil { - // We couldn't look up metadata for the field - // If the values are identical, this doesn't matter, no patch is needed - if reflect.DeepEqual(originalValue, modifiedValue) { - return nil - } - // Otherwise, return the error - return err - } - retainKeys, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) - if err != nil { - return err - } - switch patchStrategy { - // Merge the 2 slices using mergePatchKey - case mergeDirective: - diffOptions.BuildRetainKeysDirective = retainKeys - addList, deletionList, setOrderList, err := diffLists(originalValue, modifiedValue, subschema, patchMeta.GetPatchMergeKey(), diffOptions) - if err != nil { - return err - } - if len(addList) > 0 { - patch[key] = addList - } - // generate a parallel list for deletion - if len(deletionList) > 0 { - parallelDeletionListKey := fmt.Sprintf("%s/%s", deleteFromPrimitiveListDirectivePrefix, key) - patch[parallelDeletionListKey] = deletionList - } - if len(setOrderList) > 0 { - parallelSetOrderListKey := fmt.Sprintf("%s/%s", setElementOrderDirectivePrefix, key) - patch[parallelSetOrderListKey] = setOrderList - } - default: - replacePatchFieldIfNotEqual(key, originalValue, modifiedValue, patch, diffOptions) - } - return nil -} - -// replacePatchFieldIfNotEqual updates the patch if original and modified are not deep equal -// if diffOptions.IgnoreChangesAndAdditions is false. -// original is the old value, maybe either the live cluster object or the last applied configuration -// modified is the new value, is always the users new config -func replacePatchFieldIfNotEqual(key string, original, modified interface{}, - patch map[string]interface{}, diffOptions DiffOptions) { - if diffOptions.IgnoreChangesAndAdditions { - // Ignoring changes - do nothing - return - } - if reflect.DeepEqual(original, modified) { - // Contents are identical - do nothing - return - } - // Create a patch to replace the old value with the new one - patch[key] = modified -} - -// updatePatchIfMissing iterates over `original` when ignoreDeletions is false. -// Clear the field whose key is not present in `modified`. -// original is the old value, maybe either the live cluster object or the last applied configuration -// modified is the new value, is always the users new config -func updatePatchIfMissing(original, modified, patch map[string]interface{}, diffOptions DiffOptions) { - if diffOptions.IgnoreDeletions { - // Ignoring deletion - do nothing - return - } - // Add nils for deleted values - for key := range original { - if _, found := modified[key]; !found { - patch[key] = nil - } - } -} - -// validateMergeKeyInLists checks if each map in the list has the mentryerge key. -func validateMergeKeyInLists(mergeKey string, lists ...[]interface{}) error { - for _, list := range lists { - for _, item := range list { - m, ok := item.(map[string]interface{}) - if !ok { - return mergepatch.ErrBadArgType(m, item) - } - if _, ok = m[mergeKey]; !ok { - return mergepatch.ErrNoMergeKey(m, mergeKey) - } - } - } - return nil -} - -// normalizeElementOrder sort `patch` list by `patchOrder` and sort `serverOnly` list by `serverOrder`. -// Then it merges the 2 sorted lists. -// It guarantee the relative order in the patch list and in the serverOnly list is kept. 
-// `patch` is a list of items in the patch, and `serverOnly` is a list of items in the live object. -// `patchOrder` is the order we want `patch` list to have and -// `serverOrder` is the order we want `serverOnly` list to have. -// kind is the kind of each item in the lists `patch` and `serverOnly`. -func normalizeElementOrder(patch, serverOnly, patchOrder, serverOrder []interface{}, mergeKey string, kind reflect.Kind) ([]interface{}, error) { - patch, err := normalizeSliceOrder(patch, patchOrder, mergeKey, kind) - if err != nil { - return nil, err - } - serverOnly, err = normalizeSliceOrder(serverOnly, serverOrder, mergeKey, kind) - if err != nil { - return nil, err - } - all := mergeSortedSlice(serverOnly, patch, serverOrder, mergeKey, kind) - - return all, nil -} - -// mergeSortedSlice merges the 2 sorted lists by serverOrder with best effort. -// It will insert each item in `left` list to `right` list. In most cases, the 2 lists will be interleaved. -// The relative order of left and right are guaranteed to be kept. -// They have higher precedence than the order in the live list. -// The place for a item in `left` is found by: -// scan from the place of last insertion in `right` to the end of `right`, -// the place is before the first item that is greater than the item we want to insert. -// example usage: using server-only items as left and patch items as right. We insert server-only items -// to patch list. We use the order of live object as record for comparison. -func mergeSortedSlice(left, right, serverOrder []interface{}, mergeKey string, kind reflect.Kind) []interface{} { - // Returns if l is less than r, and if both have been found. - // If l and r both present and l is in front of r, l is less than r. - less := func(l, r interface{}) (bool, bool) { - li := index(serverOrder, l, mergeKey, kind) - ri := index(serverOrder, r, mergeKey, kind) - if li >= 0 && ri >= 0 { - return li < ri, true - } else { - return false, false - } - } - - // left and right should be non-overlapping. - size := len(left) + len(right) - i, j := 0, 0 - s := make([]interface{}, size, size) - - for k := 0; k < size; k++ { - if i >= len(left) && j < len(right) { - // have items left in `right` list - s[k] = right[j] - j++ - } else if j >= len(right) && i < len(left) { - // have items left in `left` list - s[k] = left[i] - i++ - } else { - // compare them if i and j are both in bound - less, foundBoth := less(left[i], right[j]) - if foundBoth && less { - s[k] = left[i] - i++ - } else { - s[k] = right[j] - j++ - } - } - } - return s -} - -// index returns the index of the item in the given items, or -1 if it doesn't exist -// l must NOT be a slice of slices, this should be checked before calling. -func index(l []interface{}, valToLookUp interface{}, mergeKey string, kind reflect.Kind) int { - var getValFn func(interface{}) interface{} - // Get the correct `getValFn` based on item `kind`. - // It should return the value of merge key for maps and - // return the item for other kinds. 
- switch kind { - case reflect.Map: - getValFn = func(item interface{}) interface{} { - typedItem, ok := item.(map[string]interface{}) - if !ok { - return nil - } - val := typedItem[mergeKey] - return val - } - default: - getValFn = func(item interface{}) interface{} { - return item - } - } - - for i, v := range l { - if getValFn(valToLookUp) == getValFn(v) { - return i - } - } - return -1 -} - -// extractToDeleteItems takes a list and -// returns 2 lists: one contains items that should be kept and the other contains items to be deleted. -func extractToDeleteItems(l []interface{}) ([]interface{}, []interface{}, error) { - var nonDelete, toDelete []interface{} - for _, v := range l { - m, ok := v.(map[string]interface{}) - if !ok { - return nil, nil, mergepatch.ErrBadArgType(m, v) - } - - directive, foundDirective := m[directiveMarker] - if foundDirective && directive == deleteDirective { - toDelete = append(toDelete, v) - } else { - nonDelete = append(nonDelete, v) - } - } - return nonDelete, toDelete, nil -} - -// normalizeSliceOrder sort `toSort` list by `order` -func normalizeSliceOrder(toSort, order []interface{}, mergeKey string, kind reflect.Kind) ([]interface{}, error) { - var toDelete []interface{} - if kind == reflect.Map { - // make sure each item in toSort, order has merge key - err := validateMergeKeyInLists(mergeKey, toSort, order) - if err != nil { - return nil, err - } - toSort, toDelete, err = extractToDeleteItems(toSort) - if err != nil { - return nil, err - } - } - - sort.SliceStable(toSort, func(i, j int) bool { - if ii := index(order, toSort[i], mergeKey, kind); ii >= 0 { - if ij := index(order, toSort[j], mergeKey, kind); ij >= 0 { - return ii < ij - } - } - return true - }) - toSort = append(toSort, toDelete...) - return toSort, nil -} - -// Returns a (recursive) strategic merge patch, a parallel deletion list if necessary and -// another list to set the order of the list -// Only list of primitives with merge strategy will generate a parallel deletion list. -// These two lists should yield modified when applied to original, for lists with merge semantics. -func diffLists(original, modified []interface{}, schema LookupPatchMeta, mergeKey string, diffOptions DiffOptions) ([]interface{}, []interface{}, []interface{}, error) { - if len(original) == 0 { - // Both slices are empty - do nothing - if len(modified) == 0 || diffOptions.IgnoreChangesAndAdditions { - return nil, nil, nil, nil - } - - // Old slice was empty - add all elements from the new slice - return modified, nil, nil, nil - } - - elementType, err := sliceElementType(original, modified) - if err != nil { - return nil, nil, nil, err - } - - var patchList, deleteList, setOrderList []interface{} - kind := elementType.Kind() - switch kind { - case reflect.Map: - patchList, deleteList, err = diffListsOfMaps(original, modified, schema, mergeKey, diffOptions) - if err != nil { - return nil, nil, nil, err - } - patchList, err = normalizeSliceOrder(patchList, modified, mergeKey, kind) - if err != nil { - return nil, nil, nil, err - } - orderSame, err := isOrderSame(original, modified, mergeKey) - if err != nil { - return nil, nil, nil, err - } - // append the deletions to the end of the patch list. - patchList = append(patchList, deleteList...) 
- deleteList = nil
- // generate the setElementOrder list when there are content changes or order changes
- if diffOptions.SetElementOrder &&
- ((!diffOptions.IgnoreChangesAndAdditions && (len(patchList) > 0 || !orderSame)) ||
- (!diffOptions.IgnoreDeletions && len(patchList) > 0)) {
- // Generate a list of maps in which each item contains only the merge key.
- setOrderList = make([]interface{}, len(modified))
- for i, v := range modified {
- typedV := v.(map[string]interface{})
- setOrderList[i] = map[string]interface{}{
- mergeKey: typedV[mergeKey],
- }
- }
- }
- case reflect.Slice:
- // Lists of lists are not permitted by the API
- return nil, nil, nil, mergepatch.ErrNoListOfLists
- default:
- patchList, deleteList, err = diffListsOfScalars(original, modified, diffOptions)
- if err != nil {
- return nil, nil, nil, err
- }
- patchList, err = normalizeSliceOrder(patchList, modified, mergeKey, kind)
- // generate the setElementOrder list when there are content changes or order changes
- if diffOptions.SetElementOrder && ((!diffOptions.IgnoreDeletions && len(deleteList) > 0) ||
- (!diffOptions.IgnoreChangesAndAdditions && !reflect.DeepEqual(original, modified))) {
- setOrderList = modified
- }
- }
- return patchList, deleteList, setOrderList, err
-}
-
-// isOrderSame checks if the order in a list has changed
-func isOrderSame(original, modified []interface{}, mergeKey string) (bool, error) {
- if len(original) != len(modified) {
- return false, nil
- }
- for i, modifiedItem := range modified {
- equal, err := mergeKeyValueEqual(original[i], modifiedItem, mergeKey)
- if err != nil || !equal {
- return equal, err
- }
- }
- return true, nil
-}
-
-// diffListsOfScalars returns 2 lists: the first one is addList and the second one is deletionList.
-// diffOptions.IgnoreChangesAndAdditions controls whether addList is calculated; true means it is not.
-// diffOptions.IgnoreDeletions controls whether deletionList is calculated; true means it is not.
-// original may be changed, but modified is guaranteed to not be changed
-func diffListsOfScalars(original, modified []interface{}, diffOptions DiffOptions) ([]interface{}, []interface{}, error) {
- modifiedCopy := make([]interface{}, len(modified))
- copy(modifiedCopy, modified)
- // Sort the scalars to make calculating the diff easier
- originalScalars := sortScalars(original)
- modifiedScalars := sortScalars(modifiedCopy)
-
- originalIndex, modifiedIndex := 0, 0
- addList := []interface{}{}
- deletionList := []interface{}{}
-
- for {
- originalInBounds := originalIndex < len(originalScalars)
- modifiedInBounds := modifiedIndex < len(modifiedScalars)
- if !originalInBounds && !modifiedInBounds {
- break
- }
- // we need to compare the string representations of the scalars,
- // because a scalar is stored in an interface, which doesn't support < or >;
- // this is also how func sortScalars compares scalars.
- var originalString, modifiedString string - var originalValue, modifiedValue interface{} - if originalInBounds { - originalValue = originalScalars[originalIndex] - originalString = fmt.Sprintf("%v", originalValue) - } - if modifiedInBounds { - modifiedValue = modifiedScalars[modifiedIndex] - modifiedString = fmt.Sprintf("%v", modifiedValue) - } - - originalV, modifiedV := compareListValuesAtIndex(originalInBounds, modifiedInBounds, originalString, modifiedString) - switch { - case originalV == nil && modifiedV == nil: - originalIndex++ - modifiedIndex++ - case originalV != nil && modifiedV == nil: - if !diffOptions.IgnoreDeletions { - deletionList = append(deletionList, originalValue) - } - originalIndex++ - case originalV == nil && modifiedV != nil: - if !diffOptions.IgnoreChangesAndAdditions { - addList = append(addList, modifiedValue) - } - modifiedIndex++ - default: - return nil, nil, fmt.Errorf("Unexpected returned value from compareListValuesAtIndex: %v and %v", originalV, modifiedV) - } - } - - return addList, deduplicateScalars(deletionList), nil -} - -// If first return value is non-nil, list1 contains an element not present in list2 -// If second return value is non-nil, list2 contains an element not present in list1 -func compareListValuesAtIndex(list1Inbounds, list2Inbounds bool, list1Value, list2Value string) (interface{}, interface{}) { - bothInBounds := list1Inbounds && list2Inbounds - switch { - // scalars are identical - case bothInBounds && list1Value == list2Value: - return nil, nil - // only list2 is in bound - case !list1Inbounds: - fallthrough - // list2 has additional scalar - case bothInBounds && list1Value > list2Value: - return nil, list2Value - // only original is in bound - case !list2Inbounds: - fallthrough - // original has additional scalar - case bothInBounds && list1Value < list2Value: - return list1Value, nil - default: - return nil, nil - } -} - -// diffListsOfMaps takes a pair of lists and -// returns a (recursive) strategic merge patch list contains additions and changes and -// a deletion list contains deletions -func diffListsOfMaps(original, modified []interface{}, schema LookupPatchMeta, mergeKey string, diffOptions DiffOptions) ([]interface{}, []interface{}, error) { - patch := make([]interface{}, 0, len(modified)) - deletionList := make([]interface{}, 0, len(original)) - - originalSorted, err := sortMergeListsByNameArray(original, schema, mergeKey, false) - if err != nil { - return nil, nil, err - } - modifiedSorted, err := sortMergeListsByNameArray(modified, schema, mergeKey, false) - if err != nil { - return nil, nil, err - } - - originalIndex, modifiedIndex := 0, 0 - for { - originalInBounds := originalIndex < len(originalSorted) - modifiedInBounds := modifiedIndex < len(modifiedSorted) - bothInBounds := originalInBounds && modifiedInBounds - if !originalInBounds && !modifiedInBounds { - break - } - - var originalElementMergeKeyValueString, modifiedElementMergeKeyValueString string - var originalElementMergeKeyValue, modifiedElementMergeKeyValue interface{} - var originalElement, modifiedElement map[string]interface{} - if originalInBounds { - originalElement, originalElementMergeKeyValue, err = getMapAndMergeKeyValueByIndex(originalIndex, mergeKey, originalSorted) - if err != nil { - return nil, nil, err - } - originalElementMergeKeyValueString = fmt.Sprintf("%v", originalElementMergeKeyValue) - } - if modifiedInBounds { - modifiedElement, modifiedElementMergeKeyValue, err = getMapAndMergeKeyValueByIndex(modifiedIndex, mergeKey, 
modifiedSorted) - if err != nil { - return nil, nil, err - } - modifiedElementMergeKeyValueString = fmt.Sprintf("%v", modifiedElementMergeKeyValue) - } - - switch { - case bothInBounds && ItemMatchesOriginalAndModifiedSlice(originalElementMergeKeyValueString, modifiedElementMergeKeyValueString): - // Merge key values are equal, so recurse - patchValue, err := diffMaps(originalElement, modifiedElement, schema, diffOptions) - if err != nil { - return nil, nil, err - } - if len(patchValue) > 0 { - patchValue[mergeKey] = modifiedElementMergeKeyValue - patch = append(patch, patchValue) - } - originalIndex++ - modifiedIndex++ - // only modified is in bound - case !originalInBounds: - fallthrough - // modified has additional map - case bothInBounds && ItemAddedToModifiedSlice(originalElementMergeKeyValueString, modifiedElementMergeKeyValueString): - if !diffOptions.IgnoreChangesAndAdditions { - patch = append(patch, modifiedElement) - } - modifiedIndex++ - // only original is in bound - case !modifiedInBounds: - fallthrough - // original has additional map - case bothInBounds && ItemRemovedFromModifiedSlice(originalElementMergeKeyValueString, modifiedElementMergeKeyValueString): - if !diffOptions.IgnoreDeletions { - // Item was deleted, so add delete directive - deletionList = append(deletionList, CreateDeleteDirective(mergeKey, originalElementMergeKeyValue)) - } - originalIndex++ - } - } - - return patch, deletionList, nil -} - -// getMapAndMergeKeyValueByIndex return a map in the list and its merge key value given the index of the map. -func getMapAndMergeKeyValueByIndex(index int, mergeKey string, listOfMaps []interface{}) (map[string]interface{}, interface{}, error) { - m, ok := listOfMaps[index].(map[string]interface{}) - if !ok { - return nil, nil, mergepatch.ErrBadArgType(m, listOfMaps[index]) - } - - val, ok := m[mergeKey] - if !ok { - return nil, nil, mergepatch.ErrNoMergeKey(m, mergeKey) - } - return m, val, nil -} - -// StrategicMergePatch applies a strategic merge patch. The patch and the original document -// must be json encoded content. A patch can be created from an original and a modified document -// by calling CreateStrategicMergePatch. -func StrategicMergePatch(original, patch []byte, dataStruct interface{}) ([]byte, error) { - schema, err := NewPatchMetaFromStruct(dataStruct) - if err != nil { - return nil, err - } - - return StrategicMergePatchUsingLookupPatchMeta(original, patch, schema) -} - -func StrategicMergePatchUsingLookupPatchMeta(original, patch []byte, schema LookupPatchMeta) ([]byte, error) { - originalMap, err := handleUnmarshal(original) - if err != nil { - return nil, err - } - patchMap, err := handleUnmarshal(patch) - if err != nil { - return nil, err - } - - result, err := StrategicMergeMapPatchUsingLookupPatchMeta(originalMap, patchMap, schema) - if err != nil { - return nil, err - } - - return json.Marshal(result) -} - -func handleUnmarshal(j []byte) (map[string]interface{}, error) { - if j == nil { - j = []byte("{}") - } - - m := map[string]interface{}{} - err := json.Unmarshal(j, &m) - if err != nil { - return nil, mergepatch.ErrBadJSONDoc - } - return m, nil -} - -// StrategicMergeMapPatch applies a strategic merge patch. The original and patch documents -// must be JSONMap. A patch can be created from an original and modified document by -// calling CreateTwoWayMergeMapPatch. -// Warning: the original and patch JSONMap objects are mutated by this function and should not be reused. 
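For orientation, a minimal usage sketch of the JSON-level entry points above. The corev1 import and image names are illustrative assumptions, not part of this diff; lists tagged with patchStrategy "merge" (such as PodSpec.Containers, merge key "name") are merged element-wise rather than replaced.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

func main() {
	original := []byte(`{"containers":[{"name":"app","image":"nginx:1.14"},{"name":"sidecar","image":"envoy"}]}`)
	patch := []byte(`{"containers":[{"name":"app","image":"nginx:1.16"}]}`)
	// The PodSpec struct tags supply the merge key, so only the "app"
	// container is updated; the "sidecar" container is left in place.
	merged, err := strategicpatch.StrategicMergePatch(original, patch, corev1.PodSpec{})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(merged))
}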
-func StrategicMergeMapPatch(original, patch JSONMap, dataStruct interface{}) (JSONMap, error) {
- schema, err := NewPatchMetaFromStruct(dataStruct)
- if err != nil {
- return nil, err
- }
-
- // We need the go struct tags `patchMergeKey` and `patchStrategy` for fields that support a strategic merge patch.
- // For native resources, we can easily figure out these tags since we know the fields.
-
- // Because custom resources are decoded as Unstructured and because we're missing the metadata about how to handle
- // each field in a strategic merge patch, we can't find the go struct tags. Hence, we can't easily do a strategic merge
- // for custom resources. So we should fail fast and return an error.
- if _, ok := dataStruct.(*unstructured.Unstructured); ok {
- return nil, mergepatch.ErrUnsupportedStrategicMergePatchFormat
- }
-
- return StrategicMergeMapPatchUsingLookupPatchMeta(original, patch, schema)
-}
-
-func StrategicMergeMapPatchUsingLookupPatchMeta(original, patch JSONMap, schema LookupPatchMeta) (JSONMap, error) {
- mergeOptions := MergeOptions{
- MergeParallelList: true,
- IgnoreUnmatchedNulls: true,
- }
- return mergeMap(original, patch, schema, mergeOptions)
-}
-
-// MergeStrategicMergeMapPatchUsingLookupPatchMeta merges strategic merge
-// patches retaining `null` fields and parallel lists. If 2 patches change the
-// same fields, the latter one overrides the former. If you don't want that
-// to happen, run func MergingMapsHaveConflicts before merging these patches.
-// Applying the resulting merged merge patch to a JSONMap yields the same
-// result as merging each strategic merge patch into the JSONMap in
-// succession.
-func MergeStrategicMergeMapPatchUsingLookupPatchMeta(schema LookupPatchMeta, patches ...JSONMap) (JSONMap, error) {
- mergeOptions := MergeOptions{
- MergeParallelList: false,
- IgnoreUnmatchedNulls: false,
- }
- merged := JSONMap{}
- var err error
- for _, patch := range patches {
- merged, err = mergeMap(merged, patch, schema, mergeOptions)
- if err != nil {
- return nil, err
- }
- }
- return merged, nil
-}
-
-// handleDirectiveInMergeMap handles the patch directive when merging 2 maps.
-func handleDirectiveInMergeMap(directive interface{}, patch map[string]interface{}) (map[string]interface{}, error) {
- if directive == replaceDirective {
- // If the patch contains "$patch: replace", don't merge it, just use the
- // patch directly. Later on, we can add a single level replace that only
- // affects the map that the $patch is in.
- delete(patch, directiveMarker)
- return patch, nil
- }
-
- if directive == deleteDirective {
- // If the patch contains "$patch: delete", don't merge it, just return
- // an empty map.
- return map[string]interface{}{}, nil
- }
-
- return nil, mergepatch.ErrBadPatchType(directive, patch)
-}
-
-func containsDirectiveMarker(item interface{}) bool {
- m, ok := item.(map[string]interface{})
- if ok {
- if _, foundDirectiveMarker := m[directiveMarker]; foundDirectiveMarker {
- return true
- }
- }
- return false
-}
-
-func mergeKeyValueEqual(left, right interface{}, mergeKey string) (bool, error) {
- if len(mergeKey) == 0 {
- return left == right, nil
- }
- typedLeft, ok := left.(map[string]interface{})
- if !ok {
- return false, mergepatch.ErrBadArgType(typedLeft, left)
- }
- typedRight, ok := right.(map[string]interface{})
- if !ok {
- return false, mergepatch.ErrBadArgType(typedRight, right)
- }
- mergeKeyLeft, ok := typedLeft[mergeKey]
- if !ok {
- return false, mergepatch.ErrNoMergeKey(typedLeft, mergeKey)
- }
- mergeKeyRight, ok := typedRight[mergeKey]
- if !ok {
- return false, mergepatch.ErrNoMergeKey(typedRight, mergeKey)
- }
- return mergeKeyLeft == mergeKeyRight, nil
-}
-
-// extractKey trims the prefix and returns the original key
-func extractKey(s, prefix string) (string, error) {
- substrings := strings.SplitN(s, "/", 2)
- if len(substrings) <= 1 || substrings[0] != prefix {
- switch prefix {
- case deleteFromPrimitiveListDirectivePrefix:
- return "", mergepatch.ErrBadPatchFormatForPrimitiveList
- case setElementOrderDirectivePrefix:
- return "", mergepatch.ErrBadPatchFormatForSetElementOrderList
- default:
- return "", fmt.Errorf("failed to find prefix %q in %s", prefix, s)
- }
- }
- return substrings[1], nil
-}
-
-// validatePatchWithSetOrderList verifies:
-// the relative order of any two items in the setOrderList list matches that in the patch list.
-// the items in the patch list must be a subset or the same as the $setElementOrder list (deletions are ignored).
-func validatePatchWithSetOrderList(patchList, setOrderList interface{}, mergeKey string) error {
- typedSetOrderList, ok := setOrderList.([]interface{})
- if !ok {
- return mergepatch.ErrBadPatchFormatForSetElementOrderList
- }
- typedPatchList, ok := patchList.([]interface{})
- if !ok {
- return mergepatch.ErrBadPatchFormatForSetElementOrderList
- }
- if len(typedSetOrderList) == 0 || len(typedPatchList) == 0 {
- return nil
- }
-
- var nonDeleteList, toDeleteList []interface{}
- var err error
- if len(mergeKey) > 0 {
- nonDeleteList, toDeleteList, err = extractToDeleteItems(typedPatchList)
- if err != nil {
- return err
- }
- } else {
- nonDeleteList = typedPatchList
- }
-
- patchIndex, setOrderIndex := 0, 0
- for patchIndex < len(nonDeleteList) && setOrderIndex < len(typedSetOrderList) {
- if containsDirectiveMarker(nonDeleteList[patchIndex]) {
- patchIndex++
- continue
- }
- mergeKeyEqual, err := mergeKeyValueEqual(nonDeleteList[patchIndex], typedSetOrderList[setOrderIndex], mergeKey)
- if err != nil {
- return err
- }
- if mergeKeyEqual {
- patchIndex++
- }
- setOrderIndex++
- }
- // If patchIndex is still in bounds but setOrderIndex is out of bounds, items in the patch list
- // do not match the $setElementOrder list.
- // The second check is a sanity check, and should always be true if the first is.
- if patchIndex < len(nonDeleteList) && setOrderIndex >= len(typedSetOrderList) {
- return fmt.Errorf("The order in patch list:\n%v\n doesn't match %s list:\n%v\n", typedPatchList, setElementOrderDirectivePrefix, setOrderList)
- }
- typedPatchList = append(nonDeleteList, toDeleteList...)
- return nil
-}
-
-// preprocessDeletionListForMerging preprocesses the deletion list.
-// it returns shouldContinue, isDeletionList, noPrefixKey -func preprocessDeletionListForMerging(key string, original map[string]interface{}, - patchVal interface{}, mergeDeletionList bool) (bool, bool, string, error) { - // If found a parallel list for deletion and we are going to merge the list, - // overwrite the key to the original key and set flag isDeleteList - foundParallelListPrefix := strings.HasPrefix(key, deleteFromPrimitiveListDirectivePrefix) - if foundParallelListPrefix { - if !mergeDeletionList { - original[key] = patchVal - return true, false, "", nil - } - originalKey, err := extractKey(key, deleteFromPrimitiveListDirectivePrefix) - return false, true, originalKey, err - } - return false, false, "", nil -} - -// applyRetainKeysDirective looks for a retainKeys directive and applies to original -// - if no directive exists do nothing -// - if directive is found, clear keys in original missing from the directive list -// - validate that all keys present in the patch are present in the retainKeys directive -// note: original may be another patch request, e.g. applying the add+modified patch to the deletions patch. In this case it may have directives -func applyRetainKeysDirective(original, patch map[string]interface{}, options MergeOptions) error { - retainKeysInPatch, foundInPatch := patch[retainKeysDirective] - if !foundInPatch { - return nil - } - // cleanup the directive - delete(patch, retainKeysDirective) - - if !options.MergeParallelList { - // If original is actually a patch, make sure the retainKeys directives are the same in both patches if present in both. - // If not present in the original patch, copy from the modified patch. - retainKeysInOriginal, foundInOriginal := original[retainKeysDirective] - if foundInOriginal { - if !reflect.DeepEqual(retainKeysInOriginal, retainKeysInPatch) { - // This error actually should never happen. - return fmt.Errorf("%v and %v are not deep equal: this may happen when calculating the 3-way diff patch", retainKeysInOriginal, retainKeysInPatch) - } - } else { - original[retainKeysDirective] = retainKeysInPatch - } - return nil - } - - retainKeysList, ok := retainKeysInPatch.([]interface{}) - if !ok { - return mergepatch.ErrBadPatchFormatForRetainKeys - } - - // validate patch to make sure all fields in the patch are present in the retainKeysList. - // The map is used only as a set, the value is never referenced - m := map[interface{}]struct{}{} - for _, v := range retainKeysList { - m[v] = struct{}{} - } - for k, v := range patch { - if v == nil || strings.HasPrefix(k, deleteFromPrimitiveListDirectivePrefix) || - strings.HasPrefix(k, setElementOrderDirectivePrefix) { - continue - } - // If there is an item present in the patch but not in the retainKeys list, - // the patch is invalid. - if _, found := m[k]; !found { - return mergepatch.ErrBadPatchFormatForRetainKeys - } - } - - // clear not present fields - for k := range original { - if _, found := m[k]; !found { - delete(original, k) - } - } - return nil -} - -// mergePatchIntoOriginal processes $setElementOrder list. -// When not merging the directive, it will make sure $setElementOrder list exist only in original. -// When merging the directive, it will try to find the $setElementOrder list and -// its corresponding patch list, validate it and merge it. -// Then, sort them by the relative order in setElementOrder, patch list and live list. -// The precedence is $setElementOrder > order in patch list > order in live list. 
-// This function deletes each directive after merging it, to prevent it from being processed again later.
-// Ref: https://git.k8s.io/community/contributors/design-proposals/cli/preserve-order-in-strategic-merge-patch.md
-func mergePatchIntoOriginal(original, patch map[string]interface{}, schema LookupPatchMeta, mergeOptions MergeOptions) error {
- for key, patchV := range patch {
- // Do nothing if there is no ordering directive
- if !strings.HasPrefix(key, setElementOrderDirectivePrefix) {
- continue
- }
-
- setElementOrderInPatch := patchV
- // Copy the directive from the second patch (`patch`) to the first patch (`original`),
- // check that they are equal, and delete the directive from the second patch
- if !mergeOptions.MergeParallelList {
- setElementOrderListInOriginal, ok := original[key]
- if ok {
- // check if the setElementOrder list in original and the one in patch match
- if !reflect.DeepEqual(setElementOrderListInOriginal, setElementOrderInPatch) {
- return mergepatch.ErrBadPatchFormatForSetElementOrderList
- }
- } else {
- // move the setElementOrder list from patch to original
- original[key] = setElementOrderInPatch
- }
- }
- delete(patch, key)
-
- var (
- ok bool
- originalFieldValue, patchFieldValue, merged []interface{}
- patchStrategy string
- patchMeta PatchMeta
- subschema LookupPatchMeta
- )
- typedSetElementOrderList, ok := setElementOrderInPatch.([]interface{})
- if !ok {
- return mergepatch.ErrBadArgType(typedSetElementOrderList, setElementOrderInPatch)
- }
- // Trim the setElementOrderDirectivePrefix to get the key of the list field in original.
- originalKey, err := extractKey(key, setElementOrderDirectivePrefix)
- if err != nil {
- return err
- }
- // try to find the list with `originalKey` in `original` and `patch` and merge them.
- originalList, foundOriginal := original[originalKey]
- patchList, foundPatch := patch[originalKey]
- if foundOriginal {
- originalFieldValue, ok = originalList.([]interface{})
- if !ok {
- return mergepatch.ErrBadArgType(originalFieldValue, originalList)
- }
- }
- if foundPatch {
- patchFieldValue, ok = patchList.([]interface{})
- if !ok {
- return mergepatch.ErrBadArgType(patchFieldValue, patchList)
- }
- }
- subschema, patchMeta, err = schema.LookupPatchMetadataForSlice(originalKey)
- if err != nil {
- return err
- }
- _, patchStrategy, err = extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies())
- if err != nil {
- return err
- }
- // Check for consistency between the element order list and the field it applies to
- err = validatePatchWithSetOrderList(patchFieldValue, typedSetElementOrderList, patchMeta.GetPatchMergeKey())
- if err != nil {
- return err
- }
-
- switch {
- case foundOriginal && !foundPatch:
- // no change to list contents
- merged = originalFieldValue
- case !foundOriginal && foundPatch:
- // list was added
- merged = patchFieldValue
- case foundOriginal && foundPatch:
- merged, err = mergeSliceHandler(originalList, patchList, subschema,
- patchStrategy, patchMeta.GetPatchMergeKey(), false, mergeOptions)
- if err != nil {
- return err
- }
- case !foundOriginal && !foundPatch:
- continue
- }
-
- // Split all items into patch items and server-only items and then enforce the order.
- var patchItems, serverOnlyItems []interface{}
- if len(patchMeta.GetPatchMergeKey()) == 0 {
- // Primitives don't need a merge key for partitioning.
- patchItems, serverOnlyItems = partitionPrimitivesByPresentInList(merged, typedSetElementOrderList)
-
- } else {
- // Maps need the merge key for partitioning.
- patchItems, serverOnlyItems, err = partitionMapsByPresentInList(merged, typedSetElementOrderList, patchMeta.GetPatchMergeKey()) - if err != nil { - return err - } - } - - elementType, err := sliceElementType(originalFieldValue, patchFieldValue) - if err != nil { - return err - } - kind := elementType.Kind() - // normalize merged list - // typedSetElementOrderList contains all the relative order in typedPatchList, - // so don't need to use typedPatchList - both, err := normalizeElementOrder(patchItems, serverOnlyItems, typedSetElementOrderList, originalFieldValue, patchMeta.GetPatchMergeKey(), kind) - if err != nil { - return err - } - original[originalKey] = both - // delete patch list from patch to prevent process again in the future - delete(patch, originalKey) - } - return nil -} - -// partitionPrimitivesByPresentInList partitions elements into 2 slices, the first containing items present in partitionBy, the other not. -func partitionPrimitivesByPresentInList(original, partitionBy []interface{}) ([]interface{}, []interface{}) { - patch := make([]interface{}, 0, len(original)) - serverOnly := make([]interface{}, 0, len(original)) - inPatch := map[interface{}]bool{} - for _, v := range partitionBy { - inPatch[v] = true - } - for _, v := range original { - if !inPatch[v] { - serverOnly = append(serverOnly, v) - } else { - patch = append(patch, v) - } - } - return patch, serverOnly -} - -// partitionMapsByPresentInList partitions elements into 2 slices, the first containing items present in partitionBy, the other not. -func partitionMapsByPresentInList(original, partitionBy []interface{}, mergeKey string) ([]interface{}, []interface{}, error) { - patch := make([]interface{}, 0, len(original)) - serverOnly := make([]interface{}, 0, len(original)) - for _, v := range original { - typedV, ok := v.(map[string]interface{}) - if !ok { - return nil, nil, mergepatch.ErrBadArgType(typedV, v) - } - mergeKeyValue, foundMergeKey := typedV[mergeKey] - if !foundMergeKey { - return nil, nil, mergepatch.ErrNoMergeKey(typedV, mergeKey) - } - _, _, found, err := findMapInSliceBasedOnKeyValue(partitionBy, mergeKey, mergeKeyValue) - if err != nil { - return nil, nil, err - } - if !found { - serverOnly = append(serverOnly, v) - } else { - patch = append(patch, v) - } - } - return patch, serverOnly, nil -} - -// Merge fields from a patch map into the original map. Note: This may modify -// both the original map and the patch because getting a deep copy of a map in -// golang is highly non-trivial. -// flag mergeOptions.MergeParallelList controls if using the parallel list to delete or keeping the list. -// If patch contains any null field (e.g. field_1: null) that is not -// present in original, then to propagate it to the end result use -// mergeOptions.IgnoreUnmatchedNulls == false. -func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, mergeOptions MergeOptions) (map[string]interface{}, error) { - if v, ok := patch[directiveMarker]; ok { - return handleDirectiveInMergeMap(v, patch) - } - - // nil is an accepted value for original to simplify logic in other places. - // If original is nil, replace it with an empty map and then apply the patch. - if original == nil { - original = map[string]interface{}{} - } - - err := applyRetainKeysDirective(original, patch, mergeOptions) - if err != nil { - return nil, err - } - - // Process $setElementOrder list and other lists sharing the same key. - // When not merging the directive, it will make sure $setElementOrder list exist only in original. 
- // When merging the directive, it will process $setElementOrder and its patch list together. - // This function will delete the merged elements from patch so they will not be reprocessed - err = mergePatchIntoOriginal(original, patch, schema, mergeOptions) - if err != nil { - return nil, err - } - - // Start merging the patch into the original. - for k, patchV := range patch { - skipProcessing, isDeleteList, noPrefixKey, err := preprocessDeletionListForMerging(k, original, patchV, mergeOptions.MergeParallelList) - if err != nil { - return nil, err - } - if skipProcessing { - continue - } - if len(noPrefixKey) > 0 { - k = noPrefixKey - } - - // If the value of this key is null, delete the key if it exists in the - // original. Otherwise, check if we want to preserve it or skip it. - // Preserving the null value is useful when we want to send an explicit - // delete to the API server. - if patchV == nil { - if _, ok := original[k]; ok { - delete(original, k) - } - if mergeOptions.IgnoreUnmatchedNulls { - continue - } - } - - _, ok := original[k] - if !ok { - // If it's not in the original document, just take the patch value. - original[k] = patchV - continue - } - - originalType := reflect.TypeOf(original[k]) - patchType := reflect.TypeOf(patchV) - if originalType != patchType { - original[k] = patchV - continue - } - // If they're both maps or lists, recurse into the value. - switch originalType.Kind() { - case reflect.Map: - subschema, patchMeta, err2 := schema.LookupPatchMetadataForStruct(k) - if err2 != nil { - return nil, err2 - } - _, patchStrategy, err2 := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) - if err2 != nil { - return nil, err2 - } - original[k], err = mergeMapHandler(original[k], patchV, subschema, patchStrategy, mergeOptions) - case reflect.Slice: - subschema, patchMeta, err2 := schema.LookupPatchMetadataForSlice(k) - if err2 != nil { - return nil, err2 - } - _, patchStrategy, err2 := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) - if err2 != nil { - return nil, err2 - } - original[k], err = mergeSliceHandler(original[k], patchV, subschema, patchStrategy, patchMeta.GetPatchMergeKey(), isDeleteList, mergeOptions) - default: - original[k] = patchV - } - if err != nil { - return nil, err - } - } - return original, nil -} - -// mergeMapHandler handles how to merge `patchV` whose key is `key` with `original` respecting -// fieldPatchStrategy and mergeOptions. -func mergeMapHandler(original, patch interface{}, schema LookupPatchMeta, - fieldPatchStrategy string, mergeOptions MergeOptions) (map[string]interface{}, error) { - typedOriginal, typedPatch, err := mapTypeAssertion(original, patch) - if err != nil { - return nil, err - } - - if fieldPatchStrategy != replaceDirective { - return mergeMap(typedOriginal, typedPatch, schema, mergeOptions) - } else { - return typedPatch, nil - } -} - -// mergeSliceHandler handles how to merge `patchV` whose key is `key` with `original` respecting -// fieldPatchStrategy, fieldPatchMergeKey, isDeleteList and mergeOptions. 
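To make the two slice strategies concrete, here is a self-contained sketch (hypothetical helpers, not this package's API) contrasting the default replace behavior with the merge strategy for scalar lists:

package main

import "fmt"

// replaceSlice mimics the default strategy: the patch list wins wholesale.
func replaceSlice(original, patch []string) []string { return patch }

// mergeScalarSlice mimics patchStrategy:"merge" for scalars: a union with de-duplication.
func mergeScalarSlice(original, patch []string) []string {
	seen := map[string]bool{}
	var out []string
	for _, v := range append(original, patch...) {
		if !seen[v] {
			seen[v] = true
			out = append(out, v)
		}
	}
	return out
}

func main() {
	orig := []string{"a", "b"}
	patch := []string{"b", "c"}
	fmt.Println(replaceSlice(orig, patch))     // [b c]
	fmt.Println(mergeScalarSlice(orig, patch)) // [a b c]
}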
-func mergeSliceHandler(original, patch interface{}, schema LookupPatchMeta, - fieldPatchStrategy, fieldPatchMergeKey string, isDeleteList bool, mergeOptions MergeOptions) ([]interface{}, error) { - typedOriginal, typedPatch, err := sliceTypeAssertion(original, patch) - if err != nil { - return nil, err - } - - if fieldPatchStrategy == mergeDirective { - return mergeSlice(typedOriginal, typedPatch, schema, fieldPatchMergeKey, mergeOptions, isDeleteList) - } else { - return typedPatch, nil - } -} - -// Merge two slices together. Note: This may modify both the original slice and -// the patch because getting a deep copy of a slice in golang is highly -// non-trivial. -func mergeSlice(original, patch []interface{}, schema LookupPatchMeta, mergeKey string, mergeOptions MergeOptions, isDeleteList bool) ([]interface{}, error) { - if len(original) == 0 && len(patch) == 0 { - return original, nil - } - - // All the values must be of the same type, but not a list. - t, err := sliceElementType(original, patch) - if err != nil { - return nil, err - } - - var merged []interface{} - kind := t.Kind() - // If the elements are not maps, merge the slices of scalars. - if kind != reflect.Map { - if mergeOptions.MergeParallelList && isDeleteList { - return deleteFromSlice(original, patch), nil - } - // Maybe in the future add a "concat" mode that doesn't - // deduplicate. - both := append(original, patch...) - merged = deduplicateScalars(both) - - } else { - if mergeKey == "" { - return nil, fmt.Errorf("cannot merge lists without merge key for %s", schema.Name()) - } - - original, patch, err = mergeSliceWithSpecialElements(original, patch, mergeKey) - if err != nil { - return nil, err - } - - merged, err = mergeSliceWithoutSpecialElements(original, patch, mergeKey, schema, mergeOptions) - if err != nil { - return nil, err - } - } - - // enforce the order - var patchItems, serverOnlyItems []interface{} - if len(mergeKey) == 0 { - patchItems, serverOnlyItems = partitionPrimitivesByPresentInList(merged, patch) - } else { - patchItems, serverOnlyItems, err = partitionMapsByPresentInList(merged, patch, mergeKey) - if err != nil { - return nil, err - } - } - return normalizeElementOrder(patchItems, serverOnlyItems, patch, original, mergeKey, kind) -} - -// mergeSliceWithSpecialElements handles special elements with directiveMarker -// before merging the slices. It returns a updated `original` and a patch without special elements. -// original and patch must be slices of maps, they should be checked before calling this function. -func mergeSliceWithSpecialElements(original, patch []interface{}, mergeKey string) ([]interface{}, []interface{}, error) { - patchWithoutSpecialElements := []interface{}{} - replace := false - for _, v := range patch { - typedV := v.(map[string]interface{}) - patchType, ok := typedV[directiveMarker] - if !ok { - patchWithoutSpecialElements = append(patchWithoutSpecialElements, v) - } else { - switch patchType { - case deleteDirective: - mergeValue, ok := typedV[mergeKey] - if ok { - var err error - original, err = deleteMatchingEntries(original, mergeKey, mergeValue) - if err != nil { - return nil, nil, err - } - } else { - return nil, nil, mergepatch.ErrNoMergeKey(typedV, mergeKey) - } - case replaceDirective: - replace = true - // Continue iterating through the array to prune any other $patch elements. 
- case mergeDirective: - return nil, nil, fmt.Errorf("merging lists cannot yet be specified in the patch") - default: - return nil, nil, mergepatch.ErrBadPatchType(patchType, typedV) - } - } - } - if replace { - return patchWithoutSpecialElements, nil, nil - } - return original, patchWithoutSpecialElements, nil -} - -// delete all matching entries (based on merge key) from a merging list -func deleteMatchingEntries(original []interface{}, mergeKey string, mergeValue interface{}) ([]interface{}, error) { - for { - _, originalKey, found, err := findMapInSliceBasedOnKeyValue(original, mergeKey, mergeValue) - if err != nil { - return nil, err - } - - if !found { - break - } - // Delete the element at originalKey. - original = append(original[:originalKey], original[originalKey+1:]...) - } - return original, nil -} - -// mergeSliceWithoutSpecialElements merges slices with non-special elements. -// original and patch must be slices of maps, they should be checked before calling this function. -func mergeSliceWithoutSpecialElements(original, patch []interface{}, mergeKey string, schema LookupPatchMeta, mergeOptions MergeOptions) ([]interface{}, error) { - for _, v := range patch { - typedV := v.(map[string]interface{}) - mergeValue, ok := typedV[mergeKey] - if !ok { - return nil, mergepatch.ErrNoMergeKey(typedV, mergeKey) - } - - // If we find a value with this merge key value in original, merge the - // maps. Otherwise append onto original. - originalMap, originalKey, found, err := findMapInSliceBasedOnKeyValue(original, mergeKey, mergeValue) - if err != nil { - return nil, err - } - - if found { - var mergedMaps interface{} - var err error - // Merge into original. - mergedMaps, err = mergeMap(originalMap, typedV, schema, mergeOptions) - if err != nil { - return nil, err - } - - original[originalKey] = mergedMaps - } else { - original = append(original, v) - } - } - return original, nil -} - -// deleteFromSlice uses the parallel list to delete the items in a list of scalars -func deleteFromSlice(current, toDelete []interface{}) []interface{} { - toDeleteMap := map[interface{}]interface{}{} - processed := make([]interface{}, 0, len(current)) - for _, v := range toDelete { - toDeleteMap[v] = true - } - for _, v := range current { - if _, found := toDeleteMap[v]; !found { - processed = append(processed, v) - } - } - return processed -} - -// This method no longer panics if any element of the slice is not a map. -func findMapInSliceBasedOnKeyValue(m []interface{}, key string, value interface{}) (map[string]interface{}, int, bool, error) { - for k, v := range m { - typedV, ok := v.(map[string]interface{}) - if !ok { - return nil, 0, false, fmt.Errorf("value for key %v is not a map", k) - } - - valueToMatch, ok := typedV[key] - if ok && valueToMatch == value { - return typedV, k, true, nil - } - } - - return nil, 0, false, nil -} - -// This function takes a JSON map and sorts all the lists that should be merged -// by key. This is needed by tests because in JSON, list order is significant, -// but in Strategic Merge Patch, merge lists do not have significant order. -// Sorting the lists allows for order-insensitive comparison of patched maps. 
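The test-only sorting described above can be sketched standalone: sorting both orderings of a merge list by its merge key (here "name", an assumed example) makes them directly comparable.

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Order is significant in raw JSON, but not in a strategic merge list,
	// so sorting by the merge key yields a canonical form for comparison.
	list := []map[string]interface{}{
		{"name": "web", "image": "nginx"},
		{"name": "db", "image": "postgres"},
	}
	sort.Slice(list, func(i, j int) bool {
		return fmt.Sprintf("%v", list[i]["name"]) < fmt.Sprintf("%v", list[j]["name"])
	})
	fmt.Println(list) // db sorts before web, regardless of input order
}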
-func sortMergeListsByName(mapJSON []byte, schema LookupPatchMeta) ([]byte, error) { - var m map[string]interface{} - err := json.Unmarshal(mapJSON, &m) - if err != nil { - return nil, mergepatch.ErrBadJSONDoc - } - - newM, err := sortMergeListsByNameMap(m, schema) - if err != nil { - return nil, err - } - - return json.Marshal(newM) -} - -// Function sortMergeListsByNameMap recursively sorts the merge lists by its mergeKey in a map. -func sortMergeListsByNameMap(s map[string]interface{}, schema LookupPatchMeta) (map[string]interface{}, error) { - newS := map[string]interface{}{} - for k, v := range s { - if k == retainKeysDirective { - typedV, ok := v.([]interface{}) - if !ok { - return nil, mergepatch.ErrBadPatchFormatForRetainKeys - } - v = sortScalars(typedV) - } else if strings.HasPrefix(k, deleteFromPrimitiveListDirectivePrefix) { - typedV, ok := v.([]interface{}) - if !ok { - return nil, mergepatch.ErrBadPatchFormatForPrimitiveList - } - v = sortScalars(typedV) - } else if strings.HasPrefix(k, setElementOrderDirectivePrefix) { - _, ok := v.([]interface{}) - if !ok { - return nil, mergepatch.ErrBadPatchFormatForSetElementOrderList - } - } else if k != directiveMarker { - // recurse for map and slice. - switch typedV := v.(type) { - case map[string]interface{}: - subschema, _, err := schema.LookupPatchMetadataForStruct(k) - if err != nil { - return nil, err - } - v, err = sortMergeListsByNameMap(typedV, subschema) - if err != nil { - return nil, err - } - case []interface{}: - subschema, patchMeta, err := schema.LookupPatchMetadataForSlice(k) - if err != nil { - return nil, err - } - _, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) - if err != nil { - return nil, err - } - if patchStrategy == mergeDirective { - var err error - v, err = sortMergeListsByNameArray(typedV, subschema, patchMeta.GetPatchMergeKey(), true) - if err != nil { - return nil, err - } - } - } - } - - newS[k] = v - } - - return newS, nil -} - -// Function sortMergeListsByNameMap recursively sorts the merge lists by its mergeKey in an array. -func sortMergeListsByNameArray(s []interface{}, schema LookupPatchMeta, mergeKey string, recurse bool) ([]interface{}, error) { - if len(s) == 0 { - return s, nil - } - - // We don't support lists of lists yet. - t, err := sliceElementType(s) - if err != nil { - return nil, err - } - - // If the elements are not maps... - if t.Kind() != reflect.Map { - // Sort the elements, because they may have been merged out of order. - return deduplicateAndSortScalars(s), nil - } - - // Elements are maps - if one of the keys of the map is a map or a - // list, we may need to recurse into it. - newS := []interface{}{} - for _, elem := range s { - if recurse { - typedElem := elem.(map[string]interface{}) - newElem, err := sortMergeListsByNameMap(typedElem, schema) - if err != nil { - return nil, err - } - - newS = append(newS, newElem) - } else { - newS = append(newS, elem) - } - } - - // Sort the maps. 
- newS = sortMapsBasedOnField(newS, mergeKey) - return newS, nil -} - -func sortMapsBasedOnField(m []interface{}, fieldName string) []interface{} { - mapM := mapSliceFromSlice(m) - ss := SortableSliceOfMaps{mapM, fieldName} - sort.Sort(ss) - newS := sliceFromMapSlice(ss.s) - return newS -} - -func mapSliceFromSlice(m []interface{}) []map[string]interface{} { - newM := []map[string]interface{}{} - for _, v := range m { - vt := v.(map[string]interface{}) - newM = append(newM, vt) - } - - return newM -} - -func sliceFromMapSlice(s []map[string]interface{}) []interface{} { - newS := []interface{}{} - for _, v := range s { - newS = append(newS, v) - } - - return newS -} - -type SortableSliceOfMaps struct { - s []map[string]interface{} - k string // key to sort on -} - -func (ss SortableSliceOfMaps) Len() int { - return len(ss.s) -} - -func (ss SortableSliceOfMaps) Less(i, j int) bool { - iStr := fmt.Sprintf("%v", ss.s[i][ss.k]) - jStr := fmt.Sprintf("%v", ss.s[j][ss.k]) - return sort.StringsAreSorted([]string{iStr, jStr}) -} - -func (ss SortableSliceOfMaps) Swap(i, j int) { - tmp := ss.s[i] - ss.s[i] = ss.s[j] - ss.s[j] = tmp -} - -func deduplicateAndSortScalars(s []interface{}) []interface{} { - s = deduplicateScalars(s) - return sortScalars(s) -} - -func sortScalars(s []interface{}) []interface{} { - ss := SortableSliceOfScalars{s} - sort.Sort(ss) - return ss.s -} - -func deduplicateScalars(s []interface{}) []interface{} { - // Clever algorithm to deduplicate. - length := len(s) - 1 - for i := 0; i < length; i++ { - for j := i + 1; j <= length; j++ { - if s[i] == s[j] { - s[j] = s[length] - s = s[0:length] - length-- - j-- - } - } - } - - return s -} - -type SortableSliceOfScalars struct { - s []interface{} -} - -func (ss SortableSliceOfScalars) Len() int { - return len(ss.s) -} - -func (ss SortableSliceOfScalars) Less(i, j int) bool { - iStr := fmt.Sprintf("%v", ss.s[i]) - jStr := fmt.Sprintf("%v", ss.s[j]) - return sort.StringsAreSorted([]string{iStr, jStr}) -} - -func (ss SortableSliceOfScalars) Swap(i, j int) { - tmp := ss.s[i] - ss.s[i] = ss.s[j] - ss.s[j] = tmp -} - -// Returns the type of the elements of N slice(s). If the type is different, -// another slice or undefined, returns an error. -func sliceElementType(slices ...[]interface{}) (reflect.Type, error) { - var prevType reflect.Type - for _, s := range slices { - // Go through elements of all given slices and make sure they are all the same type. - for _, v := range s { - currentType := reflect.TypeOf(v) - if prevType == nil { - prevType = currentType - // We don't support lists of lists yet. - if prevType.Kind() == reflect.Slice { - return nil, mergepatch.ErrNoListOfLists - } - } else { - if prevType != currentType { - return nil, fmt.Errorf("list element types are not identical: %v", fmt.Sprint(slices)) - } - prevType = currentType - } - } - } - - if prevType == nil { - return nil, fmt.Errorf("no elements in any of the given slices") - } - - return prevType, nil -} - -// MergingMapsHaveConflicts returns true if the left and right JSON interface -// objects overlap with different values in any key. All keys are required to be -// strings. Since patches of the same Type have congruent keys, this is valid -// for multiple patch types. This method supports strategic merge patch semantics. 
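As a rough illustration of the conflict rule, a simplified scalar-only version might look like this (a hedged sketch, not the recursive implementation that follows):

package main

import (
	"fmt"
	"reflect"
)

// haveScalarConflicts reports whether two flat patches set the same key to different values.
func haveScalarConflicts(left, right map[string]interface{}) bool {
	for k, lv := range left {
		if rv, ok := right[k]; ok && !reflect.DeepEqual(lv, rv) {
			return true
		}
	}
	return false
}

func main() {
	// JSON numbers decode as float64, hence the float literals.
	a := map[string]interface{}{"replicas": 2.0}
	b := map[string]interface{}{"replicas": 3.0}
	c := map[string]interface{}{"paused": true}
	fmt.Println(haveScalarConflicts(a, b)) // true: same key, different values
	fmt.Println(haveScalarConflicts(a, c)) // false: disjoint keys
}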
-func MergingMapsHaveConflicts(left, right map[string]interface{}, schema LookupPatchMeta) (bool, error) { - return mergingMapFieldsHaveConflicts(left, right, schema, "", "") -} - -func mergingMapFieldsHaveConflicts( - left, right interface{}, - schema LookupPatchMeta, - fieldPatchStrategy, fieldPatchMergeKey string, -) (bool, error) { - switch leftType := left.(type) { - case map[string]interface{}: - rightType, ok := right.(map[string]interface{}) - if !ok { - return true, nil - } - leftMarker, okLeft := leftType[directiveMarker] - rightMarker, okRight := rightType[directiveMarker] - // if one or the other has a directive marker, - // then we need to consider that before looking at the individual keys, - // since a directive operates on the whole map. - if okLeft || okRight { - // if one has a directive marker and the other doesn't, - // then we have a conflict, since one is deleting or replacing the whole map, - // and the other is doing things to individual keys. - if okLeft != okRight { - return true, nil - } - // if they both have markers, but they are not the same directive, - // then we have a conflict because they're doing different things to the map. - if leftMarker != rightMarker { - return true, nil - } - } - if fieldPatchStrategy == replaceDirective { - return false, nil - } - // Check the individual keys. - return mapsHaveConflicts(leftType, rightType, schema) - - case []interface{}: - rightType, ok := right.([]interface{}) - if !ok { - return true, nil - } - return slicesHaveConflicts(leftType, rightType, schema, fieldPatchStrategy, fieldPatchMergeKey) - case string, float64, bool, int64, nil: - return !reflect.DeepEqual(left, right), nil - default: - return true, fmt.Errorf("unknown type: %v", reflect.TypeOf(left)) - } -} - -func mapsHaveConflicts(typedLeft, typedRight map[string]interface{}, schema LookupPatchMeta) (bool, error) { - for key, leftValue := range typedLeft { - if key != directiveMarker && key != retainKeysDirective { - if rightValue, ok := typedRight[key]; ok { - var subschema LookupPatchMeta - var patchMeta PatchMeta - var patchStrategy string - var err error - switch leftValue.(type) { - case []interface{}: - subschema, patchMeta, err = schema.LookupPatchMetadataForSlice(key) - if err != nil { - return true, err - } - _, patchStrategy, err = extractRetainKeysPatchStrategy(patchMeta.patchStrategies) - if err != nil { - return true, err - } - case map[string]interface{}: - subschema, patchMeta, err = schema.LookupPatchMetadataForStruct(key) - if err != nil { - return true, err - } - _, patchStrategy, err = extractRetainKeysPatchStrategy(patchMeta.patchStrategies) - if err != nil { - return true, err - } - } - - if hasConflicts, err := mergingMapFieldsHaveConflicts(leftValue, rightValue, - subschema, patchStrategy, patchMeta.GetPatchMergeKey()); hasConflicts { - return true, err - } - } - } - } - - return false, nil -} - -func slicesHaveConflicts( - typedLeft, typedRight []interface{}, - schema LookupPatchMeta, - fieldPatchStrategy, fieldPatchMergeKey string, -) (bool, error) { - elementType, err := sliceElementType(typedLeft, typedRight) - if err != nil { - return true, err - } - - if fieldPatchStrategy == mergeDirective { - // Merging lists of scalars have no conflicts by definition - // So we only need to check further if the elements are maps - if elementType.Kind() != reflect.Map { - return false, nil - } - - // Build a map for each slice and then compare the two maps - leftMap, err := sliceOfMapsToMapOfMaps(typedLeft, fieldPatchMergeKey) - if err != nil 
{ - return true, err - } - - rightMap, err := sliceOfMapsToMapOfMaps(typedRight, fieldPatchMergeKey) - if err != nil { - return true, err - } - - return mapsOfMapsHaveConflicts(leftMap, rightMap, schema) - } - - // Either we don't have type information, or these are non-merging lists - if len(typedLeft) != len(typedRight) { - return true, nil - } - - // Sort scalar slices to prevent ordering issues - // We have no way to sort non-merging lists of maps - if elementType.Kind() != reflect.Map { - typedLeft = deduplicateAndSortScalars(typedLeft) - typedRight = deduplicateAndSortScalars(typedRight) - } - - // Compare the slices element by element in order - // This test will fail if the slices are not sorted - for i := range typedLeft { - if hasConflicts, err := mergingMapFieldsHaveConflicts(typedLeft[i], typedRight[i], schema, "", ""); hasConflicts { - return true, err - } - } - - return false, nil -} - -func sliceOfMapsToMapOfMaps(slice []interface{}, mergeKey string) (map[string]interface{}, error) { - result := make(map[string]interface{}, len(slice)) - for _, value := range slice { - typedValue, ok := value.(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("invalid element type in merging list:%v", slice) - } - - mergeValue, ok := typedValue[mergeKey] - if !ok { - return nil, fmt.Errorf("cannot find merge key `%s` in merging list element:%v", mergeKey, typedValue) - } - - result[fmt.Sprintf("%s", mergeValue)] = typedValue - } - - return result, nil -} - -func mapsOfMapsHaveConflicts(typedLeft, typedRight map[string]interface{}, schema LookupPatchMeta) (bool, error) { - for key, leftValue := range typedLeft { - if rightValue, ok := typedRight[key]; ok { - if hasConflicts, err := mergingMapFieldsHaveConflicts(leftValue, rightValue, schema, "", ""); hasConflicts { - return true, err - } - } - } - - return false, nil -} - -// CreateThreeWayMergePatch reconciles a modified configuration with an original configuration, -// while preserving any changes or deletions made to the original configuration in the interim, -// and not overridden by the current configuration. All three documents must be passed to the -// method as json encoded content. It will return a strategic merge patch, or an error if any -// of the documents is invalid, or if there are any preconditions that fail against the modified -// configuration, or, if overwrite is false and there are conflicts between the modified and current -// configurations. Conflicts are defined as keys changed differently from original to modified -// than from original to current. In other words, a conflict occurs if modified changes any key -// in a way that is different from how it is changed in current (e.g., deleting it, changing its -// value). We also propagate values fields that do not exist in original but are explicitly -// defined in modified. 
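A minimal usage sketch of this three-way entry point follows; the PodSpec schema and the concrete JSON documents are illustrative assumptions:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

func main() {
	// original: last-applied config; modified: new desired config; current: live object.
	original := []byte(`{"containers":[{"name":"app","image":"nginx:1.14"}],"activeDeadlineSeconds":30}`)
	modified := []byte(`{"containers":[{"name":"app","image":"nginx:1.16"}]}`)
	current := []byte(`{"containers":[{"name":"app","image":"nginx:1.14"}],"activeDeadlineSeconds":30,"nodeName":"node-1"}`)

	schema, err := strategicpatch.NewPatchMetaFromStruct(corev1.PodSpec{})
	if err != nil {
		panic(err)
	}
	patch, err := strategicpatch.CreateThreeWayMergePatch(original, modified, current, schema, false)
	if err != nil {
		panic(err)
	}
	// Expect the image change plus a null for activeDeadlineSeconds
	// (deleted from original to modified); nodeName is untouched.
	fmt.Println(string(patch))
}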
-func CreateThreeWayMergePatch(original, modified, current []byte, schema LookupPatchMeta, overwrite bool, fns ...mergepatch.PreconditionFunc) ([]byte, error) {
- originalMap := map[string]interface{}{}
- if len(original) > 0 {
- if err := json.Unmarshal(original, &originalMap); err != nil {
- return nil, mergepatch.ErrBadJSONDoc
- }
- }
-
- modifiedMap := map[string]interface{}{}
- if len(modified) > 0 {
- if err := json.Unmarshal(modified, &modifiedMap); err != nil {
- return nil, mergepatch.ErrBadJSONDoc
- }
- }
-
- currentMap := map[string]interface{}{}
- if len(current) > 0 {
- if err := json.Unmarshal(current, &currentMap); err != nil {
- return nil, mergepatch.ErrBadJSONDoc
- }
- }
-
- // The patch is the difference from current to modified without deletions, plus deletions
- // from original to modified. To find it, we compute deletions, which are the deletions from
- // original to modified, and delta, which is the difference from current to modified without
- // deletions, and then apply delta to deletions as a patch, which should be strictly additive.
- deltaMapDiffOptions := DiffOptions{
- IgnoreDeletions: true,
- SetElementOrder: true,
- }
- deltaMap, err := diffMaps(currentMap, modifiedMap, schema, deltaMapDiffOptions)
- if err != nil {
- return nil, err
- }
- deletionsMapDiffOptions := DiffOptions{
- SetElementOrder: true,
- IgnoreChangesAndAdditions: true,
- }
- deletionsMap, err := diffMaps(originalMap, modifiedMap, schema, deletionsMapDiffOptions)
- if err != nil {
- return nil, err
- }
-
- mergeOptions := MergeOptions{}
- patchMap, err := mergeMap(deletionsMap, deltaMap, schema, mergeOptions)
- if err != nil {
- return nil, err
- }
-
- // Apply the preconditions to the patch, and return an error if any of them fail.
- for _, fn := range fns {
- if !fn(patchMap) {
- return nil, mergepatch.NewErrPreconditionFailed(patchMap)
- }
- }
-
- // If overwrite is false, and the patch contains any keys that were changed differently,
- // then return a conflict error.
- if !overwrite {
- changeMapDiffOptions := DiffOptions{}
- changedMap, err := diffMaps(originalMap, currentMap, schema, changeMapDiffOptions)
- if err != nil {
- return nil, err
- }
-
- hasConflicts, err := MergingMapsHaveConflicts(patchMap, changedMap, schema)
- if err != nil {
- return nil, err
- }
-
- if hasConflicts {
- return nil, mergepatch.NewErrConflict(mergepatch.ToYAMLOrError(patchMap), mergepatch.ToYAMLOrError(changedMap))
- }
- }
-
- return json.Marshal(patchMap)
-}
-
-func ItemAddedToModifiedSlice(original, modified string) bool { return original > modified }
-
-func ItemRemovedFromModifiedSlice(original, modified string) bool { return original < modified }
-
-func ItemMatchesOriginalAndModifiedSlice(original, modified string) bool { return original == modified }
-
-func CreateDeleteDirective(mergeKey string, mergeKeyValue interface{}) map[string]interface{} {
- return map[string]interface{}{mergeKey: mergeKeyValue, directiveMarker: deleteDirective}
-}
-
-func mapTypeAssertion(original, patch interface{}) (map[string]interface{}, map[string]interface{}, error) {
- typedOriginal, ok := original.(map[string]interface{})
- if !ok {
- return nil, nil, mergepatch.ErrBadArgType(typedOriginal, original)
- }
- typedPatch, ok := patch.(map[string]interface{})
- if !ok {
- return nil, nil, mergepatch.ErrBadArgType(typedPatch, patch)
- }
- return typedOriginal, typedPatch, nil
-}
-
-func sliceTypeAssertion(original, patch interface{}) ([]interface{}, []interface{}, error) {
- typedOriginal, ok := original.([]interface{})
- if !ok {
- return nil, nil, mergepatch.ErrBadArgType(typedOriginal, original)
- }
- typedPatch, ok := patch.([]interface{})
- if !ok {
- return nil, nil, mergepatch.ErrBadArgType(typedPatch, patch)
- }
- return typedOriginal, typedPatch, nil
-}
-
-// extractRetainKeysPatchStrategy processes the patch strategy, a string that may contain multiple
-// patch strategies separated by ",". It returns a boolean indicating whether the retainKeys
-// strategy is present, plus whatever other strategy was specified.
-func extractRetainKeysPatchStrategy(strategies []string) (bool, string, error) {
- switch len(strategies) {
- case 0:
- return false, "", nil
- case 1:
- singleStrategy := strategies[0]
- switch singleStrategy {
- case retainKeysStrategy:
- return true, "", nil
- default:
- return false, singleStrategy, nil
- }
- case 2:
- switch {
- case strategies[0] == retainKeysStrategy:
- return true, strategies[1], nil
- case strategies[1] == retainKeysStrategy:
- return true, strategies[0], nil
- default:
- return false, "", fmt.Errorf("unexpected patch strategy: %v", strategies)
- }
- default:
- return false, "", fmt.Errorf("unexpected patch strategy: %v", strategies)
- }
-}
-
-// hasAdditionalNewField reports whether the original map has a key with a non-nil value that is absent from modified.
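The helper below is small enough to demonstrate standalone; this is a runnable copy of it, with made-up inputs showing the nil-value carve-out:

package main

import "fmt"

func hasAdditionalNewField(original, modified map[string]interface{}) bool {
	for k, v := range original {
		if v == nil {
			continue
		}
		if _, found := modified[k]; !found {
			return true
		}
	}
	return false
}

func main() {
	orig := map[string]interface{}{"a": 1, "b": nil}
	mod := map[string]interface{}{"a": 1}
	fmt.Println(hasAdditionalNewField(orig, mod)) // false: "b" is nil, so it doesn't count
	orig["c"] = 2
	fmt.Println(hasAdditionalNewField(orig, mod)) // true: "c" is non-nil and missing from modified
}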
-func hasAdditionalNewField(original, modified map[string]interface{}) bool { - for k, v := range original { - if v == nil { - continue - } - if _, found := modified[k]; !found { - return true - } - } - return false -} diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/BUILD b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/BUILD deleted file mode 100644 index 6dc06d4e..00000000 --- a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/BUILD +++ /dev/null @@ -1,33 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_library( - name = "go_default_library", - srcs = ["fields.go"], - importmap = "k8s.io/kubernetes/vendor/k8s.io/apimachinery/third_party/forked/golang/json", - importpath = "k8s.io/apimachinery/third_party/forked/golang/json", -) - -go_test( - name = "go_default_test", - srcs = ["fields_test.go"], - embed = [":go_default_library"], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS deleted file mode 100644 index 8e8d9fce..00000000 --- a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS +++ /dev/null @@ -1,5 +0,0 @@ -approvers: -- pwittrock -reviewers: -- mengqiy -- apelisse diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go deleted file mode 100644 index 8205a4dd..00000000 --- a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go +++ /dev/null @@ -1,513 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package json is forked from the Go standard library to enable us to find the -// field of a struct that a given JSON key maps to. -package json - -import ( - "bytes" - "fmt" - "reflect" - "sort" - "strings" - "sync" - "unicode" - "unicode/utf8" -) - -const ( - patchStrategyTagKey = "patchStrategy" - patchMergeKeyTagKey = "patchMergeKey" -) - -// Finds the patchStrategy and patchMergeKey struct tag fields on a given -// struct field given the struct type and the JSON name of the field. -// It returns field type, a slice of patch strategies, merge key and error. -// TODO: fix the returned errors to be introspectable. -func LookupPatchMetadataForStruct(t reflect.Type, jsonField string) ( - elemType reflect.Type, patchStrategies []string, patchMergeKey string, e error) { - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - - if t.Kind() != reflect.Struct { - e = fmt.Errorf("merging an object in json but data type is not struct, instead is: %s", - t.Kind().String()) - return - } - jf := []byte(jsonField) - // Find the field that the JSON library would use. - var f *field - fields := cachedTypeFields(t) - for i := range fields { - ff := &fields[i] - if bytes.Equal(ff.nameBytes, jf) { - f = ff - break - } - // Do case-insensitive comparison. - if f == nil && ff.equalFold(ff.nameBytes, jf) { - f = ff - } - } - if f != nil { - // Find the reflect.Value of the most preferential struct field. 
- tjf := t.Field(f.index[0]) - // we must navigate down all the anonymously included structs in the chain - for i := 1; i < len(f.index); i++ { - tjf = tjf.Type.Field(f.index[i]) - } - patchStrategy := tjf.Tag.Get(patchStrategyTagKey) - patchMergeKey = tjf.Tag.Get(patchMergeKeyTagKey) - patchStrategies = strings.Split(patchStrategy, ",") - elemType = tjf.Type - return - } - e = fmt.Errorf("unable to find api field in struct %s for the json field %q", t.Name(), jsonField) - return -} - -// A field represents a single field found in a struct. -type field struct { - name string - nameBytes []byte // []byte(name) - equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent - - tag bool - // index is the sequence of indexes from the containing type fields to this field. - // it is a slice because anonymous structs will need multiple navigation steps to correctly - // resolve the proper fields - index []int - typ reflect.Type - omitEmpty bool - quoted bool -} - -func (f field) String() string { - return fmt.Sprintf("{name: %s, type: %v, tag: %v, index: %v, omitEmpty: %v, quoted: %v}", f.name, f.typ, f.tag, f.index, f.omitEmpty, f.quoted) -} - -func fillField(f field) field { - f.nameBytes = []byte(f.name) - f.equalFold = foldFunc(f.nameBytes) - return f -} - -// byName sorts field by name, breaking ties with depth, -// then breaking ties with "name came from json tag", then -// breaking ties with index sequence. -type byName []field - -func (x byName) Len() int { return len(x) } - -func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byName) Less(i, j int) bool { - if x[i].name != x[j].name { - return x[i].name < x[j].name - } - if len(x[i].index) != len(x[j].index) { - return len(x[i].index) < len(x[j].index) - } - if x[i].tag != x[j].tag { - return x[i].tag - } - return byIndex(x).Less(i, j) -} - -// byIndex sorts field by index sequence. -type byIndex []field - -func (x byIndex) Len() int { return len(x) } - -func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byIndex) Less(i, j int) bool { - for k, xik := range x[i].index { - if k >= len(x[j].index) { - return false - } - if xik != x[j].index[k] { - return xik < x[j].index[k] - } - } - return len(x[i].index) < len(x[j].index) -} - -// typeFields returns a list of fields that JSON should recognize for the given type. -// The algorithm is breadth-first search over the set of structs to include - the top struct -// and then any reachable anonymous structs. -func typeFields(t reflect.Type) []field { - // Anonymous fields to explore at the current level and the next. - current := []field{} - next := []field{{typ: t}} - - // Count of queued names for current level and the next. - count := map[reflect.Type]int{} - nextCount := map[reflect.Type]int{} - - // Types already visited at an earlier level. - visited := map[reflect.Type]bool{} - - // Fields found. - var fields []field - - for len(next) > 0 { - current, next = next, current[:0] - count, nextCount = nextCount, map[reflect.Type]int{} - - for _, f := range current { - if visited[f.typ] { - continue - } - visited[f.typ] = true - - // Scan f.typ for fields to include. 
- for i := 0; i < f.typ.NumField(); i++ { - sf := f.typ.Field(i) - if sf.PkgPath != "" { // unexported - continue - } - tag := sf.Tag.Get("json") - if tag == "-" { - continue - } - name, opts := parseTag(tag) - if !isValidTag(name) { - name = "" - } - index := make([]int, len(f.index)+1) - copy(index, f.index) - index[len(f.index)] = i - - ft := sf.Type - if ft.Name() == "" && ft.Kind() == reflect.Ptr { - // Follow pointer. - ft = ft.Elem() - } - - // Record found field and index sequence. - if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { - tagged := name != "" - if name == "" { - name = sf.Name - } - fields = append(fields, fillField(field{ - name: name, - tag: tagged, - index: index, - typ: ft, - omitEmpty: opts.Contains("omitempty"), - quoted: opts.Contains("string"), - })) - if count[f.typ] > 1 { - // If there were multiple instances, add a second, - // so that the annihilation code will see a duplicate. - // It only cares about the distinction between 1 or 2, - // so don't bother generating any more copies. - fields = append(fields, fields[len(fields)-1]) - } - continue - } - - // Record new anonymous struct to explore in next round. - nextCount[ft]++ - if nextCount[ft] == 1 { - next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) - } - } - } - } - - sort.Sort(byName(fields)) - - // Delete all fields that are hidden by the Go rules for embedded fields, - // except that fields with JSON tags are promoted. - - // The fields are sorted in primary order of name, secondary order - // of field index length. Loop over names; for each name, delete - // hidden fields by choosing the one dominant field that survives. - out := fields[:0] - for advance, i := 0, 0; i < len(fields); i += advance { - // One iteration per name. - // Find the sequence of fields with the name of this first field. - fi := fields[i] - name := fi.name - for advance = 1; i+advance < len(fields); advance++ { - fj := fields[i+advance] - if fj.name != name { - break - } - } - if advance == 1 { // Only one field with this name - out = append(out, fi) - continue - } - dominant, ok := dominantField(fields[i : i+advance]) - if ok { - out = append(out, dominant) - } - } - - fields = out - sort.Sort(byIndex(fields)) - - return fields -} - -// dominantField looks through the fields, all of which are known to -// have the same name, to find the single field that dominates the -// others using Go's embedding rules, modified by the presence of -// JSON tags. If there are multiple top-level fields, the boolean -// will be false: This condition is an error in Go and we skip all -// the fields. -func dominantField(fields []field) (field, bool) { - // The fields are sorted in increasing index-length order. The winner - // must therefore be one with the shortest index length. Drop all - // longer entries, which is easy: just truncate the slice. - length := len(fields[0].index) - tagged := -1 // Index of first tagged field. - for i, f := range fields { - if len(f.index) > length { - fields = fields[:i] - break - } - if f.tag { - if tagged >= 0 { - // Multiple tagged fields at the same level: conflict. - // Return no field. - return field{}, false - } - tagged = i - } - } - if tagged >= 0 { - return fields[tagged], true - } - // All remaining fields have the same length. If there's more than one, - // we have a conflict (two fields named "X" at the same level) and we - // return no field. 
- if len(fields) > 1 { - return field{}, false - } - return fields[0], true -} - -var fieldCache struct { - sync.RWMutex - m map[reflect.Type][]field -} - -// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. -func cachedTypeFields(t reflect.Type) []field { - fieldCache.RLock() - f := fieldCache.m[t] - fieldCache.RUnlock() - if f != nil { - return f - } - - // Compute fields without lock. - // Might duplicate effort but won't hold other computations back. - f = typeFields(t) - if f == nil { - f = []field{} - } - - fieldCache.Lock() - if fieldCache.m == nil { - fieldCache.m = map[reflect.Type][]field{} - } - fieldCache.m[t] = f - fieldCache.Unlock() - return f -} - -func isValidTag(s string) bool { - if s == "" { - return false - } - for _, c := range s { - switch { - case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): - // Backslash and quote chars are reserved, but - // otherwise any punctuation chars are allowed - // in a tag name. - default: - if !unicode.IsLetter(c) && !unicode.IsDigit(c) { - return false - } - } - } - return true -} - -const ( - caseMask = ^byte(0x20) // Mask to ignore case in ASCII. - kelvin = '\u212a' - smallLongEss = '\u017f' -) - -// foldFunc returns one of four different case folding equivalence -// functions, from most general (and slow) to fastest: -// -// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 -// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') -// 3) asciiEqualFold, no special, but includes non-letters (including _) -// 4) simpleLetterEqualFold, no specials, no non-letters. -// -// The letters S and K are special because they map to 3 runes, not just 2: -// * S maps to s and to U+017F 'ſ' Latin small letter long s -// * k maps to K and to U+212A 'K' Kelvin sign -// See http://play.golang.org/p/tTxjOc0OGo -// -// The returned function is specialized for matching against s and -// should only be given s. It's not curried for performance reasons. -func foldFunc(s []byte) func(s, t []byte) bool { - nonLetter := false - special := false // special letter - for _, b := range s { - if b >= utf8.RuneSelf { - return bytes.EqualFold - } - upper := b & caseMask - if upper < 'A' || upper > 'Z' { - nonLetter = true - } else if upper == 'K' || upper == 'S' { - // See above for why these letters are special. - special = true - } - } - if special { - return equalFoldRight - } - if nonLetter { - return asciiEqualFold - } - return simpleLetterEqualFold -} - -// equalFoldRight is a specialization of bytes.EqualFold when s is -// known to be all ASCII (including punctuation), but contains an 's', -// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. -// See comments on foldFunc. -func equalFoldRight(s, t []byte) bool { - for _, sb := range s { - if len(t) == 0 { - return false - } - tb := t[0] - if tb < utf8.RuneSelf { - if sb != tb { - sbUpper := sb & caseMask - if 'A' <= sbUpper && sbUpper <= 'Z' { - if sbUpper != tb&caseMask { - return false - } - } else { - return false - } - } - t = t[1:] - continue - } - // sb is ASCII and t is not. t must be either kelvin - // sign or long s; sb must be s, S, k, or K. 
- tr, size := utf8.DecodeRune(t) - switch sb { - case 's', 'S': - if tr != smallLongEss { - return false - } - case 'k', 'K': - if tr != kelvin { - return false - } - default: - return false - } - t = t[size:] - - } - if len(t) > 0 { - return false - } - return true -} - -// asciiEqualFold is a specialization of bytes.EqualFold for use when -// s is all ASCII (but may contain non-letters) and contains no -// special-folding letters. -// See comments on foldFunc. -func asciiEqualFold(s, t []byte) bool { - if len(s) != len(t) { - return false - } - for i, sb := range s { - tb := t[i] - if sb == tb { - continue - } - if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { - if sb&caseMask != tb&caseMask { - return false - } - } else { - return false - } - } - return true -} - -// simpleLetterEqualFold is a specialization of bytes.EqualFold for -// use when s is all ASCII letters (no underscores, etc) and also -// doesn't contain 'k', 'K', 's', or 'S'. -// See comments on foldFunc. -func simpleLetterEqualFold(s, t []byte) bool { - if len(s) != len(t) { - return false - } - for i, b := range s { - if b&caseMask != t[i]&caseMask { - return false - } - } - return true -} - -// tagOptions is the string following a comma in a struct field's "json" -// tag, or the empty string. It does not include the leading comma. -type tagOptions string - -// parseTag splits a struct field's json tag into its name and -// comma-separated options. -func parseTag(tag string) (string, tagOptions) { - if idx := strings.Index(tag, ","); idx != -1 { - return tag[:idx], tagOptions(tag[idx+1:]) - } - return tag, tagOptions("") -} - -// Contains reports whether a comma-separated list of options -// contains a particular substr flag. substr must be surrounded by a -// string boundary or commas. 
-func (o tagOptions) Contains(optionName string) bool { - if len(o) == 0 { - return false - } - s := string(o) - for s != "" { - var next string - i := strings.Index(s, ",") - if i >= 0 { - s, next = s[:i], s[i+1:] - } - if s == optionName { - return true - } - s = next - } - return false -} diff --git a/vendor/k8s.io/client-go/pkg/version/base.go b/vendor/k8s.io/client-go/pkg/version/base.go index 3d01012c..c9dae68b 100644 --- a/vendor/k8s.io/client-go/pkg/version/base.go +++ b/vendor/k8s.io/client-go/pkg/version/base.go @@ -3,8 +3,8 @@ package version var ( gitMajor = "1" gitMinor = "12" - gitVersion = "v1.12.1-lite5" - gitCommit = "720ebdc505688ceea8c452397d80a917a9e96bcd" + gitVersion = "v1.12.1-lite7" + gitCommit = "b5d55f02488df0daa1399df41777aeeeaa36eabb" gitTreeState = "clean" - buildDate = "2018-10-24T05:26+00:00Z" + buildDate = "2018-10-24T23:26+00:00Z" ) diff --git a/vendor/k8s.io/client-go/tools/record/BUILD b/vendor/k8s.io/client-go/tools/record/BUILD deleted file mode 100644 index fc1eaf2e..00000000 --- a/vendor/k8s.io/client-go/tools/record/BUILD +++ /dev/null @@ -1,69 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_test( - name = "go_default_test", - srcs = [ - "event_test.go", - "events_cache_test.go", - ], - embed = [":go_default_library"], - deps = [ - "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", - "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//staging/src/k8s.io/client-go/rest:go_default_library", - "//staging/src/k8s.io/client-go/tools/reference:go_default_library", - ], -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "event.go", - "events_cache.go", - "fake.go", - ], - importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/tools/record", - importpath = "k8s.io/client-go/tools/record", - deps = [ - "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", - "//staging/src/k8s.io/client-go/rest:go_default_library", - "//staging/src/k8s.io/client-go/tools/reference:go_default_library", - "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/github.com/golang/groupcache/lru:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], 
-) diff --git a/vendor/k8s.io/client-go/tools/record/OWNERS b/vendor/k8s.io/client-go/tools/record/OWNERS deleted file mode 100755 index 4dd54bbc..00000000 --- a/vendor/k8s.io/client-go/tools/record/OWNERS +++ /dev/null @@ -1,27 +0,0 @@ -reviewers: -- lavalamp -- smarterclayton -- wojtek-t -- deads2k -- derekwaynecarr -- caesarxuchao -- vishh -- mikedanese -- liggitt -- nikhiljindal -- erictune -- pmorie -- dchen1107 -- saad-ali -- luxas -- yifan-gu -- eparis -- mwielgus -- timothysc -- jsafrane -- dims -- krousey -- a-robinson -- aveshagarwal -- resouer -- cjcullen diff --git a/vendor/k8s.io/client-go/tools/record/doc.go b/vendor/k8s.io/client-go/tools/record/doc.go deleted file mode 100644 index 657ddecb..00000000 --- a/vendor/k8s.io/client-go/tools/record/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package record has all client logic for recording and reporting events. -package record // import "k8s.io/client-go/tools/record" diff --git a/vendor/k8s.io/client-go/tools/record/event.go b/vendor/k8s.io/client-go/tools/record/event.go deleted file mode 100644 index 168dfa80..00000000 --- a/vendor/k8s.io/client-go/tools/record/event.go +++ /dev/null @@ -1,322 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package record - -import ( - "fmt" - "math/rand" - "time" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/clock" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/watch" - restclient "k8s.io/client-go/rest" - ref "k8s.io/client-go/tools/reference" - - "net/http" - - "github.com/golang/glog" -) - -const maxTriesPerEvent = 12 - -var defaultSleepDuration = 10 * time.Second - -const maxQueuedEvents = 1000 - -// EventSink knows how to store events (client.Client implements it.) -// EventSink must respect the namespace that will be embedded in 'event'. -// It is assumed that EventSink will return the same sorts of errors as -// pkg/client's REST client. -type EventSink interface { - Create(event *v1.Event) (*v1.Event, error) - Update(event *v1.Event) (*v1.Event, error) - Patch(oldEvent *v1.Event, data []byte) (*v1.Event, error) -} - -// EventRecorder knows how to record events on behalf of an EventSource. -type EventRecorder interface { - // Event constructs an event from the given information and puts it in the queue for sending. 
- // 'object' is the object this event is about. Event will make a reference to it, or you may also
- // pass a reference to the object directly.
- // 'eventtype' is the type of this event, and can be one of Normal or Warning. New types may be added in the future.
- // 'reason' is the reason this event is generated. 'reason' should be short and unique; it
- // should be in UpperCamelCase format (starting with a capital letter). "reason" will be used
- // to automate handling of events, so imagine people writing switch statements to handle them.
- // You want to make that easy.
- // 'message' is intended to be human readable.
- //
- // The resulting event will be created in the same namespace as the reference object.
- Event(object runtime.Object, eventtype, reason, message string)
-
- // Eventf is just like Event, but with Sprintf for the message field.
- Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{})
-
- // PastEventf is just like Eventf, but with an option to specify the event's 'timestamp' field.
- PastEventf(object runtime.Object, timestamp metav1.Time, eventtype, reason, messageFmt string, args ...interface{})
-
- // AnnotatedEventf is just like Eventf, but with annotations attached
- AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{})
-}
-
-// EventBroadcaster knows how to receive events and send them to any EventSink, watcher, or log.
-type EventBroadcaster interface {
- // StartEventWatcher starts sending events received from this EventBroadcaster to the given
- // event handler function. The return value can be ignored or used to stop recording, if
- // desired.
- StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface
-
- // StartRecordingToSink starts sending events received from this EventBroadcaster to the given
- // sink. The return value can be ignored or used to stop recording, if desired.
- StartRecordingToSink(sink EventSink) watch.Interface
-
- // StartLogging starts sending events received from this EventBroadcaster to the given logging
- // function. The return value can be ignored or used to stop recording, if desired.
- StartLogging(logf func(format string, args ...interface{})) watch.Interface
-
- // NewRecorder returns an EventRecorder that can be used to send events to this EventBroadcaster
- // with the event source set to the given event source.
- NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder
-}
-
-// NewBroadcaster creates a new event broadcaster.
-func NewBroadcaster() EventBroadcaster {
- return &eventBroadcasterImpl{watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), defaultSleepDuration}
-}
-
-func NewBroadcasterForTests(sleepDuration time.Duration) EventBroadcaster {
- return &eventBroadcasterImpl{watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), sleepDuration}
-}
-
-type eventBroadcasterImpl struct {
- *watch.Broadcaster
- sleepDuration time.Duration
-}
-
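A downstream controller wires these pieces together in a fairly standard pattern. A minimal sketch; the component name and pod object are hypothetical, and scheme.Scheme is the clientset scheme from k8s.io/client-go/kubernetes/scheme:

    package main

    import (
    	"k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes/scheme"
    	"k8s.io/client-go/tools/record"
    )

    func main() {
    	broadcaster := record.NewBroadcaster()
    	// Mirror every event into a logging function; Stop() tears the watcher down.
    	watcher := broadcaster.StartLogging(func(format string, args ...interface{}) {})
    	defer watcher.Stop()

    	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "example-controller"})

    	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "web-0", Namespace: "default"}}
    	// Delivery is asynchronous via the broadcaster's internal watch channel.
    	recorder.Event(pod, v1.EventTypeNormal, "Scheduled", "Successfully assigned web-0 to node-1")
    }

-// StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink.
-// The return value can be ignored or used to stop recording, if desired.
-// TODO: make me an object with parameterizable queue length and retry interval
-func (eventBroadcaster *eventBroadcasterImpl) StartRecordingToSink(sink EventSink) watch.Interface {
- // The default math/rand package functions aren't thread safe, so create a
- // new Rand object for each StartRecording call.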
- randGen := rand.New(rand.NewSource(time.Now().UnixNano())) - eventCorrelator := NewEventCorrelator(clock.RealClock{}) - return eventBroadcaster.StartEventWatcher( - func(event *v1.Event) { - recordToSink(sink, event, eventCorrelator, randGen, eventBroadcaster.sleepDuration) - }) -} - -func recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrelator, randGen *rand.Rand, sleepDuration time.Duration) { - // Make a copy before modification, because there could be multiple listeners. - // Events are safe to copy like this. - eventCopy := *event - event = &eventCopy - result, err := eventCorrelator.EventCorrelate(event) - if err != nil { - utilruntime.HandleError(err) - } - if result.Skip { - return - } - tries := 0 - for { - if recordEvent(sink, result.Event, result.Patch, result.Event.Count > 1, eventCorrelator) { - break - } - tries++ - if tries >= maxTriesPerEvent { - glog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event) - break - } - // Randomize the first sleep so that various clients won't all be - // synced up if the master goes down. - if tries == 1 { - time.Sleep(time.Duration(float64(sleepDuration) * randGen.Float64())) - } else { - time.Sleep(sleepDuration) - } - } -} - -func isKeyNotFoundError(err error) bool { - statusErr, _ := err.(*errors.StatusError) - - if statusErr != nil && statusErr.Status().Code == http.StatusNotFound { - return true - } - - return false -} - -// recordEvent attempts to write event to a sink. It returns true if the event -// was successfully recorded or discarded, false if it should be retried. -// If updateExistingEvent is false, it creates a new event, otherwise it updates -// existing event. -func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEvent bool, eventCorrelator *EventCorrelator) bool { - var newEvent *v1.Event - var err error - if updateExistingEvent { - newEvent, err = sink.Patch(event, patch) - } - // Update can fail because the event may have been removed and it no longer exists. - if !updateExistingEvent || (updateExistingEvent && isKeyNotFoundError(err)) { - // Making sure that ResourceVersion is empty on creation - event.ResourceVersion = "" - newEvent, err = sink.Create(event) - } - if err == nil { - // we need to update our event correlator with the server returned state to handle name/resourceversion - eventCorrelator.UpdateState(newEvent) - return true - } - - // If we can't contact the server, then hold everything while we keep trying. - // Otherwise, something about the event is malformed and we should abandon it. - switch err.(type) { - case *restclient.RequestConstructionError: - // We will construct the request the same next time, so don't keep trying. - glog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err) - return true - case *errors.StatusError: - if errors.IsAlreadyExists(err) { - glog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err) - } else { - glog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err) - } - return true - case *errors.UnexpectedObjectError: - // We don't expect this; it implies the server's response didn't match a - // known pattern. Go ahead and retry. - default: - // This case includes actual http transport errors. Go ahead and retry. - } - glog.Errorf("Unable to write event: '%v' (may retry after sleeping)", err) - return false -} - -// StartLogging starts sending events received from this EventBroadcaster to the given logging function. 
-// The return value can be ignored or used to stop recording, if desired. -func (eventBroadcaster *eventBroadcasterImpl) StartLogging(logf func(format string, args ...interface{})) watch.Interface { - return eventBroadcaster.StartEventWatcher( - func(e *v1.Event) { - logf("Event(%#v): type: '%v' reason: '%v' %v", e.InvolvedObject, e.Type, e.Reason, e.Message) - }) -} - -// StartEventWatcher starts sending events received from this EventBroadcaster to the given event handler function. -// The return value can be ignored or used to stop recording, if desired. -func (eventBroadcaster *eventBroadcasterImpl) StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface { - watcher := eventBroadcaster.Watch() - go func() { - defer utilruntime.HandleCrash() - for watchEvent := range watcher.ResultChan() { - event, ok := watchEvent.Object.(*v1.Event) - if !ok { - // This is all local, so there's no reason this should - // ever happen. - continue - } - eventHandler(event) - } - }() - return watcher -} - -// NewRecorder returns an EventRecorder that records events with the given event source. -func (eventBroadcaster *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder { - return &recorderImpl{scheme, source, eventBroadcaster.Broadcaster, clock.RealClock{}} -} - -type recorderImpl struct { - scheme *runtime.Scheme - source v1.EventSource - *watch.Broadcaster - clock clock.Clock -} - -func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations map[string]string, timestamp metav1.Time, eventtype, reason, message string) { - ref, err := ref.GetReference(recorder.scheme, object) - if err != nil { - glog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message) - return - } - - if !validateEventType(eventtype) { - glog.Errorf("Unsupported event type: '%v'", eventtype) - return - } - - event := recorder.makeEvent(ref, annotations, eventtype, reason, message) - event.Source = recorder.source - - go func() { - // NOTE: events should be a non-blocking operation - defer utilruntime.HandleCrash() - recorder.Action(watch.Added, event) - }() -} - -func validateEventType(eventtype string) bool { - switch eventtype { - case v1.EventTypeNormal, v1.EventTypeWarning: - return true - } - return false -} - -func (recorder *recorderImpl) Event(object runtime.Object, eventtype, reason, message string) { - recorder.generateEvent(object, nil, metav1.Now(), eventtype, reason, message) -} - -func (recorder *recorderImpl) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { - recorder.Event(object, eventtype, reason, fmt.Sprintf(messageFmt, args...)) -} - -func (recorder *recorderImpl) PastEventf(object runtime.Object, timestamp metav1.Time, eventtype, reason, messageFmt string, args ...interface{}) { - recorder.generateEvent(object, nil, timestamp, eventtype, reason, fmt.Sprintf(messageFmt, args...)) -} - -func (recorder *recorderImpl) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { - recorder.generateEvent(object, annotations, metav1.Now(), eventtype, reason, fmt.Sprintf(messageFmt, args...)) -} - -func (recorder *recorderImpl) makeEvent(ref *v1.ObjectReference, annotations map[string]string, eventtype, reason, message string) *v1.Event { - t := metav1.Time{Time: recorder.clock.Now()} - namespace := ref.Namespace - if namespace == "" { - namespace = 
metav1.NamespaceDefault - } - return &v1.Event{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()), - Namespace: namespace, - Annotations: annotations, - }, - InvolvedObject: *ref, - Reason: reason, - Message: message, - FirstTimestamp: t, - LastTimestamp: t, - Count: 1, - Type: eventtype, - } -} diff --git a/vendor/k8s.io/client-go/tools/record/events_cache.go b/vendor/k8s.io/client-go/tools/record/events_cache.go deleted file mode 100644 index a42084f3..00000000 --- a/vendor/k8s.io/client-go/tools/record/events_cache.go +++ /dev/null @@ -1,462 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package record - -import ( - "encoding/json" - "fmt" - "strings" - "sync" - "time" - - "github.com/golang/groupcache/lru" - - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/clock" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/strategicpatch" - "k8s.io/client-go/util/flowcontrol" -) - -const ( - maxLruCacheEntries = 4096 - - // if we see the same event that varies only by message - // more than 10 times in a 10 minute period, aggregate the event - defaultAggregateMaxEvents = 10 - defaultAggregateIntervalInSeconds = 600 - - // by default, allow a source to send 25 events about an object - // but control the refill rate to 1 new event every 5 minutes - // this helps control the long-tail of events for things that are always - // unhealthy - defaultSpamBurst = 25 - defaultSpamQPS = 1. / 300. -) - -// getEventKey builds unique event key based on source, involvedObject, reason, message -func getEventKey(event *v1.Event) string { - return strings.Join([]string{ - event.Source.Component, - event.Source.Host, - event.InvolvedObject.Kind, - event.InvolvedObject.Namespace, - event.InvolvedObject.Name, - event.InvolvedObject.FieldPath, - string(event.InvolvedObject.UID), - event.InvolvedObject.APIVersion, - event.Type, - event.Reason, - event.Message, - }, - "") -} - -// getSpamKey builds unique event key based on source, involvedObject -func getSpamKey(event *v1.Event) string { - return strings.Join([]string{ - event.Source.Component, - event.Source.Host, - event.InvolvedObject.Kind, - event.InvolvedObject.Namespace, - event.InvolvedObject.Name, - string(event.InvolvedObject.UID), - event.InvolvedObject.APIVersion, - }, - "") -} - -// EventFilterFunc is a function that returns true if the event should be skipped -type EventFilterFunc func(event *v1.Event) bool - -// EventSourceObjectSpamFilter is responsible for throttling -// the amount of events a source and object can produce. 
-type EventSourceObjectSpamFilter struct {
- sync.RWMutex
-
- // the cache that manages last synced state
- cache *lru.Cache
-
- // burst is the number of events we allow per source + object
- burst int
-
- // qps is the refill rate of the token bucket in queries per second
- qps float32
-
- // clock is used to allow for testing over a time interval
- clock clock.Clock
-}
-
-// NewEventSourceObjectSpamFilter returns a filter that allows burst events from a source about an object, refilled at the specified qps.
-func NewEventSourceObjectSpamFilter(lruCacheSize, burst int, qps float32, clock clock.Clock) *EventSourceObjectSpamFilter {
- return &EventSourceObjectSpamFilter{
- cache: lru.New(lruCacheSize),
- burst: burst,
- qps: qps,
- clock: clock,
- }
-}
-
-// spamRecord holds data used to perform spam filtering decisions.
-type spamRecord struct {
- // rateLimiter controls the rate of events about this object
- rateLimiter flowcontrol.RateLimiter
-}
-
-// Filter reports whether a given source+object pair has exceeded its allowed event rate.
-func (f *EventSourceObjectSpamFilter) Filter(event *v1.Event) bool {
- var record spamRecord
-
- // controls our cached information about this event (source+object)
- eventKey := getSpamKey(event)
-
- // do we have a record of similar events in our cache?
- f.Lock()
- defer f.Unlock()
- value, found := f.cache.Get(eventKey)
- if found {
- record = value.(spamRecord)
- }
-
- // verify we have a rate limiter for this record
- if record.rateLimiter == nil {
- record.rateLimiter = flowcontrol.NewTokenBucketRateLimiterWithClock(f.qps, f.burst, f.clock)
- }
-
- // ensure we have available rate
- filter := !record.rateLimiter.TryAccept()
-
- // update the cache
- f.cache.Add(eventKey, record)
-
- return filter
-}
-
-// EventAggregatorKeyFunc is responsible for grouping events for aggregation.
-// It returns a tuple of the following:
-// aggregateKey - key that identifies the aggregate group this event is bucketed into
-// localKey - key that distinguishes this event within the aggregate group
-type EventAggregatorKeyFunc func(event *v1.Event) (aggregateKey string, localKey string)
-
-// EventAggregatorByReasonFunc aggregates events by exact match on event.Source, event.InvolvedObject, event.Type and event.Reason
-func EventAggregatorByReasonFunc(event *v1.Event) (string, string) {
- return strings.Join([]string{
- event.Source.Component,
- event.Source.Host,
- event.InvolvedObject.Kind,
- event.InvolvedObject.Namespace,
- event.InvolvedObject.Name,
- string(event.InvolvedObject.UID),
- event.InvolvedObject.APIVersion,
- event.Type,
- event.Reason,
- },
- ""), event.Message
-}
-
-// EventAggregatorMessageFunc is responsible for producing an aggregation message
-type EventAggregatorMessageFunc func(event *v1.Event) string
-
-// EventAggregatorByReasonMessageFunc returns an aggregate message by prefixing the incoming message
-func EventAggregatorByReasonMessageFunc(event *v1.Event) string {
- return "(combined from similar events): " + event.Message
-}
-
-// EventAggregator identifies similar events and aggregates them into a single event
-type EventAggregator struct {
- sync.RWMutex
-
- // The cache that manages aggregation state
- cache *lru.Cache
-
- // The function that groups events for aggregation
- keyFunc EventAggregatorKeyFunc
-
- // The function that generates a message for an aggregate event
- messageFunc EventAggregatorMessageFunc
-
- // The maximum number of events in the specified interval before aggregation occurs
- maxEvents uint
-
- // The amount of time in seconds that must transpire since the last
occurrence of a similar event before it's considered new - maxIntervalInSeconds uint - - // clock is used to allow for testing over a time interval - clock clock.Clock -} - -// NewEventAggregator returns a new instance of an EventAggregator -func NewEventAggregator(lruCacheSize int, keyFunc EventAggregatorKeyFunc, messageFunc EventAggregatorMessageFunc, - maxEvents int, maxIntervalInSeconds int, clock clock.Clock) *EventAggregator { - return &EventAggregator{ - cache: lru.New(lruCacheSize), - keyFunc: keyFunc, - messageFunc: messageFunc, - maxEvents: uint(maxEvents), - maxIntervalInSeconds: uint(maxIntervalInSeconds), - clock: clock, - } -} - -// aggregateRecord holds data used to perform aggregation decisions -type aggregateRecord struct { - // we track the number of unique local keys we have seen in the aggregate set to know when to actually aggregate - // if the size of this set exceeds the max, we know we need to aggregate - localKeys sets.String - // The last time at which the aggregate was recorded - lastTimestamp metav1.Time -} - -// EventAggregate checks if a similar event has been seen according to the -// aggregation configuration (max events, max interval, etc) and returns: -// -// - The (potentially modified) event that should be created -// - The cache key for the event, for correlation purposes. This will be set to -// the full key for normal events, and to the result of -// EventAggregatorMessageFunc for aggregate events. -func (e *EventAggregator) EventAggregate(newEvent *v1.Event) (*v1.Event, string) { - now := metav1.NewTime(e.clock.Now()) - var record aggregateRecord - // eventKey is the full cache key for this event - eventKey := getEventKey(newEvent) - // aggregateKey is for the aggregate event, if one is needed. - aggregateKey, localKey := e.keyFunc(newEvent) - - // Do we have a record of similar events in our cache? - e.Lock() - defer e.Unlock() - value, found := e.cache.Get(aggregateKey) - if found { - record = value.(aggregateRecord) - } - - // Is the previous record too old? If so, make a fresh one. Note: if we didn't - // find a similar record, its lastTimestamp will be the zero value, so we - // create a new one in that case. - maxInterval := time.Duration(e.maxIntervalInSeconds) * time.Second - interval := now.Time.Sub(record.lastTimestamp.Time) - if interval > maxInterval { - record = aggregateRecord{localKeys: sets.NewString()} - } - - // Write the new event into the aggregation record and put it on the cache - record.localKeys.Insert(localKey) - record.lastTimestamp = now - e.cache.Add(aggregateKey, record) - - // If we are not yet over the threshold for unique events, don't correlate them - if uint(record.localKeys.Len()) < e.maxEvents { - return newEvent, eventKey - } - - // do not grow our local key set any larger than max - record.localKeys.PopAny() - - // create a new aggregate event, and return the aggregateKey as the cache key - // (so that it can be overwritten.) - eventCopy := &v1.Event{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%v.%x", newEvent.InvolvedObject.Name, now.UnixNano()), - Namespace: newEvent.Namespace, - }, - Count: 1, - FirstTimestamp: now, - InvolvedObject: newEvent.InvolvedObject, - LastTimestamp: now, - Message: e.messageFunc(newEvent), - Type: newEvent.Type, - Reason: newEvent.Reason, - Source: newEvent.Source, - } - return eventCopy, aggregateKey -} - -// eventLog records data about when an event was observed -type eventLog struct { - // The number of times the event has occurred since first occurrence. 
- count uint - - // The time at which the event was first recorded. - firstTimestamp metav1.Time - - // The unique name of the first occurrence of this event - name string - - // Resource version returned from previous interaction with server - resourceVersion string -} - -// eventLogger logs occurrences of an event -type eventLogger struct { - sync.RWMutex - cache *lru.Cache - clock clock.Clock -} - -// newEventLogger observes events and counts their frequencies -func newEventLogger(lruCacheEntries int, clock clock.Clock) *eventLogger { - return &eventLogger{cache: lru.New(lruCacheEntries), clock: clock} -} - -// eventObserve records an event, or updates an existing one if key is a cache hit -func (e *eventLogger) eventObserve(newEvent *v1.Event, key string) (*v1.Event, []byte, error) { - var ( - patch []byte - err error - ) - eventCopy := *newEvent - event := &eventCopy - - e.Lock() - defer e.Unlock() - - // Check if there is an existing event we should update - lastObservation := e.lastEventObservationFromCache(key) - - // If we found a result, prepare a patch - if lastObservation.count > 0 { - // update the event based on the last observation so patch will work as desired - event.Name = lastObservation.name - event.ResourceVersion = lastObservation.resourceVersion - event.FirstTimestamp = lastObservation.firstTimestamp - event.Count = int32(lastObservation.count) + 1 - - eventCopy2 := *event - eventCopy2.Count = 0 - eventCopy2.LastTimestamp = metav1.NewTime(time.Unix(0, 0)) - eventCopy2.Message = "" - - newData, _ := json.Marshal(event) - oldData, _ := json.Marshal(eventCopy2) - patch, err = strategicpatch.CreateTwoWayMergePatch(oldData, newData, event) - } - - // record our new observation - e.cache.Add( - key, - eventLog{ - count: uint(event.Count), - firstTimestamp: event.FirstTimestamp, - name: event.Name, - resourceVersion: event.ResourceVersion, - }, - ) - return event, patch, err -} - -// updateState updates its internal tracking information based on latest server state -func (e *eventLogger) updateState(event *v1.Event) { - key := getEventKey(event) - e.Lock() - defer e.Unlock() - // record our new observation - e.cache.Add( - key, - eventLog{ - count: uint(event.Count), - firstTimestamp: event.FirstTimestamp, - name: event.Name, - resourceVersion: event.ResourceVersion, - }, - ) -} - -// lastEventObservationFromCache returns the event from the cache, reads must be protected via external lock -func (e *eventLogger) lastEventObservationFromCache(key string) eventLog { - value, ok := e.cache.Get(key) - if ok { - observationValue, ok := value.(eventLog) - if ok { - return observationValue - } - } - return eventLog{} -} - -// EventCorrelator processes all incoming events and performs analysis to avoid overwhelming the system. It can filter all -// incoming events to see if the event should be filtered from further processing. It can aggregate similar events that occur -// frequently to protect the system from spamming events that are difficult for users to distinguish. It performs de-duplication -// to ensure events that are observed multiple times are compacted into a single event with increasing counts. 
-type EventCorrelator struct {
- // the function to filter the event
- filterFunc EventFilterFunc
- // the object that performs event aggregation
- aggregator *EventAggregator
- // the object that observes events as they come through
- logger *eventLogger
-}
-
-// EventCorrelateResult is the result of correlating an event
-type EventCorrelateResult struct {
- // the event after correlation
- Event *v1.Event
- // if provided, perform a strategic patch when updating the record on the server
- Patch []byte
- // if true, do no further processing of the event
- Skip bool
-}
-
-// NewEventCorrelator returns an EventCorrelator configured with default values.
-//
-// The EventCorrelator is responsible for event filtering, aggregating, and counting
-// prior to interacting with the API server to record the event.
-//
-// The default behavior is as follows:
-// * Aggregation is performed if a similar event is recorded 10 times
-// in a 10 minute rolling interval. A similar event is an event that varies only by
-// the Event.Message field. Rather than recording the precise event, aggregation
-// will create a new event whose message reports that it has combined events with
-// the same reason.
-// * Events are incrementally counted if the exact same event is encountered multiple
-// times.
-// * A source may burst 25 events about an object, but has a refill rate budget
-// per object of 1 event every 5 minutes to control the long-tail of spam.
-func NewEventCorrelator(clock clock.Clock) *EventCorrelator {
- cacheSize := maxLruCacheEntries
- spamFilter := NewEventSourceObjectSpamFilter(cacheSize, defaultSpamBurst, defaultSpamQPS, clock)
- return &EventCorrelator{
- filterFunc: spamFilter.Filter,
- aggregator: NewEventAggregator(
- cacheSize,
- EventAggregatorByReasonFunc,
- EventAggregatorByReasonMessageFunc,
- defaultAggregateMaxEvents,
- defaultAggregateIntervalInSeconds,
- clock),
-
- logger: newEventLogger(cacheSize, clock),
- }
-}
-
-// EventCorrelate filters, aggregates, counts, and de-duplicates all incoming events
-func (c *EventCorrelator) EventCorrelate(newEvent *v1.Event) (*EventCorrelateResult, error) {
- if newEvent == nil {
- return nil, fmt.Errorf("event is nil")
- }
- aggregateEvent, ckey := c.aggregator.EventAggregate(newEvent)
- observedEvent, patch, err := c.logger.eventObserve(aggregateEvent, ckey)
- if c.filterFunc(observedEvent) {
- return &EventCorrelateResult{Skip: true}, nil
- }
- return &EventCorrelateResult{Event: observedEvent, Patch: patch}, err
-}
-
-// UpdateState based on the latest observed state from server
-func (c *EventCorrelator) UpdateState(event *v1.Event) {
- c.logger.updateState(event)
-} diff --git a/vendor/k8s.io/client-go/tools/record/fake.go b/vendor/k8s.io/client-go/tools/record/fake.go deleted file mode 100644 index 6e031daa..00000000 --- a/vendor/k8s.io/client-go/tools/record/fake.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.
-*/
-
-package record
-
-import (
- "fmt"
-
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
-)
-
-// FakeRecorder is used as a fake during tests. It is thread safe. It is usable
-// when created manually rather than by NewFakeRecorder; in that case, however,
-// all events may be thrown away.
-type FakeRecorder struct {
- Events chan string
-}
-
-func (f *FakeRecorder) Event(object runtime.Object, eventtype, reason, message string) {
- if f.Events != nil {
- f.Events <- fmt.Sprintf("%s %s %s", eventtype, reason, message)
- }
-}
-
-func (f *FakeRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
- if f.Events != nil {
- f.Events <- fmt.Sprintf(eventtype+" "+reason+" "+messageFmt, args...)
- }
-}
-
-func (f *FakeRecorder) PastEventf(object runtime.Object, timestamp metav1.Time, eventtype, reason, messageFmt string, args ...interface{}) {
-}
-
-func (f *FakeRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) {
- f.Eventf(object, eventtype, reason, messageFmt, args...)
-}
-
-// NewFakeRecorder creates a new fake event recorder whose event channel has a
-// buffer of the given size.
-func NewFakeRecorder(bufferSize int) *FakeRecorder {
- return &FakeRecorder{
- Events: make(chan string, bufferSize),
- }
-}
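Since FakeRecorder exposes its buffered channel directly, test assertions stay simple. A hedged sketch of typical test usage; the test name and expected strings are illustrative:

    package record_test

    import (
    	"strings"
    	"testing"

    	"k8s.io/client-go/tools/record"
    )

    func TestEmitsScheduledEvent(t *testing.T) {
    	recorder := record.NewFakeRecorder(10)

    	// The code under test would receive this as an EventRecorder;
    	// FakeRecorder ignores the object argument, so nil is acceptable here.
    	recorder.Event(nil, "Normal", "Scheduled", "assigned to node-1")

    	select {
    	case got := <-recorder.Events:
    		if !strings.Contains(got, "Scheduled") {
    			t.Fatalf("unexpected event: %q", got)
    		}
    	default:
    		t.Fatal("expected an event to be recorded")
    	}
    }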