Merge pull request #82847 from apelisse/improve-managed-fields

Improve fieldmanager tests and benchmarks
Kubernetes Prow Robot authored 2019-09-26 14:40:06 -07:00; committed by GitHub
commit 112ed56f1a
GPG Key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
6 changed files with 7575 additions and 257 deletions
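
For context, the refactor threads a GroupVersionKind through NewTestFieldManager and feeds the tests and benchmarks from checked-in YAML fixtures (pod.yaml, node.yaml, endpoints.yaml) instead of hand-written inline JSON. A minimal sketch of a test written against the reworked helpers follows; the test name is hypothetical, while NewTestFieldManager, getObjectBytes, Apply and ManagedFields are the package-local helpers visible in the diff below.

    package fieldmanager_test

    import (
        "testing"

        "k8s.io/apimachinery/pkg/runtime/schema"
    )

    // Sketch only, not part of this change: it leans on the test helpers
    // added in this PR and on the node.yaml fixture registered as bazel
    // test data in the BUILD hunk below.
    func TestApplyNodeFixture(t *testing.T) {
        f := NewTestFieldManager(schema.FromAPIVersionAndKind("v1", "Node"))

        // getObjectBytes reads a fixture from the test's working directory.
        nodeBytes := getObjectBytes("node.yaml")

        // Apply as a named manager; force=false so conflicts would surface.
        if err := f.Apply(nodeBytes, "fieldmanager_test", false); err != nil {
            t.Fatal(err)
        }

        // A successful apply should leave at least one managedFields entry.
        if len(f.ManagedFields()) == 0 {
            t.Fatal("expected managed fields to be recorded after apply")
        }
    }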


@ -48,6 +48,12 @@ go_test(
"fieldmanager_test.go",
"skipnonapplied_test.go",
],
data = [
"endpoints.yaml",
"node.yaml",
"pod.yaml",
"//api/openapi-spec",
],
embed = [":go_default_library"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
@ -57,6 +63,8 @@ go_test(
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/kube-openapi/pkg/util/proto:go_default_library",
"//vendor/k8s.io/kube-openapi/pkg/util/proto/testing:go_default_library",
"//vendor/sigs.k8s.io/yaml:go_default_library",
],
)

File diff suppressed because it is too large


@ -19,10 +19,14 @@ package fieldmanager_test
import (
"errors"
"fmt"
"io/ioutil"
"net/http"
"path/filepath"
"strings"
"testing"
"time"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -30,9 +34,17 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager"
"k8s.io/kube-openapi/pkg/util/proto"
prototesting "k8s.io/kube-openapi/pkg/util/proto/testing"
"sigs.k8s.io/yaml"
)
var fakeSchema = prototesting.Fake{
Path: filepath.Join(
strings.Repeat(".."+string(filepath.Separator), 8),
"api", "openapi-spec", "swagger.json"),
}
type fakeObjectConvertor struct{}
func (c *fakeObjectConvertor) Convert(in, out, context interface{}) error {
@ -54,34 +66,42 @@ func (d *fakeObjectDefaulter) Default(in runtime.Object) {}
type TestFieldManager struct {
fieldManager fieldmanager.FieldManager
emptyObj runtime.Object
liveObj runtime.Object
}
func NewTestFieldManager() TestFieldManager {
gv := schema.GroupVersion{
Group: "apps",
Version: "v1",
func NewTestFieldManager(gvk schema.GroupVersionKind) TestFieldManager {
d, err := fakeSchema.OpenAPISchema()
if err != nil {
panic(err)
}
m, err := proto.NewOpenAPIData(d)
if err != nil {
panic(err)
}
f, err := fieldmanager.NewCRDFieldManager(
nil,
f, err := fieldmanager.NewFieldManager(
m,
&fakeObjectConvertor{},
&fakeObjectDefaulter{},
gv,
gv,
true,
gvk.GroupVersion(),
gvk.GroupVersion(),
)
if err != nil {
panic(err)
}
live := &unstructured.Unstructured{}
live.SetKind(gvk.Kind)
live.SetAPIVersion(gvk.GroupVersion().String())
return TestFieldManager{
fieldManager: f,
liveObj: &unstructured.Unstructured{},
emptyObj: live,
liveObj: live.DeepCopyObject(),
}
}
func (f *TestFieldManager) Reset() {
f.liveObj = &unstructured.Unstructured{}
f.liveObj = f.emptyObj.DeepCopyObject()
}
func (f *TestFieldManager) Apply(obj []byte, manager string, force bool) error {
@ -109,9 +129,10 @@ func (f *TestFieldManager) ManagedFields() []metav1.ManagedFieldsEntry {
return accessor.GetManagedFields()
}
// TestUpdateApplyConflict tests that applying to an object, which wasn't created by apply, will give conflicts
// TestUpdateApplyConflict tests that applying to an object, which
// wasn't created by apply, will give conflicts
func TestUpdateApplyConflict(t *testing.T) {
f := NewTestFieldManager()
f := NewTestFieldManager(schema.FromAPIVersionAndKind("apps/v1", "Deployment"))
patch := []byte(`{
"apiVersion": "apps/v1",
@ -167,7 +188,7 @@ func TestUpdateApplyConflict(t *testing.T) {
}
func TestApplyStripsFields(t *testing.T) {
f := NewTestFieldManager()
f := NewTestFieldManager(schema.FromAPIVersionAndKind("apps/v1", "Deployment"))
newObj := &unstructured.Unstructured{
Object: map[string]interface{}{
@ -200,7 +221,7 @@ func TestApplyStripsFields(t *testing.T) {
}
func TestVersionCheck(t *testing.T) {
f := NewTestFieldManager()
f := NewTestFieldManager(schema.FromAPIVersionAndKind("apps/v1", "Deployment"))
// patch has 'apiVersion: apps/v1' and live version is apps/v1 -> no errors
err := f.Apply([]byte(`{
@ -213,7 +234,7 @@ func TestVersionCheck(t *testing.T) {
// patch has 'apiVersion: apps/v2' but live version is apps/v1 -> error
err = f.Apply([]byte(`{
"apiVersion": "apps/v2",
"apiVersion": "apps/v1beta1",
"kind": "Deployment",
}`), "fieldmanager_test", false)
if err == nil {
@ -231,10 +252,10 @@ func TestVersionCheck(t *testing.T) {
}
func TestApplyDoesNotStripLabels(t *testing.T) {
f := NewTestFieldManager()
f := NewTestFieldManager(schema.FromAPIVersionAndKind("v1", "Pod"))
err := f.Apply([]byte(`{
"apiVersion": "apps/v1",
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"labels": {
@ -251,241 +272,121 @@ func TestApplyDoesNotStripLabels(t *testing.T) {
}
}
func BenchmarkApplyNewObject(b *testing.B) {
f := NewTestFieldManager()
func getObjectBytes(file string) []byte {
s, err := ioutil.ReadFile(file)
if err != nil {
panic(err)
}
return s
}
b.ReportAllocs()
b.ResetTimer()
for n := 0; n < b.N; n++ {
err := f.Apply([]byte(`{
"apiVersion": "apps/v1",
"kind": "Pod",
"metadata": {
"name": "b",
"namespace": "b",
"creationTimestamp": "2016-05-19T09:59:00Z",
func TestApplyNewObject(t *testing.T) {
tests := []struct {
gvk schema.GroupVersionKind
obj []byte
}{
{
gvk: schema.FromAPIVersionAndKind("v1", "Pod"),
obj: getObjectBytes("pod.yaml"),
},
"map": {
"fieldA": 1,
"fieldB": 1,
"fieldC": 1,
"fieldD": 1,
"fieldE": 1,
"fieldF": 1,
"fieldG": 1,
"fieldH": 1,
"fieldI": 1,
"fieldJ": 1,
"fieldK": 1,
"fieldL": 1,
"fieldM": 1,
"fieldN": {
"fieldN": {
"fieldN": {
"fieldN": {
"fieldN": {
"value": true
},
},
},
},
},
}
}`), "fieldmanager_test", false)
if err != nil {
b.Fatal(err)
}
f.Reset()
{
gvk: schema.FromAPIVersionAndKind("v1", "Node"),
obj: getObjectBytes("node.yaml"),
},
{
gvk: schema.FromAPIVersionAndKind("v1", "Endpoints"),
obj: getObjectBytes("endpoints.yaml"),
},
}
for _, test := range tests {
t.Run(test.gvk.String(), func(t *testing.T) {
f := NewTestFieldManager(test.gvk)
if err := f.Apply(test.obj, "fieldmanager_test", false); err != nil {
t.Fatal(err)
}
})
}
}
func BenchmarkUpdateNewObject(b *testing.B) {
f := NewTestFieldManager()
y := `{
"apiVersion": "apps/v1",
"kind": "Deployment",
"metadata": {
"name": "b",
"namespace": "b",
"creationTimestamp": "2016-05-19T09:59:00Z",
func BenchmarkNewObject(b *testing.B) {
tests := []struct {
gvk schema.GroupVersionKind
obj []byte
}{
{
gvk: schema.FromAPIVersionAndKind("v1", "Pod"),
obj: getObjectBytes("pod.yaml"),
},
"map": {
"fieldA": 1,
"fieldB": 1,
"fieldC": 1,
"fieldD": 1,
"fieldE": 1,
"fieldF": 1,
"fieldG": 1,
"fieldH": 1,
"fieldI": 1,
"fieldJ": 1,
"fieldK": 1,
"fieldL": 1,
"fieldM": 1,
"fieldN": {
"fieldN": {
"fieldN": {
"fieldN": {
"fieldN": {
"value": true
},
},
},
},
},
{
gvk: schema.FromAPIVersionAndKind("v1", "Node"),
obj: getObjectBytes("node.yaml"),
},
{
gvk: schema.FromAPIVersionAndKind("v1", "Endpoints"),
obj: getObjectBytes("endpoints.yaml"),
},
}`
newObj := &unstructured.Unstructured{Object: map[string]interface{}{}}
if err := yaml.Unmarshal([]byte(y), &newObj.Object); err != nil {
b.Fatalf("Failed to parse yaml object: %v", err)
}
b.ReportAllocs()
b.ResetTimer()
for n := 0; n < b.N; n++ {
err := f.Update(newObj, "fieldmanager_test")
if err != nil {
b.Fatal(err)
}
f.Reset()
for _, test := range tests {
b.Run(test.gvk.Kind, func(b *testing.B) {
f := NewTestFieldManager(test.gvk)
newObj := &unstructured.Unstructured{Object: map[string]interface{}{}}
if err := yaml.Unmarshal(test.obj, &newObj.Object); err != nil {
b.Fatalf("Failed to parse yaml object: %v", err)
}
newObj.SetManagedFields([]metav1.ManagedFieldsEntry{
{
Manager: "default",
Operation: "Update",
APIVersion: "v1",
},
})
b.Run("Update", func(b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
for n := 0; n < b.N; n++ {
err := f.Update(newObj, "fieldmanager_test")
if err != nil {
b.Fatal(err)
}
f.Reset()
}
})
b.Run("Apply", func(b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
for n := 0; n < b.N; n++ {
err := f.Apply(test.obj, "fieldmanager_test", false)
if err != nil {
b.Fatal(err)
}
f.Reset()
}
})
})
}
}
func BenchmarkRepeatedUpdate(b *testing.B) {
f := NewTestFieldManager()
f := NewTestFieldManager(schema.FromAPIVersionAndKind("v1", "Pod"))
podBytes := getObjectBytes("pod.yaml")
y1 := `{
"apiVersion": "apps/v1",
"kind": "Deployment",
"metadata": {
"name": "b",
"namespace": "b",
"creationTimestamp": "2016-05-19T09:59:00Z",
},
"map": {
"fieldA": 1,
"fieldB": 1,
"fieldC": 1,
"fieldD": 1,
"fieldE": 1,
"fieldF": 1,
"fieldG": 1,
"fieldH": 1,
"fieldI": 1,
"fieldJ": 1,
"fieldK": 1,
"fieldL": 1,
"fieldM": 1,
"fieldN": {
"fieldN": {
"fieldN": {
"fieldN": {
"fieldN": {
"value": true
},
},
},
},
},
},
}`
obj1 := &unstructured.Unstructured{Object: map[string]interface{}{}}
if err := yaml.Unmarshal([]byte(y1), &obj1.Object); err != nil {
var obj *corev1.Pod
if err := yaml.Unmarshal(podBytes, &obj); err != nil {
b.Fatalf("Failed to parse yaml object: %v", err)
}
y2 := `{
"apiVersion": "apps/v1",
"kind": "Deployment",
"metadata": {
"name": "b",
"namespace": "b",
"creationTimestamp": "2016-05-19T09:59:00Z",
},
"map": {
"fieldA": 1,
"fieldB": 1,
"fieldC": 1,
"fieldD": 1,
"fieldE": 1,
"fieldF": 1,
"fieldG": 1,
"fieldH": 1,
"fieldI": 1,
"fieldJ": 1,
"fieldK": 1,
"fieldL": 1,
"fieldM": 1,
"fieldN": {
"fieldN": {
"fieldN": {
"fieldN": {
"fieldN": {
"value": false
},
},
},
},
},
},
obj.Spec.Containers[0].Image = "nginx:latest"
objs := []*corev1.Pod{obj}
obj = obj.DeepCopy()
obj.Spec.Containers[0].Image = "nginx:4.3"
objs = append(objs, obj)
}`
obj2 := &unstructured.Unstructured{Object: map[string]interface{}{}}
if err := yaml.Unmarshal([]byte(y2), &obj2.Object); err != nil {
b.Fatalf("Failed to parse yaml object: %v", err)
}
y3 := `{
"apiVersion": "apps/v1",
"kind": "Deployment",
"metadata": {
"name": "b",
"namespace": "b",
"creationTimestamp": "2016-05-19T09:59:00Z",
},
"map": {
"fieldA": 1,
"fieldB": 1,
"fieldC": 1,
"fieldD": 1,
"fieldE": 1,
"fieldF": 1,
"fieldG": 1,
"fieldH": 1,
"fieldI": 1,
"fieldJ": 1,
"fieldK": 1,
"fieldL": 1,
"fieldM": 1,
"fieldN": {
"fieldN": {
"fieldN": {
"fieldN": {
"fieldN": {
"value": true
},
},
},
},
},
"fieldO": 1,
"fieldP": 1,
"fieldQ": 1,
"fieldR": 1,
"fieldS": 1,
},
}`
obj3 := &unstructured.Unstructured{Object: map[string]interface{}{}}
if err := yaml.Unmarshal([]byte(y3), &obj3.Object); err != nil {
b.Fatalf("Failed to parse yaml object: %v", err)
}
objs := []*unstructured.Unstructured{obj1, obj2, obj3}
if err := f.Update(objs[0], "fieldmanager_0"); err != nil {
err := f.Apply(podBytes, "fieldmanager_apply", false)
if err != nil {
b.Fatal(err)
}
@ -493,14 +394,10 @@ func BenchmarkRepeatedUpdate(b *testing.B) {
b.Fatal(err)
}
if err := f.Update(objs[2], "fieldmanager_2"); err != nil {
b.Fatal(err)
}
b.ReportAllocs()
b.ResetTimer()
for n := 0; n < b.N; n++ {
err := f.Update(objs[n%3], fmt.Sprintf("fieldmanager_%d", n%3))
err := f.Update(objs[n%len(objs)], fmt.Sprintf("fieldmanager_%d", n%len(objs)))
if err != nil {
b.Fatal(err)
}
@ -509,10 +406,10 @@ func BenchmarkRepeatedUpdate(b *testing.B) {
}
func TestApplyFailsWithManagedFields(t *testing.T) {
f := NewTestFieldManager()
f := NewTestFieldManager(schema.FromAPIVersionAndKind("v1", "Pod"))
err := f.Apply([]byte(`{
"apiVersion": "apps/v1",
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"managedFields": [
@ -529,10 +426,10 @@ func TestApplyFailsWithManagedFields(t *testing.T) {
}
func TestApplySuccessWithNoManagedFields(t *testing.T) {
f := NewTestFieldManager()
f := NewTestFieldManager(schema.FromAPIVersionAndKind("v1", "Pod"))
err := f.Apply([]byte(`{
"apiVersion": "apps/v1",
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"labels": {


@ -0,0 +1,259 @@
apiVersion: v1
kind: Node
metadata:
annotations:
container.googleapis.com/instance_id: "123456789321654789"
node.alpha.kubernetes.io/ttl: "0"
volumes.kubernetes.io/controller-managed-attach-detach: "true"
creationTimestamp: "2019-07-09T16:17:29Z"
labels:
beta.kubernetes.io/arch: amd64
beta.kubernetes.io/fluentd-ds-ready: "true"
beta.kubernetes.io/instance-type: n1-standard-4
beta.kubernetes.io/os: linux
cloud.google.com/gke-nodepool: default-pool
cloud.google.com/gke-os-distribution: cos
failure-domain.beta.kubernetes.io/region: us-central1
failure-domain.beta.kubernetes.io/zone: us-central1-b
kubernetes.io/hostname: node-default-pool-something
name: node-default-pool-something
resourceVersion: "211582541"
selfLink: /api/v1/nodes/node-default-pool-something
uid: 0c24d0e1-a265-11e9-abe4-42010a80026b
spec:
podCIDR: 10.0.0.1/24
providerID: some-provider-id-of-some-sort
status:
addresses:
- address: 10.0.0.1
type: InternalIP
- address: 192.168.0.1
type: ExternalIP
- address: node-default-pool-something
type: Hostname
allocatable:
cpu: 3920m
ephemeral-storage: "104638878617"
hugepages-2Mi: "0"
memory: 12700100Ki
pods: "110"
capacity:
cpu: "4"
ephemeral-storage: 202086868Ki
hugepages-2Mi: "0"
memory: 15399364Ki
pods: "110"
conditions:
- lastHeartbeatTime: "2019-09-20T19:32:08Z"
lastTransitionTime: "2019-07-09T16:22:08Z"
message: containerd is functioning properly
reason: FrequentContainerdRestart
status: "False"
type: FrequentContainerdRestart
- lastHeartbeatTime: "2019-09-20T19:32:08Z"
lastTransitionTime: "2019-07-09T16:22:06Z"
message: docker overlay2 is functioning properly
reason: CorruptDockerOverlay2
status: "False"
type: CorruptDockerOverlay2
- lastHeartbeatTime: "2019-09-20T19:32:08Z"
lastTransitionTime: "2019-07-09T16:22:06Z"
message: node is functioning properly
reason: UnregisterNetDevice
status: "False"
type: FrequentUnregisterNetDevice
- lastHeartbeatTime: "2019-09-20T19:32:08Z"
lastTransitionTime: "2019-07-09T16:17:04Z"
message: kernel has no deadlock
reason: KernelHasNoDeadlock
status: "False"
type: KernelDeadlock
- lastHeartbeatTime: "2019-09-20T19:32:08Z"
lastTransitionTime: "2019-07-09T16:17:04Z"
message: Filesystem is not read-only
reason: FilesystemIsNotReadOnly
status: "False"
type: ReadonlyFilesystem
- lastHeartbeatTime: "2019-09-20T19:32:08Z"
lastTransitionTime: "2019-07-09T16:22:05Z"
message: kubelet is functioning properly
reason: FrequentKubeletRestart
status: "False"
type: FrequentKubeletRestart
- lastHeartbeatTime: "2019-09-20T19:32:08Z"
lastTransitionTime: "2019-07-09T16:22:06Z"
message: docker is functioning properly
reason: FrequentDockerRestart
status: "False"
type: FrequentDockerRestart
- lastHeartbeatTime: "2019-07-09T16:17:47Z"
lastTransitionTime: "2019-07-09T16:17:47Z"
message: RouteController created a route
reason: RouteCreated
status: "False"
type: NetworkUnavailable
- lastHeartbeatTime: "2019-09-20T19:32:50Z"
lastTransitionTime: "2019-07-09T16:17:29Z"
message: kubelet has sufficient disk space available
reason: KubeletHasSufficientDisk
status: "False"
type: OutOfDisk
- lastHeartbeatTime: "2019-09-20T19:32:50Z"
lastTransitionTime: "2019-07-09T16:17:29Z"
message: kubelet has sufficient memory available
reason: KubeletHasSufficientMemory
status: "False"
type: MemoryPressure
- lastHeartbeatTime: "2019-09-20T19:32:50Z"
lastTransitionTime: "2019-07-09T16:17:29Z"
message: kubelet has no disk pressure
reason: KubeletHasNoDiskPressure
status: "False"
type: DiskPressure
- lastHeartbeatTime: "2019-09-20T19:32:50Z"
lastTransitionTime: "2019-07-09T16:17:29Z"
message: kubelet has sufficient PID available
reason: KubeletHasSufficientPID
status: "False"
type: PIDPressure
- lastHeartbeatTime: "2019-09-20T19:32:50Z"
lastTransitionTime: "2019-07-09T16:17:49Z"
message: kubelet is posting ready status. AppArmor enabled
reason: KubeletReady
status: "True"
type: Ready
daemonEndpoints:
kubeletEndpoint:
Port: 10250
images:
- names:
- grafana/grafana@sha256:80e5e113a984d74836aa16f5b4524012099436b1a50df293f00ac6377fb512c8
- grafana/grafana:4.4.2
sizeBytes: 287008013
- names:
- k8s.gcr.io/node-problem-detector@sha256:f95cab985c26b2f46e9bd43283e0bfa88860c14e0fb0649266babe8b65e9eb2b
- k8s.gcr.io/node-problem-detector:v0.4.1
sizeBytes: 286572743
- names:
- grafana/grafana@sha256:7ff7f9b2501a5d55b55ce3f58d21771b1c5af1f2a4ab7dbf11bef7142aae7033
- grafana/grafana:4.2.0
sizeBytes: 277940263
- names:
- influxdb@sha256:7dddf03376348876ed4bdf33d6dfa3326f45a2bae0930dbd80781a374eb519bc
- influxdb:1.2.2
sizeBytes: 223948571
- names:
- gcr.io/stackdriver-agents/stackdriver-logging-agent@sha256:f8d5231b67b9c53f60068b535a11811d29d1b3efd53d2b79f2a2591ea338e4f2
- gcr.io/stackdriver-agents/stackdriver-logging-agent:0.6-1.6.0-1
sizeBytes: 223242132
- names:
- nginx@sha256:35779791c05d119df4fe476db8f47c0bee5943c83eba5656a15fc046db48178b
- nginx:1.10.1
sizeBytes: 180708613
- names:
- k8s.gcr.io/fluentd-elasticsearch@sha256:b8c94527b489fb61d3d81ce5ad7f3ddbb7be71e9620a3a36e2bede2f2e487d73
- k8s.gcr.io/fluentd-elasticsearch:v2.0.4
sizeBytes: 135716379
- names:
- nginx@sha256:00be67d6ba53d5318cd91c57771530f5251cfbe028b7be2c4b70526f988cfc9f
- nginx:latest
sizeBytes: 109357355
- names:
- k8s.gcr.io/kubernetes-dashboard-amd64@sha256:dc4026c1b595435ef5527ca598e1e9c4343076926d7d62b365c44831395adbd0
- k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
sizeBytes: 102319441
- names:
- gcr.io/google_containers/kube-proxy:v1.11.10-gke.5
- k8s.gcr.io/kube-proxy:v1.11.10-gke.5
sizeBytes: 102279340
- names:
- k8s.gcr.io/event-exporter@sha256:7f9cd7cb04d6959b0aa960727d04fa86759008048c785397b7b0d9dff0007516
- k8s.gcr.io/event-exporter:v0.2.3
sizeBytes: 94171943
- names:
- k8s.gcr.io/prometheus-to-sd@sha256:6c0c742475363d537ff059136e5d5e4ab1f512ee0fd9b7ca42ea48bc309d1662
- k8s.gcr.io/prometheus-to-sd:v0.3.1
sizeBytes: 88077694
- names:
- k8s.gcr.io/fluentd-gcp-scaler@sha256:a5ace7506d393c4ed65eb2cbb6312c64ab357fcea16dff76b9055bc6e498e5ff
- k8s.gcr.io/fluentd-gcp-scaler:0.5.1
sizeBytes: 86637208
- names:
- k8s.gcr.io/heapster-amd64@sha256:9fae0af136ce0cf4f88393b3670f7139ffc464692060c374d2ae748e13144521
- k8s.gcr.io/heapster-amd64:v1.6.0-beta.1
sizeBytes: 76016169
- names:
- k8s.gcr.io/ingress-glbc-amd64@sha256:31d36bbd9c44caffa135fc78cf0737266fcf25e3cf0cd1c2fcbfbc4f7309cc52
- k8s.gcr.io/ingress-glbc-amd64:v1.1.1
sizeBytes: 67801919
- names:
- k8s.gcr.io/kube-addon-manager@sha256:d53486c3a0b49ebee019932878dc44232735d5622a51dbbdcec7124199020d09
- k8s.gcr.io/kube-addon-manager:v8.7
sizeBytes: 63322109
- names:
- nginx@sha256:4aacdcf186934dcb02f642579314075910f1855590fd3039d8fa4c9f96e48315
- nginx:1.10-alpine
sizeBytes: 54042627
- names:
- k8s.gcr.io/cpvpa-amd64@sha256:cfe7b0a11c9c8e18c87b1eb34fef9a7cbb8480a8da11fc2657f78dbf4739f869
- k8s.gcr.io/cpvpa-amd64:v0.6.0
sizeBytes: 51785854
- names:
- k8s.gcr.io/cluster-proportional-autoscaler-amd64@sha256:003f98d9f411ddfa6ff6d539196355e03ddd69fa4ed38c7ffb8fec6f729afe2d
- k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.2-r2
sizeBytes: 49648481
- names:
- k8s.gcr.io/ip-masq-agent-amd64@sha256:1ffda57d87901bc01324c82ceb2145fe6a0448d3f0dd9cb65aa76a867cd62103
- k8s.gcr.io/ip-masq-agent-amd64:v2.1.1
sizeBytes: 49612505
- names:
- k8s.gcr.io/k8s-dns-kube-dns-amd64@sha256:b99fc3eee2a9f052f7eb4cc00f15eb12fc405fa41019baa2d6b79847ae7284a8
- k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.10
sizeBytes: 49549457
- names:
- k8s.gcr.io/rescheduler@sha256:156cfbfd05a5a815206fd2eeb6cbdaf1596d71ea4b415d3a6c43071dd7b99450
- k8s.gcr.io/rescheduler:v0.4.0
sizeBytes: 48973149
- names:
- k8s.gcr.io/event-exporter@sha256:16ca66e2b5dc7a1ce6a5aafcb21d0885828b75cdfc08135430480f7ad2364adc
- k8s.gcr.io/event-exporter:v0.2.4
sizeBytes: 47261019
- names:
- k8s.gcr.io/coredns@sha256:db2bf53126ed1c761d5a41f24a1b82a461c85f736ff6e90542e9522be4757848
- k8s.gcr.io/coredns:1.1.3
sizeBytes: 45587362
- names:
- prom/prometheus@sha256:483f4c9d7733699ba79facca9f8bcce1cef1af43dfc3e7c5a1882aa85f53cb74
- prom/prometheus:v1.1.3
sizeBytes: 45493941
nodeInfo:
architecture: amd64
bootID: a32eca78-4ad4-4b76-9252-f143d6c2ae61
containerRuntimeVersion: docker://17.3.2
kernelVersion: 4.14.127+
kubeProxyVersion: v1.11.10-gke.5
kubeletVersion: v1.11.10-gke.5
machineID: 1739555e5b231057f0f9a0b5fa29511b
operatingSystem: linux
osImage: Container-Optimized OS from Google
systemUUID: 1739555E-5B23-1057-F0F9-A0B5FA29511B
volumesAttached:
- devicePath: /dev/disk/by-id/b9772-pvc-c787c67d-14d7-11e7-9baf-42010a800049
name: kubernetes.io/pd/some-random-clusterb9772-pvc-c787c67d-14d7-11e7-9baf-42010a800049
- devicePath: /dev/disk/by-id/b9772-pvc-8895a852-fd42-11e6-94d4-42010a800049
name: kubernetes.io/pd/some-random-clusterb9772-pvc-8895a852-fd42-11e6-94d4-42010a800049
- devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-72e1c7f1-fd41-11e6-94d4-42010a800049
name: kubernetes.io/pd/some-random-clusterb9772-pvc-72e1c7f1-fd41-11e6-94d4-42010a800049
- devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-c2435a06-14d7-11e7-9baf-42010a800049
name: kubernetes.io/pd/some-random-clusterb9772-pvc-c2435a06-14d7-11e7-9baf-42010a800049
- devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-8bf50554-fd42-11e6-94d4-42010a800049
name: kubernetes.io/pd/some-random-clusterb9772-pvc-8bf50554-fd42-11e6-94d4-42010a800049
- devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-8fb5e386-4641-11e7-a490-42010a800283
name: kubernetes.io/pd/some-random-clusterb9772-pvc-8fb5e386-4641-11e7-a490-42010a800283
volumesInUse:
- kubernetes.io/pd/some-random-clusterb9772-pvc-72e1c7f1-fd41-11e6-94d4-42010a800049
- kubernetes.io/pd/some-random-clusterb9772-pvc-8895a852-fd42-11e6-94d4-42010a800049
- kubernetes.io/pd/some-random-clusterb9772-pvc-8bf50554-fd42-11e6-94d4-42010a800049
- kubernetes.io/pd/some-random-clusterb9772-pvc-8fb5e386-4641-11e7-a490-42010a800283
- kubernetes.io/pd/some-random-clusterb9772-pvc-c2435a06-14d7-11e7-9baf-42010a800049
- kubernetes.io/pd/some-random-clusterb9772-pvc-c787c67d-14d7-11e7-9baf-42010a800049


@ -0,0 +1,121 @@
apiVersion: v1
kind: Pod
metadata:
labels:
app: some-app
plugin1: some-value
plugin2: some-value
plugin3: some-value
plugin4: some-value
name: some-name
namespace: default
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: ReplicaSet
name: some-name
uid: 0a9d2b9e-779e-11e7-b422-42010a8001be
spec:
containers:
- args:
- one
- two
- three
- four
- five
- six
- seven
- eight
- nine
env:
- name: VAR_3
valueFrom:
secretKeyRef:
key: some-other-key
name: some-other-name
- name: VAR_2
valueFrom:
secretKeyRef:
key: other-key
name: other-name
- name: VAR_1
valueFrom:
secretKeyRef:
key: some-key
name: some-name
image: some-image-name
imagePullPolicy: IfNotPresent
name: some-name
resources:
requests:
cpu: '0'
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: default-token-hu5jz
readOnly: true
dnsPolicy: ClusterFirst
nodeName: node-name
priority: 0
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: default
serviceAccountName: default
terminationGracePeriodSeconds: 30
tolerations:
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
tolerationSeconds: 300
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
tolerationSeconds: 300
volumes:
- name: default-token-hu5jz
secret:
defaultMode: 420
secretName: default-token-hu5jz
status:
conditions:
- lastProbeTime: null
lastTransitionTime: '2019-07-08T09:31:18Z'
status: 'True'
type: Initialized
- lastProbeTime: null
lastTransitionTime: '2019-07-08T09:41:59Z'
status: 'True'
type: Ready
- lastProbeTime: null
lastTransitionTime: null
status: 'True'
type: ContainersReady
- lastProbeTime: null
lastTransitionTime: '2019-07-08T09:31:18Z'
status: 'True'
type: PodScheduled
containerStatuses:
- containerID: docker://885e82a1ed0b7356541bb410a0126921ac42439607c09875cd8097dd5d7b5376
image: some-image-name
imageID: docker-pullable://some-image-id
lastState:
terminated:
containerID: docker://d57290f9e00fad626b20d2dd87a3cf69bbc22edae07985374f86a8b2b4e39565
exitCode: 255
finishedAt: '2019-07-08T09:39:09Z'
reason: Error
startedAt: '2019-07-08T09:38:54Z'
name: name
ready: true
restartCount: 6
state:
running:
startedAt: '2019-07-08T09:41:59Z'
hostIP: 10.0.0.1
phase: Running
podIP: 10.0.0.1
qosClass: BestEffort
startTime: '2019-07-08T09:31:18Z'


@ -28,20 +28,29 @@ import (
"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager"
)
type fakeObjectCreater struct{}
type fakeObjectCreater struct {
gvk schema.GroupVersionKind
}
var _ runtime.ObjectCreater = &fakeObjectCreater{}
func (*fakeObjectCreater) New(_ schema.GroupVersionKind) (runtime.Object, error) {
return &unstructured.Unstructured{Object: map[string]interface{}{}}, nil
func (f *fakeObjectCreater) New(_ schema.GroupVersionKind) (runtime.Object, error) {
u := unstructured.Unstructured{Object: map[string]interface{}{}}
u.SetAPIVersion(f.gvk.GroupVersion().String())
u.SetKind(f.gvk.Kind)
return &u, nil
}
func TestNoUpdateBeforeFirstApply(t *testing.T) {
f := NewTestFieldManager()
f.fieldManager = fieldmanager.NewSkipNonAppliedManager(f.fieldManager, &fakeObjectCreater{}, schema.GroupVersionKind{})
f := NewTestFieldManager(schema.FromAPIVersionAndKind("v1", "Pod"))
f.fieldManager = fieldmanager.NewSkipNonAppliedManager(
f.fieldManager,
&fakeObjectCreater{gvk: schema.GroupVersionKind{Version: "v1", Kind: "Pod"}},
schema.GroupVersionKind{},
)
if err := f.Apply([]byte(`{
"apiVersion": "apps/v1",
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name": "pod",
@ -66,12 +75,18 @@ func TestNoUpdateBeforeFirstApply(t *testing.T) {
}
}
func TestUpateBeforeFirstApply(t *testing.T) {
f := NewTestFieldManager()
f.fieldManager = fieldmanager.NewSkipNonAppliedManager(f.fieldManager, &fakeObjectCreater{}, schema.GroupVersionKind{})
func TestUpdateBeforeFirstApply(t *testing.T) {
f := NewTestFieldManager(schema.FromAPIVersionAndKind("v1", "Pod"))
f.fieldManager = fieldmanager.NewSkipNonAppliedManager(
f.fieldManager,
&fakeObjectCreater{gvk: schema.GroupVersionKind{Version: "v1", Kind: "Pod"}},
schema.GroupVersionKind{},
)
updatedObj := &corev1.Pod{}
updatedObj.ObjectMeta.Labels = map[string]string{"app": "nginx"}
updatedObj.Kind = "Pod"
updatedObj.APIVersion = "v1"
updatedObj.ObjectMeta.Labels = map[string]string{"app": "my-nginx"}
if err := f.Update(updatedObj, "fieldmanager_test_update"); err != nil {
t.Fatalf("failed to update object: %v", err)
@ -82,7 +97,7 @@ func TestUpateBeforeFirstApply(t *testing.T) {
}
appliedBytes := []byte(`{
"apiVersion": "apps/v1",
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name": "pod",
@ -102,7 +117,7 @@ func TestUpateBeforeFirstApply(t *testing.T) {
t.Fatalf("Expecting to get one conflict but got %v", err)
}
if e, a := ".spec.containers", apiStatus.Status().Details.Causes[0].Field; e != a {
if e, a := ".metadata.labels.app", apiStatus.Status().Details.Causes[0].Field; e != a {
t.Fatalf("Expecting to conflict on field %q but conflicted on field %q: %v", e, a, err)
}