From a9ea98b3b9272a7f7788a0d37891e4b13b9be38d Mon Sep 17 00:00:00 2001 From: Andrea Nodari Date: Sat, 23 Jan 2021 18:50:14 +0100 Subject: [PATCH 1/6] Track ownership of deployments scale subresource --- .../apps/deployment/storage/storage.go | 25 + .../handlers/fieldmanager/scalehander.go | 160 ++++ .../handlers/fieldmanager/scalehander_test.go | 684 ++++++++++++++++++ .../integration/apiserver/apply/apply_test.go | 309 +++++++- 4 files changed, 1171 insertions(+), 7 deletions(-) create mode 100644 staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehander.go create mode 100644 staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehander_test.go diff --git a/pkg/registry/apps/deployment/storage/storage.go b/pkg/registry/apps/deployment/storage/storage.go index 8b2b48c6a54..409d946929a 100644 --- a/pkg/registry/apps/deployment/storage/storage.go +++ b/pkg/registry/apps/deployment/storage/storage.go @@ -25,6 +25,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager" "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" @@ -54,6 +55,11 @@ type DeploymentStorage struct { Rollback *RollbackREST } +// maps a group version to the replicas path in a deployment object +var replicasPathInDeployment = fieldmanager.ResourcePathMappings{ + schema.GroupVersion{Group: "apps", Version: "v1"}.String(): fieldpath.MakePathOrDie("spec", "replicas"), +} + // NewStorage returns new instance of DeploymentStorage. func NewStorage(optsGetter generic.RESTOptionsGetter) (DeploymentStorage, error) { deploymentRest, deploymentStatusRest, deploymentRollbackRest, err := NewREST(optsGetter) @@ -337,6 +343,7 @@ func scaleFromDeployment(deployment *apps.Deployment) (*autoscaling.Scale, error if err != nil { return nil, err } + return &autoscaling.Scale{ // TODO: Create a variant of ObjectMeta type that only contains the fields below. 
ObjectMeta: metav1.ObjectMeta{
@@ -376,11 +383,22 @@ func (i *scaleUpdatedObjectInfo) UpdatedObject(ctx context.Context, oldObj runti
 		return nil, errors.NewNotFound(apps.Resource("deployments/scale"), i.name)
 	}
+	managedFieldsHandler := fieldmanager.NewScaleHandler(
+		deployment.ManagedFields,
+		schema.GroupVersion{Group: "apps", Version: "v1"},
+		replicasPathInDeployment,
+	)
+
 	// deployment -> old scale
 	oldScale, err := scaleFromDeployment(deployment)
 	if err != nil {
 		return nil, err
 	}
+	scaleManagedFields, err := managedFieldsHandler.ToSubresource()
+	if err != nil {
+		return nil, err
+	}
+	oldScale.ManagedFields = scaleManagedFields

 	// old scale -> new scale
 	newScaleObj, err := i.reqObjInfo.UpdatedObject(ctx, oldScale)
@@ -412,5 +430,12 @@ func (i *scaleUpdatedObjectInfo) UpdatedObject(ctx context.Context, oldObj runti
 	// move replicas/resourceVersion fields to object and return
 	deployment.Spec.Replicas = scale.Spec.Replicas
 	deployment.ResourceVersion = scale.ResourceVersion
+
+	updatedEntries, err := managedFieldsHandler.ToParent(scale.ManagedFields)
+	if err != nil {
+		return nil, err
+	}
+	deployment.ManagedFields = updatedEntries
+
 	return deployment, nil
 }
diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehander.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehander.go
new file mode 100644
index 00000000000..d551a43cb59
--- /dev/null
+++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehander.go
@@ -0,0 +1,160 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fieldmanager
+
+import (
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal"
+	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
+)
+
+var (
+	scaleGroupVersion   = schema.GroupVersion{Group: "autoscaling", Version: "v1"}
+	replicasPathInScale = fieldpath.MakePathOrDie("spec", "replicas")
+)
+
+// ResourcePathMappings maps a group/version to its replicas path. The
+// assumption is that all the paths correspond to leaf fields.
+type ResourcePathMappings map[string]fieldpath.Path
+
+// ScaleHandler manages the conversion of managed fields between a main
+// resource and the scale subresource.
+type ScaleHandler struct {
+	parentEntries       []metav1.ManagedFieldsEntry
+	defaultGroupVersion schema.GroupVersion
+	mappings            ResourcePathMappings
+}
+
+// NewScaleHandler creates a new ScaleHandler.
+func NewScaleHandler(parentEntries []metav1.ManagedFieldsEntry, defaultGroupVersion schema.GroupVersion, mappings ResourcePathMappings) *ScaleHandler {
+	return &ScaleHandler{
+		parentEntries:       parentEntries,
+		defaultGroupVersion: defaultGroupVersion,
+		mappings:            mappings,
+	}
+}
+
+// ToSubresource filters the managed fields of the main resource and converts
+// them so that they can be handled by scale.
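+// Entries of the main resource that do not own the replicas path are
+// filtered out entirely.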
+// For the managed fields that have a replicas path it performs two changes:
+// 1. APIVersion is changed to the APIVersion of the scale subresource
+// 2. Replicas path of the main resource is transformed to the replicas path of
+// the scale subresource
+func (h *ScaleHandler) ToSubresource() ([]metav1.ManagedFieldsEntry, error) {
+	managed, err := DecodeManagedFields(h.parentEntries)
+	if err != nil {
+		return nil, err
+	}
+
+	f := fieldpath.ManagedFields{}
+	t := map[string]*metav1.Time{}
+	for manager, versionedSet := range managed.Fields() {
+		path := h.mappings[string(versionedSet.APIVersion())]
+		if versionedSet.Set().Has(path) {
+			newVersionedSet := fieldpath.NewVersionedSet(
+				fieldpath.NewSet(replicasPathInScale),
+				fieldpath.APIVersion(scaleGroupVersion.String()),
+				versionedSet.Applied(),
+			)
+
+			f[manager] = newVersionedSet
+			t[manager] = managed.Times()[manager]
+		}
+	}
+
+	return managedFieldsEntries(internal.NewManaged(f, t))
+}
+
+// ToParent merges `scaleEntries` with the entries of the main resource and
+// transforms them accordingly.
+func (h *ScaleHandler) ToParent(scaleEntries []metav1.ManagedFieldsEntry) ([]metav1.ManagedFieldsEntry, error) {
+	decodedParentEntries, err := DecodeManagedFields(h.parentEntries)
+	if err != nil {
+		return nil, err
+	}
+	parentFields := decodedParentEntries.Fields()
+
+	decodedScaleEntries, err := DecodeManagedFields(scaleEntries)
+	if err != nil {
+		return nil, err
+	}
+	scaleFields := decodedScaleEntries.Fields()
+
+	f := fieldpath.ManagedFields{}
+	t := map[string]*metav1.Time{}
+
+	for manager, versionedSet := range parentFields {
+		// Get the main resource "replicas" path
+		path := h.mappings[string(versionedSet.APIVersion())]
+
+		// If the parent entry does not have the replicas path, just keep it as it is
+		if !versionedSet.Set().Has(path) {
+			f[manager] = versionedSet
+			t[manager] = decodedParentEntries.Times()[manager]
+			continue
+		}
+
+		if _, ok := scaleFields[manager]; !ok {
+			// "Steal" the replicas path from the main resource entry
+			newSet := versionedSet.Set().Difference(fieldpath.NewSet(path))
+
+			if !newSet.Empty() {
+				newVersionedSet := fieldpath.NewVersionedSet(
+					newSet,
+					versionedSet.APIVersion(),
+					versionedSet.Applied(),
+				)
+				f[manager] = newVersionedSet
+				t[manager] = decodedParentEntries.Times()[manager]
+			}
+		} else {
+			// Field wasn't stolen, let's keep the entry as it is.
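+			// Deleting the manager from scaleFields below ensures the final
+			// loop over scaleFields does not overwrite the entry kept here.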
+ f[manager] = versionedSet + t[manager] = decodedParentEntries.Times()[manager] + delete(scaleFields, manager) + } + } + + for manager, versionedSet := range scaleFields { + newVersionedSet := fieldpath.NewVersionedSet( + fieldpath.NewSet(h.mappings[h.defaultGroupVersion.String()]), + fieldpath.APIVersion(h.defaultGroupVersion.String()), + versionedSet.Applied(), + ) + f[manager] = newVersionedSet + t[manager] = decodedParentEntries.Times()[manager] + } + + return managedFieldsEntries(internal.NewManaged(f, t)) +} + +func managedFieldsEntries(entries internal.ManagedInterface) ([]metav1.ManagedFieldsEntry, error) { + obj := &unstructured.Unstructured{Object: map[string]interface{}{}} + if err := internal.EncodeObjectManagedFields(obj, entries); err != nil { + return nil, err + } + accessor, err := meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + return accessor.GetManagedFields(), nil +} diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehander_test.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehander_test.go new file mode 100644 index 00000000000..6ea8a8c7649 --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehander_test.go @@ -0,0 +1,684 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fieldmanager + +import ( + "reflect" + "testing" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +func TestTransformManagedFieldsToSubresource(t *testing.T) { + testTime, _ := time.ParseInLocation("2006-Jan-02", "2013-Feb-03", time.Local) + managedFieldTime := v1.NewTime(testTime) + + tests := []struct { + desc string + input []metav1.ManagedFieldsEntry + expected []metav1.ManagedFieldsEntry + }{ + { + desc: "filter one entry and transform it into a subresource entry", + input: []metav1.ManagedFieldsEntry{ + { + Manager: "manager-1", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:another-field":{}}}`)}, + }, + { + Manager: "manager-2", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Time: &managedFieldTime, + }, + }, + expected: []metav1.ManagedFieldsEntry{ + { + Manager: "manager-2", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "autoscaling/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Time: &managedFieldTime, + }, + }, + }, + { + desc: "transform all entries", + input: []metav1.ManagedFieldsEntry{ + { + Manager: "manager-1", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + }, + { + Manager: "manager-2", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + }, + { + Manager: "manager-3", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + expected: []metav1.ManagedFieldsEntry{ + { + Manager: "manager-1", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "autoscaling/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + }, + { + Manager: "manager-2", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "autoscaling/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + }, + { + Manager: "manager-3", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "autoscaling/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + }, + } + + for _, test := range tests { + handler := NewScaleHandler( + test.input, + schema.GroupVersion{Group: "apps", Version: "v1"}, + defaultMappings(), + ) + subresourceEntries, err := handler.ToSubresource() + if err != nil { + t.Fatalf("test %q - expected no error but got %v", test.desc, err) + } + + if !reflect.DeepEqual(subresourceEntries, test.expected) { + t.Fatalf("test %q - expected output to be:\n%v\n\nbut got:\n%v", test.desc, test.expected, subresourceEntries) + } + } +} + +func TestTransformingManagedFieldsToParent(t *testing.T) { + tests := []struct { + desc string + parent []metav1.ManagedFieldsEntry + subresource []metav1.ManagedFieldsEntry + expected 
[]metav1.ManagedFieldsEntry + }{ + { + desc: "different-managers: apply -> update", + parent: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{},"f:selector":{}}}`)}, + }, + }, + subresource: []metav1.ManagedFieldsEntry{ + { + Manager: "scale", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "autoscaling/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + expected: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:selector":{}}}`)}, + }, + { + Manager: "scale", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + }, + { + desc: "different-managers: apply -> apply", + parent: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{},"f:selector":{}}}`)}, + }, + }, + subresource: []metav1.ManagedFieldsEntry{ + { + Manager: "scale", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "autoscaling/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + expected: []metav1.ManagedFieldsEntry{ + { + Manager: "scale", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:selector":{}}}`)}, + }, + }, + }, + { + desc: "different-managers: update -> update", + parent: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{},"f:selector":{}}}`)}, + }, + }, + subresource: []metav1.ManagedFieldsEntry{ + { + Manager: "scale", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "autoscaling/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + expected: []metav1.ManagedFieldsEntry{ + { + Manager: "scale", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:selector":{}}}`)}, + }, + }, + }, + { + desc: "different-managers: update -> apply", + parent: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: 
[]byte(`{"f:spec":{"f:replicas":{},"f:selector":{}}}`)}, + }, + }, + subresource: []metav1.ManagedFieldsEntry{ + { + Manager: "scale", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "autoscaling/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + expected: []metav1.ManagedFieldsEntry{ + { + Manager: "scale", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:selector":{}}}`)}, + }, + }, + }, + { + desc: "same manager: apply -> apply", + parent: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{},"f:selector":{}}}`)}, + }, + }, + subresource: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "autoscaling/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + expected: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:selector":{}}}`)}, + }, + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + }, + { + desc: "same manager: update -> update", + parent: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{},"f:selector":{}}}`)}, + }, + }, + subresource: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "autoscaling/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + expected: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:selector":{}}}`)}, + }, + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + }, + { + desc: "same manager: update -> apply", + parent: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{},"f:selector":{}}}`)}, + }, + }, + subresource: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "autoscaling/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: 
"scale", + }, + }, + expected: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:selector":{}}}`)}, + }, + }, + }, + { + desc: "same manager: apply -> update", + parent: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{},"f:selector":{}}}`)}, + }, + }, + subresource: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "autoscaling/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + expected: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:selector":{}}}`)}, + }, + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + }, + { + desc: "subresource doesn't own the path anymore", + parent: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{},"f:selector":{}}}`)}, + }, + }, + subresource: []metav1.ManagedFieldsEntry{ + { + Manager: "scale", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "autoscaling/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:another":{}}}`)}, + Subresource: "scale", + }, + }, + expected: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:selector":{}}}`)}, + }, + { + Manager: "scale", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + }, + { + desc: "Subresource steals all the fields of the parent resource", + parent: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + }, + }, + subresource: []metav1.ManagedFieldsEntry{ + { + Manager: "scale", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "autoscaling/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + expected: []metav1.ManagedFieldsEntry{ + { + Manager: "scale", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + }, + { + 
desc: "apply without stealing", + parent: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{},"f:selector":{}}}`)}, + }, + }, + subresource: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "autoscaling/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + }, + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "autoscaling/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + expected: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{},"f:selector":{}}}`)}, + }, + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + handler := NewScaleHandler( + test.parent, + schema.GroupVersion{Group: "apps", Version: "v1"}, + defaultMappings(), + ) + parentEntries, err := handler.ToParent(test.subresource) + if err != nil { + t.Fatalf("test: %q - expected no error but got %v", test.desc, err) + } + if !reflect.DeepEqual(parentEntries, test.expected) { + t.Fatalf("test: %q - expected output to be:\n%v\n\nbut got:\n%v", test.desc, test.expected, parentEntries) + } + }) + } +} + +func TestTransformingManagedFieldsToParentMultiVersion(t *testing.T) { + tests := []struct { + desc string + mappings ResourcePathMappings + parent []metav1.ManagedFieldsEntry + subresource []metav1.ManagedFieldsEntry + expected []metav1.ManagedFieldsEntry + }{ + { + desc: "multi-version", + mappings: ResourcePathMappings{ + "apps/v1": fieldpath.MakePathOrDie("spec", "the-replicas"), + "apps/v2": fieldpath.MakePathOrDie("spec", "not-the-replicas"), + }, + parent: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:the-replicas":{},"f:selector":{}}}`)}, + }, + { + Manager: "test-other", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v2", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:not-the-replicas":{},"f:selector":{}}}`)}, + }, + }, + subresource: []metav1.ManagedFieldsEntry{ + { + Manager: "scale", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "autoscaling/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + expected: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:selector":{}}}`)}, + }, + { + Manager: "test-other", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v2", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:selector":{}}}`)}, + }, + { + Manager: "scale", + Operation: 
metav1.ManagedFieldsOperationUpdate, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:the-replicas":{}}}`)}, + Subresource: "scale", + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + handler := NewScaleHandler( + test.parent, + schema.GroupVersion{Group: "apps", Version: "v1"}, + test.mappings, + ) + parentEntries, err := handler.ToParent(test.subresource) + if err != nil { + t.Fatalf("test: %q - expected no error but got %v", test.desc, err) + } + if !reflect.DeepEqual(parentEntries, test.expected) { + t.Fatalf("test: %q - expected output to be:\n%v\n\nbut got:\n%v", test.desc, test.expected, parentEntries) + } + }) + } +} + +func defaultMappings() ResourcePathMappings { + return ResourcePathMappings{ + "apps/v1": fieldpath.MakePathOrDie("spec", "replicas"), + } +} diff --git a/test/integration/apiserver/apply/apply_test.go b/test/integration/apiserver/apply/apply_test.go index 71622b637af..f9859233d2e 100644 --- a/test/integration/apiserver/apply/apply_test.go +++ b/test/integration/apiserver/apply/apply_test.go @@ -3395,9 +3395,9 @@ func TestSubresourceField(t *testing.T) { AbsPath("/apis/apps/v1"). Namespace("default"). Resource("deployments"). - SubResource("status"). + SubResource("scale"). Name("deployment"). - Body([]byte(`{"status":{"unavailableReplicas":32}}`)). + Body([]byte(`{"spec":{"replicas":32}}`)). Param("fieldManager", "manager"). Do(context.TODO()). Get() @@ -3405,8 +3405,6 @@ func TestSubresourceField(t *testing.T) { t.Fatalf("Failed to update status: %v", err) } - // TODO (nodo): add test for "scale" once we start tracking managed fields (#82046) - deployment, err := client.AppsV1().Deployments("default").Get(context.TODO(), "deployment", metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to get object: %v", err) @@ -3414,15 +3412,312 @@ func TestSubresourceField(t *testing.T) { managedFields := deployment.GetManagedFields() if len(managedFields) != 2 { - t.Fatalf("Expected object to have 3 managed fields entries, got: %d", len(managedFields)) + t.Fatalf("Expected object to have 2 managed fields entries, got: %d", len(managedFields)) } if managedFields[0].Manager != "manager" || managedFields[0].Operation != "Apply" || managedFields[0].Subresource != "" { t.Fatalf(`Unexpected entry, got: %v`, managedFields[0]) } if managedFields[1].Manager != "manager" || managedFields[1].Operation != "Update" || - managedFields[1].Subresource != "status" || - string(managedFields[1].FieldsV1.Raw) != `{"f:status":{"f:unavailableReplicas":{}}}` { + managedFields[1].Subresource != "scale" || + string(managedFields[1].FieldsV1.Raw) != `{"f:spec":{"f:replicas":{}}}` { t.Fatalf(`Unexpected entry, got: %v`, managedFields[1]) } } + +func TestApplyOnScaleDeployment(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.ServerSideApply, true)() + + _, client, closeFn := setup(t) + defer closeFn() + + validDeployment := []byte(`{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "name": "deployment" + }, + "spec": { + "replicas": 1, + "selector": { + "matchLabels": { + "app": "nginx" + } + }, + "template": { + "metadata": { + "labels": { + "app": "nginx" + } + }, + "spec": { + "containers": [{ + "name": "nginx", + "image": "nginx:latest" + }] + } + } + } + }`) + + // Create deployment + _, err := client.CoreV1().RESTClient().Patch(types.ApplyPatchType). + AbsPath("/apis/apps/v1"). + Namespace("default"). 
+ Resource("deployments"). + Name("deployment"). + Param("fieldManager", "apply_test"). + Body(validDeployment). + Do(context.TODO()). + Get() + if err != nil { + t.Fatalf("Failed to create object using Apply patch: %v", err) + } + deployment, err := client.AppsV1().Deployments("default").Get(context.TODO(), "deployment", metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to retrieve object: %v", err) + } + if *deployment.Spec.Replicas != 1 { + t.Fatalf("Expected replicas to be 1, but got: %d", *deployment.Spec.Replicas) + } + + // Call scale subresource to update replicas + _, err = client.CoreV1().RESTClient(). + Patch(types.MergePatchType). + AbsPath("/apis/apps/v1"). + Name("deployment"). + Resource("deployments"). + SubResource("scale"). + Namespace("default"). + Param("fieldManager", "scale_test"). + Body([]byte(`{"spec":{"replicas": 5}}`)). + Do(context.TODO()). + Get() + if err != nil { + t.Fatalf("Error updating scale subresource: %v ", err) + } + + deployment, err = client.AppsV1().Deployments("default").Get(context.TODO(), "deployment", metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to retrieve object: %v", err) + } + + if *deployment.Spec.Replicas != 5 { + t.Fatalf("Expected replicas to be 5, but got: %d", *deployment.Spec.Replicas) + } + + assertReplicasOwnership(t, (*deployment).GetManagedFields(), "scale_test") + + // Re-apply the original object, it should fail with conflict because replicas have changed + _, err = client.CoreV1().RESTClient().Patch(types.ApplyPatchType). + AbsPath("/apis/apps/v1"). + Namespace("default"). + Resource("deployments"). + Name("deployment"). + Param("fieldManager", "apply_test"). + Body(validDeployment). + Do(context.TODO()). + Get() + if !apierrors.IsConflict(err) { + t.Fatalf("Expected conflict error but got: %v", err) + } + + // Re-apply forcing the changes should succeed + _, err = client.CoreV1().RESTClient().Patch(types.ApplyPatchType). + AbsPath("/apis/apps/v1"). + Namespace("default"). + Resource("deployments"). + Name("deployment"). + Param("fieldManager", "apply_test"). + Param("force", "true"). + Body(validDeployment). + Do(context.TODO()). + Get() + if err != nil { + t.Fatalf("Error updating deployment: %v ", err) + } + + deployment, err = client.AppsV1().Deployments("default").Get(context.TODO(), "deployment", metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to retrieve object: %v", err) + } + + if *deployment.Spec.Replicas != 1 { + t.Fatalf("Expected replicas to be 1, but got: %d", *deployment.Spec.Replicas) + } + + assertReplicasOwnership(t, (*deployment).GetManagedFields(), "apply_test") + + // Run "Apply" with a scale object with a different number of replicas. It should generate a conflict. + _, err = client.CoreV1().RESTClient(). + Patch(types.ApplyPatchType). + AbsPath("/apis/apps/v1"). + Namespace("default"). + Resource("deployments"). + SubResource("scale"). + Name("deployment"). + Param("fieldManager", "apply_scale"). + Body([]byte(`{"kind":"Scale","apiVersion":"autoscaling/v1","metadata":{"name":"deployment","namespace":"default"},"spec":{"replicas":17}}`)). + Do(context.TODO()). + Get() + if !apierrors.IsConflict(err) { + t.Fatalf("Expected conflict error but got: %v", err) + } + if !strings.Contains(err.Error(), "apply_test") { + t.Fatalf("Expected conflict with `apply_test` manager but got: %v", err) + } + + // Same as before but force. Only the new manager should own .spec.replicas + _, err = client.CoreV1().RESTClient(). + Patch(types.ApplyPatchType). + AbsPath("/apis/apps/v1"). 
+ Namespace("default"). + Resource("deployments"). + SubResource("scale"). + Name("deployment"). + Param("fieldManager", "apply_scale"). + Param("force", "true"). + Body([]byte(`{"kind":"Scale","apiVersion":"autoscaling/v1","metadata":{"name":"deployment","namespace":"default"},"spec":{"replicas":17}}`)). + Do(context.TODO()). + Get() + if err != nil { + t.Fatalf("Error updating deployment: %v ", err) + } + + deployment, err = client.AppsV1().Deployments("default").Get(context.TODO(), "deployment", metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to retrieve object: %v", err) + } + + if *deployment.Spec.Replicas != 17 { + t.Fatalf("Expected to replicas to be 17, but got: %d", *deployment.Spec.Replicas) + } + + assertReplicasOwnership(t, (*deployment).GetManagedFields(), "apply_scale") + + // Replace scale object + _, err = client.CoreV1().RESTClient(). + Put(). + AbsPath("/apis/apps/v1"). + Namespace("default"). + Resource("deployments"). + SubResource("scale"). + Name("deployment"). + Param("fieldManager", "replace_test"). + Body([]byte(`{"kind":"Scale","apiVersion":"autoscaling/v1","metadata":{"name":"deployment","namespace":"default"},"spec":{"replicas":7}}`)). + Do(context.TODO()). + Get() + if err != nil { + t.Fatalf("Error updating deployment: %v ", err) + } + + deployment, err = client.AppsV1().Deployments("default").Get(context.TODO(), "deployment", metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to retrieve object: %v", err) + } + + if *deployment.Spec.Replicas != 7 { + t.Fatalf("Expected to replicas to be 7, but got: %d", *deployment.Spec.Replicas) + } + + assertReplicasOwnership(t, (*deployment).GetManagedFields(), "replace_test") + + // Apply the same number of replicas, both managers should own the field + _, err = client.CoreV1().RESTClient(). + Patch(types.ApplyPatchType). + AbsPath("/apis/apps/v1"). + Namespace("default"). + Resource("deployments"). + SubResource("scale"). + Name("deployment"). + Param("fieldManager", "co_owning_test"). + Param("force", "true"). + Body([]byte(`{"kind":"Scale","apiVersion":"autoscaling/v1","metadata":{"name":"deployment","namespace":"default"},"spec":{"replicas":7}}`)). + Do(context.TODO()). + Get() + if err != nil { + t.Fatalf("Error updating deployment: %v ", err) + } + + deployment, err = client.AppsV1().Deployments("default").Get(context.TODO(), "deployment", metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to retrieve object: %v", err) + } + + if *deployment.Spec.Replicas != 7 { + t.Fatalf("Expected to replicas to be 7, but got: %d", *deployment.Spec.Replicas) + } + + assertReplicasOwnership(t, (*deployment).GetManagedFields(), "replace_test", "co_owning_test") + + // Scaling again should make this manager the only owner of replicas + _, err = client.CoreV1().RESTClient(). + Patch(types.MergePatchType). + AbsPath("/apis/apps/v1"). + Name("deployment"). + Resource("deployments"). + SubResource("scale"). + Namespace("default"). + Param("fieldManager", "scale_test"). + Body([]byte(`{"spec":{"replicas": 5}}`)). + Do(context.TODO()). 
+		Get()
+	if err != nil {
+		t.Fatalf("Error updating scale subresource: %v", err)
+	}
+
+	deployment, err = client.AppsV1().Deployments("default").Get(context.TODO(), "deployment", metav1.GetOptions{})
+	if err != nil {
+		t.Fatalf("Failed to retrieve object: %v", err)
+	}
+
+	if *deployment.Spec.Replicas != 5 {
+		t.Fatalf("Expected replicas to be 5, but got: %d", *deployment.Spec.Replicas)
+	}
+
+	assertReplicasOwnership(t, (*deployment).GetManagedFields(), "scale_test")
+}
+
+func assertReplicasOwnership(t *testing.T, managedFields []metav1.ManagedFieldsEntry, fieldManagers ...string) {
+	t.Helper()
+
+	seen := make(map[string]bool)
+	for _, m := range fieldManagers {
+		seen[m] = false
+	}
+
+	for _, managedField := range managedFields {
+		var entryJSON map[string]interface{}
+		if err := json.Unmarshal(managedField.FieldsV1.Raw, &entryJSON); err != nil {
+			t.Fatalf("failed to read into json: %v", err)
+		}
+
+		spec, ok := entryJSON["f:spec"].(map[string]interface{})
+		if !ok {
+			// continue with the next managedField, as this field does not hold the spec entry
+			continue
+		}
+
+		if _, ok := spec["f:replicas"]; !ok {
+			// continue with the next managedField, as this field does not hold the spec.replicas entry
+			continue
+		}
+
+		// check if the manager is one of the ones we expect
+		if _, ok := seen[managedField.Manager]; !ok {
+			t.Fatalf("Unexpected field manager, found %q, expected to be in: %v", managedField.Manager, seen)
+		}
+
+		seen[managedField.Manager] = true
+	}
+
+	var missingManagers []string
+	for manager, managerSeen := range seen {
+		if !managerSeen {
+			missingManagers = append(missingManagers, manager)
+		}
+	}
+	if len(missingManagers) > 0 {
+		t.Fatalf("replicas fields should be owned by %v", missingManagers)
+	}
+}

From 816e80206c169006de9d0a76cd385ee31c5aff39 Mon Sep 17 00:00:00 2001
From: Antoine Pelisse
Date: Thu, 11 Mar 2021 11:05:05 -0800
Subject: [PATCH 2/6] Use ScaleHandler for all scalable resources

---
 .../apps/replicaset/storage/storage.go | 25 ++
 .../apps/statefulset/storage/storage.go | 24 ++
 .../replicationcontroller/storage/storage.go | 24 ++
 .../pkg/apiserver/conversion/converter.go | 7 +
 .../pkg/apiserver/customresource_handler.go | 37 +-
 .../pkg/registry/customresource/etcd.go | 81 ++--
 .../pkg/registry/customresource/etcd_test.go | 2 +
 .../test/integration/subresources_test.go | 103 +++++
 .../{scalehander.go => scalehandler.go} | 0
 ...alehander_test.go => scalehandler_test.go} | 0
 .../handlers/fieldmanager/structuredmerge.go | 2 +-
 .../integration/apiserver/apply/apply_test.go | 297 --------------
 .../integration/apiserver/apply/scale_test.go | 371 ++++++++++++++++++
 13 files changed, 647 insertions(+), 326 deletions(-)
 rename staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/{scalehander.go => scalehandler.go} (100%)
 rename staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/{scalehander_test.go => scalehandler_test.go} (100%)
 create mode 100644 test/integration/apiserver/apply/scale_test.go

diff --git a/pkg/registry/apps/replicaset/storage/storage.go b/pkg/registry/apps/replicaset/storage/storage.go
index be129c7ea82..e16a4664608 100644
--- a/pkg/registry/apps/replicaset/storage/storage.go
+++ b/pkg/registry/apps/replicaset/storage/storage.go
@@ -26,6 +26,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager"
 	"k8s.io/apiserver/pkg/registry/generic"
 	genericregistry
"k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" @@ -50,6 +51,11 @@ type ReplicaSetStorage struct { Scale *ScaleREST } +// maps a group version to the replicas path in a replicaset object +var replicasPathInReplicaSet = fieldmanager.ResourcePathMappings{ + schema.GroupVersion{Group: "apps", Version: "v1"}.String(): fieldpath.MakePathOrDie("spec", "replicas"), +} + // NewStorage returns new instance of ReplicaSetStorage. func NewStorage(optsGetter generic.RESTOptionsGetter) (ReplicaSetStorage, error) { replicaSetRest, replicaSetStatusRest, err := NewREST(optsGetter) @@ -279,12 +285,24 @@ func (i *scaleUpdatedObjectInfo) UpdatedObject(ctx context.Context, oldObj runti return nil, errors.NewNotFound(apps.Resource("replicasets/scale"), i.name) } + managedFieldsHandler := fieldmanager.NewScaleHandler( + replicaset.ManagedFields, + schema.GroupVersion{Group: "apps", Version: "v1"}, + replicasPathInReplicaSet, + ) + // replicaset -> old scale oldScale, err := scaleFromReplicaSet(replicaset) if err != nil { return nil, err } + scaleManagedFields, err := managedFieldsHandler.ToSubresource() + if err != nil { + return nil, err + } + oldScale.ManagedFields = scaleManagedFields + // old scale -> new scale newScaleObj, err := i.reqObjInfo.UpdatedObject(ctx, oldScale) if err != nil { @@ -315,5 +333,12 @@ func (i *scaleUpdatedObjectInfo) UpdatedObject(ctx context.Context, oldObj runti // move replicas/resourceVersion fields to object and return replicaset.Spec.Replicas = scale.Spec.Replicas replicaset.ResourceVersion = scale.ResourceVersion + + updatedEntries, err := managedFieldsHandler.ToParent(scale.ManagedFields) + if err != nil { + return nil, err + } + replicaset.ManagedFields = updatedEntries + return replicaset, nil } diff --git a/pkg/registry/apps/statefulset/storage/storage.go b/pkg/registry/apps/statefulset/storage/storage.go index 0afa6b69098..50b9954784c 100644 --- a/pkg/registry/apps/statefulset/storage/storage.go +++ b/pkg/registry/apps/statefulset/storage/storage.go @@ -24,6 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager" "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" @@ -47,6 +48,11 @@ type StatefulSetStorage struct { Scale *ScaleREST } +// maps a group version to the replicas path in a statefulset object +var replicasPathInStatefulSet = fieldmanager.ResourcePathMappings{ + schema.GroupVersion{Group: "apps", Version: "v1"}.String(): fieldpath.MakePathOrDie("spec", "replicas"), +} + // NewStorage returns new instance of StatefulSetStorage. 
func NewStorage(optsGetter generic.RESTOptionsGetter) (StatefulSetStorage, error) {
 	statefulSetRest, statefulSetStatusRest, err := NewREST(optsGetter)
@@ -265,11 +271,22 @@ func (i *scaleUpdatedObjectInfo) UpdatedObject(ctx context.Context, oldObj runti
 		return nil, errors.NewNotFound(apps.Resource("statefulsets/scale"), i.name)
 	}
+	managedFieldsHandler := fieldmanager.NewScaleHandler(
+		statefulset.ManagedFields,
+		schema.GroupVersion{Group: "apps", Version: "v1"},
+		replicasPathInStatefulSet,
+	)
+
 	// statefulset -> old scale
 	oldScale, err := scaleFromStatefulSet(statefulset)
 	if err != nil {
 		return nil, err
 	}
+	scaleManagedFields, err := managedFieldsHandler.ToSubresource()
+	if err != nil {
+		return nil, err
+	}
+	oldScale.ManagedFields = scaleManagedFields

 	// old scale -> new scale
 	newScaleObj, err := i.reqObjInfo.UpdatedObject(ctx, oldScale)
@@ -301,5 +318,12 @@ func (i *scaleUpdatedObjectInfo) UpdatedObject(ctx context.Context, oldObj runti
 	// move replicas/resourceVersion fields to object and return
 	statefulset.Spec.Replicas = scale.Spec.Replicas
 	statefulset.ResourceVersion = scale.ResourceVersion
+
+	updatedEntries, err := managedFieldsHandler.ToParent(scale.ManagedFields)
+	if err != nil {
+		return nil, err
+	}
+	statefulset.ManagedFields = updatedEntries
+
 	return statefulset, nil
 }
diff --git a/pkg/registry/core/replicationcontroller/storage/storage.go b/pkg/registry/core/replicationcontroller/storage/storage.go
index 8ae12247f18..d8457e54a6c 100644
--- a/pkg/registry/core/replicationcontroller/storage/storage.go
+++ b/pkg/registry/core/replicationcontroller/storage/storage.go
@@ -27,6 +27,7 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager"
 	"k8s.io/apiserver/pkg/registry/generic"
 	genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
 	"k8s.io/apiserver/pkg/registry/rest"
@@ -49,6 +50,11 @@ type ControllerStorage struct {
 	Scale *ScaleREST
 }

+// maps a group version to the replicas path in a replicationcontroller object
+var replicasPathInReplicationController = fieldmanager.ResourcePathMappings{
+	schema.GroupVersion{Group: "", Version: "v1"}.String(): fieldpath.MakePathOrDie("spec", "replicas"),
+}
+
func NewStorage(optsGetter generic.RESTOptionsGetter) (ControllerStorage, error) {
 	controllerREST, statusREST, err := NewREST(optsGetter)
 	if err != nil {
@@ -239,8 +245,19 @@ func (i *scaleUpdatedObjectInfo) UpdatedObject(ctx context.Context, oldObj runti
 		return nil, errors.NewNotFound(api.Resource("replicationcontrollers/scale"), i.name)
 	}
+	managedFieldsHandler := fieldmanager.NewScaleHandler(
+		replicationcontroller.ManagedFields,
+		schema.GroupVersion{Group: "", Version: "v1"},
+		replicasPathInReplicationController,
+	)
+
 	// replicationcontroller -> old scale
 	oldScale := scaleFromRC(replicationcontroller)
+	scaleManagedFields, err := managedFieldsHandler.ToSubresource()
+	if err != nil {
+		return nil, err
+	}
+	oldScale.ManagedFields = scaleManagedFields

 	// old scale -> new scale
 	newScaleObj, err := i.reqObjInfo.UpdatedObject(ctx, oldScale)
@@ -272,5 +289,12 @@ func (i *scaleUpdatedObjectInfo) UpdatedObject(ctx context.Context, oldObj runti
 	// move replicas/resourceVersion fields to object and return
 	replicationcontroller.Spec.Replicas = scale.Spec.Replicas
 	replicationcontroller.ResourceVersion = scale.ResourceVersion
+
+	updatedEntries, err := managedFieldsHandler.ToParent(scale.ManagedFields)
+	if err != nil {
+		return nil, err
+	}
+
replicationcontroller.ManagedFields = updatedEntries + return replicationcontroller, nil } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion/converter.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion/converter.go index 23564762c75..d7487a495c6 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion/converter.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion/converter.go @@ -165,6 +165,13 @@ func (c *crConverter) ConvertToVersion(in runtime.Object, target runtime.GroupVe // TODO: should this be a typed error? return nil, fmt.Errorf("%v is unstructured and is not suitable for converting to %q", fromGVK.String(), target) } + // Special-case typed scale conversion if this custom resource supports a scale endpoint + if c.convertScale { + if _, isInScale := in.(*autoscalingv1.Scale); isInScale { + return typedscheme.Scheme.ConvertToVersion(in, target) + } + } + if !c.validVersions[toGVK.GroupVersion()] { return nil, fmt.Errorf("request to convert CR to an invalid group/version: %s", toGVK.GroupVersion().String()) } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go index efd6487f03d..d3585429b5f 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go @@ -699,6 +699,27 @@ func (r *crdHandler) getOrCreateServingInfoFor(uid types.UID, name string) (*crd if err != nil { return nil, err } + + // Create replicasPathInCustomResource + replicasPathInCustomResource := fieldmanager.ResourcePathMappings{} + for _, v := range crd.Spec.Versions { + subresources, err := apiextensionshelpers.GetSubresourcesForVersion(crd, v.Name) + if err != nil { + utilruntime.HandleError(err) + return nil, fmt.Errorf("the server could not properly serve the CR subresources") + } + if subresources == nil || subresources.Scale == nil { + continue + } + path := fieldpath.Path{} + splitReplicasPath := strings.Split(strings.TrimPrefix(subresources.Scale.SpecReplicasPath, "."), ".") + for _, element := range splitReplicasPath { + s := element + path = append(path, fieldpath.PathElement{FieldName: &s}) + } + replicasPathInCustomResource[schema.GroupVersion{Group: crd.Spec.Group, Version: v.Name}.String()] = path + } + for _, v := range crd.Spec.Versions { // In addition to Unstructured objects (Custom Resources), we also may sometimes need to // decode unversioned Options objects, so we delegate to parameterScheme for such types. @@ -803,6 +824,7 @@ func (r *crdHandler) getOrCreateServingInfoFor(uid types.UID, name string) (*crd }, crd.Status.AcceptedNames.Categories, table, + replicasPathInCustomResource, ) selfLinkPrefix := "" @@ -892,8 +914,19 @@ func (r *crdHandler) getOrCreateServingInfoFor(uid types.UID, name string) (*crd SelfLinkPathPrefix: selfLinkPrefix, SelfLinkPathSuffix: "/scale", } - // TODO(issues.k8s.io/82046): We can't effectively track ownership on scale requests yet. 
- scaleScope.FieldManager = nil + + if utilfeature.DefaultFeatureGate.Enabled(features.ServerSideApply) && subresources != nil && subresources.Scale != nil { + scaleScope, err = scopeWithFieldManager( + typeConverter, + scaleScope, + nil, + "scale", + ) + if err != nil { + return nil, err + } + } + scaleScopes[v.Name] = &scaleScope // override status subresource values diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/etcd.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/etcd.go index 2b51d64c3c0..bfbc8b8b14f 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/etcd.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/etcd.go @@ -28,6 +28,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager" "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" @@ -41,7 +42,7 @@ type CustomResourceStorage struct { Scale *ScaleREST } -func NewStorage(resource schema.GroupResource, kind, listKind schema.GroupVersionKind, strategy customResourceStrategy, optsGetter generic.RESTOptionsGetter, categories []string, tableConvertor rest.TableConvertor) CustomResourceStorage { +func NewStorage(resource schema.GroupResource, kind, listKind schema.GroupVersionKind, strategy customResourceStrategy, optsGetter generic.RESTOptionsGetter, categories []string, tableConvertor rest.TableConvertor, replicasPathMapping fieldmanager.ResourcePathMappings) CustomResourceStorage { customResourceREST, customResourceStatusREST := newREST(resource, kind, listKind, strategy, optsGetter, categories, tableConvertor) s := CustomResourceStorage{ @@ -59,10 +60,12 @@ func NewStorage(resource schema.GroupResource, kind, listKind schema.GroupVersio } s.Scale = &ScaleREST{ - store: customResourceREST.Store, - specReplicasPath: scale.SpecReplicasPath, - statusReplicasPath: scale.StatusReplicasPath, - labelSelectorPath: labelSelectorPath, + store: customResourceREST.Store, + specReplicasPath: scale.SpecReplicasPath, + statusReplicasPath: scale.StatusReplicasPath, + labelSelectorPath: labelSelectorPath, + parentGV: kind.GroupVersion(), + replicasPathMapping: replicasPathMapping, } } @@ -209,10 +212,12 @@ func (r *StatusREST) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set { } type ScaleREST struct { - store *genericregistry.Store - specReplicasPath string - statusReplicasPath string - labelSelectorPath string + store *genericregistry.Store + specReplicasPath string + statusReplicasPath string + labelSelectorPath string + parentGV schema.GroupVersion + replicasPathMapping fieldmanager.ResourcePathMappings } // ScaleREST implements Patcher @@ -247,10 +252,12 @@ func (r *ScaleREST) Get(ctx context.Context, name string, options *metav1.GetOpt func (r *ScaleREST) Update(ctx context.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc, forceAllowCreate bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) { scaleObjInfo := &scaleUpdatedObjectInfo{ - reqObjInfo: objInfo, - specReplicasPath: r.specReplicasPath, - labelSelectorPath: r.labelSelectorPath, - statusReplicasPath: r.statusReplicasPath, + reqObjInfo: objInfo, + specReplicasPath: r.specReplicasPath, + labelSelectorPath: r.labelSelectorPath, 
+		statusReplicasPath:  r.statusReplicasPath,
+		parentGV:            r.parentGV,
+		replicasPathMapping: r.replicasPathMapping,
 	}

 	obj, _, err := r.store.Update(
@@ -299,19 +306,22 @@ func toScaleUpdateValidation(f rest.ValidateObjectUpdateFunc, specReplicasPath,
 	}
 }

+// Split the path per period, ignoring the leading period.
+func splitReplicasPath(replicasPath string) []string {
+	return strings.Split(strings.TrimPrefix(replicasPath, "."), ".")
+}
+
 // scaleFromCustomResource returns a scale subresource for a customresource and a bool signalling whether
 // the specReplicas value was found.
 func scaleFromCustomResource(cr *unstructured.Unstructured, specReplicasPath, statusReplicasPath, labelSelectorPath string) (*autoscalingv1.Scale, bool, error) {
-	specReplicasPath = strings.TrimPrefix(specReplicasPath, ".") // ignore leading period
-	specReplicas, foundSpecReplicas, err := unstructured.NestedInt64(cr.UnstructuredContent(), strings.Split(specReplicasPath, ".")...)
+	specReplicas, foundSpecReplicas, err := unstructured.NestedInt64(cr.UnstructuredContent(), splitReplicasPath(specReplicasPath)...)
 	if err != nil {
 		return nil, false, err
 	} else if !foundSpecReplicas {
 		specReplicas = 0
 	}

-	statusReplicasPath = strings.TrimPrefix(statusReplicasPath, ".") // ignore leading period
-	statusReplicas, found, err := unstructured.NestedInt64(cr.UnstructuredContent(), strings.Split(statusReplicasPath, ".")...)
+	statusReplicas, found, err := unstructured.NestedInt64(cr.UnstructuredContent(), splitReplicasPath(statusReplicasPath)...)
 	if err != nil {
 		return nil, false, err
 	} else if !found {
@@ -320,8 +330,7 @@ func scaleFromCustomResource(cr *unstructured.Unstructured, specReplicasPath, st
 	var labelSelector string
 	if len(labelSelectorPath) > 0 {
-		labelSelectorPath = strings.TrimPrefix(labelSelectorPath, ".") // ignore leading period
-		labelSelector, _, err = unstructured.NestedString(cr.UnstructuredContent(), strings.Split(labelSelectorPath, ".")...)
+		labelSelector, _, err = unstructured.NestedString(cr.UnstructuredContent(), splitReplicasPath(labelSelectorPath)...)
if err != nil { return nil, false, err } @@ -353,10 +362,12 @@ func scaleFromCustomResource(cr *unstructured.Unstructured, specReplicasPath, st } type scaleUpdatedObjectInfo struct { - reqObjInfo rest.UpdatedObjectInfo - specReplicasPath string - statusReplicasPath string - labelSelectorPath string + reqObjInfo rest.UpdatedObjectInfo + specReplicasPath string + statusReplicasPath string + labelSelectorPath string + parentGV schema.GroupVersion + replicasPathMapping fieldmanager.ResourcePathMappings } func (i *scaleUpdatedObjectInfo) Preconditions() *metav1.Preconditions { @@ -366,6 +377,13 @@ func (i *scaleUpdatedObjectInfo) Preconditions() *metav1.Preconditions { func (i *scaleUpdatedObjectInfo) UpdatedObject(ctx context.Context, oldObj runtime.Object) (runtime.Object, error) { cr := oldObj.DeepCopyObject().(*unstructured.Unstructured) const invalidSpecReplicas = -2147483648 // smallest int32 + + managedFieldsHandler := fieldmanager.NewScaleHandler( + cr.GetManagedFields(), + i.parentGV, + i.replicasPathMapping, + ) + oldScale, replicasFound, err := scaleFromCustomResource(cr, i.specReplicasPath, i.statusReplicasPath, i.labelSelectorPath) if err != nil { return nil, err @@ -374,6 +392,12 @@ func (i *scaleUpdatedObjectInfo) UpdatedObject(ctx context.Context, oldObj runti oldScale.Spec.Replicas = invalidSpecReplicas // signal that this was not set before } + scaleManagedFields, err := managedFieldsHandler.ToSubresource() + if err != nil { + return nil, err + } + oldScale.ManagedFields = scaleManagedFields + obj, err := i.reqObjInfo.UpdatedObject(ctx, oldScale) if err != nil { return nil, err @@ -391,9 +415,7 @@ func (i *scaleUpdatedObjectInfo) UpdatedObject(ctx context.Context, oldObj runti return nil, apierrors.NewBadRequest(fmt.Sprintf("the spec replicas field %q cannot be empty", i.specReplicasPath)) } - specReplicasPath := strings.TrimPrefix(i.specReplicasPath, ".") // ignore leading period - - if err := unstructured.SetNestedField(cr.Object, int64(scale.Spec.Replicas), strings.Split(specReplicasPath, ".")...); err != nil { + if err := unstructured.SetNestedField(cr.Object, int64(scale.Spec.Replicas), splitReplicasPath(i.specReplicasPath)...); err != nil { return nil, err } if len(scale.ResourceVersion) != 0 { @@ -401,5 +423,12 @@ func (i *scaleUpdatedObjectInfo) UpdatedObject(ctx context.Context, oldObj runti // Set that precondition and return any conflict errors to the client. 
cr.SetResourceVersion(scale.ResourceVersion) } + + updatedEntries, err := managedFieldsHandler.ToParent(scale.ManagedFields) + if err != nil { + return nil, err + } + cr.SetManagedFields(updatedEntries) + return cr, nil } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/etcd_test.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/etcd_test.go index 8612eeeb31f..d2d3125653b 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/etcd_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/etcd_test.go @@ -33,6 +33,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/diff" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/generic" registrytest "k8s.io/apiserver/pkg/registry/generic/testing" @@ -106,6 +107,7 @@ func newStorage(t *testing.T) (customresource.CustomResourceStorage, *etcd3testi restOptions, []string{"all"}, table, + fieldmanager.ResourcePathMappings{}, ) return storage, server diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/subresources_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/subresources_test.go index 731291acc4d..b773100038f 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/subresources_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/subresources_test.go @@ -33,8 +33,10 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apiserver/pkg/features" + genericfeatures "k8s.io/apiserver/pkg/features" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/dynamic" + featuregatetesting "k8s.io/component-base/featuregate/testing" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" @@ -114,6 +116,26 @@ func NewNoxuSubresourceInstance(namespace, name, version string) *unstructured.U } } +func NewNoxuSubresourceInstanceWithReplicas(namespace, name, version, replicasField string) *unstructured.Unstructured { + return &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": fmt.Sprintf("mygroup.example.com/%s", version), + "kind": "WishIHadChosenNoxu", + "metadata": map[string]interface{}{ + "namespace": namespace, + "name": name, + }, + "spec": map[string]interface{}{ + "num": int64(10), + replicasField: int64(3), + }, + "status": map[string]interface{}{ + "replicas": int64(7), + }, + }, + } +} + func TestStatusSubresource(t *testing.T) { tearDown, apiExtensionClient, dynamicClient, err := fixtures.StartDefaultServerWithClients(t) if err != nil { @@ -373,6 +395,87 @@ func TestScaleSubresource(t *testing.T) { } } +func TestApplyScaleSubresource(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.ServerSideApply, true)() + + tearDown, config, _, err := fixtures.StartDefaultServer(t) + if err != nil { + t.Fatal(err) + } + defer tearDown() + + apiExtensionClient, err := clientset.NewForConfig(config) + if err != nil { + t.Fatal(err) + } + dynamicClient, err := dynamic.NewForConfig(config) + if err != nil { + t.Fatal(err) + } + + noxuDefinition := NewNoxuSubresourcesCRDs(apiextensionsv1.NamespaceScoped)[0] + subresources, err := getSubresourcesForVersion(noxuDefinition, "v1beta1") + if err != nil 
{ + t.Fatal(err) + } + subresources.Scale.SpecReplicasPath = ".spec.replicas[0]" + noxuDefinition, err = fixtures.CreateNewV1CustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + if err != nil { + t.Fatal(err) + } + + // Create a client for it. + ns := "not-the-default" + noxuResourceClient := newNamespacedCustomResourceVersionedClient(ns, dynamicClient, noxuDefinition, "v1beta1") + + obj := NewNoxuSubresourceInstanceWithReplicas(ns, "foo", "v1beta1", "replicas[0]") + obj, err = noxuResourceClient.Create(context.TODO(), obj, metav1.CreateOptions{}) + if err != nil { + t.Logf("%#v", obj) + t.Fatalf("Failed to create CustomResource: %v", err) + } + + noxuResourceClient = newNamespacedCustomResourceVersionedClient(ns, dynamicClient, noxuDefinition, "v1") + patch := `{"metadata": {"name": "foo"}, "kind": "WishIHadChosenNoxu", "apiVersion": "mygroup.example.com/v1", "spec": {"replicas": 3}}` + obj, err = noxuResourceClient.Patch(context.TODO(), "foo", types.ApplyPatchType, []byte(patch), metav1.PatchOptions{FieldManager: "applier"}) + if err != nil { + t.Logf("%#v", obj) + t.Fatalf("Failed to Apply CustomResource: %v", err) + } + + if got := len(obj.GetManagedFields()); got != 2 { + t.Fatalf("Expected 2 managed fields, got %v: %v", got, obj.GetManagedFields()) + } + + _, err = noxuResourceClient.Patch(context.TODO(), "foo", types.MergePatchType, []byte(`{"spec": {"replicas": 5}}`), metav1.PatchOptions{FieldManager: "scaler"}, "scale") + if err != nil { + t.Fatal(err) + } + + obj, err = noxuResourceClient.Get(context.TODO(), "foo", metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to Get CustomResource: %v", err) + } + + // Managed fields should have 3 entries: one for scale, one for spec, and one for the rest of the fields + managedFields := obj.GetManagedFields() + if len(managedFields) != 3 { + t.Fatalf("Expected 3 managed fields, got %v: %v", len(managedFields), obj.GetManagedFields()) + } + specEntry := managedFields[0] + if specEntry.Manager != "applier" || specEntry.APIVersion != "mygroup.example.com/v1" || specEntry.Operation != "Apply" || string(specEntry.FieldsV1.Raw) != `{"f:spec":{}}` || specEntry.Subresource != "" { + t.Fatalf("Unexpected entry: %v", specEntry) + } + scaleEntry := managedFields[1] + if scaleEntry.Manager != "scaler" || scaleEntry.APIVersion != "mygroup.example.com/v1" || scaleEntry.Operation != "Update" || string(scaleEntry.FieldsV1.Raw) != `{"f:spec":{"f:replicas":{}}}` || scaleEntry.Subresource != "scale" { + t.Fatalf("Unexpected entry: %v", scaleEntry) + } + restEntry := managedFields[2] + if restEntry.Manager != "integration.test" || restEntry.APIVersion != "mygroup.example.com/v1beta1" { + t.Fatalf("Unexpected entry: %v", restEntry) + } +} + func TestValidationSchemaWithStatus(t *testing.T) { tearDown, config, _, err := fixtures.StartDefaultServer(t) if err != nil { diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehander.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler.go similarity index 100% rename from staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehander.go rename to staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler.go diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehander_test.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler_test.go similarity index 100% rename from 
staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehander_test.go rename to staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler_test.go diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/structuredmerge.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/structuredmerge.go index bfa3a3f715a..1be9e763af4 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/structuredmerge.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/structuredmerge.go @@ -76,7 +76,7 @@ func NewCRDStructuredMergeManager(typeConverter TypeConverter, objectConverter r func (f *structuredMergeManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { newObjVersioned, err := f.toVersioned(newObj) if err != nil { - return nil, nil, fmt.Errorf("failed to convert new object to proper version: %v", err) + return nil, nil, fmt.Errorf("failed to convert new object to proper version: %v (from %v to %v)", err, newObj.GetObjectKind().GroupVersionKind(), f.groupVersion) } liveObjVersioned, err := f.toVersioned(liveObj) if err != nil { diff --git a/test/integration/apiserver/apply/apply_test.go b/test/integration/apiserver/apply/apply_test.go index f9859233d2e..fe4b5636214 100644 --- a/test/integration/apiserver/apply/apply_test.go +++ b/test/integration/apiserver/apply/apply_test.go @@ -3424,300 +3424,3 @@ func TestSubresourceField(t *testing.T) { t.Fatalf(`Unexpected entry, got: %v`, managedFields[1]) } } - -func TestApplyOnScaleDeployment(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.ServerSideApply, true)() - - _, client, closeFn := setup(t) - defer closeFn() - - validDeployment := []byte(`{ - "apiVersion": "apps/v1", - "kind": "Deployment", - "metadata": { - "name": "deployment" - }, - "spec": { - "replicas": 1, - "selector": { - "matchLabels": { - "app": "nginx" - } - }, - "template": { - "metadata": { - "labels": { - "app": "nginx" - } - }, - "spec": { - "containers": [{ - "name": "nginx", - "image": "nginx:latest" - }] - } - } - } - }`) - - // Create deployment - _, err := client.CoreV1().RESTClient().Patch(types.ApplyPatchType). - AbsPath("/apis/apps/v1"). - Namespace("default"). - Resource("deployments"). - Name("deployment"). - Param("fieldManager", "apply_test"). - Body(validDeployment). - Do(context.TODO()). - Get() - if err != nil { - t.Fatalf("Failed to create object using Apply patch: %v", err) - } - deployment, err := client.AppsV1().Deployments("default").Get(context.TODO(), "deployment", metav1.GetOptions{}) - if err != nil { - t.Fatalf("Failed to retrieve object: %v", err) - } - if *deployment.Spec.Replicas != 1 { - t.Fatalf("Expected replicas to be 1, but got: %d", *deployment.Spec.Replicas) - } - - // Call scale subresource to update replicas - _, err = client.CoreV1().RESTClient(). - Patch(types.MergePatchType). - AbsPath("/apis/apps/v1"). - Name("deployment"). - Resource("deployments"). - SubResource("scale"). - Namespace("default"). - Param("fieldManager", "scale_test"). - Body([]byte(`{"spec":{"replicas": 5}}`)). - Do(context.TODO()). 
- Get() - if err != nil { - t.Fatalf("Error updating scale subresource: %v ", err) - } - - deployment, err = client.AppsV1().Deployments("default").Get(context.TODO(), "deployment", metav1.GetOptions{}) - if err != nil { - t.Fatalf("Failed to retrieve object: %v", err) - } - - if *deployment.Spec.Replicas != 5 { - t.Fatalf("Expected replicas to be 5, but got: %d", *deployment.Spec.Replicas) - } - - assertReplicasOwnership(t, (*deployment).GetManagedFields(), "scale_test") - - // Re-apply the original object, it should fail with conflict because replicas have changed - _, err = client.CoreV1().RESTClient().Patch(types.ApplyPatchType). - AbsPath("/apis/apps/v1"). - Namespace("default"). - Resource("deployments"). - Name("deployment"). - Param("fieldManager", "apply_test"). - Body(validDeployment). - Do(context.TODO()). - Get() - if !apierrors.IsConflict(err) { - t.Fatalf("Expected conflict error but got: %v", err) - } - - // Re-apply forcing the changes should succeed - _, err = client.CoreV1().RESTClient().Patch(types.ApplyPatchType). - AbsPath("/apis/apps/v1"). - Namespace("default"). - Resource("deployments"). - Name("deployment"). - Param("fieldManager", "apply_test"). - Param("force", "true"). - Body(validDeployment). - Do(context.TODO()). - Get() - if err != nil { - t.Fatalf("Error updating deployment: %v ", err) - } - - deployment, err = client.AppsV1().Deployments("default").Get(context.TODO(), "deployment", metav1.GetOptions{}) - if err != nil { - t.Fatalf("Failed to retrieve object: %v", err) - } - - if *deployment.Spec.Replicas != 1 { - t.Fatalf("Expected replicas to be 1, but got: %d", *deployment.Spec.Replicas) - } - - assertReplicasOwnership(t, (*deployment).GetManagedFields(), "apply_test") - - // Run "Apply" with a scale object with a different number of replicas. It should generate a conflict. - _, err = client.CoreV1().RESTClient(). - Patch(types.ApplyPatchType). - AbsPath("/apis/apps/v1"). - Namespace("default"). - Resource("deployments"). - SubResource("scale"). - Name("deployment"). - Param("fieldManager", "apply_scale"). - Body([]byte(`{"kind":"Scale","apiVersion":"autoscaling/v1","metadata":{"name":"deployment","namespace":"default"},"spec":{"replicas":17}}`)). - Do(context.TODO()). - Get() - if !apierrors.IsConflict(err) { - t.Fatalf("Expected conflict error but got: %v", err) - } - if !strings.Contains(err.Error(), "apply_test") { - t.Fatalf("Expected conflict with `apply_test` manager but got: %v", err) - } - - // Same as before but force. Only the new manager should own .spec.replicas - _, err = client.CoreV1().RESTClient(). - Patch(types.ApplyPatchType). - AbsPath("/apis/apps/v1"). - Namespace("default"). - Resource("deployments"). - SubResource("scale"). - Name("deployment"). - Param("fieldManager", "apply_scale"). - Param("force", "true"). - Body([]byte(`{"kind":"Scale","apiVersion":"autoscaling/v1","metadata":{"name":"deployment","namespace":"default"},"spec":{"replicas":17}}`)). - Do(context.TODO()). - Get() - if err != nil { - t.Fatalf("Error updating deployment: %v ", err) - } - - deployment, err = client.AppsV1().Deployments("default").Get(context.TODO(), "deployment", metav1.GetOptions{}) - if err != nil { - t.Fatalf("Failed to retrieve object: %v", err) - } - - if *deployment.Spec.Replicas != 17 { - t.Fatalf("Expected to replicas to be 17, but got: %d", *deployment.Spec.Replicas) - } - - assertReplicasOwnership(t, (*deployment).GetManagedFields(), "apply_scale") - - // Replace scale object - _, err = client.CoreV1().RESTClient(). - Put(). 
- AbsPath("/apis/apps/v1"). - Namespace("default"). - Resource("deployments"). - SubResource("scale"). - Name("deployment"). - Param("fieldManager", "replace_test"). - Body([]byte(`{"kind":"Scale","apiVersion":"autoscaling/v1","metadata":{"name":"deployment","namespace":"default"},"spec":{"replicas":7}}`)). - Do(context.TODO()). - Get() - if err != nil { - t.Fatalf("Error updating deployment: %v ", err) - } - - deployment, err = client.AppsV1().Deployments("default").Get(context.TODO(), "deployment", metav1.GetOptions{}) - if err != nil { - t.Fatalf("Failed to retrieve object: %v", err) - } - - if *deployment.Spec.Replicas != 7 { - t.Fatalf("Expected to replicas to be 7, but got: %d", *deployment.Spec.Replicas) - } - - assertReplicasOwnership(t, (*deployment).GetManagedFields(), "replace_test") - - // Apply the same number of replicas, both managers should own the field - _, err = client.CoreV1().RESTClient(). - Patch(types.ApplyPatchType). - AbsPath("/apis/apps/v1"). - Namespace("default"). - Resource("deployments"). - SubResource("scale"). - Name("deployment"). - Param("fieldManager", "co_owning_test"). - Param("force", "true"). - Body([]byte(`{"kind":"Scale","apiVersion":"autoscaling/v1","metadata":{"name":"deployment","namespace":"default"},"spec":{"replicas":7}}`)). - Do(context.TODO()). - Get() - if err != nil { - t.Fatalf("Error updating deployment: %v ", err) - } - - deployment, err = client.AppsV1().Deployments("default").Get(context.TODO(), "deployment", metav1.GetOptions{}) - if err != nil { - t.Fatalf("Failed to retrieve object: %v", err) - } - - if *deployment.Spec.Replicas != 7 { - t.Fatalf("Expected to replicas to be 7, but got: %d", *deployment.Spec.Replicas) - } - - assertReplicasOwnership(t, (*deployment).GetManagedFields(), "replace_test", "co_owning_test") - - // Scaling again should make this manager the only owner of replicas - _, err = client.CoreV1().RESTClient(). - Patch(types.MergePatchType). - AbsPath("/apis/apps/v1"). - Name("deployment"). - Resource("deployments"). - SubResource("scale"). - Namespace("default"). - Param("fieldManager", "scale_test"). - Body([]byte(`{"spec":{"replicas": 5}}`)). - Do(context.TODO()). 
- Get() - if err != nil { - t.Fatalf("Error updating scale subresource: %v ", err) - } - - deployment, err = client.AppsV1().Deployments("default").Get(context.TODO(), "deployment", metav1.GetOptions{}) - if err != nil { - t.Fatalf("Failed to retrieve object: %v", err) - } - - if *deployment.Spec.Replicas != 5 { - t.Fatalf("Expected replicas to be 5, but got: %d", *deployment.Spec.Replicas) - } - - assertReplicasOwnership(t, (*deployment).GetManagedFields(), "scale_test") -} - -func assertReplicasOwnership(t *testing.T, managedFields []metav1.ManagedFieldsEntry, fieldManagers ...string) { - t.Helper() - - seen := make(map[string]bool) - for _, m := range fieldManagers { - seen[m] = false - } - - for _, managedField := range managedFields { - var entryJSON map[string]interface{} - if err := json.Unmarshal(managedField.FieldsV1.Raw, &entryJSON); err != nil { - t.Fatalf("failed to read into json") - } - - spec, ok := entryJSON["f:spec"].(map[string]interface{}) - if !ok { - // continue with the next managedField, as we this field does not hold the spec entry - continue - } - - if _, ok := spec["f:replicas"]; !ok { - // continue with the next managedField, as we this field does not hold the spec.replicas entry - continue - } - - // check if the manager is one of the ones we expect - if _, ok := seen[managedField.Manager]; !ok { - t.Fatalf("Unexpected field manager, found %q, expected to be in: %v", managedField.Manager, seen) - } - - seen[managedField.Manager] = true - } - - var missingManagers []string - for manager, managerSeen := range seen { - if !managerSeen { - missingManagers = append(missingManagers, manager) - } - } - if len(missingManagers) > 0 { - t.Fatalf("replicas fields should be owned by %v", missingManagers) - } -} diff --git a/test/integration/apiserver/apply/scale_test.go b/test/integration/apiserver/apply/scale_test.go new file mode 100644 index 00000000000..de8005e6893 --- /dev/null +++ b/test/integration/apiserver/apply/scale_test.go @@ -0,0 +1,371 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package apiserver + +import ( + "context" + "encoding/json" + "fmt" + "path" + "strings" + "testing" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/types" + genericfeatures "k8s.io/apiserver/pkg/features" + utilfeature "k8s.io/apiserver/pkg/util/feature" + clientset "k8s.io/client-go/kubernetes" + featuregatetesting "k8s.io/component-base/featuregate/testing" +) + +type scaleTest struct { + kind string + resource string + path string + validObj string +} + +func TestScaleAllResources(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.ServerSideApply, true)() + + _, client, closeFn := setup(t) + defer closeFn() + + tests := []scaleTest{ + { + kind: "Deployment", + resource: "deployments", + path: "/apis/apps/v1", + validObj: validAppsV1("Deployment"), + }, + { + kind: "StatefulSet", + resource: "statefulsets", + path: "/apis/apps/v1", + validObj: validAppsV1("StatefulSet"), + }, + { + kind: "ReplicaSet", + resource: "replicasets", + path: "/apis/apps/v1", + validObj: validAppsV1("ReplicaSet"), + }, + { + kind: "ReplicationController", + resource: "replicationcontrollers", + path: "/api/v1", + validObj: validV1ReplicationController(), + }, + } + + for _, test := range tests { + t.Run(test.kind, func(t *testing.T) { + validObject := []byte(test.validObj) + + // Create the object + _, err := client.CoreV1().RESTClient().Patch(types.ApplyPatchType). + AbsPath(test.path). + Namespace("default"). + Resource(test.resource). + Name("test"). + Param("fieldManager", "apply_test"). + Body(validObject). + Do(context.TODO()).Get() + if err != nil { + t.Fatalf("Failed to create object using apply: %v", err) + } + obj := retrieveObject(t, client, test) + assertReplicasValue(t, obj, 1) + assertReplicasOwnership(t, obj, "apply_test") + + // Call scale subresource to update replicas + _, err = client.CoreV1().RESTClient(). + Patch(types.MergePatchType). + AbsPath(test.path). + Namespace("default"). + Resource(test.resource). + Name("test"). + SubResource("scale"). + Param("fieldManager", "scale_test"). + Body([]byte(`{"spec":{"replicas": 5}}`)). + Do(context.TODO()).Get() + if err != nil { + t.Fatalf("Failed to scale object: %v", err) + } + obj = retrieveObject(t, client, test) + assertReplicasValue(t, obj, 5) + assertReplicasOwnership(t, obj, "scale_test") + + // Re-apply the original object, it should fail with conflict because replicas have changed + _, err = client.CoreV1().RESTClient().Patch(types.ApplyPatchType). + AbsPath(test.path). + Namespace("default"). + Resource(test.resource). + Name("test"). + Param("fieldManager", "apply_test"). + Body(validObject). + Do(context.TODO()).Get() + if !apierrors.IsConflict(err) { + t.Fatalf("Expected conflict when re-applying the original object, but got: %v", err) + } + + // Re-apply forcing the changes should succeed + _, err = client.CoreV1().RESTClient().Patch(types.ApplyPatchType). + AbsPath(test.path). + Namespace("default"). + Resource(test.resource). + Name("test"). + Param("fieldManager", "apply_test"). + Param("force", "true"). + Body(validObject). + Do(context.TODO()).Get() + if err != nil { + t.Fatalf("Error force-updating: %v", err) + } + obj = retrieveObject(t, client, test) + assertReplicasValue(t, obj, 1) + assertReplicasOwnership(t, obj, "apply_test") + + // Run "Apply" with a scale object with a different number of replicas. 
It should generate a conflict.
+			_, err = client.CoreV1().RESTClient().Patch(types.ApplyPatchType).
+				AbsPath(test.path).
+				Namespace("default").
+				Resource(test.resource).
+				SubResource("scale").
+				Name("test").
+				Param("fieldManager", "apply_scale").
+				Body([]byte(`{"kind":"Scale","apiVersion":"autoscaling/v1","metadata":{"name":"test","namespace":"default"},"spec":{"replicas":17}}`)).
+				Do(context.TODO()).Get()
+			if !apierrors.IsConflict(err) {
+				t.Fatalf("Expected conflict error but got: %v", err)
+			}
+			if !strings.Contains(err.Error(), "apply_test") {
+				t.Fatalf("Expected conflict with `apply_test` manager but got: %v", err)
+			}
+
+			// Same as before but force. Only the new manager should own .spec.replicas
+			_, err = client.CoreV1().RESTClient().Patch(types.ApplyPatchType).
+				AbsPath(test.path).
+				Namespace("default").
+				Resource(test.resource).
+				SubResource("scale").
+				Name("test").
+				Param("fieldManager", "apply_scale").
+				Param("force", "true").
+				Body([]byte(`{"kind":"Scale","apiVersion":"autoscaling/v1","metadata":{"name":"test","namespace":"default"},"spec":{"replicas":17}}`)).
+				Do(context.TODO()).Get()
+			if err != nil {
+				t.Fatalf("Error updating object by applying scale and forcing: %v ", err)
+			}
+			obj = retrieveObject(t, client, test)
+			assertReplicasValue(t, obj, 17)
+			assertReplicasOwnership(t, obj, "apply_scale")
+
+			// Replace scale object
+			_, err = client.CoreV1().RESTClient().Put().
+				AbsPath(test.path).
+				Namespace("default").
+				Resource(test.resource).
+				SubResource("scale").
+				Name("test").
+				Param("fieldManager", "replace_test").
+				Body([]byte(`{"kind":"Scale","apiVersion":"autoscaling/v1","metadata":{"name":"test","namespace":"default"},"spec":{"replicas":7}}`)).
+				Do(context.TODO()).Get()
+			if err != nil {
+				t.Fatalf("Error replacing object: %v", err)
+			}
+			obj = retrieveObject(t, client, test)
+			assertReplicasValue(t, obj, 7)
+			assertReplicasOwnership(t, obj, "replace_test")
+
+			// Apply the same number of replicas, both managers should own the field
+			_, err = client.CoreV1().RESTClient().Patch(types.ApplyPatchType).
+				AbsPath(test.path).
+				Namespace("default").
+				Resource(test.resource).
+				SubResource("scale").
+				Name("test").
+				Param("fieldManager", "co_owning_test").
+				Body([]byte(`{"kind":"Scale","apiVersion":"autoscaling/v1","metadata":{"name":"test","namespace":"default"},"spec":{"replicas":7}}`)).
+				Do(context.TODO()).Get()
+			if err != nil {
+				t.Fatalf("Error updating object: %v", err)
+			}
+			obj = retrieveObject(t, client, test)
+			assertReplicasValue(t, obj, 7)
+			assertReplicasOwnership(t, obj, "replace_test", "co_owning_test")
+
+			// Scaling again should make this manager the only owner of replicas
+			_, err = client.CoreV1().RESTClient().Patch(types.MergePatchType).
+				AbsPath(test.path).
+				Namespace("default").
+				Resource(test.resource).
+				SubResource("scale").
+				Name("test").
+				Param("fieldManager", "scale_test").
+				Body([]byte(`{"spec":{"replicas": 5}}`)).
+				Do(context.TODO()).Get()
+			if err != nil {
+				t.Fatalf("Error scaling object: %v", err)
+			}
+			obj = retrieveObject(t, client, test)
+			assertReplicasValue(t, obj, 5)
+			assertReplicasOwnership(t, obj, "scale_test")
+		})
+	}
+}
+
+func validAppsV1(kind string) string {
+	return fmt.Sprintf(`{
+		"apiVersion": "apps/v1",
+		"kind": "%s",
+		"metadata": {
+			"name": "test"
+		},
+		"spec": {
+			"replicas": 1,
+			"selector": {
+				"matchLabels": {
+					"app": "nginx"
+				}
+			},
+			"template": {
+				"metadata": {
+					"labels": {
+						"app": "nginx"
+					}
+				},
+				"spec": {
+					"containers": [{
+						"name": "nginx",
+						"image": "nginx:latest"
+					}]
+				}
+			}
+		}
+	}`, kind)
+}
+
+func validV1ReplicationController() string {
+	return `{
+		"apiVersion": "v1",
+		"kind": "ReplicationController",
+		"metadata": {
+			"name": "test"
+		},
+		"spec": {
+			"replicas": 1,
+			"selector": {
+				"app": "nginx"
+			},
+			"template": {
+				"metadata": {
+					"labels": {
+						"app": "nginx"
+					}
+				},
+				"spec": {
+					"containers": [{
+						"name": "nginx",
+						"image": "nginx:latest"
+					}]
+				}
+			}
+		}
+	}`
+}
+
+func retrieveObject(t *testing.T, client clientset.Interface, test scaleTest) *unstructured.Unstructured {
+	t.Helper()
+
+	urlPath := path.Join(test.path, "namespaces", "default", test.resource, "test")
+	bytes, err := client.CoreV1().RESTClient().Get().AbsPath(urlPath).DoRaw(context.TODO())
+	if err != nil {
+		t.Fatalf("Failed to retrieve object: %v", err)
+	}
+	obj := &unstructured.Unstructured{}
+	if err := json.Unmarshal(bytes, obj); err != nil {
+		t.Fatalf("Error unmarshalling the retrieved object: %v", err)
+	}
+	return obj
+}
+
+func assertReplicasValue(t *testing.T, obj *unstructured.Unstructured, value int) {
+	actualValue, found, err := unstructured.NestedInt64(obj.Object, "spec", "replicas")
+
+	if err != nil {
+		t.Fatalf("Error when retrieving replicas field: %v", err)
+	}
+	if !found {
+		t.Fatalf("Replicas field not found")
+	}
+
+	if int(actualValue) != value {
+		t.Fatalf("Expected replicas field value to be %d but got %d", value, actualValue)
+	}
+}
+
+func assertReplicasOwnership(t *testing.T, obj *unstructured.Unstructured, fieldManagers ...string) {
+	t.Helper()
+
+	accessor, err := meta.Accessor(obj)
+	if err != nil {
+		t.Fatalf("Failed to get meta accessor for object: %v", err)
+	}
+
+	seen := make(map[string]bool)
+	for _, m := range fieldManagers {
+		seen[m] = false
+	}
+
+	for _, managedField := range accessor.GetManagedFields() {
+		var entryJSON map[string]interface{}
+		if err := json.Unmarshal(managedField.FieldsV1.Raw, &entryJSON); err != nil {
+			t.Fatalf("failed to unmarshal managed fields entry into JSON: %v", err)
+		}
+
+		spec, ok := entryJSON["f:spec"].(map[string]interface{})
+		if !ok {
+			// continue with the next managedField, as this one does not hold the spec entry
+			continue
+		}
+
+		if _, ok := spec["f:replicas"]; !ok {
+			// continue with the next managedField, as this one does not hold the spec.replicas entry
+			continue
+		}
+
+		// check if the manager is one of the ones we expect
+		if _, ok := seen[managedField.Manager]; !ok {
+			t.Fatalf("Unexpected field manager, found %q, expected to be in: %v", managedField.Manager, seen)
+		}
+
+		seen[managedField.Manager] = true
+	}
+
+	var missingManagers []string
+	for manager, managerSeen := range seen {
+		if !managerSeen {
+			missingManagers = append(missingManagers, manager)
+		}
+	}
+	if len(missingManagers) > 0 {
+		t.Fatalf("replicas fields should be owned by %v", missingManagers)
+	}
+}

From 09649e58b5a1368929e194991a763afc8011795e Mon Sep 17 00:00:00 2001
From: Andrea Nodari
Date: Thu, 11 Mar 2021 16:51:46 +0100
Subject: [PATCH 3/6] Check request info when updating managed fields during
 scale

- Test all versions to make sure each resource version is in the mappings
- Fail when request info contains an unrecognized version.

We have tests that guarantee that all known versions are in the mappings.
If we get a version in request info that is not there, we should fail fast
to prevent inconsistent behaviour (e.g. if for some reason the mappings are
not up to date).

Ensure all known versions are in mappings
---
 .../apps/deployment/storage/storage.go        | 23 ++++-
 .../apps/replicaset/storage/storage.go        | 22 ++++-
 .../apps/statefulset/storage/storage.go       | 23 ++++-
 .../replicationcontroller/storage/storage.go  | 19 +++-
 .../handlers/fieldmanager/scalehandler.go     | 18 ++--
 .../integration/apiserver/apply/scale_test.go | 88 +++++++++++++++++++
 6 files changed, 177 insertions(+), 16 deletions(-)

diff --git a/pkg/registry/apps/deployment/storage/storage.go b/pkg/registry/apps/deployment/storage/storage.go
index 409d946929a..773ec94b184 100644
--- a/pkg/registry/apps/deployment/storage/storage.go
+++ b/pkg/registry/apps/deployment/storage/storage.go
@@ -26,12 +26,14 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager"
+	genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
 	"k8s.io/apiserver/pkg/registry/generic"
 	genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
 	"k8s.io/apiserver/pkg/registry/rest"
 	"k8s.io/apiserver/pkg/storage"
 	storeerr "k8s.io/apiserver/pkg/storage/errors"
 	"k8s.io/apiserver/pkg/util/dryrun"
+	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/apis/apps"
 	appsv1beta1 "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
 	appsv1beta2 "k8s.io/kubernetes/pkg/apis/apps/v1beta2"
@@ -55,9 +57,16 @@ type DeploymentStorage struct {
 	Rollback *RollbackREST
 }
 
+// ReplicasPathMappings returns the mappings between each group version and a replicas path
+func ReplicasPathMappings() fieldmanager.ResourcePathMappings {
+	return replicasPathInDeployment
+}
+
 // maps a group version to the replicas path in a deployment object
 var replicasPathInDeployment = fieldmanager.ResourcePathMappings{
-	schema.GroupVersion{Group: "apps", Version: "v1"}.String(): fieldpath.MakePathOrDie("spec", "replicas"),
+	schema.GroupVersion{Group: "apps", Version: "v1beta1"}.String(): fieldpath.MakePathOrDie("spec", "replicas"),
+	schema.GroupVersion{Group: "apps", Version: "v1beta2"}.String(): fieldpath.MakePathOrDie("spec", "replicas"),
+	schema.GroupVersion{Group: "apps", Version: "v1"}.String():      fieldpath.MakePathOrDie("spec", "replicas"),
 }
 
 // NewStorage returns new instance of DeploymentStorage.
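
The group/version resolution this patch adds is repeated in each registry it touches (Deployment here, then ReplicaSet, StatefulSet, and ReplicationController). As a standalone sketch of that pattern, assuming the illustrative helper name resolveScaleGroupVersion (the patch itself inlines the logic at each call site):

func resolveScaleGroupVersion(ctx context.Context, defaultGV schema.GroupVersion, mappings fieldmanager.ResourcePathMappings) schema.GroupVersion {
	groupVersion := defaultGV
	if requestInfo, found := genericapirequest.RequestInfoFrom(ctx); found {
		requestGroupVersion := schema.GroupVersion{Group: requestInfo.APIGroup, Version: requestInfo.APIVersion}
		if _, ok := mappings[requestGroupVersion.String()]; ok {
			groupVersion = requestGroupVersion
		} else {
			// Fail fast: the tests below guarantee every served version is in the mappings.
			klog.Fatalf("Unrecognized group/version in request info %q", requestGroupVersion.String())
		}
	}
	return groupVersion
}
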
@@ -383,9 +392,19 @@ func (i *scaleUpdatedObjectInfo) UpdatedObject(ctx context.Context, oldObj runti
 		return nil, errors.NewNotFound(apps.Resource("deployments/scale"), i.name)
 	}
 
+	groupVersion := schema.GroupVersion{Group: "apps", Version: "v1"}
+	if requestInfo, found := genericapirequest.RequestInfoFrom(ctx); found {
+		requestGroupVersion := schema.GroupVersion{Group: requestInfo.APIGroup, Version: requestInfo.APIVersion}
+		if _, ok := replicasPathInDeployment[requestGroupVersion.String()]; ok {
+			groupVersion = requestGroupVersion
+		} else {
+			klog.Fatalf("Unrecognized group/version in request info %q", requestGroupVersion.String())
+		}
+	}
+
 	managedFieldsHandler := fieldmanager.NewScaleHandler(
 		deployment.ManagedFields,
-		schema.GroupVersion{Group: "apps", Version: "v1"},
+		groupVersion,
 		replicasPathInDeployment,
 	)
 
diff --git a/pkg/registry/apps/replicaset/storage/storage.go b/pkg/registry/apps/replicaset/storage/storage.go
index e16a4664608..11e51a300b4 100644
--- a/pkg/registry/apps/replicaset/storage/storage.go
+++ b/pkg/registry/apps/replicaset/storage/storage.go
@@ -27,9 +27,11 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager"
+	genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
 	"k8s.io/apiserver/pkg/registry/generic"
 	genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
 	"k8s.io/apiserver/pkg/registry/rest"
+	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/apis/apps"
 	appsv1beta1 "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
 	appsv1beta2 "k8s.io/kubernetes/pkg/apis/apps/v1beta2"
@@ -51,9 +53,15 @@ type ReplicaSetStorage struct {
 	Scale *ScaleREST
 }
 
+// ReplicasPathMappings returns the mappings between each group version and a replicas path
+func ReplicasPathMappings() fieldmanager.ResourcePathMappings {
+	return replicasPathInReplicaSet
+}
+
 // maps a group version to the replicas path in a replicaset object
 var replicasPathInReplicaSet = fieldmanager.ResourcePathMappings{
-	schema.GroupVersion{Group: "apps", Version: "v1"}.String(): fieldpath.MakePathOrDie("spec", "replicas"),
+	schema.GroupVersion{Group: "apps", Version: "v1beta2"}.String(): fieldpath.MakePathOrDie("spec", "replicas"),
+	schema.GroupVersion{Group: "apps", Version: "v1"}.String():      fieldpath.MakePathOrDie("spec", "replicas"),
 }
 
 // NewStorage returns new instance of ReplicaSetStorage.
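
For orientation, a ResourcePathMappings value is a plain map keyed by "group/version" strings; a minimal sketch of building and querying one (entries mirroring the tables in these diffs):

// Keys are GroupVersion strings such as "apps/v1"; values are the path of
// the replicas field in that version of the parent resource.
mappings := fieldmanager.ResourcePathMappings{
	schema.GroupVersion{Group: "apps", Version: "v1beta2"}.String(): fieldpath.MakePathOrDie("spec", "replicas"),
	schema.GroupVersion{Group: "apps", Version: "v1"}.String():      fieldpath.MakePathOrDie("spec", "replicas"),
}
if path, ok := mappings["apps/v1"]; ok {
	_ = path // selects .spec.replicas in an apps/v1 object
}
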
@@ -285,9 +293,19 @@ func (i *scaleUpdatedObjectInfo) UpdatedObject(ctx context.Context, oldObj runti
 		return nil, errors.NewNotFound(apps.Resource("replicasets/scale"), i.name)
 	}
 
+	groupVersion := schema.GroupVersion{Group: "apps", Version: "v1"}
+	if requestInfo, found := genericapirequest.RequestInfoFrom(ctx); found {
+		requestGroupVersion := schema.GroupVersion{Group: requestInfo.APIGroup, Version: requestInfo.APIVersion}
+		if _, ok := replicasPathInReplicaSet[requestGroupVersion.String()]; ok {
+			groupVersion = requestGroupVersion
+		} else {
+			klog.Fatalf("Unrecognized group/version in request info %q", requestGroupVersion.String())
+		}
+	}
+
 	managedFieldsHandler := fieldmanager.NewScaleHandler(
 		replicaset.ManagedFields,
-		schema.GroupVersion{Group: "apps", Version: "v1"},
+		groupVersion,
 		replicasPathInReplicaSet,
 	)
 
diff --git a/pkg/registry/apps/statefulset/storage/storage.go b/pkg/registry/apps/statefulset/storage/storage.go
index 50b9954784c..6035350ac07 100644
--- a/pkg/registry/apps/statefulset/storage/storage.go
+++ b/pkg/registry/apps/statefulset/storage/storage.go
@@ -25,9 +25,11 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager"
+	genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
 	"k8s.io/apiserver/pkg/registry/generic"
 	genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
 	"k8s.io/apiserver/pkg/registry/rest"
+	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/apis/apps"
 	appsv1beta1 "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
 	appsv1beta2 "k8s.io/kubernetes/pkg/apis/apps/v1beta2"
@@ -48,9 +50,16 @@ type StatefulSetStorage struct {
 	Scale *ScaleREST
 }
 
+// ReplicasPathMappings returns the mappings between each group version and a replicas path
+func ReplicasPathMappings() fieldmanager.ResourcePathMappings {
+	return replicasPathInStatefulSet
+}
+
 // maps a group version to the replicas path in a statefulset object
 var replicasPathInStatefulSet = fieldmanager.ResourcePathMappings{
-	schema.GroupVersion{Group: "apps", Version: "v1"}.String(): fieldpath.MakePathOrDie("spec", "replicas"),
+	schema.GroupVersion{Group: "apps", Version: "v1beta1"}.String(): fieldpath.MakePathOrDie("spec", "replicas"),
+	schema.GroupVersion{Group: "apps", Version: "v1beta2"}.String(): fieldpath.MakePathOrDie("spec", "replicas"),
+	schema.GroupVersion{Group: "apps", Version: "v1"}.String():      fieldpath.MakePathOrDie("spec", "replicas"),
 }
 
 // NewStorage returns new instance of StatefulSetStorage.
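
Every scale registry in this series performs the same managed-fields round-trip inside UpdatedObject; a sketch distilled from those implementations, where parent, oldScale, newScale, groupVersion, and mappings stand in for the registry-specific values:

// Project the parent's managed fields onto the scale object before the
// update, then fold the resulting scale entries back into the parent.
handler := fieldmanager.NewScaleHandler(parent.ManagedFields, groupVersion, mappings)

scaleEntries, err := handler.ToSubresource()
if err != nil {
	return nil, err
}
oldScale.ManagedFields = scaleEntries

// ... the request's UpdatedObject runs here, turning oldScale into newScale ...

parentEntries, err := handler.ToParent(newScale.ManagedFields)
if err != nil {
	return nil, err
}
parent.ManagedFields = parentEntries
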
@@ -271,9 +280,19 @@ func (i *scaleUpdatedObjectInfo) UpdatedObject(ctx context.Context, oldObj runti
 		return nil, errors.NewNotFound(apps.Resource("statefulsets/scale"), i.name)
 	}
 
+	groupVersion := schema.GroupVersion{Group: "apps", Version: "v1"}
+	if requestInfo, found := genericapirequest.RequestInfoFrom(ctx); found {
+		requestGroupVersion := schema.GroupVersion{Group: requestInfo.APIGroup, Version: requestInfo.APIVersion}
+		if _, ok := replicasPathInStatefulSet[requestGroupVersion.String()]; ok {
+			groupVersion = requestGroupVersion
+		} else {
+			klog.Fatalf("Unrecognized group/version in request info %q", requestGroupVersion.String())
+		}
+	}
+
 	managedFieldsHandler := fieldmanager.NewScaleHandler(
 		statefulset.ManagedFields,
-		schema.GroupVersion{Group: "apps", Version: "v1"},
+		groupVersion,
 		replicasPathInStatefulSet,
 	)
 
diff --git a/pkg/registry/core/replicationcontroller/storage/storage.go b/pkg/registry/core/replicationcontroller/storage/storage.go
index d8457e54a6c..925a308eec2 100644
--- a/pkg/registry/core/replicationcontroller/storage/storage.go
+++ b/pkg/registry/core/replicationcontroller/storage/storage.go
@@ -28,9 +28,11 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager"
+	genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
 	"k8s.io/apiserver/pkg/registry/generic"
 	genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
 	"k8s.io/apiserver/pkg/registry/rest"
+	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/apis/autoscaling"
 	autoscalingv1 "k8s.io/kubernetes/pkg/apis/autoscaling/v1"
 	"k8s.io/kubernetes/pkg/apis/autoscaling/validation"
@@ -50,6 +52,11 @@ type ControllerStorage struct {
 	Scale *ScaleREST
 }
 
+// ReplicasPathMappings returns the mappings between each group version and a replicas path
+func ReplicasPathMappings() fieldmanager.ResourcePathMappings {
+	return replicasPathInReplicationController
+}
+
 // maps a group version to the replicas path in a replicationcontroller object
 var replicasPathInReplicationController = fieldmanager.ResourcePathMappings{
 	schema.GroupVersion{Group: "", Version: "v1"}.String(): fieldpath.MakePathOrDie("spec", "replicas"),
@@ -245,9 +252,19 @@ func (i *scaleUpdatedObjectInfo) UpdatedObject(ctx context.Context, oldObj runti
 		return nil, errors.NewNotFound(api.Resource("replicationcontrollers/scale"), i.name)
 	}
 
+	groupVersion := schema.GroupVersion{Group: "", Version: "v1"}
+	if requestInfo, found := genericapirequest.RequestInfoFrom(ctx); found {
+		requestGroupVersion := schema.GroupVersion{Group: requestInfo.APIGroup, Version: requestInfo.APIVersion}
+		if _, ok := replicasPathInReplicationController[requestGroupVersion.String()]; ok {
+			groupVersion = requestGroupVersion
+		} else {
+			klog.Fatalf("Unrecognized group/version in request info %q", requestGroupVersion.String())
+		}
+	}
+
 	managedFieldsHandler := fieldmanager.NewScaleHandler(
 		replicationcontroller.ManagedFields,
-		schema.GroupVersion{Group: "", Version: "v1"},
+		groupVersion,
 		replicasPathInReplicationController,
 	)
 
diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler.go
index d551a43cb59..114cdb1845d 100644
--- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler.go
+++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler.go
@@ -39,17 +39,17 @@ type ResourcePathMappings map[string]fieldpath.Path
 
 // ScaleHandler manages the
conversion of managed fields between a main // resource and the scale subresource type ScaleHandler struct { - parentEntries []metav1.ManagedFieldsEntry - defaultGroupVersion schema.GroupVersion - mappings ResourcePathMappings + parentEntries []metav1.ManagedFieldsEntry + groupVersion schema.GroupVersion + mappings ResourcePathMappings } // NewScaleHandler creates a new ScaleHandler -func NewScaleHandler(parentEntries []metav1.ManagedFieldsEntry, defaultGroupVersion schema.GroupVersion, mappings ResourcePathMappings) *ScaleHandler { +func NewScaleHandler(parentEntries []metav1.ManagedFieldsEntry, groupVersion schema.GroupVersion, mappings ResourcePathMappings) *ScaleHandler { return &ScaleHandler{ - parentEntries: parentEntries, - defaultGroupVersion: defaultGroupVersion, - mappings: mappings, + parentEntries: parentEntries, + groupVersion: groupVersion, + mappings: mappings, } } @@ -136,8 +136,8 @@ func (h *ScaleHandler) ToParent(scaleEntries []metav1.ManagedFieldsEntry) ([]met for manager, versionedSet := range scaleFields { newVersionedSet := fieldpath.NewVersionedSet( - fieldpath.NewSet(h.mappings[h.defaultGroupVersion.String()]), - fieldpath.APIVersion(h.defaultGroupVersion.String()), + fieldpath.NewSet(h.mappings[h.groupVersion.String()]), + fieldpath.APIVersion(h.groupVersion.String()), versionedSet.Applied(), ) f[manager] = newVersionedSet diff --git a/test/integration/apiserver/apply/scale_test.go b/test/integration/apiserver/apply/scale_test.go index de8005e6893..ad0db889f7b 100644 --- a/test/integration/apiserver/apply/scale_test.go +++ b/test/integration/apiserver/apply/scale_test.go @@ -27,11 +27,18 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager" genericfeatures "k8s.io/apiserver/pkg/features" utilfeature "k8s.io/apiserver/pkg/util/feature" clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" featuregatetesting "k8s.io/component-base/featuregate/testing" + deploymentstorage "k8s.io/kubernetes/pkg/registry/apps/deployment/storage" + replicasetstorage "k8s.io/kubernetes/pkg/registry/apps/replicaset/storage" + statefulsetstorage "k8s.io/kubernetes/pkg/registry/apps/statefulset/storage" + replicationcontrollerstorage "k8s.io/kubernetes/pkg/registry/core/replicationcontroller/storage" ) type scaleTest struct { @@ -231,6 +238,87 @@ func TestScaleAllResources(t *testing.T) { } } +func TestScaleUpdateOnlyStatus(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.ServerSideApply, true)() + + _, client, closeFn := setup(t) + defer closeFn() + + resource := "deployments" + path := "/apis/apps/v1" + validObject := []byte(validAppsV1("Deployment")) + + // Create the object + _, err := client.CoreV1().RESTClient().Patch(types.ApplyPatchType). + AbsPath(path). + Namespace("default"). + Resource(resource). + Name("test"). + Param("fieldManager", "apply_test"). + Body(validObject). + Do(context.TODO()).Get() + if err != nil { + t.Fatalf("Failed to create object using apply: %v", err) + } + obj := retrieveObject(t, client, path, resource) + assertReplicasValue(t, obj, 1) + assertReplicasOwnership(t, obj, "apply_test") + + // Call scale subresource to update replicas + _, err = client.CoreV1().RESTClient(). + Patch(types.MergePatchType). + AbsPath(path). + Namespace("default"). 
+ Resource(resource). + Name("test"). + SubResource("scale"). + Param("fieldManager", "scale_test"). + Body([]byte(`{"status":{"replicas": 42}}`)). + Do(context.TODO()).Get() + if err != nil { + t.Fatalf("Failed to scale object: %v", err) + } + obj = retrieveObject(t, client, path, resource) + assertReplicasValue(t, obj, 1) + assertReplicasOwnership(t, obj, "apply_test") +} + +func TestAllKnownVersionsAreInMappings(t *testing.T) { + cases := []struct { + groupKind schema.GroupKind + mappings fieldmanager.ResourcePathMappings + }{ + { + groupKind: schema.GroupKind{Group: "apps", Kind: "ReplicaSet"}, + mappings: replicasetstorage.ReplicasPathMappings(), + }, + { + groupKind: schema.GroupKind{Group: "apps", Kind: "StatefulSet"}, + mappings: statefulsetstorage.ReplicasPathMappings(), + }, + { + groupKind: schema.GroupKind{Group: "apps", Kind: "Deployment"}, + mappings: deploymentstorage.ReplicasPathMappings(), + }, + { + groupKind: schema.GroupKind{Group: "", Kind: "ReplicationController"}, + mappings: replicationcontrollerstorage.ReplicasPathMappings(), + }, + } + for _, c := range cases { + knownVersions := scheme.Scheme.VersionsForGroupKind(c.groupKind) + for _, version := range knownVersions { + if _, ok := c.mappings[version.String()]; !ok { + t.Errorf("missing version %v for %v mappings", version, c.groupKind) + } + } + + if len(knownVersions) != len(c.mappings) { + t.Errorf("%v mappings has extra items: %v vs %v", c.groupKind, c.mappings, knownVersions) + } + } +} + func validAppsV1(kind string) string { return fmt.Sprintf(`{ "apiVersion": "apps/v1", From 8e4b5c849b67b3a12dbd63391a4e75234382ba2c Mon Sep 17 00:00:00 2001 From: Andrea Nodari Date: Sun, 4 Apr 2021 19:05:45 +0200 Subject: [PATCH 4/6] Do not add managed fields if a scale entry doesn't own replicas This happens when a request changes the .status.replicas but not .spec.replicas --- .../handlers/fieldmanager/scalehandler.go | 3 +++ .../handlers/fieldmanager/scalehandler_test.go | 12 ++---------- test/integration/apiserver/apply/scale_test.go | 18 +++++++++--------- 3 files changed, 14 insertions(+), 19 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler.go index 114cdb1845d..4f7296b88ed 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler.go @@ -135,6 +135,9 @@ func (h *ScaleHandler) ToParent(scaleEntries []metav1.ManagedFieldsEntry) ([]met } for manager, versionedSet := range scaleFields { + if !versionedSet.Set().Has(replicasPathInScale) { + continue + } newVersionedSet := fieldpath.NewVersionedSet( fieldpath.NewSet(h.mappings[h.groupVersion.String()]), fieldpath.APIVersion(h.groupVersion.String()), diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler_test.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler_test.go index 6ea8a8c7649..291f32fb35b 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler_test.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler_test.go @@ -463,7 +463,7 @@ func TestTransformingManagedFieldsToParent(t *testing.T) { Operation: metav1.ManagedFieldsOperationApply, APIVersion: "apps/v1", FieldsType: "FieldsV1", - FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{},"f:selector":{}}}`)}, + FieldsV1: 
&metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:selector":{}}}`)}, }, }, subresource: []metav1.ManagedFieldsEntry{ @@ -472,7 +472,7 @@ func TestTransformingManagedFieldsToParent(t *testing.T) { Operation: metav1.ManagedFieldsOperationUpdate, APIVersion: "autoscaling/v1", FieldsType: "FieldsV1", - FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:another":{}}}`)}, + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:status":{"f:replicas":{}}}`)}, Subresource: "scale", }, }, @@ -484,14 +484,6 @@ func TestTransformingManagedFieldsToParent(t *testing.T) { FieldsType: "FieldsV1", FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:selector":{}}}`)}, }, - { - Manager: "scale", - Operation: metav1.ManagedFieldsOperationUpdate, - APIVersion: "apps/v1", - FieldsType: "FieldsV1", - FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, - Subresource: "scale", - }, }, }, { diff --git a/test/integration/apiserver/apply/scale_test.go b/test/integration/apiserver/apply/scale_test.go index ad0db889f7b..834af8741d1 100644 --- a/test/integration/apiserver/apply/scale_test.go +++ b/test/integration/apiserver/apply/scale_test.go @@ -97,7 +97,7 @@ func TestScaleAllResources(t *testing.T) { if err != nil { t.Fatalf("Failed to create object using apply: %v", err) } - obj := retrieveObject(t, client, test) + obj := retrieveObject(t, client, test.path, test.resource) assertReplicasValue(t, obj, 1) assertReplicasOwnership(t, obj, "apply_test") @@ -115,7 +115,7 @@ func TestScaleAllResources(t *testing.T) { if err != nil { t.Fatalf("Failed to scale object: %v", err) } - obj = retrieveObject(t, client, test) + obj = retrieveObject(t, client, test.path, test.resource) assertReplicasValue(t, obj, 5) assertReplicasOwnership(t, obj, "scale_test") @@ -145,7 +145,7 @@ func TestScaleAllResources(t *testing.T) { if err != nil { t.Fatalf("Error force-updating: %v", err) } - obj = retrieveObject(t, client, test) + obj = retrieveObject(t, client, test.path, test.resource) assertReplicasValue(t, obj, 1) assertReplicasOwnership(t, obj, "apply_test") @@ -180,7 +180,7 @@ func TestScaleAllResources(t *testing.T) { if err != nil { t.Fatalf("Error updating object by applying scale and forcing: %v ", err) } - obj = retrieveObject(t, client, test) + obj = retrieveObject(t, client, test.path, test.resource) assertReplicasValue(t, obj, 17) assertReplicasOwnership(t, obj, "apply_scale") @@ -197,7 +197,7 @@ func TestScaleAllResources(t *testing.T) { if err != nil { t.Fatalf("Error replacing object: %v", err) } - obj = retrieveObject(t, client, test) + obj = retrieveObject(t, client, test.path, test.resource) assertReplicasValue(t, obj, 7) assertReplicasOwnership(t, obj, "replace_test") @@ -214,7 +214,7 @@ func TestScaleAllResources(t *testing.T) { if err != nil { t.Fatalf("Error updating object: %v", err) } - obj = retrieveObject(t, client, test) + obj = retrieveObject(t, client, test.path, test.resource) assertReplicasValue(t, obj, 7) assertReplicasOwnership(t, obj, "replace_test", "co_owning_test") @@ -231,7 +231,7 @@ func TestScaleAllResources(t *testing.T) { if err != nil { t.Fatalf("Error scaling object: %v", err) } - obj = retrieveObject(t, client, test) + obj = retrieveObject(t, client, test.path, test.resource) assertReplicasValue(t, obj, 5) assertReplicasOwnership(t, obj, "scale_test") }) @@ -379,10 +379,10 @@ func validV1ReplicationController() string { }` } -func retrieveObject(t *testing.T, client clientset.Interface, test scaleTest) *unstructured.Unstructured { +func retrieveObject(t *testing.T, client 
clientset.Interface, prefix, resource string) *unstructured.Unstructured {
 	t.Helper()
 
-	urlPath := path.Join(test.path, "namespaces", "default", test.resource, "test")
+	urlPath := path.Join(prefix, "namespaces", "default", resource, "test")
 	bytes, err := client.CoreV1().RESTClient().Get().AbsPath(urlPath).DoRaw(context.TODO())
 	if err != nil {
 		t.Fatalf("Failed to retrieve object: %v", err)

From c10dd884c494734d12aceb41daaccd1d8da9356b Mon Sep 17 00:00:00 2001
From: Andrea Nodari
Date: Fri, 9 Apr 2021 17:17:23 +0200
Subject: [PATCH 5/6] Drop managed fields entries with unknown API versions

This aligns with the behaviour of server-side apply on main resources.
---
 .../handlers/fieldmanager/scalehandler.go     | 13 ++++-
 .../fieldmanager/scalehandler_test.go         | 50 +++++++++++++++++++
 2 files changed, 61 insertions(+), 2 deletions(-)

diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler.go
index 4f7296b88ed..3b328ae9a54 100644
--- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler.go
+++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler.go
@@ -68,7 +68,12 @@ func (h *ScaleHandler) ToSubresource() ([]metav1.ManagedFieldsEntry, error) {
 	f := fieldpath.ManagedFields{}
 	t := map[string]*metav1.Time{}
 	for manager, versionedSet := range managed.Fields() {
-		path := h.mappings[string(versionedSet.APIVersion())]
+		path, ok := h.mappings[string(versionedSet.APIVersion())]
+		// Drop the field if the APIVersion of the managed field is unknown
+		if !ok {
+			continue
+		}
+
 		if versionedSet.Set().Has(path) {
 			newVersionedSet := fieldpath.NewVersionedSet(
 				fieldpath.NewSet(replicasPathInScale),
@@ -104,7 +109,11 @@ func (h *ScaleHandler) ToParent(scaleEntries []metav1.ManagedFieldsEntry) ([]met
 
 	for manager, versionedSet := range parentFields {
 		// Get the main resource "replicas" path
-		path := h.mappings[string(versionedSet.APIVersion())]
+		path, ok := h.mappings[string(versionedSet.APIVersion())]
+		// Drop the field if the APIVersion of the managed field is unknown
+		if !ok {
+			continue
+		}
 
 		// If the parent entry does not have the replicas path, just keep it as it is
 		if !versionedSet.Set().Has(path) {
diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler_test.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler_test.go
index 291f32fb35b..ffed4839b1a 100644
--- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler_test.go
+++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler_test.go
@@ -117,6 +117,19 @@ func TestTransformManagedFieldsToSubresource(t *testing.T) {
 				},
 			},
 		},
+		{
+			desc: "drops fields if the api version is unknown",
+			input: []metav1.ManagedFieldsEntry{
+				{
+					Manager:    "manager-1",
+					Operation:  metav1.ManagedFieldsOperationApply,
+					APIVersion: "apps/v10",
+					FieldsType: "FieldsV1",
+					FieldsV1:   &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)},
+				},
+			},
+			expected: nil,
+		},
 	}
 
 	for _, test := range tests {
@@ -564,6 +577,43 @@ func TestTransformingManagedFieldsToParent(t *testing.T) {
 				},
 			},
 		},
+		{
+			desc: "drops other fields if the api version is unknown",
+			parent: []metav1.ManagedFieldsEntry{
+				{
+					Manager:    "test",
+					Operation:  metav1.ManagedFieldsOperationApply,
+					APIVersion: "apps/v1",
+					FieldsType: "FieldsV1",
+					FieldsV1:   &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)},
+				},
+				{
Manager:    "another-manager",
+					Operation:  metav1.ManagedFieldsOperationApply,
+					APIVersion: "apps/v10",
+					FieldsType: "FieldsV1",
+					FieldsV1:   &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:selector":{}}}`)},
+				},
+			},
+			subresource: []metav1.ManagedFieldsEntry{
+				{
+					Manager:    "scale",
+					Operation:  metav1.ManagedFieldsOperationUpdate,
+					APIVersion: "autoscaling/v1",
+					FieldsType: "FieldsV1",
+					FieldsV1:   &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)},
+				},
+			},
+			expected: []metav1.ManagedFieldsEntry{
+				{
+					Manager:    "scale",
+					Operation:  metav1.ManagedFieldsOperationUpdate,
+					APIVersion: "apps/v1",
+					FieldsType: "FieldsV1",
+					FieldsV1:   &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)},
+				},
+			},
+		},
 	}
 
 	for _, test := range tests {

From 5b666a61a170f61c7e223085478b24a03612fa99 Mon Sep 17 00:00:00 2001
From: Andrea Nodari
Date: Sun, 18 Apr 2021 13:55:15 +0200
Subject: [PATCH 6/6] Add nil path to mapping when a CR has no "scale"
 subresource

This prevents the ScaleHandler from dropping the entry; such entries are
simply ignored instead.
---
 .../pkg/apiserver/customresource_handler.go   |  1 +
 .../handlers/fieldmanager/scalehandler.go     | 12 +--
 .../fieldmanager/scalehandler_test.go         | 76 +++++++++++++++++--
 3 files changed, 76 insertions(+), 13 deletions(-)

diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go
index d3585429b5f..5cf0fad6965 100644
--- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go
+++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go
@@ -709,6 +709,7 @@ func (r *crdHandler) getOrCreateServingInfoFor(uid types.UID, name string) (*crd
 			return nil, fmt.Errorf("the server could not properly serve the CR subresources")
 		}
 		if subresources == nil || subresources.Scale == nil {
+			replicasPathInCustomResource[schema.GroupVersion{Group: crd.Spec.Group, Version: v.Name}.String()] = nil
 			continue
 		}
 		path := fieldpath.Path{}
diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler.go
index 3b328ae9a54..d81383628c7 100644
--- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler.go
+++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler.go
@@ -69,8 +69,8 @@ func (h *ScaleHandler) ToSubresource() ([]metav1.ManagedFieldsEntry, error) {
 	t := map[string]*metav1.Time{}
 	for manager, versionedSet := range managed.Fields() {
 		path, ok := h.mappings[string(versionedSet.APIVersion())]
-		// Drop the field if the APIVersion of the managed field is unknown
-		if !ok {
+		// Skip the entry if the APIVersion is unknown
+		if !ok || path == nil {
 			continue
 		}
 
@@ -110,13 +110,18 @@ func (h *ScaleHandler) ToParent(scaleEntries []metav1.ManagedFieldsEntry) ([]met
 	for manager, versionedSet := range parentFields {
 		// Get the main resource "replicas" path
 		path, ok := h.mappings[string(versionedSet.APIVersion())]
-		// Drop the field if the APIVersion of the managed field is unknown
+		// Drop the entry if the APIVersion is unknown.
 		if !ok {
 			continue
 		}
 
-		// If the parent entry does not have the replicas path, just keep it as it is
-		if !versionedSet.Set().Has(path) {
+		// If the parent entry does not have the replicas path or it is nil, just
+		// keep it as it is. The path is nil for Custom Resources without a scale
+		// subresource.
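+		// For example, a CRD that serves v1 with a scale subresource and v2
+		// without one yields mappings like
+		// {"mygroup/v1": .spec.replicas, "mygroup/v2": nil} (see the tests below).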
+		if path == nil || !versionedSet.Set().Has(path) {
 			f[manager] = versionedSet
 			t[manager] = decodedParentEntries.Times()[manager]
 			continue
diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler_test.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler_test.go
index ffed4839b1a..96d40e70d5a 100644
--- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler_test.go
+++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler_test.go
@@ -578,7 +578,7 @@ func TestTransformingManagedFieldsToParent(t *testing.T) {
 			},
 		},
 		{
-			desc: "drops other fields if the api version is unknown",
+			desc: "drops the entry if the api version is unknown",
 			parent: []metav1.ManagedFieldsEntry{
 				{
 					Manager:    "test",
@@ -636,14 +636,16 @@ func TestTransformingManagedFieldsToParent(t *testing.T) {
 
 func TestTransformingManagedFieldsToParentMultiVersion(t *testing.T) {
 	tests := []struct {
-		desc        string
-		mappings    ResourcePathMappings
-		parent      []metav1.ManagedFieldsEntry
-		subresource []metav1.ManagedFieldsEntry
-		expected    []metav1.ManagedFieldsEntry
+		desc         string
+		groupVersion schema.GroupVersion
+		mappings     ResourcePathMappings
+		parent       []metav1.ManagedFieldsEntry
+		subresource  []metav1.ManagedFieldsEntry
+		expected     []metav1.ManagedFieldsEntry
 	}{
 		{
-			desc: "multi-version",
+			desc:         "multi-version",
+			groupVersion: schema.GroupVersion{Group: "apps", Version: "v1"},
 			mappings: ResourcePathMappings{
 				"apps/v1": fieldpath.MakePathOrDie("spec", "the-replicas"),
 				"apps/v2": fieldpath.MakePathOrDie("spec", "not-the-replicas"),
@@ -699,13 +701,71 @@ func TestTransformingManagedFieldsToParentMultiVersion(t *testing.T) {
 				},
 			},
 		},
+		{
+			desc:         "Custom resource without scale subresource, scaling a version with `scale`",
+			groupVersion: schema.GroupVersion{Group: "mygroup", Version: "v1"},
+			mappings: ResourcePathMappings{
+				"mygroup/v1": fieldpath.MakePathOrDie("spec", "the-replicas"),
+				"mygroup/v2": nil,
+			},
+			parent: []metav1.ManagedFieldsEntry{
+				{
+					Manager:    "test",
+					Operation:  metav1.ManagedFieldsOperationApply,
+					APIVersion: "mygroup/v1",
+					FieldsType: "FieldsV1",
+					FieldsV1:   &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:the-replicas":{},"f:selector":{}}}`)},
+				},
+				{
+					Manager:    "test-other",
+					Operation:  metav1.ManagedFieldsOperationApply,
+					APIVersion: "mygroup/v2",
+					FieldsType: "FieldsV1",
+					FieldsV1:   &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:test-other":{}}}`)},
+				},
+			},
+			subresource: []metav1.ManagedFieldsEntry{
+				{
+					Manager:     "scale",
+					Operation:   metav1.ManagedFieldsOperationUpdate,
+					APIVersion:  "autoscaling/v1",
+					FieldsType:  "FieldsV1",
+					FieldsV1:    &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)},
+					Subresource: "scale",
+				},
+			},
+			expected: []metav1.ManagedFieldsEntry{
+				{
+					Manager:    "test",
+					Operation:  metav1.ManagedFieldsOperationApply,
+					APIVersion: "mygroup/v1",
+					FieldsType: "FieldsV1",
+					FieldsV1:   &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:selector":{}}}`)},
+				},
+				{
+					Manager:    "test-other",
+					Operation:  metav1.ManagedFieldsOperationApply,
+					APIVersion: "mygroup/v2",
+					FieldsType: "FieldsV1",
+					FieldsV1:   &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:test-other":{}}}`)},
+				},
+				{
+					Manager:     "scale",
+					Operation:   metav1.ManagedFieldsOperationUpdate,
+					APIVersion:  "mygroup/v1",
+					FieldsType:  "FieldsV1",
+					FieldsV1:    &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:the-replicas":{}}}`)},
+					Subresource: "scale",
+				},
+			},
+		},
 	}
 
 	for _, test := range tests {
 		t.Run(test.desc, func(t *testing.T) {
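+			// Each case builds the handler from its own group/version and path mappings.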
 			handler := NewScaleHandler(
 				test.parent,
-				schema.GroupVersion{Group: "apps", Version: "v1"},
+				test.groupVersion,
 				test.mappings,
 			)
 			parentEntries, err := handler.ToParent(test.subresource)
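
Editor's note: the excerpt of the series ends here. As a reading aid only
(not part of the patches), the following minimal sketch shows how the two
conversions above are meant to compose around an update of a deployment's
scale subresource. The package main wrapper, the manager name "deploy-tool",
and the printed output are illustrative assumptions; NewScaleHandler,
ToSubresource, ToParent, and ResourcePathMappings are the APIs introduced by
the series.

	package main

	import (
		"fmt"

		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/apimachinery/pkg/runtime/schema"
		"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager"
		"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
	)

	func main() {
		// A deployment entry that owns spec.replicas via server-side apply.
		parent := []metav1.ManagedFieldsEntry{{
			Manager:    "deploy-tool", // hypothetical manager name
			Operation:  metav1.ManagedFieldsOperationApply,
			APIVersion: "apps/v1",
			FieldsType: "FieldsV1",
			FieldsV1:   &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)},
		}}

		handler := fieldmanager.NewScaleHandler(
			parent,
			schema.GroupVersion{Group: "apps", Version: "v1"},
			fieldmanager.ResourcePathMappings{
				"apps/v1": fieldpath.MakePathOrDie("spec", "replicas"),
			},
		)

		// Parent -> scale: the replicas owner is rewritten to own
		// spec.replicas of the autoscaling/v1 Scale object; entries with an
		// unknown API version or a nil path are skipped.
		scaleEntries, err := handler.ToSubresource()
		if err != nil {
			panic(err)
		}

		// Scale -> parent: after the scale update, ownership of replicas is
		// folded back into the deployment's managed fields.
		parentEntries, err := handler.ToParent(scaleEntries)
		if err != nil {
			panic(err)
		}
		fmt.Printf("scale entries: %d, parent entries: %d\n", len(scaleEntries), len(parentEntries))
	}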