diff --git a/pkg/registry/apps/deployment/storage/storage.go b/pkg/registry/apps/deployment/storage/storage.go index 8b2b48c6a54..773ec94b184 100644 --- a/pkg/registry/apps/deployment/storage/storage.go +++ b/pkg/registry/apps/deployment/storage/storage.go @@ -25,12 +25,15 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager" + genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/apiserver/pkg/storage" storeerr "k8s.io/apiserver/pkg/storage/errors" "k8s.io/apiserver/pkg/util/dryrun" + "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/apis/apps" appsv1beta1 "k8s.io/kubernetes/pkg/apis/apps/v1beta1" appsv1beta2 "k8s.io/kubernetes/pkg/apis/apps/v1beta2" @@ -54,6 +57,18 @@ type DeploymentStorage struct { Rollback *RollbackREST } +// ReplicasPathMappings returns the mappings between each group version and a replicas path +func ReplicasPathMappings() fieldmanager.ResourcePathMappings { + return replicasPathInDeployment +} + +// maps a group version to the replicas path in a deployment object +var replicasPathInDeployment = fieldmanager.ResourcePathMappings{ + schema.GroupVersion{Group: "apps", Version: "v1beta1"}.String(): fieldpath.MakePathOrDie("spec", "replicas"), + schema.GroupVersion{Group: "apps", Version: "v1beta2"}.String(): fieldpath.MakePathOrDie("spec", "replicas"), + schema.GroupVersion{Group: "apps", Version: "v1"}.String(): fieldpath.MakePathOrDie("spec", "replicas"), +} + // NewStorage returns new instance of DeploymentStorage. 
func NewStorage(optsGetter generic.RESTOptionsGetter) (DeploymentStorage, error) { deploymentRest, deploymentStatusRest, deploymentRollbackRest, err := NewREST(optsGetter) @@ -337,6 +352,7 @@ func scaleFromDeployment(deployment *apps.Deployment) (*autoscaling.Scale, error if err != nil { return nil, err } + return &autoscaling.Scale{ // TODO: Create a variant of ObjectMeta type that only contains the fields below. ObjectMeta: metav1.ObjectMeta{ @@ -376,11 +392,32 @@ func (i *scaleUpdatedObjectInfo) UpdatedObject(ctx context.Context, oldObj runti return nil, errors.NewNotFound(apps.Resource("deployments/scale"), i.name) } + groupVersion := schema.GroupVersion{Group: "apps", Version: "v1"} + if requestInfo, found := genericapirequest.RequestInfoFrom(ctx); found { + requestGroupVersion := schema.GroupVersion{Group: requestInfo.APIGroup, Version: requestInfo.APIVersion} + if _, ok := replicasPathInDeployment[requestGroupVersion.String()]; ok { + groupVersion = requestGroupVersion + } else { + klog.Fatalf("Unrecognized group/version in request info %q", requestGroupVersion.String()) + } + } + + managedFieldsHandler := fieldmanager.NewScaleHandler( + deployment.ManagedFields, + groupVersion, + replicasPathInDeployment, + ) + // deployment -> old scale oldScale, err := scaleFromDeployment(deployment) if err != nil { return nil, err } + scaleManagedFields, err := managedFieldsHandler.ToSubresource() + if err != nil { + return nil, err + } + oldScale.ManagedFields = scaleManagedFields // old scale -> new scale newScaleObj, err := i.reqObjInfo.UpdatedObject(ctx, oldScale) @@ -412,5 +449,12 @@ func (i *scaleUpdatedObjectInfo) UpdatedObject(ctx context.Context, oldObj runti // move replicas/resourceVersion fields to object and return deployment.Spec.Replicas = scale.Spec.Replicas deployment.ResourceVersion = scale.ResourceVersion + + updatedEntries, err := managedFieldsHandler.ToParent(scale.ManagedFields) + if err != nil { + return nil, err + } + deployment.ManagedFields = 
updatedEntries + return deployment, nil } diff --git a/pkg/registry/apps/replicaset/storage/storage.go b/pkg/registry/apps/replicaset/storage/storage.go index be129c7ea82..11e51a300b4 100644 --- a/pkg/registry/apps/replicaset/storage/storage.go +++ b/pkg/registry/apps/replicaset/storage/storage.go @@ -26,9 +26,12 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager" + genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" + "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/apis/apps" appsv1beta1 "k8s.io/kubernetes/pkg/apis/apps/v1beta1" appsv1beta2 "k8s.io/kubernetes/pkg/apis/apps/v1beta2" @@ -50,6 +53,17 @@ type ReplicaSetStorage struct { Scale *ScaleREST } +// ReplicasPathMappings returns the mappings between each group version and a replicas path +func ReplicasPathMappings() fieldmanager.ResourcePathMappings { + return replicasPathInReplicaSet +} + +// maps a group version to the replicas path in a replicaset object +var replicasPathInReplicaSet = fieldmanager.ResourcePathMappings{ + schema.GroupVersion{Group: "apps", Version: "v1beta2"}.String(): fieldpath.MakePathOrDie("spec", "replicas"), + schema.GroupVersion{Group: "apps", Version: "v1"}.String(): fieldpath.MakePathOrDie("spec", "replicas"), +} + // NewStorage returns new instance of ReplicaSetStorage. 
func NewStorage(optsGetter generic.RESTOptionsGetter) (ReplicaSetStorage, error) { replicaSetRest, replicaSetStatusRest, err := NewREST(optsGetter) @@ -279,12 +293,34 @@ func (i *scaleUpdatedObjectInfo) UpdatedObject(ctx context.Context, oldObj runti return nil, errors.NewNotFound(apps.Resource("replicasets/scale"), i.name) } + groupVersion := schema.GroupVersion{Group: "apps", Version: "v1"} + if requestInfo, found := genericapirequest.RequestInfoFrom(ctx); found { + requestGroupVersion := schema.GroupVersion{Group: requestInfo.APIGroup, Version: requestInfo.APIVersion} + if _, ok := replicasPathInReplicaSet[requestGroupVersion.String()]; ok { + groupVersion = requestGroupVersion + } else { + klog.Fatalf("Unrecognized group/version in request info %q", requestGroupVersion.String()) + } + } + + managedFieldsHandler := fieldmanager.NewScaleHandler( + replicaset.ManagedFields, + groupVersion, + replicasPathInReplicaSet, + ) + // replicaset -> old scale oldScale, err := scaleFromReplicaSet(replicaset) if err != nil { return nil, err } + scaleManagedFields, err := managedFieldsHandler.ToSubresource() + if err != nil { + return nil, err + } + oldScale.ManagedFields = scaleManagedFields + // old scale -> new scale newScaleObj, err := i.reqObjInfo.UpdatedObject(ctx, oldScale) if err != nil { @@ -315,5 +351,12 @@ func (i *scaleUpdatedObjectInfo) UpdatedObject(ctx context.Context, oldObj runti // move replicas/resourceVersion fields to object and return replicaset.Spec.Replicas = scale.Spec.Replicas replicaset.ResourceVersion = scale.ResourceVersion + + updatedEntries, err := managedFieldsHandler.ToParent(scale.ManagedFields) + if err != nil { + return nil, err + } + replicaset.ManagedFields = updatedEntries + return replicaset, nil } diff --git a/pkg/registry/apps/statefulset/storage/storage.go b/pkg/registry/apps/statefulset/storage/storage.go index 0afa6b69098..6035350ac07 100644 --- a/pkg/registry/apps/statefulset/storage/storage.go +++ 
b/pkg/registry/apps/statefulset/storage/storage.go @@ -24,9 +24,12 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager" + genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" + "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/apis/apps" appsv1beta1 "k8s.io/kubernetes/pkg/apis/apps/v1beta1" appsv1beta2 "k8s.io/kubernetes/pkg/apis/apps/v1beta2" @@ -47,6 +50,18 @@ type StatefulSetStorage struct { Scale *ScaleREST } +// ReplicasPathMappings returns the mappings between each group version and a replicas path +func ReplicasPathMappings() fieldmanager.ResourcePathMappings { + return replicasPathInStatefulSet +} + +// maps a group version to the replicas path in a statefulset object +var replicasPathInStatefulSet = fieldmanager.ResourcePathMappings{ + schema.GroupVersion{Group: "apps", Version: "v1beta1"}.String(): fieldpath.MakePathOrDie("spec", "replicas"), + schema.GroupVersion{Group: "apps", Version: "v1beta2"}.String(): fieldpath.MakePathOrDie("spec", "replicas"), + schema.GroupVersion{Group: "apps", Version: "v1"}.String(): fieldpath.MakePathOrDie("spec", "replicas"), +} + // NewStorage returns new instance of StatefulSetStorage. 
func NewStorage(optsGetter generic.RESTOptionsGetter) (StatefulSetStorage, error) { statefulSetRest, statefulSetStatusRest, err := NewREST(optsGetter) @@ -265,11 +280,32 @@ func (i *scaleUpdatedObjectInfo) UpdatedObject(ctx context.Context, oldObj runti return nil, errors.NewNotFound(apps.Resource("statefulsets/scale"), i.name) } + groupVersion := schema.GroupVersion{Group: "apps", Version: "v1"} + if requestInfo, found := genericapirequest.RequestInfoFrom(ctx); found { + requestGroupVersion := schema.GroupVersion{Group: requestInfo.APIGroup, Version: requestInfo.APIVersion} + if _, ok := replicasPathInStatefulSet[requestGroupVersion.String()]; ok { + groupVersion = requestGroupVersion + } else { + klog.Fatalf("Unrecognized group/version in request info %q", requestGroupVersion.String()) + } + } + + managedFieldsHandler := fieldmanager.NewScaleHandler( + statefulset.ManagedFields, + groupVersion, + replicasPathInStatefulSet, + ) + // statefulset -> old scale oldScale, err := scaleFromStatefulSet(statefulset) if err != nil { return nil, err } + scaleManagedFields, err := managedFieldsHandler.ToSubresource() + if err != nil { + return nil, err + } + oldScale.ManagedFields = scaleManagedFields // old scale -> new scale newScaleObj, err := i.reqObjInfo.UpdatedObject(ctx, oldScale) @@ -301,5 +337,12 @@ func (i *scaleUpdatedObjectInfo) UpdatedObject(ctx context.Context, oldObj runti // move replicas/resourceVersion fields to object and return statefulset.Spec.Replicas = scale.Spec.Replicas statefulset.ResourceVersion = scale.ResourceVersion + + updatedEntries, err := managedFieldsHandler.ToParent(scale.ManagedFields) + if err != nil { + return nil, err + } + statefulset.ManagedFields = updatedEntries + return statefulset, nil } diff --git a/pkg/registry/core/replicationcontroller/storage/storage.go b/pkg/registry/core/replicationcontroller/storage/storage.go index 8ae12247f18..925a308eec2 100644 --- a/pkg/registry/core/replicationcontroller/storage/storage.go +++ 
b/pkg/registry/core/replicationcontroller/storage/storage.go @@ -27,9 +27,12 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager" + genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" + "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/apis/autoscaling" autoscalingv1 "k8s.io/kubernetes/pkg/apis/autoscaling/v1" "k8s.io/kubernetes/pkg/apis/autoscaling/validation" @@ -49,6 +52,16 @@ type ControllerStorage struct { Scale *ScaleREST } +// ReplicasPathMappings returns the mappings between each group version and a replicas path +func ReplicasPathMappings() fieldmanager.ResourcePathMappings { + return replicasPathInReplicationController +} + +// maps a group version to the replicas path in a replication controller object +var replicasPathInReplicationController = fieldmanager.ResourcePathMappings{ + schema.GroupVersion{Group: "", Version: "v1"}.String(): fieldpath.MakePathOrDie("spec", "replicas"), +} + func NewStorage(optsGetter generic.RESTOptionsGetter) (ControllerStorage, error) { controllerREST, statusREST, err := NewREST(optsGetter) if err != nil { @@ -239,8 +252,29 @@ func (i *scaleUpdatedObjectInfo) UpdatedObject(ctx context.Context, oldObj runti return nil, errors.NewNotFound(api.Resource("replicationcontrollers/scale"), i.name) } + groupVersion := schema.GroupVersion{Group: "", Version: "v1"} + if requestInfo, found := genericapirequest.RequestInfoFrom(ctx); found { + requestGroupVersion := schema.GroupVersion{Group: requestInfo.APIGroup, Version: requestInfo.APIVersion} + if _, ok := replicasPathInReplicationController[requestGroupVersion.String()]; ok { + groupVersion = requestGroupVersion + } else { + klog.Fatalf("Unrecognized group/version in request info %q", requestGroupVersion.String()) + } + } + + 
managedFieldsHandler := fieldmanager.NewScaleHandler( + replicationcontroller.ManagedFields, + groupVersion, + replicasPathInReplicationController, + ) + // replicationcontroller -> old scale oldScale := scaleFromRC(replicationcontroller) + scaleManagedFields, err := managedFieldsHandler.ToSubresource() + if err != nil { + return nil, err + } + oldScale.ManagedFields = scaleManagedFields // old scale -> new scale newScaleObj, err := i.reqObjInfo.UpdatedObject(ctx, oldScale) @@ -272,5 +306,12 @@ func (i *scaleUpdatedObjectInfo) UpdatedObject(ctx context.Context, oldObj runti // move replicas/resourceVersion fields to object and return replicationcontroller.Spec.Replicas = scale.Spec.Replicas replicationcontroller.ResourceVersion = scale.ResourceVersion + + updatedEntries, err := managedFieldsHandler.ToParent(scale.ManagedFields) + if err != nil { + return nil, err + } + replicationcontroller.ManagedFields = updatedEntries + return replicationcontroller, nil } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion/converter.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion/converter.go index 23564762c75..d7487a495c6 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion/converter.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion/converter.go @@ -165,6 +165,13 @@ func (c *crConverter) ConvertToVersion(in runtime.Object, target runtime.GroupVe // TODO: should this be a typed error? 
return nil, fmt.Errorf("%v is unstructured and is not suitable for converting to %q", fromGVK.String(), target) } + // Special-case typed scale conversion if this custom resource supports a scale endpoint + if c.convertScale { + if _, isInScale := in.(*autoscalingv1.Scale); isInScale { + return typedscheme.Scheme.ConvertToVersion(in, target) + } + } + if !c.validVersions[toGVK.GroupVersion()] { return nil, fmt.Errorf("request to convert CR to an invalid group/version: %s", toGVK.GroupVersion().String()) } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go index efd6487f03d..5cf0fad6965 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go @@ -699,6 +699,28 @@ func (r *crdHandler) getOrCreateServingInfoFor(uid types.UID, name string) (*crd if err != nil { return nil, err } + + // Create replicasPathInCustomResource + replicasPathInCustomResource := fieldmanager.ResourcePathMappings{} + for _, v := range crd.Spec.Versions { + subresources, err := apiextensionshelpers.GetSubresourcesForVersion(crd, v.Name) + if err != nil { + utilruntime.HandleError(err) + return nil, fmt.Errorf("the server could not properly serve the CR subresources") + } + if subresources == nil || subresources.Scale == nil { + replicasPathInCustomResource[schema.GroupVersion{Group: crd.Spec.Group, Version: v.Name}.String()] = nil + continue + } + path := fieldpath.Path{} + splitReplicasPath := strings.Split(strings.TrimPrefix(subresources.Scale.SpecReplicasPath, "."), ".") + for _, element := range splitReplicasPath { + s := element + path = append(path, fieldpath.PathElement{FieldName: &s}) + } + replicasPathInCustomResource[schema.GroupVersion{Group: crd.Spec.Group, Version: v.Name}.String()] = path + } + for _, v := range 
crd.Spec.Versions { // In addition to Unstructured objects (Custom Resources), we also may sometimes need to // decode unversioned Options objects, so we delegate to parameterScheme for such types. @@ -803,6 +825,7 @@ func (r *crdHandler) getOrCreateServingInfoFor(uid types.UID, name string) (*crd }, crd.Status.AcceptedNames.Categories, table, + replicasPathInCustomResource, ) selfLinkPrefix := "" @@ -892,8 +915,19 @@ func (r *crdHandler) getOrCreateServingInfoFor(uid types.UID, name string) (*crd SelfLinkPathPrefix: selfLinkPrefix, SelfLinkPathSuffix: "/scale", } - // TODO(issues.k8s.io/82046): We can't effectively track ownership on scale requests yet. - scaleScope.FieldManager = nil + + if utilfeature.DefaultFeatureGate.Enabled(features.ServerSideApply) && subresources != nil && subresources.Scale != nil { + scaleScope, err = scopeWithFieldManager( + typeConverter, + scaleScope, + nil, + "scale", + ) + if err != nil { + return nil, err + } + } + scaleScopes[v.Name] = &scaleScope // override status subresource values diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/etcd.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/etcd.go index 2b51d64c3c0..bfbc8b8b14f 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/etcd.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/etcd.go @@ -28,6 +28,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager" "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" @@ -41,7 +42,7 @@ type CustomResourceStorage struct { Scale *ScaleREST } -func NewStorage(resource schema.GroupResource, kind, listKind schema.GroupVersionKind, strategy customResourceStrategy, optsGetter 
generic.RESTOptionsGetter, categories []string, tableConvertor rest.TableConvertor) CustomResourceStorage { +func NewStorage(resource schema.GroupResource, kind, listKind schema.GroupVersionKind, strategy customResourceStrategy, optsGetter generic.RESTOptionsGetter, categories []string, tableConvertor rest.TableConvertor, replicasPathMapping fieldmanager.ResourcePathMappings) CustomResourceStorage { customResourceREST, customResourceStatusREST := newREST(resource, kind, listKind, strategy, optsGetter, categories, tableConvertor) s := CustomResourceStorage{ @@ -59,10 +60,12 @@ func NewStorage(resource schema.GroupResource, kind, listKind schema.GroupVersio } s.Scale = &ScaleREST{ - store: customResourceREST.Store, - specReplicasPath: scale.SpecReplicasPath, - statusReplicasPath: scale.StatusReplicasPath, - labelSelectorPath: labelSelectorPath, + store: customResourceREST.Store, + specReplicasPath: scale.SpecReplicasPath, + statusReplicasPath: scale.StatusReplicasPath, + labelSelectorPath: labelSelectorPath, + parentGV: kind.GroupVersion(), + replicasPathMapping: replicasPathMapping, } } @@ -209,10 +212,12 @@ func (r *StatusREST) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set { } type ScaleREST struct { - store *genericregistry.Store - specReplicasPath string - statusReplicasPath string - labelSelectorPath string + store *genericregistry.Store + specReplicasPath string + statusReplicasPath string + labelSelectorPath string + parentGV schema.GroupVersion + replicasPathMapping fieldmanager.ResourcePathMappings } // ScaleREST implements Patcher @@ -247,10 +252,12 @@ func (r *ScaleREST) Get(ctx context.Context, name string, options *metav1.GetOpt func (r *ScaleREST) Update(ctx context.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc, forceAllowCreate bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) { scaleObjInfo := &scaleUpdatedObjectInfo{ - 
reqObjInfo: objInfo, - specReplicasPath: r.specReplicasPath, - labelSelectorPath: r.labelSelectorPath, - statusReplicasPath: r.statusReplicasPath, + reqObjInfo: objInfo, + specReplicasPath: r.specReplicasPath, + labelSelectorPath: r.labelSelectorPath, + statusReplicasPath: r.statusReplicasPath, + parentGV: r.parentGV, + replicasPathMapping: r.replicasPathMapping, } obj, _, err := r.store.Update( @@ -299,19 +306,22 @@ func toScaleUpdateValidation(f rest.ValidateObjectUpdateFunc, specReplicasPath, } } +// Split the path per period, ignoring the leading period. +func splitReplicasPath(replicasPath string) []string { + return strings.Split(strings.TrimPrefix(replicasPath, "."), ".") +} + // scaleFromCustomResource returns a scale subresource for a customresource and a bool signalling whether // the specReplicas value was found. func scaleFromCustomResource(cr *unstructured.Unstructured, specReplicasPath, statusReplicasPath, labelSelectorPath string) (*autoscalingv1.Scale, bool, error) { - specReplicasPath = strings.TrimPrefix(specReplicasPath, ".") // ignore leading period - specReplicas, foundSpecReplicas, err := unstructured.NestedInt64(cr.UnstructuredContent(), strings.Split(specReplicasPath, ".")...) + specReplicas, foundSpecReplicas, err := unstructured.NestedInt64(cr.UnstructuredContent(), splitReplicasPath(specReplicasPath)...) if err != nil { return nil, false, err } else if !foundSpecReplicas { specReplicas = 0 } - statusReplicasPath = strings.TrimPrefix(statusReplicasPath, ".") // ignore leading period - statusReplicas, found, err := unstructured.NestedInt64(cr.UnstructuredContent(), strings.Split(statusReplicasPath, ".")...) + statusReplicas, found, err := unstructured.NestedInt64(cr.UnstructuredContent(), splitReplicasPath(statusReplicasPath)...) 
if err != nil { return nil, false, err } else if !found { @@ -320,8 +330,7 @@ func scaleFromCustomResource(cr *unstructured.Unstructured, specReplicasPath, st var labelSelector string if len(labelSelectorPath) > 0 { - labelSelectorPath = strings.TrimPrefix(labelSelectorPath, ".") // ignore leading period - labelSelector, _, err = unstructured.NestedString(cr.UnstructuredContent(), strings.Split(labelSelectorPath, ".")...) + labelSelector, _, err = unstructured.NestedString(cr.UnstructuredContent(), splitReplicasPath(labelSelectorPath)...) if err != nil { return nil, false, err } @@ -353,10 +362,12 @@ func scaleFromCustomResource(cr *unstructured.Unstructured, specReplicasPath, st } type scaleUpdatedObjectInfo struct { - reqObjInfo rest.UpdatedObjectInfo - specReplicasPath string - statusReplicasPath string - labelSelectorPath string + reqObjInfo rest.UpdatedObjectInfo + specReplicasPath string + statusReplicasPath string + labelSelectorPath string + parentGV schema.GroupVersion + replicasPathMapping fieldmanager.ResourcePathMappings } func (i *scaleUpdatedObjectInfo) Preconditions() *metav1.Preconditions { @@ -366,6 +377,13 @@ func (i *scaleUpdatedObjectInfo) Preconditions() *metav1.Preconditions { func (i *scaleUpdatedObjectInfo) UpdatedObject(ctx context.Context, oldObj runtime.Object) (runtime.Object, error) { cr := oldObj.DeepCopyObject().(*unstructured.Unstructured) const invalidSpecReplicas = -2147483648 // smallest int32 + + managedFieldsHandler := fieldmanager.NewScaleHandler( + cr.GetManagedFields(), + i.parentGV, + i.replicasPathMapping, + ) + oldScale, replicasFound, err := scaleFromCustomResource(cr, i.specReplicasPath, i.statusReplicasPath, i.labelSelectorPath) if err != nil { return nil, err @@ -374,6 +392,12 @@ func (i *scaleUpdatedObjectInfo) UpdatedObject(ctx context.Context, oldObj runti oldScale.Spec.Replicas = invalidSpecReplicas // signal that this was not set before } + scaleManagedFields, err := managedFieldsHandler.ToSubresource() + if err 
!= nil { + return nil, err + } + oldScale.ManagedFields = scaleManagedFields + obj, err := i.reqObjInfo.UpdatedObject(ctx, oldScale) if err != nil { return nil, err @@ -391,9 +415,7 @@ func (i *scaleUpdatedObjectInfo) UpdatedObject(ctx context.Context, oldObj runti return nil, apierrors.NewBadRequest(fmt.Sprintf("the spec replicas field %q cannot be empty", i.specReplicasPath)) } - specReplicasPath := strings.TrimPrefix(i.specReplicasPath, ".") // ignore leading period - - if err := unstructured.SetNestedField(cr.Object, int64(scale.Spec.Replicas), strings.Split(specReplicasPath, ".")...); err != nil { + if err := unstructured.SetNestedField(cr.Object, int64(scale.Spec.Replicas), splitReplicasPath(i.specReplicasPath)...); err != nil { return nil, err } if len(scale.ResourceVersion) != 0 { @@ -401,5 +423,12 @@ func (i *scaleUpdatedObjectInfo) UpdatedObject(ctx context.Context, oldObj runti // Set that precondition and return any conflict errors to the client. cr.SetResourceVersion(scale.ResourceVersion) } + + updatedEntries, err := managedFieldsHandler.ToParent(scale.ManagedFields) + if err != nil { + return nil, err + } + cr.SetManagedFields(updatedEntries) + return cr, nil } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/etcd_test.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/etcd_test.go index 8612eeeb31f..d2d3125653b 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/etcd_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/etcd_test.go @@ -33,6 +33,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/diff" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/generic" registrytest "k8s.io/apiserver/pkg/registry/generic/testing" @@ -106,6 +107,7 @@ func newStorage(t 
*testing.T) (customresource.CustomResourceStorage, *etcd3testi restOptions, []string{"all"}, table, + fieldmanager.ResourcePathMappings{}, ) return storage, server diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/subresources_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/subresources_test.go index 731291acc4d..b773100038f 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/subresources_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/subresources_test.go @@ -33,8 +33,10 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apiserver/pkg/features" + genericfeatures "k8s.io/apiserver/pkg/features" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/dynamic" + featuregatetesting "k8s.io/component-base/featuregate/testing" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" @@ -114,6 +116,26 @@ func NewNoxuSubresourceInstance(namespace, name, version string) *unstructured.U } } +func NewNoxuSubresourceInstanceWithReplicas(namespace, name, version, replicasField string) *unstructured.Unstructured { + return &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": fmt.Sprintf("mygroup.example.com/%s", version), + "kind": "WishIHadChosenNoxu", + "metadata": map[string]interface{}{ + "namespace": namespace, + "name": name, + }, + "spec": map[string]interface{}{ + "num": int64(10), + replicasField: int64(3), + }, + "status": map[string]interface{}{ + "replicas": int64(7), + }, + }, + } +} + func TestStatusSubresource(t *testing.T) { tearDown, apiExtensionClient, dynamicClient, err := fixtures.StartDefaultServerWithClients(t) if err != nil { @@ -373,6 +395,87 @@ func TestScaleSubresource(t *testing.T) { } } +func TestApplyScaleSubresource(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, 
utilfeature.DefaultFeatureGate, genericfeatures.ServerSideApply, true)() + + tearDown, config, _, err := fixtures.StartDefaultServer(t) + if err != nil { + t.Fatal(err) + } + defer tearDown() + + apiExtensionClient, err := clientset.NewForConfig(config) + if err != nil { + t.Fatal(err) + } + dynamicClient, err := dynamic.NewForConfig(config) + if err != nil { + t.Fatal(err) + } + + noxuDefinition := NewNoxuSubresourcesCRDs(apiextensionsv1.NamespaceScoped)[0] + subresources, err := getSubresourcesForVersion(noxuDefinition, "v1beta1") + if err != nil { + t.Fatal(err) + } + subresources.Scale.SpecReplicasPath = ".spec.replicas[0]" + noxuDefinition, err = fixtures.CreateNewV1CustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + if err != nil { + t.Fatal(err) + } + + // Create a client for it. + ns := "not-the-default" + noxuResourceClient := newNamespacedCustomResourceVersionedClient(ns, dynamicClient, noxuDefinition, "v1beta1") + + obj := NewNoxuSubresourceInstanceWithReplicas(ns, "foo", "v1beta1", "replicas[0]") + obj, err = noxuResourceClient.Create(context.TODO(), obj, metav1.CreateOptions{}) + if err != nil { + t.Logf("%#v", obj) + t.Fatalf("Failed to create CustomResource: %v", err) + } + + noxuResourceClient = newNamespacedCustomResourceVersionedClient(ns, dynamicClient, noxuDefinition, "v1") + patch := `{"metadata": {"name": "foo"}, "kind": "WishIHadChosenNoxu", "apiVersion": "mygroup.example.com/v1", "spec": {"replicas": 3}}` + obj, err = noxuResourceClient.Patch(context.TODO(), "foo", types.ApplyPatchType, []byte(patch), metav1.PatchOptions{FieldManager: "applier"}) + if err != nil { + t.Logf("%#v", obj) + t.Fatalf("Failed to Apply CustomResource: %v", err) + } + + if got := len(obj.GetManagedFields()); got != 2 { + t.Fatalf("Expected 2 managed fields, got %v: %v", got, obj.GetManagedFields()) + } + + _, err = noxuResourceClient.Patch(context.TODO(), "foo", types.MergePatchType, []byte(`{"spec": {"replicas": 5}}`), 
metav1.PatchOptions{FieldManager: "scaler"}, "scale") + if err != nil { + t.Fatal(err) + } + + obj, err = noxuResourceClient.Get(context.TODO(), "foo", metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to Get CustomResource: %v", err) + } + + // Managed fields should have 3 entries: one for scale, one for spec, and one for the rest of the fields + managedFields := obj.GetManagedFields() + if len(managedFields) != 3 { + t.Fatalf("Expected 3 managed fields, got %v: %v", len(managedFields), obj.GetManagedFields()) + } + specEntry := managedFields[0] + if specEntry.Manager != "applier" || specEntry.APIVersion != "mygroup.example.com/v1" || specEntry.Operation != "Apply" || string(specEntry.FieldsV1.Raw) != `{"f:spec":{}}` || specEntry.Subresource != "" { + t.Fatalf("Unexpected entry: %v", specEntry) + } + scaleEntry := managedFields[1] + if scaleEntry.Manager != "scaler" || scaleEntry.APIVersion != "mygroup.example.com/v1" || scaleEntry.Operation != "Update" || string(scaleEntry.FieldsV1.Raw) != `{"f:spec":{"f:replicas":{}}}` || scaleEntry.Subresource != "scale" { + t.Fatalf("Unexpected entry: %v", scaleEntry) + } + restEntry := managedFields[2] + if restEntry.Manager != "integration.test" || restEntry.APIVersion != "mygroup.example.com/v1beta1" { + t.Fatalf("Unexpected entry: %v", restEntry) + } +} + func TestValidationSchemaWithStatus(t *testing.T) { tearDown, config, _, err := fixtures.StartDefaultServer(t) if err != nil { diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler.go new file mode 100644 index 00000000000..d81383628c7 --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler.go @@ -0,0 +1,174 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldmanager + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +var ( + scaleGroupVersion = schema.GroupVersion{Group: "autoscaling", Version: "v1"} + replicasPathInScale = fieldpath.MakePathOrDie("spec", "replicas") +) + +// ResourcePathMappings maps a group/version to its replicas path. The +// assumption is that all the paths correspond to leaf fields. +type ResourcePathMappings map[string]fieldpath.Path + +// ScaleHandler manages the conversion of managed fields between a main +// resource and the scale subresource +type ScaleHandler struct { + parentEntries []metav1.ManagedFieldsEntry + groupVersion schema.GroupVersion + mappings ResourcePathMappings +} + +// NewScaleHandler creates a new ScaleHandler +func NewScaleHandler(parentEntries []metav1.ManagedFieldsEntry, groupVersion schema.GroupVersion, mappings ResourcePathMappings) *ScaleHandler { + return &ScaleHandler{ + parentEntries: parentEntries, + groupVersion: groupVersion, + mappings: mappings, + } +} + +// ToSubresource filter the managed fields of the main resource and convert +// them so that they can be handled by scale. +// For the managed fields that have a replicas path it performs two changes: +// 1. APIVersion is changed to the APIVersion of the scale subresource +// 2. 
Replicas path of the main resource is transformed to the replicas path of +// the scale subresource +func (h *ScaleHandler) ToSubresource() ([]metav1.ManagedFieldsEntry, error) { + managed, err := DecodeManagedFields(h.parentEntries) + if err != nil { + return nil, err + } + + f := fieldpath.ManagedFields{} + t := map[string]*metav1.Time{} + for manager, versionedSet := range managed.Fields() { + path, ok := h.mappings[string(versionedSet.APIVersion())] + // Skip the entry if the APIVersion is unknown + if !ok || path == nil { + continue + } + + if versionedSet.Set().Has(path) { + newVersionedSet := fieldpath.NewVersionedSet( + fieldpath.NewSet(replicasPathInScale), + fieldpath.APIVersion(scaleGroupVersion.String()), + versionedSet.Applied(), + ) + + f[manager] = newVersionedSet + t[manager] = managed.Times()[manager] + } + } + + return managedFieldsEntries(internal.NewManaged(f, t)) +} + +// ToParent merges `scaleEntries` with the entries of the main resource and +// transforms them accordingly +func (h *ScaleHandler) ToParent(scaleEntries []metav1.ManagedFieldsEntry) ([]metav1.ManagedFieldsEntry, error) { + decodedParentEntries, err := DecodeManagedFields(h.parentEntries) + if err != nil { + return nil, err + } + parentFields := decodedParentEntries.Fields() + + decodedScaleEntries, err := DecodeManagedFields(scaleEntries) + if err != nil { + return nil, err + } + scaleFields := decodedScaleEntries.Fields() + + f := fieldpath.ManagedFields{} + t := map[string]*metav1.Time{} + + for manager, versionedSet := range parentFields { + // Get the main resource "replicas" path + path, ok := h.mappings[string(versionedSet.APIVersion())] + // Drop the entry if the APIVersion is unknown. + if !ok { + continue + } + + // If the parent entry does not have the replicas path or it is nil, just + // keep it as it is. The path is nil for Custom Resources without scale + // subresource. 
+ if path == nil || !versionedSet.Set().Has(path) { + f[manager] = versionedSet + t[manager] = decodedParentEntries.Times()[manager] + continue + } + + if _, ok := scaleFields[manager]; !ok { + // "Steal" the replicas path from the main resource entry + newSet := versionedSet.Set().Difference(fieldpath.NewSet(path)) + + if !newSet.Empty() { + newVersionedSet := fieldpath.NewVersionedSet( + newSet, + versionedSet.APIVersion(), + versionedSet.Applied(), + ) + f[manager] = newVersionedSet + t[manager] = decodedParentEntries.Times()[manager] + } + } else { + // Field wasn't stolen, let's keep the entry as it is. + f[manager] = versionedSet + t[manager] = decodedParentEntries.Times()[manager] + delete(scaleFields, manager) + } + } + + for manager, versionedSet := range scaleFields { + if !versionedSet.Set().Has(replicasPathInScale) { + continue + } + newVersionedSet := fieldpath.NewVersionedSet( + fieldpath.NewSet(h.mappings[h.groupVersion.String()]), + fieldpath.APIVersion(h.groupVersion.String()), + versionedSet.Applied(), + ) + f[manager] = newVersionedSet + t[manager] = decodedParentEntries.Times()[manager] + } + + return managedFieldsEntries(internal.NewManaged(f, t)) +} + +func managedFieldsEntries(entries internal.ManagedInterface) ([]metav1.ManagedFieldsEntry, error) { + obj := &unstructured.Unstructured{Object: map[string]interface{}{}} + if err := internal.EncodeObjectManagedFields(obj, entries); err != nil { + return nil, err + } + accessor, err := meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + return accessor.GetManagedFields(), nil +} diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler_test.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler_test.go new file mode 100644 index 00000000000..96d40e70d5a --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/scalehandler_test.go @@ -0,0 +1,786 @@ +/* +Copyright 
2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldmanager + +import ( + "reflect" + "testing" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +func TestTransformManagedFieldsToSubresource(t *testing.T) { + testTime, _ := time.ParseInLocation("2006-Jan-02", "2013-Feb-03", time.Local) + managedFieldTime := v1.NewTime(testTime) + + tests := []struct { + desc string + input []metav1.ManagedFieldsEntry + expected []metav1.ManagedFieldsEntry + }{ + { + desc: "filter one entry and transform it into a subresource entry", + input: []metav1.ManagedFieldsEntry{ + { + Manager: "manager-1", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:another-field":{}}}`)}, + }, + { + Manager: "manager-2", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Time: &managedFieldTime, + }, + }, + expected: []metav1.ManagedFieldsEntry{ + { + Manager: "manager-2", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "autoscaling/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Time: &managedFieldTime, + }, + }, + }, + { + 
desc: "transform all entries", + input: []metav1.ManagedFieldsEntry{ + { + Manager: "manager-1", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + }, + { + Manager: "manager-2", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + }, + { + Manager: "manager-3", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + expected: []metav1.ManagedFieldsEntry{ + { + Manager: "manager-1", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "autoscaling/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + }, + { + Manager: "manager-2", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "autoscaling/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + }, + { + Manager: "manager-3", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "autoscaling/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + }, + { + desc: "drops fields if the api version is unknown", + input: []metav1.ManagedFieldsEntry{ + { + Manager: "manager-1", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v10", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + }, + }, + expected: nil, + }, + } + + for _, test := range tests { + handler := NewScaleHandler( + test.input, + schema.GroupVersion{Group: "apps", Version: "v1"}, + defaultMappings(), + ) + subresourceEntries, err := handler.ToSubresource() + if err != 
nil { + t.Fatalf("test %q - expected no error but got %v", test.desc, err) + } + + if !reflect.DeepEqual(subresourceEntries, test.expected) { + t.Fatalf("test %q - expected output to be:\n%v\n\nbut got:\n%v", test.desc, test.expected, subresourceEntries) + } + } +} + +func TestTransformingManagedFieldsToParent(t *testing.T) { + tests := []struct { + desc string + parent []metav1.ManagedFieldsEntry + subresource []metav1.ManagedFieldsEntry + expected []metav1.ManagedFieldsEntry + }{ + { + desc: "different-managers: apply -> update", + parent: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{},"f:selector":{}}}`)}, + }, + }, + subresource: []metav1.ManagedFieldsEntry{ + { + Manager: "scale", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "autoscaling/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + expected: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:selector":{}}}`)}, + }, + { + Manager: "scale", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + }, + { + desc: "different-managers: apply -> apply", + parent: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{},"f:selector":{}}}`)}, + }, + }, + subresource: []metav1.ManagedFieldsEntry{ + { + Manager: "scale", + Operation: metav1.ManagedFieldsOperationApply, 
+ APIVersion: "autoscaling/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + expected: []metav1.ManagedFieldsEntry{ + { + Manager: "scale", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:selector":{}}}`)}, + }, + }, + }, + { + desc: "different-managers: update -> update", + parent: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{},"f:selector":{}}}`)}, + }, + }, + subresource: []metav1.ManagedFieldsEntry{ + { + Manager: "scale", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "autoscaling/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + expected: []metav1.ManagedFieldsEntry{ + { + Manager: "scale", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:selector":{}}}`)}, + }, + }, + }, + { + desc: "different-managers: update -> apply", + parent: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: 
[]byte(`{"f:spec":{"f:replicas":{},"f:selector":{}}}`)}, + }, + }, + subresource: []metav1.ManagedFieldsEntry{ + { + Manager: "scale", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "autoscaling/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + expected: []metav1.ManagedFieldsEntry{ + { + Manager: "scale", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:selector":{}}}`)}, + }, + }, + }, + { + desc: "same manager: apply -> apply", + parent: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{},"f:selector":{}}}`)}, + }, + }, + subresource: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "autoscaling/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + expected: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:selector":{}}}`)}, + }, + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + }, + { + desc: "same manager: update -> update", + parent: []metav1.ManagedFieldsEntry{ + { + 
Manager: "test", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{},"f:selector":{}}}`)}, + }, + }, + subresource: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "autoscaling/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + expected: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:selector":{}}}`)}, + }, + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + }, + { + desc: "same manager: update -> apply", + parent: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{},"f:selector":{}}}`)}, + }, + }, + subresource: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "autoscaling/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + expected: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: 
&metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:selector":{}}}`)}, + }, + }, + }, + { + desc: "same manager: apply -> update", + parent: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{},"f:selector":{}}}`)}, + }, + }, + subresource: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "autoscaling/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + expected: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:selector":{}}}`)}, + }, + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + }, + { + desc: "subresource doesn't own the path anymore", + parent: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:selector":{}}}`)}, + }, + }, + subresource: []metav1.ManagedFieldsEntry{ + { + Manager: "scale", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "autoscaling/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:status":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + expected: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:selector":{}}}`)}, + }, + }, + }, + { 
+ desc: "Subresource steals all the fields of the parent resource", + parent: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + }, + }, + subresource: []metav1.ManagedFieldsEntry{ + { + Manager: "scale", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "autoscaling/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + expected: []metav1.ManagedFieldsEntry{ + { + Manager: "scale", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + }, + { + desc: "apply without stealing", + parent: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{},"f:selector":{}}}`)}, + }, + }, + subresource: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "autoscaling/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + }, + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "autoscaling/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + expected: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{},"f:selector":{}}}`)}, + }, + { + Manager: "test", + Operation: 
metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + }, + { + desc: "drops the entry if the api version is unknown", + parent: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + }, + { + Manager: "another-manager", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v10", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:selector":{}}}`)}, + }, + }, + subresource: []metav1.ManagedFieldsEntry{ + { + Manager: "scale", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "autoscaling/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + }, + }, + expected: []metav1.ManagedFieldsEntry{ + { + Manager: "scale", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + handler := NewScaleHandler( + test.parent, + schema.GroupVersion{Group: "apps", Version: "v1"}, + defaultMappings(), + ) + parentEntries, err := handler.ToParent(test.subresource) + if err != nil { + t.Fatalf("test: %q - expected no error but got %v", test.desc, err) + } + if !reflect.DeepEqual(parentEntries, test.expected) { + t.Fatalf("test: %q - expected output to be:\n%v\n\nbut got:\n%v", test.desc, test.expected, parentEntries) + } + }) + } +} + +func TestTransformingManagedFieldsToParentMultiVersion(t *testing.T) { + tests := []struct { + desc string + groupVersion schema.GroupVersion + mappings ResourcePathMappings + parent []metav1.ManagedFieldsEntry + 
subresource []metav1.ManagedFieldsEntry + expected []metav1.ManagedFieldsEntry + }{ + { + desc: "multi-version", + groupVersion: schema.GroupVersion{Group: "apps", Version: "v1"}, + mappings: ResourcePathMappings{ + "apps/v1": fieldpath.MakePathOrDie("spec", "the-replicas"), + "apps/v2": fieldpath.MakePathOrDie("spec", "not-the-replicas"), + }, + parent: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:the-replicas":{},"f:selector":{}}}`)}, + }, + { + Manager: "test-other", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v2", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:not-the-replicas":{},"f:selector":{}}}`)}, + }, + }, + subresource: []metav1.ManagedFieldsEntry{ + { + Manager: "scale", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "autoscaling/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + expected: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:selector":{}}}`)}, + }, + { + Manager: "test-other", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "apps/v2", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:selector":{}}}`)}, + }, + { + Manager: "scale", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "apps/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:the-replicas":{}}}`)}, + Subresource: "scale", + }, + }, + }, + { + desc: "Custom resource without scale subresource, scaling a version with `scale`", + groupVersion: schema.GroupVersion{Group: "mygroup", Version: "v1"}, + mappings: 
ResourcePathMappings{ + "mygroup/v1": fieldpath.MakePathOrDie("spec", "the-replicas"), + "mygroup/v2": nil, + }, + parent: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "mygroup/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:the-replicas":{},"f:selector":{}}}`)}, + }, + { + Manager: "test-other", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "mygroup/v2", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:test-other":{}}}`)}, + }, + }, + subresource: []metav1.ManagedFieldsEntry{ + { + Manager: "scale", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "autoscaling/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + Subresource: "scale", + }, + }, + expected: []metav1.ManagedFieldsEntry{ + { + Manager: "test", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "mygroup/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:selector":{}}}`)}, + }, + { + Manager: "test-other", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "mygroup/v2", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:test-other":{}}}`)}, + }, + { + Manager: "scale", + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: "mygroup/v1", + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:the-replicas":{}}}`)}, + Subresource: "scale", + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + handler := NewScaleHandler( + test.parent, + test.groupVersion, + test.mappings, + ) + parentEntries, err := handler.ToParent(test.subresource) + if err != nil { + t.Fatalf("test: %q - expected no error but got %v", test.desc, err) + } + if !reflect.DeepEqual(parentEntries, test.expected) { + t.Fatalf("test: %q - expected output 
to be:\n%v\n\nbut got:\n%v", test.desc, test.expected, parentEntries) + } + }) + } +} + +func defaultMappings() ResourcePathMappings { + return ResourcePathMappings{ + "apps/v1": fieldpath.MakePathOrDie("spec", "replicas"), + } +} diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/structuredmerge.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/structuredmerge.go index bfa3a3f715a..1be9e763af4 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/structuredmerge.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/structuredmerge.go @@ -76,7 +76,7 @@ func NewCRDStructuredMergeManager(typeConverter TypeConverter, objectConverter r func (f *structuredMergeManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { newObjVersioned, err := f.toVersioned(newObj) if err != nil { - return nil, nil, fmt.Errorf("failed to convert new object to proper version: %v", err) + return nil, nil, fmt.Errorf("failed to convert new object to proper version: %v (from %v to %v)", err, newObj.GetObjectKind().GroupVersionKind(), f.groupVersion) } liveObjVersioned, err := f.toVersioned(liveObj) if err != nil { diff --git a/test/integration/apiserver/apply/apply_test.go b/test/integration/apiserver/apply/apply_test.go index 71622b637af..fe4b5636214 100644 --- a/test/integration/apiserver/apply/apply_test.go +++ b/test/integration/apiserver/apply/apply_test.go @@ -3395,9 +3395,9 @@ func TestSubresourceField(t *testing.T) { AbsPath("/apis/apps/v1"). Namespace("default"). Resource("deployments"). - SubResource("status"). + SubResource("scale"). Name("deployment"). - Body([]byte(`{"status":{"unavailableReplicas":32}}`)). + Body([]byte(`{"spec":{"replicas":32}}`)). Param("fieldManager", "manager"). Do(context.TODO()). 
Get() @@ -3405,8 +3405,6 @@ func TestSubresourceField(t *testing.T) { t.Fatalf("Failed to update status: %v", err) } - // TODO (nodo): add test for "scale" once we start tracking managed fields (#82046) - deployment, err := client.AppsV1().Deployments("default").Get(context.TODO(), "deployment", metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to get object: %v", err) @@ -3414,15 +3412,15 @@ func TestSubresourceField(t *testing.T) { managedFields := deployment.GetManagedFields() if len(managedFields) != 2 { - t.Fatalf("Expected object to have 3 managed fields entries, got: %d", len(managedFields)) + t.Fatalf("Expected object to have 2 managed fields entries, got: %d", len(managedFields)) } if managedFields[0].Manager != "manager" || managedFields[0].Operation != "Apply" || managedFields[0].Subresource != "" { t.Fatalf(`Unexpected entry, got: %v`, managedFields[0]) } if managedFields[1].Manager != "manager" || managedFields[1].Operation != "Update" || - managedFields[1].Subresource != "status" || - string(managedFields[1].FieldsV1.Raw) != `{"f:status":{"f:unavailableReplicas":{}}}` { + managedFields[1].Subresource != "scale" || + string(managedFields[1].FieldsV1.Raw) != `{"f:spec":{"f:replicas":{}}}` { t.Fatalf(`Unexpected entry, got: %v`, managedFields[1]) } } diff --git a/test/integration/apiserver/apply/scale_test.go b/test/integration/apiserver/apply/scale_test.go new file mode 100644 index 00000000000..834af8741d1 --- /dev/null +++ b/test/integration/apiserver/apply/scale_test.go @@ -0,0 +1,459 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apiserver + +import ( + "context" + "encoding/json" + "fmt" + "path" + "strings" + "testing" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager" + genericfeatures "k8s.io/apiserver/pkg/features" + utilfeature "k8s.io/apiserver/pkg/util/feature" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + featuregatetesting "k8s.io/component-base/featuregate/testing" + deploymentstorage "k8s.io/kubernetes/pkg/registry/apps/deployment/storage" + replicasetstorage "k8s.io/kubernetes/pkg/registry/apps/replicaset/storage" + statefulsetstorage "k8s.io/kubernetes/pkg/registry/apps/statefulset/storage" + replicationcontrollerstorage "k8s.io/kubernetes/pkg/registry/core/replicationcontroller/storage" +) + +type scaleTest struct { + kind string + resource string + path string + validObj string +} + +func TestScaleAllResources(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.ServerSideApply, true)() + + _, client, closeFn := setup(t) + defer closeFn() + + tests := []scaleTest{ + { + kind: "Deployment", + resource: "deployments", + path: "/apis/apps/v1", + validObj: validAppsV1("Deployment"), + }, + { + kind: "StatefulSet", + resource: "statefulsets", + path: "/apis/apps/v1", + validObj: validAppsV1("StatefulSet"), + }, + { + kind: "ReplicaSet", + resource: 
"replicasets", + path: "/apis/apps/v1", + validObj: validAppsV1("ReplicaSet"), + }, + { + kind: "ReplicationController", + resource: "replicationcontrollers", + path: "/api/v1", + validObj: validV1ReplicationController(), + }, + } + + for _, test := range tests { + t.Run(test.kind, func(t *testing.T) { + validObject := []byte(test.validObj) + + // Create the object + _, err := client.CoreV1().RESTClient().Patch(types.ApplyPatchType). + AbsPath(test.path). + Namespace("default"). + Resource(test.resource). + Name("test"). + Param("fieldManager", "apply_test"). + Body(validObject). + Do(context.TODO()).Get() + if err != nil { + t.Fatalf("Failed to create object using apply: %v", err) + } + obj := retrieveObject(t, client, test.path, test.resource) + assertReplicasValue(t, obj, 1) + assertReplicasOwnership(t, obj, "apply_test") + + // Call scale subresource to update replicas + _, err = client.CoreV1().RESTClient(). + Patch(types.MergePatchType). + AbsPath(test.path). + Namespace("default"). + Resource(test.resource). + Name("test"). + SubResource("scale"). + Param("fieldManager", "scale_test"). + Body([]byte(`{"spec":{"replicas": 5}}`)). + Do(context.TODO()).Get() + if err != nil { + t.Fatalf("Failed to scale object: %v", err) + } + obj = retrieveObject(t, client, test.path, test.resource) + assertReplicasValue(t, obj, 5) + assertReplicasOwnership(t, obj, "scale_test") + + // Re-apply the original object, it should fail with conflict because replicas have changed + _, err = client.CoreV1().RESTClient().Patch(types.ApplyPatchType). + AbsPath(test.path). + Namespace("default"). + Resource(test.resource). + Name("test"). + Param("fieldManager", "apply_test"). + Body(validObject). + Do(context.TODO()).Get() + if !apierrors.IsConflict(err) { + t.Fatalf("Expected conflict when re-applying the original object, but got: %v", err) + } + + // Re-apply forcing the changes should succeed + _, err = client.CoreV1().RESTClient().Patch(types.ApplyPatchType). + AbsPath(test.path). 
+ Namespace("default"). + Resource(test.resource). + Name("test"). + Param("fieldManager", "apply_test"). + Param("force", "true"). + Body(validObject). + Do(context.TODO()).Get() + if err != nil { + t.Fatalf("Error force-updating: %v", err) + } + obj = retrieveObject(t, client, test.path, test.resource) + assertReplicasValue(t, obj, 1) + assertReplicasOwnership(t, obj, "apply_test") + + // Run "Apply" with a scale object with a different number of replicas. It should generate a conflict. + _, err = client.CoreV1().RESTClient().Patch(types.ApplyPatchType). + AbsPath(test.path). + Namespace("default"). + Resource(test.resource). + SubResource("scale"). + Name("test"). + Param("fieldManager", "apply_scale"). + Body([]byte(`{"kind":"Scale","apiVersion":"autoscaling/v1","metadata":{"name":"test","namespace":"default"},"spec":{"replicas":17}}`)). + Do(context.TODO()).Get() + if !apierrors.IsConflict(err) { + t.Fatalf("Expected conflict error but got: %v", err) + } + if !strings.Contains(err.Error(), "apply_test") { + t.Fatalf("Expected conflict with `apply_test` manager when but got: %v", err) + } + + // Same as before but force. Only the new manager should own .spec.replicas + _, err = client.CoreV1().RESTClient().Patch(types.ApplyPatchType). + AbsPath(test.path). + Namespace("default"). + Resource(test.resource). + SubResource("scale"). + Name("test"). + Param("fieldManager", "apply_scale"). + Param("force", "true"). + Body([]byte(`{"kind":"Scale","apiVersion":"autoscaling/v1","metadata":{"name":"test","namespace":"default"},"spec":{"replicas":17}}`)). + Do(context.TODO()).Get() + if err != nil { + t.Fatalf("Error updating object by applying scale and forcing: %v ", err) + } + obj = retrieveObject(t, client, test.path, test.resource) + assertReplicasValue(t, obj, 17) + assertReplicasOwnership(t, obj, "apply_scale") + + // Replace scale object + _, err = client.CoreV1().RESTClient().Put(). + AbsPath(test.path). + Namespace("default"). + Resource(test.resource). 
+ SubResource("scale"). + Name("test"). + Param("fieldManager", "replace_test"). + Body([]byte(`{"kind":"Scale","apiVersion":"autoscaling/v1","metadata":{"name":"test","namespace":"default"},"spec":{"replicas":7}}`)). + Do(context.TODO()).Get() + if err != nil { + t.Fatalf("Error replacing object: %v", err) + } + obj = retrieveObject(t, client, test.path, test.resource) + assertReplicasValue(t, obj, 7) + assertReplicasOwnership(t, obj, "replace_test") + + // Apply the same number of replicas, both managers should own the field + _, err = client.CoreV1().RESTClient().Patch(types.ApplyPatchType). + AbsPath(test.path). + Namespace("default"). + Resource(test.resource). + SubResource("scale"). + Name("test"). + Param("fieldManager", "co_owning_test"). + Body([]byte(`{"kind":"Scale","apiVersion":"autoscaling/v1","metadata":{"name":"test","namespace":"default"},"spec":{"replicas":7}}`)). + Do(context.TODO()).Get() + if err != nil { + t.Fatalf("Error updating object: %v", err) + } + obj = retrieveObject(t, client, test.path, test.resource) + assertReplicasValue(t, obj, 7) + assertReplicasOwnership(t, obj, "replace_test", "co_owning_test") + + // Scaling again should make this manager the only owner of replicas + _, err = client.CoreV1().RESTClient().Patch(types.MergePatchType). + AbsPath(test.path). + Namespace("default"). + Resource(test.resource). + SubResource("scale"). + Name("test"). + Param("fieldManager", "scale_test"). + Body([]byte(`{"spec":{"replicas": 5}}`)). 
+ Do(context.TODO()).Get() + if err != nil { + t.Fatalf("Error scaling object: %v", err) + } + obj = retrieveObject(t, client, test.path, test.resource) + assertReplicasValue(t, obj, 5) + assertReplicasOwnership(t, obj, "scale_test") + }) + } +} + +func TestScaleUpdateOnlyStatus(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.ServerSideApply, true)() + + _, client, closeFn := setup(t) + defer closeFn() + + resource := "deployments" + path := "/apis/apps/v1" + validObject := []byte(validAppsV1("Deployment")) + + // Create the object + _, err := client.CoreV1().RESTClient().Patch(types.ApplyPatchType). + AbsPath(path). + Namespace("default"). + Resource(resource). + Name("test"). + Param("fieldManager", "apply_test"). + Body(validObject). + Do(context.TODO()).Get() + if err != nil { + t.Fatalf("Failed to create object using apply: %v", err) + } + obj := retrieveObject(t, client, path, resource) + assertReplicasValue(t, obj, 1) + assertReplicasOwnership(t, obj, "apply_test") + + // Call scale subresource to update replicas + _, err = client.CoreV1().RESTClient(). + Patch(types.MergePatchType). + AbsPath(path). + Namespace("default"). + Resource(resource). + Name("test"). + SubResource("scale"). + Param("fieldManager", "scale_test"). + Body([]byte(`{"status":{"replicas": 42}}`)). 
+ Do(context.TODO()).Get() + if err != nil { + t.Fatalf("Failed to scale object: %v", err) + } + obj = retrieveObject(t, client, path, resource) + assertReplicasValue(t, obj, 1) + assertReplicasOwnership(t, obj, "apply_test") +} + +func TestAllKnownVersionsAreInMappings(t *testing.T) { + cases := []struct { + groupKind schema.GroupKind + mappings fieldmanager.ResourcePathMappings + }{ + { + groupKind: schema.GroupKind{Group: "apps", Kind: "ReplicaSet"}, + mappings: replicasetstorage.ReplicasPathMappings(), + }, + { + groupKind: schema.GroupKind{Group: "apps", Kind: "StatefulSet"}, + mappings: statefulsetstorage.ReplicasPathMappings(), + }, + { + groupKind: schema.GroupKind{Group: "apps", Kind: "Deployment"}, + mappings: deploymentstorage.ReplicasPathMappings(), + }, + { + groupKind: schema.GroupKind{Group: "", Kind: "ReplicationController"}, + mappings: replicationcontrollerstorage.ReplicasPathMappings(), + }, + } + for _, c := range cases { + knownVersions := scheme.Scheme.VersionsForGroupKind(c.groupKind) + for _, version := range knownVersions { + if _, ok := c.mappings[version.String()]; !ok { + t.Errorf("missing version %v for %v mappings", version, c.groupKind) + } + } + + if len(knownVersions) != len(c.mappings) { + t.Errorf("%v mappings has extra items: %v vs %v", c.groupKind, c.mappings, knownVersions) + } + } +} + +func validAppsV1(kind string) string { + return fmt.Sprintf(`{ + "apiVersion": "apps/v1", + "kind": "%s", + "metadata": { + "name": "test" + }, + "spec": { + "replicas": 1, + "selector": { + "matchLabels": { + "app": "nginx" + } + }, + "template": { + "metadata": { + "labels": { + "app": "nginx" + } + }, + "spec": { + "containers": [{ + "name": "nginx", + "image": "nginx:latest" + }] + } + } + } + }`, kind) +} + +func validV1ReplicationController() string { + return `{ + "apiVersion": "v1", + "kind": "ReplicationController", + "metadata": { + "name": "test" + }, + "spec": { + "replicas": 1, + "selector": { + "app": "nginx" + }, + "template": { 
+ "metadata": { + "labels": { + "app": "nginx" + } + }, + "spec": { + "containers": [{ + "name": "nginx", + "image": "nginx:latest" + }] + } + } + } + }` +} + +func retrieveObject(t *testing.T, client clientset.Interface, prefix, resource string) *unstructured.Unstructured { + t.Helper() + + urlPath := path.Join(prefix, "namespaces", "default", resource, "test") + bytes, err := client.CoreV1().RESTClient().Get().AbsPath(urlPath).DoRaw(context.TODO()) + if err != nil { + t.Fatalf("Failed to retrieve object: %v", err) + } + obj := &unstructured.Unstructured{} + if err := json.Unmarshal(bytes, obj); err != nil { + t.Fatalf("Error unmarshalling the retrieved object: %v", err) + } + return obj +} + +func assertReplicasValue(t *testing.T, obj *unstructured.Unstructured, value int) { + actualValue, found, err := unstructured.NestedInt64(obj.Object, "spec", "replicas") + + if err != nil { + t.Fatalf("Error when retriving replicas field: %v", err) + } + if !found { + t.Fatalf("Replicas field not found") + } + + if int(actualValue) != value { + t.Fatalf("Expected replicas field value to be %d but got %d", value, actualValue) + } +} + +func assertReplicasOwnership(t *testing.T, obj *unstructured.Unstructured, fieldManagers ...string) { + t.Helper() + + accessor, err := meta.Accessor(obj) + if err != nil { + t.Fatalf("Failed to get meta accessor for object: %v", err) + } + + seen := make(map[string]bool) + for _, m := range fieldManagers { + seen[m] = false + } + + for _, managedField := range accessor.GetManagedFields() { + var entryJSON map[string]interface{} + if err := json.Unmarshal(managedField.FieldsV1.Raw, &entryJSON); err != nil { + t.Fatalf("failed to read into json") + } + + spec, ok := entryJSON["f:spec"].(map[string]interface{}) + if !ok { + // continue with the next managedField, as we this field does not hold the spec entry + continue + } + + if _, ok := spec["f:replicas"]; !ok { + // continue with the next managedField, as we this field does not hold the 
spec.replicas entry + continue + } + + // check if the manager is one of the ones we expect + if _, ok := seen[managedField.Manager]; !ok { + t.Fatalf("Unexpected field manager, found %q, expected to be in: %v", managedField.Manager, seen) + } + + seen[managedField.Manager] = true + } + + var missingManagers []string + for manager, managerSeen := range seen { + if !managerSeen { + missingManagers = append(missingManagers, manager) + } + } + if len(missingManagers) > 0 { + t.Fatalf("replicas fields should be owned by %v", missingManagers) + } +}