Mirror of https://github.com/k3s-io/kubernetes.git
add kubectl server-side apply migrate managedfields
In discussion with the SIG, there is strong interest in keeping the last-applied-configuration annotation around for a while longer as other tools transition away from it. This is OK since SSA maintains the annotation on kubectl's behalf on the server side if it exists.

Migrate client-side-apply managed fields to SSA when --server-side is used.

Related issues:
- https://github.com/kubernetes/kubernetes/issues/107980
- https://github.com/kubernetes/kubernetes/issues/108081
- https://github.com/kubernetes/kubernetes/issues/107417
- https://github.com/kubernetes/kubernetes/issues/112826

Adds a test to make sure only one apply is needed after migration.
parent 26a6e12348
commit 33b9552e70
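
For context before the diffs: the migration below is driven by the k8s.io/client-go/util/csaupgrade helpers that appear in the changed code. A minimal standalone sketch of the core call, assuming the same three-argument signature used in this commit (the object here is an illustrative placeholder, not taken from the commit):

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/client-go/util/csaupgrade"
)

func main() {
    // obj would normally be fetched from the cluster; an empty object is
    // used here only to keep the sketch self-contained.
    obj := &unstructured.Unstructured{Object: map[string]interface{}{}}

    // Compute a JSON patch that moves fields owned by the client-side-apply
    // manager under the server-side-apply manager "kubectl". A nil patch
    // means the object needs no migration.
    patch, err := csaupgrade.UpgradeManagedFieldsPatch(
        obj,
        sets.New("kubectl-client-side-apply"),
        "kubectl",
    )
    fmt.Println(string(patch), err)
}
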
@@ -42,6 +42,7 @@ require (
    sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2
    sigs.k8s.io/kustomize/kustomize/v4 v4.5.7
    sigs.k8s.io/kustomize/kyaml v0.13.9
    sigs.k8s.io/structured-merge-diff/v4 v4.2.3
    sigs.k8s.io/yaml v1.3.0
)

@@ -87,7 +88,6 @@ require (
    gopkg.in/inf.v0 v0.9.1 // indirect
    gopkg.in/yaml.v3 v3.0.1 // indirect
    sigs.k8s.io/kustomize/api v0.12.1 // indirect
    sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
)

replace (
@@ -22,6 +22,7 @@ import (
    "net/http"

    "github.com/spf13/cobra"
    "sigs.k8s.io/structured-merge-diff/v4/fieldpath"

    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/errors"
@@ -36,6 +37,7 @@ import (
    "k8s.io/cli-runtime/pkg/printers"
    "k8s.io/cli-runtime/pkg/resource"
    "k8s.io/client-go/dynamic"
    "k8s.io/client-go/util/csaupgrade"
    "k8s.io/klog/v2"
    "k8s.io/kubectl/pkg/cmd/delete"
    cmdutil "k8s.io/kubectl/pkg/cmd/util"
@@ -163,6 +165,9 @@ var (

    warningNoLastAppliedConfigAnnotation = "Warning: resource %[1]s is missing the %[2]s annotation which is required by %[3]s apply. %[3]s apply should only be used on resources created declaratively by either %[3]s create --save-config or %[3]s apply. The missing annotation will be patched automatically.\n"
    warningChangesOnDeletingResource = "Warning: Detected changes to resource %[1]s which is currently being deleted.\n"
    warningMigrationLastAppliedFailed = "Warning: failed to migrate kubectl.kubernetes.io/last-applied-configuration for Server-Side Apply. This is non-fatal and will be retried next time you apply. Error: %[1]s\n"
    warningMigrationPatchFailed = "Warning: server rejected managed fields migration to Server-Side Apply. This is non-fatal and will be retried next time you apply. Error: %[1]s\n"
    warningMigrationReapplyFailed = "Warning: failed to re-apply configuration after performing Server-Side Apply migration. This is non-fatal and will be retried next time you apply. Error: %[1]s\n"
)

// NewApplyFlags returns a default ApplyFlags
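
Aside: the warning templates above use fmt's explicit argument indexes (%[1]s, %[3]s), which let a single argument be referenced several times. A minimal illustration with made-up values:

package main

import "fmt"

func main() {
    // %[3]s reuses the third argument repeatedly, just as
    // warningNoLastAppliedConfigAnnotation reuses the binary name.
    fmt.Printf("Warning: resource %[1]s is missing the %[2]s annotation required by %[3]s apply; use %[3]s create --save-config or %[3]s apply.\n",
        "configmap/test", "kubectl.kubernetes.io/last-applied-configuration", "kubectl")
}
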
@@ -542,6 +547,40 @@ See https://kubernetes.io/docs/reference/using-api/server-side-apply/#conflicts`

    info.Refresh(obj, true)

    // Migrate managed fields if necessary.
    //
    // By checking afterward instead of fetching the object beforehand and
    // checking unconditionally, we make 3 network requests in the rare
    // case of migration and only 1 request if migration is unnecessary.
    //
    // Checking beforehand would mean 2 requests for most operations, and 3
    // requests in the worst case.
    if err = o.saveLastApplyAnnotationIfNecessary(helper, info); err != nil {
        fmt.Fprintf(o.ErrOut, warningMigrationLastAppliedFailed, err.Error())
    } else if performedMigration, err := o.migrateToSSAIfNecessary(helper, info); err != nil {
        // Print the error as a warning.
        // This is non-fatal because the object was successfully applied
        // above, but it might have issues since the migration failed.
        //
        // The migration will be re-attempted if necessary upon the next
        // apply.
        fmt.Fprintf(o.ErrOut, warningMigrationPatchFailed, err.Error())
    } else if performedMigration {
        // Re-send the original SSA patch (this will allow dropped fields
        // to finally be removed).
        if obj, err = helper.Patch(
            info.Namespace,
            info.Name,
            types.ApplyPatchType,
            data,
            &options,
        ); err != nil {
            fmt.Fprintf(o.ErrOut, warningMigrationReapplyFailed, err.Error())
        } else {
            info.Refresh(obj, false)
        }
    }

    WarnIfDeleting(info.Object, o.ErrOut)

    if err := o.MarkObjectVisited(info); err != nil {
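
A hypothetical sketch of the request accounting described in the comment above: by checking for migration work only after the apply succeeds, the common path costs a single request, and the extra requests are paid only in the rare migration case. Function names here are invented for illustration:

package main

import "fmt"

// applyThenMaybeMigrate mirrors the control flow above: the apply itself is
// request 1; only when migration turns out to be necessary are the migration
// patch (request 2) and the follow-up re-apply (request 3) sent.
func applyThenMaybeMigrate(apply func() error, migrateIfNecessary func() (bool, error), reapply func() error) error {
    if err := apply(); err != nil { // request 1 (always)
        return err
    }
    migrated, err := migrateIfNecessary() // request 2 (only if needed)
    if err != nil || !migrated {
        return err
    }
    return reapply() // request 3 (only after a migration)
}

func main() {
    err := applyThenMaybeMigrate(
        func() error { fmt.Println("apply"); return nil },
        func() (bool, error) { fmt.Println("migrate"); return true, nil },
        func() error { fmt.Println("re-apply"); return nil },
    )
    fmt.Println(err)
}
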
@@ -660,6 +699,183 @@ See https://kubernetes.io/docs/reference/using-api/server-side-apply/#conflicts`
    return nil
}

// Saves the last-applied-configuration annotation in a separate SSA field manager
// to prevent it from being dropped by users who have transitioned to SSA.
//
// If this operation is not performed, the last-applied-configuration annotation
// would be removed from the object upon the first SSA usage. We want to keep it
// around for a few releases since it is required to downgrade from SSA back to
// client-side apply per [1] and [2]. This code should be removed once the
// annotation is deprecated.
//
// - [1] https://kubernetes.io/docs/reference/using-api/server-side-apply/#downgrading-from-server-side-apply-to-client-side-apply
// - [2] https://github.com/kubernetes/kubernetes/pull/90187
//
// If the annotation is not already present, or if it is already managed by the
// separate SSA field manager, this is a no-op.
func (o *ApplyOptions) saveLastApplyAnnotationIfNecessary(
    helper *resource.Helper,
    info *resource.Info,
) error {
    if o.FieldManager != fieldManagerServerSideApply {
        // There is no point in preserving the annotation if the field manager
        // will not remain the default. This is because the server will not
        // keep the annotation up to date.
        return nil
    }

    // Send an apply patch with the last-applied annotation
    // so that it is not orphaned by SSA in the following patch:
    accessor, err := meta.Accessor(info.Object)
    if err != nil {
        return err
    }

    // Get the current annotations from the object.
    annots := accessor.GetAnnotations()
    if annots == nil {
        annots = map[string]string{}
    }

    fieldManager := fieldManagerLastAppliedAnnotation
    originalAnnotation, hasAnnotation := annots[corev1.LastAppliedConfigAnnotation]

    // If the annotation does not already exist, there is nothing to do.
    if !hasAnnotation {
        return nil
    }

    // If there is already an SSA field manager which owns the field, then
    // there is nothing to do here.
    if owners := csaupgrade.FindFieldsOwners(
        accessor.GetManagedFields(),
        metav1.ManagedFieldsOperationApply,
        lastAppliedAnnotationFieldPath,
    ); len(owners) > 0 {
        return nil
    }

    justAnnotation := &unstructured.Unstructured{}
    justAnnotation.SetGroupVersionKind(info.Mapping.GroupVersionKind)
    justAnnotation.SetName(accessor.GetName())
    justAnnotation.SetNamespace(accessor.GetNamespace())
    justAnnotation.SetAnnotations(map[string]string{
        corev1.LastAppliedConfigAnnotation: originalAnnotation,
    })

    modified, err := runtime.Encode(unstructured.UnstructuredJSONScheme, justAnnotation)
    if err != nil {
        return err
    }

    helperCopy := *helper
    newObj, err := helperCopy.WithFieldManager(fieldManager).Patch(
        info.Namespace,
        info.Name,
        types.ApplyPatchType,
        modified,
        nil,
    )

    if err != nil {
        return err
    }

    return info.Refresh(newObj, false)
}
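
For intuition, the apply patch sent by saveLastApplyAnnotationIfNecessary carries only the object's identity plus the one preserved annotation. A standalone sketch of that payload, with an illustrative GVK, name, and annotation value (none of these are from the commit):

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
    justAnnotation := &unstructured.Unstructured{}
    justAnnotation.SetGroupVersionKind(schema.GroupVersionKind{Version: "v1", Kind: "ConfigMap"})
    justAnnotation.SetName("test")
    justAnnotation.SetNamespace("default")
    justAnnotation.SetAnnotations(map[string]string{
        "kubectl.kubernetes.io/last-applied-configuration": `{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"test"}}`,
    })

    // Encode exactly as the function above does; the result is a minimal
    // object carrying nothing but identity fields and the annotation.
    data, err := runtime.Encode(unstructured.UnstructuredJSONScheme, justAnnotation)
    fmt.Println(string(data), err)
}
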
// Checks whether the returned object needs its kubectl-client-side-apply
// managed fields migrated to server-side apply.
//
// Field ownership metadata is stored in three places:
//   - server-side managed fields
//   - client-side managed fields
//   - the last-applied-configuration annotation
//
// The migration merges the client-side managed fields into the
// server-side managed fields, leaving the last-applied-configuration
// annotation in place. The server will keep the annotation up to date
// after every server-side apply where the following conditions are met:
//
//  1. field manager is 'kubectl'
//  2. annotation already exists
func (o *ApplyOptions) migrateToSSAIfNecessary(
    helper *resource.Helper,
    info *resource.Info,
) (migrated bool, err error) {
    accessor, err := meta.Accessor(info.Object)
    if err != nil {
        return false, err
    }

    // To determine which field managers were used by kubectl for
    // client-side apply, we search for a manager used in `Update`
    // operations which owns the last-applied annotation.
    //
    // This is the last client-side-apply manager which changed the field.
    //
    // There may be multiple owners if multiple managers wrote the same exact
    // configuration; in that case we want to migrate them all.
    csaManagers := csaupgrade.FindFieldsOwners(
        accessor.GetManagedFields(),
        metav1.ManagedFieldsOperationUpdate,
        lastAppliedAnnotationFieldPath)

    managerNames := sets.New[string]()
    for _, entry := range csaManagers {
        managerNames.Insert(entry.Manager)
    }

    // Re-attempt the patch for as long as it conflicts due to the
    // ResourceVersion precondition failing.
    for i := 0; i < maxPatchRetry; i++ {
        var patchData []byte
        var obj runtime.Object

        patchData, err = csaupgrade.UpgradeManagedFieldsPatch(
            info.Object, managerNames, o.FieldManager)

        if err != nil {
            // If patch generation failed, there was likely a bug.
            return false, err
        } else if patchData == nil {
            // nil patch data means there is nothing to do - the object is
            // already migrated.
            return false, nil
        }

        // Send the patch to upgrade the managed fields if it is non-nil.
        obj, err = helper.Patch(
            info.Namespace,
            info.Name,
            types.JSONPatchType,
            patchData,
            nil,
        )

        if err == nil {
            // Stop retrying upon success.
            info.Refresh(obj, false)
            return true, nil
        } else if !errors.IsConflict(err) {
            // Only retry if there was a conflict.
            return false, err
        }

        // Refresh the object for the next iteration.
        err = info.Get()
        if err != nil {
            // If there was an error fetching, return the error.
            return false, err
        }
    }

    // Reaching this point with a non-nil error means there was a conflict
    // and max retries was hit.
    // Return the last error witnessed (which will be a conflict).
    return false, err
}

func (o *ApplyOptions) shouldPrintObject() bool {
    // Print the object only if an output format other than "name" is specified.
    shouldPrint := false
@@ -766,6 +982,16 @@ const (
    // for backward compatibility to not conflict with old versions
    // of kubectl server-side apply where `kubectl` has already been the field manager.
    fieldManagerServerSideApply = "kubectl"

    fieldManagerLastAppliedAnnotation = "kubectl-last-applied"
)

var (
    lastAppliedAnnotationFieldPath = fieldpath.NewSet(
        fieldpath.MakePathOrDie(
            "metadata", "annotations",
            corev1.LastAppliedConfigAnnotation),
    )
)

// GetApplyFieldManagerFlag gets the field manager for kubectl apply
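
The lastAppliedAnnotationFieldPath set above names a single leaf path in an object. A tiny sketch of building and printing such a set with the sigs.k8s.io/structured-merge-diff/v4/fieldpath package, with the corev1.LastAppliedConfigAnnotation constant spelled out literally:

package main

import (
    "fmt"

    "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)

func main() {
    // Equivalent to lastAppliedAnnotationFieldPath above: the set containing
    // the one path .metadata.annotations["kubectl.kubernetes.io/last-applied-configuration"].
    set := fieldpath.NewSet(
        fieldpath.MakePathOrDie(
            "metadata", "annotations",
            "kubectl.kubernetes.io/last-applied-configuration"),
    )
    fmt.Println(set.String())
}
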
@@ -31,6 +31,7 @@ import (

    openapi_v2 "github.com/google/gnostic/openapiv2"
    "github.com/spf13/cobra"
    "github.com/stretchr/testify/require"

    appsv1 "k8s.io/api/apps/v1"
    corev1 "k8s.io/api/core/v1"
@@ -40,6 +41,7 @@ import (
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/sets"
    sptest "k8s.io/apimachinery/pkg/util/strategicpatch/testing"
    "k8s.io/cli-runtime/pkg/genericclioptions"
    "k8s.io/cli-runtime/pkg/resource"
@@ -47,6 +49,7 @@ import (
    dynamicfakeclient "k8s.io/client-go/dynamic/fake"
    restclient "k8s.io/client-go/rest"
    "k8s.io/client-go/rest/fake"
    "k8s.io/client-go/util/csaupgrade"
    cmdtesting "k8s.io/kubectl/pkg/cmd/testing"
    cmdutil "k8s.io/kubectl/pkg/cmd/util"
    "k8s.io/kubectl/pkg/scheme"
@@ -185,6 +188,7 @@ const (
    filenameRCLastAppliedArgs = "../../../testdata/apply/rc-lastapplied-args.yaml"
    filenameRCNoAnnotation = "../../../testdata/apply/rc-no-annotation.yaml"
    filenameRCLASTAPPLIED = "../../../testdata/apply/rc-lastapplied.yaml"
    filenameRCManagedFieldsLA = "../../../testdata/apply/rc-managedfields-lastapplied.yaml"
    filenameSVC = "../../../testdata/apply/service.yaml"
    filenameRCSVC = "../../../testdata/apply/rc-service.yaml"
    filenameNoExistRC = "../../../testdata/apply/rc-noexist.yaml"
@@ -710,6 +714,157 @@ func TestApplyPruneObjects(t *testing.T) {
    }
}

// Tests that apply of an object in need of CSA migration results in a call
// to patch it.
func TestApplyCSAMigration(t *testing.T) {
    cmdtesting.InitTestErrorHandler(t)
    nameRC, rcWithManagedFields := readAndAnnotateReplicationController(t, filenameRCManagedFieldsLA)
    pathRC := "/namespaces/test/replicationcontrollers/" + nameRC

    tf := cmdtesting.NewTestFactory().WithNamespace("test")
    defer tf.Cleanup()

    // The object after patch should be equivalent to the output of
    // csaupgrade.UpgradeManagedFields
    //
    // Parse the object into unstructured, apply the patch
    postPatchObj := &unstructured.Unstructured{}
    err := json.Unmarshal(rcWithManagedFields, &postPatchObj.Object)
    require.NoError(t, err)

    expectedPatch, err := csaupgrade.UpgradeManagedFieldsPatch(postPatchObj, sets.New(FieldManagerClientSideApply), "kubectl")
    require.NoError(t, err)

    err = csaupgrade.UpgradeManagedFields(postPatchObj, sets.New("kubectl-client-side-apply"), "kubectl")
    require.NoError(t, err)

    postPatchData, err := json.Marshal(postPatchObj)
    require.NoError(t, err)

    patches := 0
    targetPatches := 2
    applies := 0

    tf.UnstructuredClient = &fake.RESTClient{
        NegotiatedSerializer: resource.UnstructuredPlusDefaultContentConfig().NegotiatedSerializer,
        Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
            switch p, m := req.URL.Path, req.Method; {
            case p == pathRC && m == "GET":
                // A fetch is performed during the retry loop for the patch;
                // keep returning the unchanged data.
                if patches < targetPatches {
                    bodyRC := ioutil.NopCloser(bytes.NewReader(rcWithManagedFields))
                    return &http.Response{StatusCode: http.StatusOK, Header: cmdtesting.DefaultHeader(), Body: bodyRC}, nil
                }

                t.Fatalf("should not do a fetch in serverside-apply")
                return nil, nil
            case p == pathRC && m == "PATCH":
                if got := req.Header.Get("Content-Type"); got == string(types.ApplyPatchType) {
                    defer func() {
                        applies += 1
                    }()

                    switch applies {
                    case 0:
                        // Initial apply.
                        // Just return the same object, but with managed fields.
                        bodyRC := ioutil.NopCloser(bytes.NewReader(rcWithManagedFields))
                        return &http.Response{StatusCode: http.StatusOK, Header: cmdtesting.DefaultHeader(), Body: bodyRC}, nil
                    case 1:
                        // The second apply should include only the last-applied
                        // annotation, unmodified. Return a new object.
                        // NOTE: on a real server this would also modify the
                        // managed fields; here we just return the same object
                        // unmodified. It is not important for this test that
                        // the last-applied annotation appear in the new field
                        // manager's response, only that the client asks the
                        // server to record it.
                        bodyRC := ioutil.NopCloser(bytes.NewReader(rcWithManagedFields))
                        return &http.Response{StatusCode: http.StatusOK, Header: cmdtesting.DefaultHeader(), Body: bodyRC}, nil
                    case 2, 3:
                        // Before the last apply we have formed our JSONPatch,
                        // so reply now with the upgraded object.
                        bodyRC := ioutil.NopCloser(bytes.NewReader(postPatchData))
                        return &http.Response{StatusCode: http.StatusOK, Header: cmdtesting.DefaultHeader(), Body: bodyRC}, nil
                    default:
                        require.Fail(t, "sent more apply requests than expected")
                        return &http.Response{StatusCode: http.StatusBadRequest, Header: cmdtesting.DefaultHeader()}, nil
                    }
                } else if got == string(types.JSONPatchType) {
                    defer func() {
                        patches += 1
                    }()

                    // Require that the patch is equal to what is expected.
                    body, err := ioutil.ReadAll(req.Body)
                    require.NoError(t, err)
                    require.Equal(t, expectedPatch, body)

                    switch patches {
                    case targetPatches - 1:
                        bodyRC := ioutil.NopCloser(bytes.NewReader(postPatchData))
                        return &http.Response{StatusCode: http.StatusOK, Header: cmdtesting.DefaultHeader(), Body: bodyRC}, nil
                    default:
                        // Return conflict until the client has retried
                        // enough times.
                        return &http.Response{StatusCode: http.StatusConflict, Header: cmdtesting.DefaultHeader()}, nil
                    }
                } else {
                    t.Fatalf("unexpected content-type: %s\n", got)
                    return nil, nil
                }

            default:
                t.Fatalf("unexpected request: %#v\n%#v", req.URL, req)
                return nil, nil
            }
        }),
    }
    tf.OpenAPISchemaFunc = FakeOpenAPISchema.OpenAPISchemaFn
    tf.FakeOpenAPIGetter = FakeOpenAPISchema.OpenAPIGetter
    tf.ClientConfigVal = cmdtesting.DefaultClientConfig()

    ioStreams, _, buf, errBuf := genericclioptions.NewTestIOStreams()
    cmd := NewCmdApply("kubectl", tf, ioStreams)
    cmd.Flags().Set("filename", filenameRC)
    cmd.Flags().Set("output", "yaml")
    cmd.Flags().Set("server-side", "true")
    cmd.Flags().Set("show-managed-fields", "true")
    cmd.Run(cmd, []string{})

    // The JSONPatch should have been attempted exactly the given number of times.
    require.Equal(t, targetPatches, patches, "should retry as many times as a conflict was returned")
    require.Equal(t, 3, applies, "should perform specified # of apply calls upon migration")
    require.Empty(t, errBuf.String())

    // Ensure that in the future there will be no migrations necessary
    // (by showing migration is a no-op).

    rc := &corev1.ReplicationController{}
    if err := runtime.DecodeInto(codec, buf.Bytes(), rc); err != nil {
        t.Fatal(err)
    }

    upgradedRC := rc.DeepCopyObject()
    err = csaupgrade.UpgradeManagedFields(upgradedRC, sets.New("kubectl-client-side-apply"), "kubectl")
    require.NoError(t, err)
    require.NotEmpty(t, rc.ManagedFields)
    require.Equal(t, rc, upgradedRC, "upgrading should be no-op in future")

    // Apply the upgraded object.
    // Expect only a single PATCH call to the apiserver.
    ioStreams, _, _, errBuf = genericclioptions.NewTestIOStreams()
    cmd = NewCmdApply("kubectl", tf, ioStreams)
    cmd.Flags().Set("filename", filenameRC)
    cmd.Flags().Set("output", "yaml")
    cmd.Flags().Set("server-side", "true")
    cmd.Flags().Set("show-managed-fields", "true")
    cmd.Run(cmd, []string{})

    require.Empty(t, errBuf)
    require.Equal(t, 4, applies, "only a single additional call to server-side apply should have been performed")
    require.Equal(t, targetPatches, patches, "no more json patches should have been needed")
}

func TestApplyObjectOutput(t *testing.T) {
    cmdtesting.InitTestErrorHandler(t)
    nameRC, currentRC := readAndAnnotateReplicationController(t, filenameRC)

staging/src/k8s.io/kubectl/testdata/apply/rc-managedfields-lastapplied.yaml (new file, 102 lines)
@@ -0,0 +1,102 @@
apiVersion: v1
kind: ReplicationController
metadata:
  annotations:
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"v1","kind":"ReplicationController","metadata":{"annotations":{},"labels":{"name":"test-rc"},"name":"test-rc","namespace":"test"},"spec":{"replicas":1,"template":{"metadata":{"labels":{"name":"test-rc"}},"spec":{"containers":[{"image":"nginx","name":"test-rc","ports":[{"containerPort":80}]}]}}}}
  creationTimestamp: "2022-10-06T20:46:22Z"
  generation: 1
  labels:
    name: test-rc
  managedFields:
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:status:
        f:fullyLabeledReplicas: {}
        f:observedGeneration: {}
        f:replicas: {}
    manager: kube-controller-manager
    operation: Update
    subresource: status
    time: "2022-10-06T20:46:22Z"
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:annotations:
          .: {}
          f:kubectl.kubernetes.io/last-applied-configuration: {}
        f:labels:
          .: {}
          f:name: {}
      f:spec:
        f:replicas: {}
        f:selector: {}
        f:template:
          .: {}
          f:metadata:
            .: {}
            f:creationTimestamp: {}
            f:labels:
              .: {}
              f:name: {}
          f:spec:
            .: {}
            f:containers:
              .: {}
              k:{"name":"test-rc"}:
                .: {}
                f:image: {}
                f:imagePullPolicy: {}
                f:name: {}
                f:ports:
                  .: {}
                  k:{"containerPort":80,"protocol":"TCP"}:
                    .: {}
                    f:containerPort: {}
                    f:protocol: {}
                f:resources: {}
                f:terminationMessagePath: {}
                f:terminationMessagePolicy: {}
            f:dnsPolicy: {}
            f:restartPolicy: {}
            f:schedulerName: {}
            f:securityContext: {}
            f:terminationGracePeriodSeconds: {}
    manager: kubectl-client-side-apply
    operation: Update
    time: "2022-10-06T20:46:22Z"
  name: test-rc
  namespace: test
  resourceVersion: "290"
  uid: ad68b34c-d6c5-4d09-b50d-ef49c109778d
spec:
  replicas: 1
  selector:
    name: test-rc
  template:
    metadata:
      creationTimestamp: null
      labels:
        name: test-rc
    spec:
      containers:
      - image: nginx
        imagePullPolicy: Always
        name: test-rc
        ports:
        - containerPort: 80
          protocol: TCP
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
status:
  fullyLabeledReplicas: 1
  observedGeneration: 1
  replicas: 1

@@ -438,6 +438,165 @@ run_kubectl_server_side_apply_tests() {
  # clean-up
  kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]:?}"

  # Test apply migration

  # Create a configmap in the cluster with client-side apply:
  output_message=$(kubectl "${kube_flags[@]:?}" apply --server-side=false -f - << __EOF__
apiVersion: v1
kind: ConfigMap
metadata:
  name: test
data:
  key: value
  legacy: unused
__EOF__
)

  kube::test::if_has_string "${output_message}" 'configmap/test created'

  # Apply the same manifest with the --server-side flag, as per the
  # server-side-apply migration instructions:
  output_message=$(kubectl "${kube_flags[@]:?}" apply --server-side -f - << __EOF__
apiVersion: v1
kind: ConfigMap
metadata:
  name: test
data:
  key: value
  legacy: unused
__EOF__
)

  kube::test::if_has_string "${output_message}" 'configmap/test serverside-applied'

  # Apply the object a third time using server-side apply, but this time
  # removing a field and adding a field. Old versions of kubectl would not
  # allow the field to be removed.
  output_message=$(kubectl "${kube_flags[@]:?}" apply --server-side -f - << __EOF__
apiVersion: v1
kind: ConfigMap
metadata:
  name: test
data:
  key: value
  ssaKey: ssaValue
__EOF__
)

  kube::test::if_has_string "${output_message}" 'configmap/test serverside-applied'

  # Fetch the object and check that it does not have the field 'legacy':
  kube::test::get_object_assert "configmap test" "{{ .data.key }}" 'value'
  kube::test::get_object_assert "configmap test" "{{ .data.legacy }}" '<no value>'
  kube::test::get_object_assert "configmap test" "{{ .data.ssaKey }}" 'ssaValue'

  # Client-side apply the object after it has been server-side applied and
  # had a field removed. This adds a new key and also removes the field
  # added above by server-side apply.
  output_message=$(kubectl "${kube_flags[@]:?}" apply --server-side=false -f - << __EOF__
apiVersion: v1
kind: ConfigMap
metadata:
  name: test
data:
  key: value
  newKey: newValue
__EOF__
)

  kube::test::get_object_assert "configmap test" "{{ .data.key }}" 'value'
  kube::test::get_object_assert "configmap test" "{{ .data.newKey }}" 'newValue'
  kube::test::get_object_assert "configmap test" "{{ .data.ssaKey }}" '<no value>'

  # Server-side apply the object without the field added above by client-side
  # apply. Show that the object on the server has had that field removed.
  output_message=$(kubectl "${kube_flags[@]:?}" apply --server-side -f - << __EOF__
apiVersion: v1
kind: ConfigMap
metadata:
  name: test
data:
  key: value
  ssaKey: ssaValue
__EOF__
)

  # Fetch the object and check that it does not have the field 'newKey':
  kube::test::get_object_assert "configmap test" "{{ .data.key }}" 'value'
  kube::test::get_object_assert "configmap test" "{{ .data.newKey }}" '<no value>'
  kube::test::get_object_assert "configmap test" "{{ .data.ssaKey }}" 'ssaValue'

  # Show that kubectl diff --server-side also functions after a migration:
  output_message=$(kubectl diff "${kube_flags[@]:?}" --server-side -f - << __EOF__ || test $? -eq 1
apiVersion: v1
kind: ConfigMap
metadata:
  name: test
  annotations:
    newAnnotation: newValue
data:
  key: value
  newKey: newValue
__EOF__
)
  kube::test::if_has_string "${output_message}" '+ newKey: newValue'
  kube::test::if_has_string "${output_message}" '+ newAnnotation: newValue'

  # clean-up
  kubectl "${kube_flags[@]:?}" delete configmap test

  ## Test to show that supplying a custom field manager to kubectl apply
  # does not prevent migration from client-side apply to server-side apply
  output_message=$(kubectl "${kube_flags[@]:?}" apply --server-side=false --field-manager=myfm -f - << __EOF__
apiVersion: v1
data:
  key: value1
  legacy: value2
kind: ConfigMap
metadata:
  name: ssa-test
__EOF__
)
  kube::test::if_has_string "$output_message" "configmap/ssa-test created"
  kube::test::get_object_assert "configmap ssa-test" "{{ .data.key }}" 'value1'

  # Show that after client-side applying with a custom field manager, the
  # last-applied annotation is present:
  grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get configmap ssa-test -o yaml "${kube_flags[@]:?}")"

  # Migrate to server-side apply by applying the same object:
  output_message=$(kubectl "${kube_flags[@]:?}" apply --server-side=true --field-manager=myfm -f - << __EOF__
apiVersion: v1
data:
  key: value1
  legacy: value2
kind: ConfigMap
metadata:
  name: ssa-test
__EOF__
)
  kube::test::if_has_string "$output_message" "configmap/ssa-test serverside-applied"
  kube::test::get_object_assert "configmap ssa-test" "{{ .data.key }}" 'value1'

  # Show that after migrating to SSA with a custom field manager, the
  # last-applied annotation is dropped:
  ! grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get configmap ssa-test -o yaml "${kube_flags[@]:?}")" || exit 1

  # Change a field without any conflict, and also drop a field in the
  # same patch:
  output_message=$(kubectl "${kube_flags[@]:?}" apply --server-side=true --field-manager=myfm -f - << __EOF__
apiVersion: v1
data:
  key: value2
kind: ConfigMap
metadata:
  name: ssa-test
__EOF__
)
  kube::test::if_has_string "$output_message" "configmap/ssa-test serverside-applied"
  kube::test::get_object_assert "configmap ssa-test" "{{ .data.key }}" 'value2'
  kube::test::get_object_assert "configmap ssa-test" "{{ .data.legacy }}" '<no value>'

  # Clean up
  kubectl delete configmap ssa-test

  ## kubectl apply dry-run on CR
  # Create CRD
  kubectl "${kube_flags_with_token[@]}" create -f - << __EOF__