From 360c348d0eb5be8c7c9720d5bfda16dbe04e8d15 Mon Sep 17 00:00:00 2001 From: "Julian V. Modesto" Date: Wed, 4 Mar 2020 22:04:01 -0500 Subject: [PATCH] Set kubectl field manager names --- .../k8s.io/cli-runtime/pkg/resource/helper.go | 20 +++++++++ .../kubectl/pkg/cmd/annotate/annotate.go | 5 ++- .../src/k8s.io/kubectl/pkg/cmd/apply/apply.go | 44 ++++++++++++++++--- .../pkg/cmd/apply/apply_edit_last_applied.go | 1 + .../k8s.io/kubectl/pkg/cmd/apply/patcher.go | 2 +- .../kubectl/pkg/cmd/autoscale/autoscale.go | 5 +++ .../k8s.io/kubectl/pkg/cmd/create/create.go | 13 +++++- .../pkg/cmd/create/create_clusterrole.go | 5 +++ .../cmd/create/create_clusterrolebinding.go | 1 + .../pkg/cmd/create/create_configmap.go | 1 + .../kubectl/pkg/cmd/create/create_cronjob.go | 5 +++ .../pkg/cmd/create/create_deployment.go | 1 + .../kubectl/pkg/cmd/create/create_job.go | 6 ++- .../pkg/cmd/create/create_namespace.go | 1 + .../kubectl/pkg/cmd/create/create_pdb.go | 1 + .../pkg/cmd/create/create_priorityclass.go | 1 + .../kubectl/pkg/cmd/create/create_quota.go | 1 + .../kubectl/pkg/cmd/create/create_role.go | 6 ++- .../pkg/cmd/create/create_rolebinding.go | 1 + .../kubectl/pkg/cmd/create/create_secret.go | 3 ++ .../kubectl/pkg/cmd/create/create_service.go | 6 ++- .../pkg/cmd/create/create_serviceaccount.go | 1 + .../src/k8s.io/kubectl/pkg/cmd/diff/diff.go | 7 ++- .../src/k8s.io/kubectl/pkg/cmd/edit/edit.go | 2 +- .../k8s.io/kubectl/pkg/cmd/expose/expose.go | 4 ++ .../src/k8s.io/kubectl/pkg/cmd/label/label.go | 5 ++- .../src/k8s.io/kubectl/pkg/cmd/patch/patch.go | 5 ++- .../k8s.io/kubectl/pkg/cmd/replace/replace.go | 8 +++- .../kubectl/pkg/cmd/rollout/rollout_pause.go | 7 ++- .../pkg/cmd/rollout/rollout_restart.go | 7 ++- .../kubectl/pkg/cmd/rollout/rollout_resume.go | 7 ++- staging/src/k8s.io/kubectl/pkg/cmd/run/run.go | 3 ++ .../src/k8s.io/kubectl/pkg/cmd/set/set_env.go | 5 ++- .../k8s.io/kubectl/pkg/cmd/set/set_image.go | 3 ++ .../kubectl/pkg/cmd/set/set_resources.go | 3 ++ .../kubectl/pkg/cmd/set/set_selector.go | 3 ++ .../kubectl/pkg/cmd/set/set_serviceaccount.go | 3 ++ .../k8s.io/kubectl/pkg/cmd/set/set_subject.go | 3 ++ .../src/k8s.io/kubectl/pkg/cmd/taint/taint.go | 3 ++ .../pkg/cmd/util/editor/editoptions.go | 16 +++++-- .../k8s.io/kubectl/pkg/cmd/util/helpers.go | 5 ++- test/cmd/apply.sh | 10 +++++ test/cmd/apps.sh | 15 +++++++ test/cmd/core.sh | 33 ++++++++++++++ test/cmd/diff.sh | 26 ++++++++++- test/cmd/node-management.sh | 3 ++ 46 files changed, 287 insertions(+), 29 deletions(-) diff --git a/staging/src/k8s.io/cli-runtime/pkg/resource/helper.go b/staging/src/k8s.io/cli-runtime/pkg/resource/helper.go index beebd805440..e01a8af0271 100644 --- a/staging/src/k8s.io/cli-runtime/pkg/resource/helper.go +++ b/staging/src/k8s.io/cli-runtime/pkg/resource/helper.go @@ -46,6 +46,10 @@ type Helper struct { // and on resources that support dry-run. If the apiserver or the resource // does not support dry-run, then the change will be persisted to storage. ServerDryRun bool + + // FieldManager is the name associated with the actor or entity that is making + // changes. + FieldManager string } // NewHelper creates a Helper from a ResourceMapping @@ -64,6 +68,13 @@ func (m *Helper) DryRun(dryRun bool) *Helper { return m } +// WithFieldManager sets the field manager option to indicate the actor or entity +// that is making changes in a create or update operation. 
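+//
+// A minimal usage sketch (client, mapping, namespace, and obj are assumed
+// to be supplied by the caller):
+//
+//	obj, err := resource.NewHelper(client, mapping).
+//		DryRun(serverDryRun).
+//		WithFieldManager("example-manager").
+//		Create(namespace, true, obj)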
+func (m *Helper) WithFieldManager(fieldManager string) *Helper { + m.FieldManager = fieldManager + return m +} + func (m *Helper) Get(namespace, name string, export bool) (runtime.Object, error) { req := m.RESTClient.Get(). NamespaceIfScoped(namespace, m.NamespaceScoped). @@ -141,6 +152,9 @@ func (m *Helper) CreateWithOptions(namespace string, modify bool, obj runtime.Ob if m.ServerDryRun { options.DryRun = []string{metav1.DryRunAll} } + if m.FieldManager != "" { + options.FieldManager = m.FieldManager + } if modify { // Attempt to version the object based on client logic. version, err := metadataAccessor.ResourceVersion(obj) @@ -174,6 +188,9 @@ func (m *Helper) Patch(namespace, name string, pt types.PatchType, data []byte, if m.ServerDryRun { options.DryRun = []string{metav1.DryRunAll} } + if m.FieldManager != "" { + options.FieldManager = m.FieldManager + } return m.RESTClient.Patch(pt). NamespaceIfScoped(namespace, m.NamespaceScoped). Resource(m.Resource). @@ -190,6 +207,9 @@ func (m *Helper) Replace(namespace, name string, overwrite bool, obj runtime.Obj if m.ServerDryRun { options.DryRun = []string{metav1.DryRunAll} } + if m.FieldManager != "" { + options.FieldManager = m.FieldManager + } // Attempt to version the object based on client logic. version, err := metadataAccessor.ResourceVersion(obj) diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go b/staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go index ed4a0be2ba3..2140bd60a6c 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go @@ -55,6 +55,7 @@ type AnnotateOptions struct { local bool dryRunStrategy cmdutil.DryRunStrategy dryRunVerifier *resource.DryRunVerifier + fieldManager string all bool resourceVersion string selector string @@ -150,6 +151,7 @@ func NewCmdAnnotate(parent string, f cmdutil.Factory, ioStreams genericclioption usage := "identifying the resource to update the annotation" cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, usage) cmdutil.AddDryRunFlag(cmd) + cmdutil.AddFieldManagerFlagVar(cmd, &o.fieldManager, "kubectl-annotate") return cmd } @@ -329,7 +331,8 @@ func (o AnnotateOptions) RunAnnotate() error { } helper := resource. NewHelper(client, mapping). - DryRun(o.dryRunStrategy == cmdutil.DryRunServer) + DryRun(o.dryRunStrategy == cmdutil.DryRunServer). 
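+		// o.fieldManager defaults to "kubectl-annotate" and may be overridden
+		// with --field-manager; the same pattern repeats across the commands below.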
+ WithFieldManager(o.fieldManager) if createdPatch { outputObj, err = helper.Patch(namespace, name, types.MergePatchType, patchBytes, nil) diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go b/staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go index edae087f3f8..6beb4e127d7 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go @@ -196,6 +196,7 @@ func NewCmdApply(baseName string, f cmdutil.Factory, ioStreams genericclioptions cmd.Flags().MarkHidden("server-dry-run") cmdutil.AddDryRunFlag(cmd) cmdutil.AddServerSideApplyFlags(cmd) + cmdutil.AddFieldManagerFlagVar(cmd, &o.FieldManager, FieldManagerClientSideApply) // apply subcommands cmd.AddCommand(NewCmdApplyViewLastApplied(f, ioStreams)) @@ -223,7 +224,7 @@ func (o *ApplyOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error { return err } o.DryRunVerifier = resource.NewDryRunVerifier(o.DynamicClient, discoveryClient) - o.FieldManager = cmdutil.GetFieldManagerFlag(cmd) + o.FieldManager = GetApplyFieldManagerFlag(cmd, o.ServerSideApply) if o.ForceConflicts && !o.ServerSideApply { return fmt.Errorf("--force-conflicts only works with --server-side") @@ -414,11 +415,10 @@ func (o *ApplyOptions) applyOneObject(info *resource.Info) error { } options := metav1.PatchOptions{ - Force: &o.ForceConflicts, - FieldManager: o.FieldManager, + Force: &o.ForceConflicts, } - - helper := resource.NewHelper(info.Client, info.Mapping) + helper := resource.NewHelper(info.Client, info.Mapping). + WithFieldManager(o.FieldManager) if o.DryRunStrategy == cmdutil.DryRunServer { if err := o.DryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err != nil { return err @@ -495,7 +495,8 @@ See http://k8s.io/docs/reference/using-api/api-concepts/#conflicts`, err) if o.DryRunStrategy != cmdutil.DryRunClient { // Then create the resource and skip the three-way merge - helper := resource.NewHelper(info.Client, info.Mapping) + helper := resource.NewHelper(info.Client, info.Mapping). + WithFieldManager(o.FieldManager) if o.DryRunStrategy == cmdutil.DryRunServer { if err := o.DryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err != nil { return cmdutil.AddSourceToErr("creating", info.Source, err) @@ -670,3 +671,34 @@ func (o *ApplyOptions) PrintAndPrunePostProcessor() func() error { return nil } } + +const ( + // FieldManagerClientSideApply is the default client-side apply field manager. + // + // The default field manager is not `kubectl-apply` to distinguish from + // server-side apply. + FieldManagerClientSideApply = "kubectl-client-side-apply" + // The default server-side apply field manager is `kubectl` + // instead of a field manager like `kubectl-server-side-apply` + // for backward compatibility to not conflict with old versions + // of kubectl server-side apply where `kubectl` has already been the field manager. + fieldManagerServerSideApply = "kubectl" +) + +// GetApplyFieldManagerFlag gets the field manager for kubectl apply +// if it is not set. +// +// The default field manager is not `kubectl-apply` to distinguish between +// client-side and server-side apply. 
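+//
+// For example, a sketch of the defaults this function yields:
+//
+//	GetApplyFieldManagerFlag(cmd, false) // "kubectl-client-side-apply", unless --field-manager is set
+//	GetApplyFieldManagerFlag(cmd, true)  // "kubectl", unless --field-manager is set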
+func GetApplyFieldManagerFlag(cmd *cobra.Command, serverSide bool) string { + // The field manager flag was set + if cmd.Flag("field-manager").Changed { + return cmdutil.GetFlagString(cmd, "field-manager") + } + + if serverSide { + return fieldManagerServerSideApply + } + + return FieldManagerClientSideApply +} diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_edit_last_applied.go b/staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_edit_last_applied.go index b46dfa2b2e6..37e54c1af0b 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_edit_last_applied.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_edit_last_applied.go @@ -83,6 +83,7 @@ func NewCmdApplyEditLastApplied(f cmdutil.Factory, ioStreams genericclioptions.I cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, usage) cmd.Flags().BoolVar(&o.WindowsLineEndings, "windows-line-endings", o.WindowsLineEndings, "Defaults to the line ending native to your platform.") + cmdutil.AddFieldManagerFlagVar(cmd, &o.FieldManager, FieldManagerClientSideApply) return cmd } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/apply/patcher.go b/staging/src/k8s.io/kubectl/pkg/cmd/apply/patcher.go index 5c57aa5bce2..c634fae20ae 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/apply/patcher.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/apply/patcher.go @@ -82,7 +82,7 @@ func newPatcher(o *ApplyOptions, info *resource.Info) (*Patcher, error) { return &Patcher{ Mapping: info.Mapping, - Helper: resource.NewHelper(info.Client, info.Mapping), + Helper: resource.NewHelper(info.Client, info.Mapping).WithFieldManager(o.FieldManager), DynamicClient: o.DynamicClient, Overwrite: o.Overwrite, BackOff: clockwork.NewRealClock(), diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go b/staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go index 50d48c7387c..ed60f37162b 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go @@ -79,6 +79,7 @@ type AutoscaleOptions struct { dryRunVerifier *resource.DryRunVerifier builder *resource.Builder generatorFunc func(string, *meta.RESTMapping) (generate.StructuredGenerator, error) + fieldManager string HPAClient autoscalingv1client.HorizontalPodAutoscalersGetter scaleKindResolver scale.ScaleKindResolver @@ -131,6 +132,7 @@ func NewCmdAutoscale(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) * cmdutil.AddDryRunFlag(cmd) cmdutil.AddFilenameOptionFlags(cmd, o.FilenameOptions, "identifying the resource to autoscale.") cmdutil.AddApplyAnnotationFlags(cmd) + cmdutil.AddFieldManagerFlagVar(cmd, &o.fieldManager, "kubectl-autoscale") return cmd } @@ -272,6 +274,9 @@ func (o *AutoscaleOptions) Run() error { } createOptions := metav1.CreateOptions{} + if o.fieldManager != "" { + createOptions.FieldManager = o.fieldManager + } if o.dryRunStrategy == cmdutil.DryRunServer { if err := o.dryRunVerifier.HasSupport(hpa.GroupVersionKind()); err != nil { return err diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/create/create.go b/staging/src/k8s.io/kubectl/pkg/cmd/create/create.go index 71a0dc4016e..a6166187398 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/create/create.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/create/create.go @@ -54,6 +54,8 @@ type CreateOptions struct { DryRunStrategy cmdutil.DryRunStrategy DryRunVerifier *resource.DryRunVerifier + fieldManager string + FilenameOptions resource.FilenameOptions Selector string EditBeforeCreate bool @@ -130,6 +132,7 @@ func NewCmdCreate(f cmdutil.Factory, ioStreams 
genericclioptions.IOStreams) *cob cmdutil.AddDryRunFlag(cmd) cmd.Flags().StringVarP(&o.Selector, "selector", "l", o.Selector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)") cmd.Flags().StringVar(&o.Raw, "raw", o.Raw, "Raw URI to POST to the server. Uses the transport specified by the kubeconfig file.") + cmdutil.AddFieldManagerFlagVar(cmd, &o.fieldManager, "kubectl-create") o.PrintFlags.AddFlags(cmd) @@ -233,7 +236,7 @@ func (o *CreateOptions) RunCreate(f cmdutil.Factory, cmd *cobra.Command) error { } if o.EditBeforeCreate { - return RunEditOnCreate(f, o.PrintFlags, o.RecordFlags, o.IOStreams, cmd, &o.FilenameOptions) + return RunEditOnCreate(f, o.PrintFlags, o.RecordFlags, o.IOStreams, cmd, &o.FilenameOptions, o.fieldManager) } schema, err := f.Validator(cmdutil.GetFlagBool(cmd, "validate")) if err != nil { @@ -281,6 +284,7 @@ func (o *CreateOptions) RunCreate(f cmdutil.Factory, cmd *cobra.Command) error { obj, err := resource. NewHelper(info.Client, info.Mapping). DryRun(o.DryRunStrategy == cmdutil.DryRunServer). + WithFieldManager(o.fieldManager). Create(info.Namespace, true, info.Object) if err != nil { return cmdutil.AddSourceToErr("creating", info.Source, err) @@ -302,7 +306,7 @@ func (o *CreateOptions) RunCreate(f cmdutil.Factory, cmd *cobra.Command) error { } // RunEditOnCreate performs edit on creation -func RunEditOnCreate(f cmdutil.Factory, printFlags *genericclioptions.PrintFlags, recordFlags *genericclioptions.RecordFlags, ioStreams genericclioptions.IOStreams, cmd *cobra.Command, options *resource.FilenameOptions) error { +func RunEditOnCreate(f cmdutil.Factory, printFlags *genericclioptions.PrintFlags, recordFlags *genericclioptions.RecordFlags, ioStreams genericclioptions.IOStreams, cmd *cobra.Command, options *resource.FilenameOptions, fieldManager string) error { editOptions := editor.NewEditOptions(editor.EditBeforeCreateMode, ioStreams) editOptions.FilenameOptions = *options editOptions.ValidateOptions = cmdutil.ValidateOptions{ @@ -311,6 +315,7 @@ func RunEditOnCreate(f cmdutil.Factory, printFlags *genericclioptions.PrintFlags editOptions.PrintFlags = printFlags editOptions.ApplyAnnotation = cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag) editOptions.RecordFlags = recordFlags + editOptions.FieldManager = "kubectl-create" err := editOptions.Complete(f, []string{}, cmd) if err != nil { @@ -343,6 +348,7 @@ type CreateSubcommandOptions struct { DryRunStrategy cmdutil.DryRunStrategy DryRunVerifier *resource.DryRunVerifier CreateAnnotation bool + FieldManager string Namespace string EnforceNamespace bool @@ -446,6 +452,9 @@ func (o *CreateSubcommandOptions) Run() error { o.Namespace = "" } createOptions := metav1.CreateOptions{} + if o.FieldManager != "" { + createOptions.FieldManager = o.FieldManager + } if o.DryRunStrategy == cmdutil.DryRunServer { if err := o.DryRunVerifier.HasSupport(mapping.GroupVersionKind); err != nil { return err diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrole.go b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrole.go index a884b27bc42..113f8d8eb2a 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrole.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrole.go @@ -64,6 +64,7 @@ type CreateClusterRoleOptions struct { *CreateRoleOptions NonResourceURLs []string AggregationRule map[string]string + FieldManager string } // NewCmdCreateClusterRole initializes and returns new ClusterRoles command @@ -95,6 +96,7 @@ func 
NewCmdCreateClusterRole(f cmdutil.Factory, ioStreams genericclioptions.IOSt cmd.Flags().StringSlice("resource", []string{}, "Resource that the rule applies to") cmd.Flags().StringArrayVar(&c.ResourceNames, "resource-name", c.ResourceNames, "Resource in the white list that the rule applies to, repeat this flag for multiple items") cmd.Flags().Var(cliflag.NewMapStringString(&c.AggregationRule), "aggregation-rule", "An aggregation label selector for combining ClusterRoles.") + cmdutil.AddFieldManagerFlagVar(cmd, &c.FieldManager, "kubectl-create") return cmd } @@ -202,6 +204,9 @@ func (c *CreateClusterRoleOptions) RunCreateRole() error { // Create ClusterRole. if c.DryRunStrategy != cmdutil.DryRunClient { createOptions := metav1.CreateOptions{} + if c.FieldManager != "" { + createOptions.FieldManager = c.FieldManager + } if c.DryRunStrategy == cmdutil.DryRunServer { if err := c.DryRunVerifier.HasSupport(clusterRole.GroupVersionKind()); err != nil { return err diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go index 544b091dcf0..0cd5661d7d8 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go @@ -69,6 +69,7 @@ func NewCmdCreateClusterRoleBinding(f cmdutil.Factory, ioStreams genericclioptio cmd.Flags().StringArray("user", []string{}, "Usernames to bind to the clusterrole") cmd.Flags().StringArray("group", []string{}, "Groups to bind to the clusterrole") cmd.Flags().StringArray("serviceaccount", []string{}, "Service accounts to bind to the clusterrole, in the format :") + cmdutil.AddFieldManagerFlagVar(cmd, &o.CreateSubcommandOptions.FieldManager, "kubectl-create") return cmd } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_configmap.go b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_configmap.go index ebb669f6462..b93c87db445 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_configmap.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_configmap.go @@ -90,6 +90,7 @@ func NewCmdCreateConfigMap(f cmdutil.Factory, ioStreams genericclioptions.IOStre cmd.Flags().StringArray("from-literal", []string{}, "Specify a key and literal value to insert in configmap (i.e. mykey=somevalue)") cmd.Flags().String("from-env-file", "", "Specify the path to a file to read lines of key=val pairs to create a configmap (i.e. 
a Docker .env file).") cmd.Flags().Bool("append-hash", false, "Append a hash of the configmap to its name.") + cmdutil.AddFieldManagerFlagVar(cmd, &options.CreateSubcommandOptions.FieldManager, "kubectl-create") return cmd } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_cronjob.go b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_cronjob.go index 3e0258d6452..af172b77953 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_cronjob.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_cronjob.go @@ -66,6 +66,7 @@ type CreateCronJobOptions struct { DryRunStrategy cmdutil.DryRunStrategy DryRunVerifier *resource.DryRunVerifier Builder *resource.Builder + FieldManager string genericclioptions.IOStreams } @@ -101,6 +102,7 @@ func NewCmdCreateCronJob(f cmdutil.Factory, ioStreams genericclioptions.IOStream cmd.Flags().StringVar(&o.Image, "image", o.Image, "Image name to run.") cmd.Flags().StringVar(&o.Schedule, "schedule", o.Schedule, "A schedule in the Cron format the job should be run with.") cmd.Flags().StringVar(&o.Restart, "restart", o.Restart, "job's restart policy. supported values: OnFailure, Never") + cmdutil.AddFieldManagerFlagVar(cmd, &o.FieldManager, "kubectl-create") return cmd } @@ -174,6 +176,9 @@ func (o *CreateCronJobOptions) Run() error { if o.DryRunStrategy != cmdutil.DryRunClient { createOptions := metav1.CreateOptions{} + if o.FieldManager != "" { + createOptions.FieldManager = o.FieldManager + } if o.DryRunStrategy == cmdutil.DryRunServer { if err := o.DryRunVerifier.HasSupport(cronjob.GroupVersionKind()); err != nil { return err diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_deployment.go b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_deployment.go index 1718d75daa3..6dc26726877 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_deployment.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_deployment.go @@ -69,6 +69,7 @@ func NewCmdCreateDeployment(f cmdutil.Factory, ioStreams genericclioptions.IOStr cmdutil.AddGeneratorFlags(cmd, "") cmd.Flags().StringSlice("image", []string{}, "Image name to run.") cmd.MarkFlagRequired("image") + cmdutil.AddFieldManagerFlagVar(cmd, &options.CreateSubcommandOptions.FieldManager, "kubectl-create") return cmd } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_job.go b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_job.go index 000951bc3c3..a1f015d649e 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_job.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_job.go @@ -67,6 +67,7 @@ type CreateJobOptions struct { DryRunStrategy cmdutil.DryRunStrategy DryRunVerifier *resource.DryRunVerifier Builder *resource.Builder + FieldManager string genericclioptions.IOStreams } @@ -101,7 +102,7 @@ func NewCmdCreateJob(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) * cmdutil.AddDryRunFlag(cmd) cmd.Flags().StringVar(&o.Image, "image", o.Image, "Image name to run.") cmd.Flags().StringVar(&o.From, "from", o.From, "The name of the resource to create a Job from (only cronjob is supported).") - + cmdutil.AddFieldManagerFlagVar(cmd, &o.FieldManager, "kubectl-create") return cmd } @@ -201,6 +202,9 @@ func (o *CreateJobOptions) Run() error { } if o.DryRunStrategy != cmdutil.DryRunClient { createOptions := metav1.CreateOptions{} + if o.FieldManager != "" { + createOptions.FieldManager = o.FieldManager + } if o.DryRunStrategy == cmdutil.DryRunServer { if err := o.DryRunVerifier.HasSupport(job.GroupVersionKind()); err != nil { return err diff --git 
a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go index 7e89c6f9164..49a4b3c962f 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go @@ -65,6 +65,7 @@ func NewCmdCreateNamespace(f cmdutil.Factory, ioStreams genericclioptions.IOStre cmdutil.AddApplyAnnotationFlags(cmd) cmdutil.AddValidateFlags(cmd) cmdutil.AddGeneratorFlags(cmd, generateversioned.NamespaceV1GeneratorName) + cmdutil.AddFieldManagerFlagVar(cmd, &options.CreateSubcommandOptions.FieldManager, "kubectl-create") return cmd } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go index 033a78813d2..569895d11eb 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go @@ -74,6 +74,7 @@ func NewCmdCreatePodDisruptionBudget(f cmdutil.Factory, ioStreams genericcliopti cmd.Flags().String("min-available", "", i18n.T("The minimum number or percentage of available pods this budget requires.")) cmd.Flags().String("max-unavailable", "", i18n.T("The maximum number or percentage of unavailable pods this budget requires.")) cmd.Flags().String("selector", "", i18n.T("A label selector to use for this budget. Only equality-based selector requirements are supported.")) + cmdutil.AddFieldManagerFlagVar(cmd, &options.CreateSubcommandOptions.FieldManager, "kubectl-create") return cmd } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go index bbc5f8006f9..2388b840cd5 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go @@ -77,6 +77,7 @@ func NewCmdCreatePriorityClass(f cmdutil.Factory, ioStreams genericclioptions.IO cmd.Flags().Bool("global-default", false, i18n.T("global-default specifies whether this PriorityClass should be considered as the default priority.")) cmd.Flags().String("description", "", i18n.T("description is an arbitrary string that usually provides guidelines on when this priority class should be used.")) cmd.Flags().String("preemption-policy", "", i18n.T("preemption-policy is the policy for preempting pods with lower priority.")) + cmdutil.AddFieldManagerFlagVar(cmd, &options.CreateSubcommandOptions.FieldManager, "kubectl-create") return cmd } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go index 41741988166..c78546e953b 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go @@ -70,6 +70,7 @@ func NewCmdCreateQuota(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) cmdutil.AddGeneratorFlags(cmd, generateversioned.ResourceQuotaV1GeneratorName) cmd.Flags().String("hard", "", i18n.T("A comma-delimited set of resource=quantity pairs that define a hard limit.")) cmd.Flags().String("scopes", "", i18n.T("A comma-delimited set of quota scopes that must all match each object tracked by the quota.")) + cmdutil.AddFieldManagerFlagVar(cmd, &options.CreateSubcommandOptions.FieldManager, "kubectl-create") return cmd } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go index 7db2de24f19..12db67d8e6a 100644 
--- a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go @@ -134,6 +134,7 @@ type CreateRoleOptions struct { Client clientgorbacv1.RbacV1Interface Mapper meta.RESTMapper PrintObj func(obj runtime.Object) error + FieldManager string genericclioptions.IOStreams } @@ -172,7 +173,7 @@ func NewCmdCreateRole(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) cmd.Flags().StringSliceVar(&o.Verbs, "verb", o.Verbs, "Verb that applies to the resources contained in the rule") cmd.Flags().StringSlice("resource", []string{}, "Resource that the rule applies to") cmd.Flags().StringArrayVar(&o.ResourceNames, "resource-name", o.ResourceNames, "Resource in the white list that the rule applies to, repeat this flag for multiple items") - + cmdutil.AddFieldManagerFlagVar(cmd, &o.FieldManager, "kubectl-create") return cmd } @@ -355,6 +356,9 @@ func (o *CreateRoleOptions) RunCreateRole() error { // Create role. if o.DryRunStrategy != cmdutil.DryRunClient { createOptions := metav1.CreateOptions{} + if o.FieldManager != "" { + createOptions.FieldManager = o.FieldManager + } if o.DryRunStrategy == cmdutil.DryRunServer { if err := o.DryRunVerifier.HasSupport(role.GroupVersionKind()); err != nil { return err diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go index f37bcdf1764..aa680746c52 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go @@ -69,6 +69,7 @@ func NewCmdCreateRoleBinding(f cmdutil.Factory, ioStreams genericclioptions.IOSt cmd.Flags().StringArray("user", []string{}, "Usernames to bind to the role") cmd.Flags().StringArray("group", []string{}, "Groups to bind to the role") cmd.Flags().StringArray("serviceaccount", []string{}, "Service accounts to bind to the role, in the format :") + cmdutil.AddFieldManagerFlagVar(cmd, &o.CreateSubcommandOptions.FieldManager, "kubectl-create") return cmd } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go index 932ec2114f1..10282f6d5be 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go @@ -106,6 +106,7 @@ func NewCmdCreateSecretGeneric(f cmdutil.Factory, ioStreams genericclioptions.IO cmd.Flags().String("from-env-file", "", "Specify the path to a file to read lines of key=val pairs to create a secret (i.e. a Docker .env file).") cmd.Flags().String("type", "", i18n.T("The type of secret to create")) cmd.Flags().Bool("append-hash", false, "Append a hash of the secret to its name.") + cmdutil.AddFieldManagerFlagVar(cmd, &options.CreateSubcommandOptions.FieldManager, "kubectl-create") return cmd } @@ -196,6 +197,7 @@ func NewCmdCreateSecretDockerRegistry(f cmdutil.Factory, ioStreams genericcliopt cmd.Flags().String("docker-server", "https://index.docker.io/v1/", i18n.T("Server location for Docker registry")) cmd.Flags().Bool("append-hash", false, "Append a hash of the secret to its name.") cmd.Flags().StringSlice("from-file", []string{}, "Key files can be specified using their file path, in which case a default name will be given to them, or optionally with a name and file path, in which case the given name will be used. 
Specifying a directory will iterate each named file in the directory that is a valid secret key.") + cmdutil.AddFieldManagerFlagVar(cmd, &options.CreateSubcommandOptions.FieldManager, "kubectl-create") return cmd } @@ -284,6 +286,7 @@ func NewCmdCreateSecretTLS(f cmdutil.Factory, ioStreams genericclioptions.IOStre cmd.Flags().String("cert", "", i18n.T("Path to PEM encoded public key certificate.")) cmd.Flags().String("key", "", i18n.T("Path to private key associated with given certificate.")) cmd.Flags().Bool("append-hash", false, "Append a hash of the secret to its name.") + cmdutil.AddFieldManagerFlagVar(cmd, &options.CreateSubcommandOptions.FieldManager, "kubectl-create") return cmd } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go index 54bd2248607..9111d1003be 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go @@ -19,7 +19,7 @@ package create import ( "github.com/spf13/cobra" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/cli-runtime/pkg/genericclioptions" cmdutil "k8s.io/kubectl/pkg/cmd/util" "k8s.io/kubectl/pkg/generate" @@ -91,6 +91,7 @@ func NewCmdCreateServiceClusterIP(f cmdutil.Factory, ioStreams genericclioptions cmdutil.AddGeneratorFlags(cmd, generateversioned.ServiceClusterIPGeneratorV1Name) addPortFlags(cmd) cmd.Flags().String("clusterip", "", i18n.T("Assign your own ClusterIP or set to 'None' for a 'headless' service (no loadbalancing).")) + cmdutil.AddFieldManagerFlagVar(cmd, &options.CreateSubcommandOptions.FieldManager, "kubectl-create") return cmd } @@ -164,6 +165,7 @@ func NewCmdCreateServiceNodePort(f cmdutil.Factory, ioStreams genericclioptions. 
cmdutil.AddValidateFlags(cmd) cmdutil.AddGeneratorFlags(cmd, generateversioned.ServiceNodePortGeneratorV1Name) cmd.Flags().Int("node-port", 0, "Port used to expose the service on each node in a cluster.") + cmdutil.AddFieldManagerFlagVar(cmd, &options.CreateSubcommandOptions.FieldManager, "kubectl-create") addPortFlags(cmd) return cmd } @@ -234,6 +236,7 @@ func NewCmdCreateServiceLoadBalancer(f cmdutil.Factory, ioStreams genericcliopti cmdutil.AddApplyAnnotationFlags(cmd) cmdutil.AddValidateFlags(cmd) cmdutil.AddGeneratorFlags(cmd, generateversioned.ServiceLoadBalancerGeneratorV1Name) + cmdutil.AddFieldManagerFlagVar(cmd, &options.CreateSubcommandOptions.FieldManager, "kubectl-create") addPortFlags(cmd) return cmd } @@ -310,6 +313,7 @@ func NewCmdCreateServiceExternalName(f cmdutil.Factory, ioStreams genericcliopti addPortFlags(cmd) cmd.Flags().String("external-name", "", i18n.T("External name of service")) cmd.MarkFlagRequired("external-name") + cmdutil.AddFieldManagerFlagVar(cmd, &options.CreateSubcommandOptions.FieldManager, "kubectl-create") return cmd } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go index 1963b24ef63..20cb73dc81e 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go @@ -65,6 +65,7 @@ func NewCmdCreateServiceAccount(f cmdutil.Factory, ioStreams genericclioptions.I cmdutil.AddApplyAnnotationFlags(cmd) cmdutil.AddValidateFlags(cmd) cmdutil.AddGeneratorFlags(cmd, generateversioned.ServiceAccountV1GeneratorName) + cmdutil.AddFieldManagerFlagVar(cmd, &options.CreateSubcommandOptions.FieldManager, "kubectl-create") return cmd } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/diff/diff.go b/staging/src/k8s.io/kubectl/pkg/cmd/diff/diff.go index a90e2222c43..0fabfd5e49d 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/diff/diff.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/diff/diff.go @@ -150,6 +150,7 @@ func NewCmdDiff(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.C usage := "contains the configuration to diff" cmdutil.AddFilenameOptionFlags(cmd, &options.FilenameOptions, usage) cmdutil.AddServerSideApplyFlags(cmd) + cmdutil.AddFieldManagerFlagVar(cmd, &options.FieldManager, apply.FieldManagerClientSideApply) return cmd } @@ -312,7 +313,9 @@ func (obj InfoObject) Live() runtime.Object { // Returns the "merged" object, as it would look like if applied or // created. func (obj InfoObject) Merged() (runtime.Object, error) { - helper := resource.NewHelper(obj.Info.Client, obj.Info.Mapping).DryRun(true) + helper := resource.NewHelper(obj.Info.Client, obj.Info.Mapping). + DryRun(true). 
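+		// Use the same field manager an actual apply would use, so the
+		// server-side dry-run computes managedFields (and surfaces conflicts)
+		// the way a real apply would.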
+ WithFieldManager(obj.FieldManager) if obj.ServerSideApply { data, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj.LocalObj) if err != nil { @@ -444,7 +447,7 @@ func (o *DiffOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error { } o.ServerSideApply = cmdutil.GetServerSideApplyFlag(cmd) - o.FieldManager = cmdutil.GetFieldManagerFlag(cmd) + o.FieldManager = apply.GetApplyFieldManagerFlag(cmd, o.ServerSideApply) o.ForceConflicts = cmdutil.GetForceConflictsFlag(cmd) if o.ForceConflicts && !o.ServerSideApply { return fmt.Errorf("--force-conflicts only works with --server-side") diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/edit/edit.go b/staging/src/k8s.io/kubectl/pkg/cmd/edit/edit.go index 993a2df0b93..f9a6015280c 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/edit/edit.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/edit/edit.go @@ -96,7 +96,7 @@ func NewCmdEdit(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra cmd.Flags().BoolVarP(&o.OutputPatch, "output-patch", "", o.OutputPatch, "Output the patch if the resource is edited.") cmd.Flags().BoolVar(&o.WindowsLineEndings, "windows-line-endings", o.WindowsLineEndings, "Defaults to the line ending native to your platform.") - + cmdutil.AddFieldManagerFlagVar(cmd, &o.FieldManager, "kubectl-edit") cmdutil.AddApplyAnnotationVarFlags(cmd, &o.ApplyAnnotation) return cmd } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go b/staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go index a5bb4787448..2488ff1b63a 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go @@ -91,6 +91,8 @@ type ExposeServiceOptions struct { DryRunVerifier *resource.DryRunVerifier EnforceNamespace bool + fieldManager string + Generators func(string) map[string]generate.Generator CanBeExposed polymorphichelpers.CanBeExposedFunc MapBasedSelectorForObject func(runtime.Object) (string, error) @@ -157,6 +159,7 @@ func NewCmdExposeService(f cmdutil.Factory, streams genericclioptions.IOStreams) cmd.Flags().String("name", "", i18n.T("The name for the newly created object.")) cmd.Flags().String("session-affinity", "", i18n.T("If non-empty, set the session affinity for the service to this; legal values: 'None', 'ClientIP'")) cmd.Flags().String("cluster-ip", "", i18n.T("ClusterIP to be assigned to the service. Leave empty to auto-allocate, or set to 'None' to create a headless service.")) + cmdutil.AddFieldManagerFlagVar(cmd, &o.fieldManager, "kubectl-expose") usage := "identifying the resource to expose a service" cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, usage) @@ -363,6 +366,7 @@ func (o *ExposeServiceOptions) RunExpose(cmd *cobra.Command, args []string) erro actualObject, err := resource. NewHelper(client, objMapping). DryRun(o.DryRunStrategy == cmdutil.DryRunServer). + WithFieldManager(o.fieldManager). 
Create(o.Namespace, false, asUnstructured) if err != nil { return err diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/label/label.go b/staging/src/k8s.io/kubectl/pkg/cmd/label/label.go index 1e0b0eed644..f674d63ede5 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/label/label.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/label/label.go @@ -62,6 +62,7 @@ type LabelOptions struct { selector string fieldSelector string outputFormat string + fieldManager string // results of arg parsing resources []string @@ -150,6 +151,7 @@ func NewCmdLabel(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobr usage := "identifying the resource to update the labels" cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, usage) cmdutil.AddDryRunFlag(cmd) + cmdutil.AddFieldManagerFlagVar(cmd, &o.fieldManager, "kubectl-label") return cmd } @@ -334,7 +336,8 @@ func (o *LabelOptions) RunLabel() error { return err } helper := resource.NewHelper(client, mapping). - DryRun(o.dryRunStrategy == cmdutil.DryRunServer) + DryRun(o.dryRunStrategy == cmdutil.DryRunServer). + WithFieldManager(o.fieldManager) if createdPatch { outputObj, err = helper.Patch(namespace, name, types.MergePatchType, patchBytes, nil) diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/patch/patch.go b/staging/src/k8s.io/kubectl/pkg/cmd/patch/patch.go index b67f9a9fab0..8c9057f0fc0 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/patch/patch.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/patch/patch.go @@ -66,6 +66,7 @@ type PatchOptions struct { args []string builder *resource.Builder unstructuredClientForMapping func(mapping *meta.RESTMapping) (resource.RESTClient, error) + fieldManager string genericclioptions.IOStreams } @@ -127,6 +128,7 @@ func NewCmdPatch(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobr cmdutil.AddDryRunFlag(cmd) cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, "identifying the resource to update") cmd.Flags().BoolVar(&o.Local, "local", o.Local, "If true, patch will operate on the content of the file, not the server-side resource.") + cmdutil.AddFieldManagerFlagVar(cmd, &o.fieldManager, "kubectl-patch") return cmd } @@ -238,7 +240,8 @@ func (o *PatchOptions) RunPatch() error { helper := resource. NewHelper(client, mapping). - DryRun(o.dryRunStrategy == cmdutil.DryRunServer) + DryRun(o.dryRunStrategy == cmdutil.DryRunServer). + WithFieldManager(o.fieldManager) patchedObj, err := helper.Patch(namespace, name, patchType, patchBytes, nil) if err != nil { return err diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/replace/replace.go b/staging/src/k8s.io/kubectl/pkg/cmd/replace/replace.go index 7b514f82ad7..0e24eba0314 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/replace/replace.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/replace/replace.go @@ -93,6 +93,8 @@ type ReplaceOptions struct { Recorder genericclioptions.Recorder genericclioptions.IOStreams + + fieldManager string } func NewReplaceOptions(streams genericclioptions.IOStreams) *ReplaceOptions { @@ -129,6 +131,7 @@ func NewCmdReplace(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobr cmdutil.AddDryRunFlag(cmd) cmd.Flags().StringVar(&o.Raw, "raw", o.Raw, "Raw URI to PUT to the server. Uses the transport specified by the kubeconfig file.") + cmdutil.AddFieldManagerFlagVar(cmd, &o.fieldManager, "kubectl-replace") return cmd } @@ -292,6 +295,7 @@ func (o *ReplaceOptions) Run(f cmdutil.Factory) error { obj, err := resource. NewHelper(info.Client, info.Mapping). DryRun(o.DryRunStrategy == cmdutil.DryRunServer). + WithFieldManager(o.fieldManager). 
Replace(info.Namespace, info.Name, true, info.Object) if err != nil { return cmdutil.AddSourceToErr("replacing", info.Source, err) @@ -382,7 +386,9 @@ func (o *ReplaceOptions) forceReplace() error { klog.V(4).Infof("error recording current command: %v", err) } - obj, err := resource.NewHelper(info.Client, info.Mapping).Create(info.Namespace, true, info.Object) + obj, err := resource.NewHelper(info.Client, info.Mapping). + WithFieldManager(o.fieldManager). + Create(info.Namespace, true, info.Object) if err != nil { return err } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_pause.go b/staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_pause.go index 645806abd15..49afa891af3 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_pause.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_pause.go @@ -48,6 +48,8 @@ type PauseOptions struct { resource.FilenameOptions genericclioptions.IOStreams + + fieldManager string } var ( @@ -92,6 +94,7 @@ func NewCmdRolloutPause(f cmdutil.Factory, streams genericclioptions.IOStreams) usage := "identifying the resource to get from a server." cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, usage) + cmdutil.AddFieldManagerFlagVar(cmd, &o.fieldManager, "kubectl-rollout") return cmd } @@ -173,7 +176,9 @@ func (o *PauseOptions) RunPause() error { continue } - obj, err := resource.NewHelper(info.Client, info.Mapping).Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil) + obj, err := resource.NewHelper(info.Client, info.Mapping). + WithFieldManager(o.fieldManager). + Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil) if err != nil { allErrs = append(allErrs, fmt.Errorf("failed to patch: %v", err)) continue diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_restart.go b/staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_restart.go index 4ef7b54af2f..4492b56af30 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_restart.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_restart.go @@ -48,6 +48,8 @@ type RestartOptions struct { resource.FilenameOptions genericclioptions.IOStreams + + fieldManager string } var ( @@ -94,6 +96,7 @@ func NewCmdRolloutRestart(f cmdutil.Factory, streams genericclioptions.IOStreams usage := "identifying the resource to get from a server." cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, usage) + cmdutil.AddFieldManagerFlagVar(cmd, &o.fieldManager, "kubectl-rollout") o.PrintFlags.AddFlags(cmd) return cmd } @@ -169,7 +172,9 @@ func (o RestartOptions) RunRestart() error { allErrs = append(allErrs, fmt.Errorf("failed to create patch for %v: empty patch", info.Name)) } - obj, err := resource.NewHelper(info.Client, info.Mapping).Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil) + obj, err := resource.NewHelper(info.Client, info.Mapping). + WithFieldManager(o.fieldManager). 
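+			// pause, restart, and resume all default to the shared
+			// "kubectl-rollout" field manager.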
+ Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil) if err != nil { allErrs = append(allErrs, fmt.Errorf("failed to patch: %v", err)) continue diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_resume.go b/staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_resume.go index b41593f9972..524983cef46 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_resume.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_resume.go @@ -49,6 +49,8 @@ type ResumeOptions struct { resource.FilenameOptions genericclioptions.IOStreams + + fieldManager string } var ( @@ -94,6 +96,7 @@ func NewCmdRolloutResume(f cmdutil.Factory, streams genericclioptions.IOStreams) usage := "identifying the resource to get from a server." cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, usage) + cmdutil.AddFieldManagerFlagVar(cmd, &o.fieldManager, "kubectl-rollout") o.PrintFlags.AddFlags(cmd) return cmd } @@ -177,7 +180,9 @@ func (o ResumeOptions) RunResume() error { continue } - obj, err := resource.NewHelper(info.Client, info.Mapping).Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil) + obj, err := resource.NewHelper(info.Client, info.Mapping). + WithFieldManager(o.fieldManager). + Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil) if err != nil { allErrs = append(allErrs, fmt.Errorf("failed to patch: %v", err)) continue diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/run/run.go b/staging/src/k8s.io/kubectl/pkg/cmd/run/run.go index 62820995ccc..342108243c8 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/run/run.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/run/run.go @@ -123,6 +123,7 @@ type RunOptions struct { Quiet bool Schedule string TTY bool + fieldManager string genericclioptions.IOStreams } @@ -198,6 +199,7 @@ func addRunFlags(cmd *cobra.Command, opt *RunOptions) { cmd.Flags().BoolVar(&opt.Quiet, "quiet", opt.Quiet, "If true, suppress prompt messages.") cmd.Flags().StringVar(&opt.Schedule, "schedule", opt.Schedule, i18n.T("A schedule in the Cron format the job should be run with.")) cmd.Flags().MarkDeprecated("schedule", "has no effect and will be removed in the future.") + cmdutil.AddFieldManagerFlagVar(cmd, &opt.fieldManager, "kubectl-run") } func (o *RunOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error { @@ -662,6 +664,7 @@ func (o *RunOptions) createGeneratedObject(f cmdutil.Factory, cmd *cobra.Command actualObj, err = resource. NewHelper(client, mapping). DryRun(o.DryRunStrategy == cmdutil.DryRunServer). + WithFieldManager(o.fieldManager). 
Create(namespace, false, obj) if err != nil { return nil, err diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/set/set_env.go b/staging/src/k8s.io/kubectl/pkg/cmd/set/set_env.go index eaadad5342e..009ee746869 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/set/set_env.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/set/set_env.go @@ -25,7 +25,7 @@ import ( "github.com/spf13/cobra" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -113,6 +113,7 @@ type EnvOptions struct { From string Prefix string Keys []string + fieldManager string PrintObj printers.ResourcePrinterFunc @@ -171,6 +172,7 @@ func NewCmdEnv(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Co cmd.Flags().BoolVar(&o.Local, "local", o.Local, "If true, set env will NOT contact api-server but run locally.") cmd.Flags().BoolVar(&o.All, "all", o.All, "If true, select all resources in the namespace of the specified resource types") cmd.Flags().BoolVar(&o.Overwrite, "overwrite", o.Overwrite, "If true, allow environment to be overwritten, otherwise reject updates that overwrite existing environment.") + cmdutil.AddFieldManagerFlagVar(cmd, &o.fieldManager, "kubectl-set") o.PrintFlags.AddFlags(cmd) @@ -512,6 +514,7 @@ func (o *EnvOptions) RunEnv() error { actual, err := resource. NewHelper(info.Client, info.Mapping). DryRun(o.dryRunStrategy == cmdutil.DryRunServer). + WithFieldManager(o.fieldManager). Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil) if err != nil { allErrs = append(allErrs, fmt.Errorf("failed to patch env update to pod template: %v", err)) diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/set/set_image.go b/staging/src/k8s.io/kubectl/pkg/cmd/set/set_image.go index cf97c29c36c..ce7bb84fd9e 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/set/set_image.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/set/set_image.go @@ -52,6 +52,7 @@ type SetImageOptions struct { Output string Local bool ResolveImage ImageResolver + fieldManager string PrintObj printers.ResourcePrinterFunc Recorder genericclioptions.Recorder @@ -125,6 +126,7 @@ func NewCmdImage(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra. cmd.Flags().StringVarP(&o.Selector, "selector", "l", o.Selector, "Selector (label query) to filter on, not including uninitialized ones, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)") cmd.Flags().BoolVar(&o.Local, "local", o.Local, "If true, set image will NOT contact api-server but run locally.") cmdutil.AddDryRunFlag(cmd) + cmdutil.AddFieldManagerFlagVar(cmd, &o.fieldManager, "kubectl-set") return cmd } @@ -286,6 +288,7 @@ func (o *SetImageOptions) Run() error { actual, err := resource. NewHelper(info.Client, info.Mapping). DryRun(o.DryRunStrategy == cmdutil.DryRunServer). + WithFieldManager(o.fieldManager). 
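+			// Every kubectl set subcommand defaults to the shared
+			// "kubectl-set" field manager.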
Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil) if err != nil { allErrs = append(allErrs, fmt.Errorf("failed to patch image update to pod template: %v", err)) diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/set/set_resources.go b/staging/src/k8s.io/kubectl/pkg/cmd/set/set_resources.go index 611e326f316..789781bdc47 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/set/set_resources.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/set/set_resources.go @@ -73,6 +73,7 @@ type SetResourcesOptions struct { Output string All bool Local bool + fieldManager string DryRunStrategy cmdutil.DryRunStrategy @@ -136,6 +137,7 @@ func NewCmdResources(f cmdutil.Factory, streams genericclioptions.IOStreams) *co cmdutil.AddDryRunFlag(cmd) cmd.Flags().StringVar(&o.Limits, "limits", o.Limits, "The resource requirement requests for this container. For example, 'cpu=100m,memory=256Mi'. Note that server side components may assign requests depending on the server configuration, such as limit ranges.") cmd.Flags().StringVar(&o.Requests, "requests", o.Requests, "The resource requirement requests for this container. For example, 'cpu=100m,memory=256Mi'. Note that server side components may assign requests depending on the server configuration, such as limit ranges.") + cmdutil.AddFieldManagerFlagVar(cmd, &o.fieldManager, "kubectl-set") return cmd } @@ -301,6 +303,7 @@ func (o *SetResourcesOptions) Run() error { actual, err := resource. NewHelper(info.Client, info.Mapping). DryRun(o.DryRunStrategy == cmdutil.DryRunServer). + WithFieldManager(o.fieldManager). Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil) if err != nil { allErrs = append(allErrs, fmt.Errorf("failed to patch resources update to pod template %v", err)) diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/set/set_selector.go b/staging/src/k8s.io/kubectl/pkg/cmd/set/set_selector.go index df235c86236..d73a82de8db 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/set/set_selector.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/set/set_selector.go @@ -46,6 +46,7 @@ type SetSelectorOptions struct { RecordFlags *genericclioptions.RecordFlags dryRunStrategy cmdutil.DryRunStrategy dryRunVerifier *resource.DryRunVerifier + fieldManager string // set by args resources []string @@ -113,6 +114,7 @@ func NewCmdSelector(f cmdutil.Factory, streams genericclioptions.IOStreams) *cob o.ResourceBuilderFlags.AddFlags(cmd.Flags()) o.PrintFlags.AddFlags(cmd) o.RecordFlags.AddFlags(cmd) + cmdutil.AddFieldManagerFlagVar(cmd, &o.fieldManager, "kubectl-set") cmd.Flags().StringVarP(&o.resourceVersion, "resource-version", "", o.resourceVersion, "If non-empty, the selectors update will only succeed if this is the current resource-version for the object. Only valid when specifying a single resource.") cmdutil.AddDryRunFlag(cmd) @@ -227,6 +229,7 @@ func (o *SetSelectorOptions) RunSelector() error { actual, err := resource. NewHelper(info.Client, info.Mapping). DryRun(o.dryRunStrategy == cmdutil.DryRunServer). + WithFieldManager(o.fieldManager). 
Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil) if err != nil { return err diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go b/staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go index bc7ac2b9f4e..14713ddf41c 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go @@ -71,6 +71,7 @@ type SetServiceAccountOptions struct { updatePodSpecForObject polymorphichelpers.UpdatePodSpecForObjectFunc infos []*resource.Info serviceAccountName string + fieldManager string PrintObj printers.ResourcePrinterFunc Recorder genericclioptions.Recorder @@ -115,6 +116,7 @@ func NewCmdServiceAccount(f cmdutil.Factory, streams genericclioptions.IOStreams cmd.Flags().BoolVar(&o.all, "all", o.all, "Select all resources, including uninitialized ones, in the namespace of the specified resource types") cmd.Flags().BoolVar(&o.local, "local", o.local, "If true, set serviceaccount will NOT contact api-server but run locally.") cmdutil.AddDryRunFlag(cmd) + cmdutil.AddFieldManagerFlagVar(cmd, &o.fieldManager, "kubectl-set") return cmd } @@ -224,6 +226,7 @@ func (o *SetServiceAccountOptions) Run() error { actual, err := resource. NewHelper(info.Client, info.Mapping). DryRun(o.dryRunStrategy == cmdutil.DryRunServer). + WithFieldManager(o.fieldManager). Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil) if err != nil { patchErrs = append(patchErrs, fmt.Errorf("failed to patch ServiceAccountName %v", err)) diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/set/set_subject.go b/staging/src/k8s.io/kubectl/pkg/cmd/set/set_subject.go index 488a9ea2984..043fd5e4010 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/set/set_subject.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/set/set_subject.go @@ -68,6 +68,7 @@ type SubjectOptions struct { DryRunStrategy cmdutil.DryRunStrategy DryRunVerifier *resource.DryRunVerifier Local bool + fieldManager string Users []string Groups []string @@ -115,6 +116,7 @@ func NewCmdSubject(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobr cmd.Flags().StringArrayVar(&o.Users, "user", o.Users, "Usernames to bind to the role") cmd.Flags().StringArrayVar(&o.Groups, "group", o.Groups, "Groups to bind to the role") cmd.Flags().StringArrayVar(&o.ServiceAccounts, "serviceaccount", o.ServiceAccounts, "Service accounts to bind to the role") + cmdutil.AddFieldManagerFlagVar(cmd, &o.fieldManager, "kubectl-set") return cmd } @@ -281,6 +283,7 @@ func (o *SubjectOptions) Run(fn updateSubjects) error { actual, err := resource. NewHelper(info.Client, info.Mapping). DryRun(o.DryRunStrategy == cmdutil.DryRunServer). + WithFieldManager(o.fieldManager). Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil) if err != nil { allErrs = append(allErrs, fmt.Errorf("failed to patch subjects to rolebinding: %v", err)) diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/taint/taint.go b/staging/src/k8s.io/kubectl/pkg/cmd/taint/taint.go index bf7e7909701..b18ab98f748 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/taint/taint.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/taint/taint.go @@ -56,6 +56,7 @@ type TaintOptions struct { selector string overwrite bool all bool + fieldManager string ClientForMapping func(*meta.RESTMapping) (resource.RESTClient, error) @@ -122,6 +123,7 @@ func NewCmdTaint(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra. 
cmd.Flags().StringVarP(&options.selector, "selector", "l", options.selector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)") cmd.Flags().BoolVar(&options.overwrite, "overwrite", options.overwrite, "If true, allow taints to be overwritten, otherwise reject taint updates that overwrite existing taints.") cmd.Flags().BoolVar(&options.all, "all", options.all, "Select all nodes in the cluster") + cmdutil.AddFieldManagerFlagVar(cmd, &options.fieldManager, "kubectl-taint") return cmd } @@ -339,6 +341,7 @@ func (o TaintOptions) RunTaint() error { } helper := resource. NewHelper(client, mapping). + WithFieldManager(o.fieldManager). DryRun(o.DryRunStrategy == cmdutil.DryRunServer) var outputObj runtime.Object diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/util/editor/editoptions.go b/staging/src/k8s.io/kubectl/pkg/cmd/util/editor/editoptions.go index bc063aab644..f4c432d00f8 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/util/editor/editoptions.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/util/editor/editoptions.go @@ -28,7 +28,7 @@ import ( goruntime "runtime" "strings" - "github.com/evanphx/json-patch" + jsonpatch "github.com/evanphx/json-patch" "github.com/spf13/cobra" "k8s.io/klog" @@ -78,6 +78,8 @@ type EditOptions struct { f cmdutil.Factory editPrinterOptions *editPrinterOptions updatedResultGetter func(data []byte) *resource.Result + + FieldManager string } // NewEditOptions returns an initialized EditOptions instance @@ -498,7 +500,7 @@ func (o *EditOptions) annotationPatch(update *resource.Info) error { if err != nil { return err } - helper := resource.NewHelper(client, mapping) + helper := resource.NewHelper(client, mapping).WithFieldManager(o.FieldManager) _, err = helper.Patch(o.CmdNamespace, update.Name, patchType, patch, nil) if err != nil { return err @@ -628,7 +630,9 @@ func (o *EditOptions) visitToPatch(originalInfos []*resource.Info, patchVisitor fmt.Fprintf(o.Out, "Patch: %s\n", string(patch)) } - patched, err := resource.NewHelper(info.Client, info.Mapping).Patch(info.Namespace, info.Name, patchType, patch, nil) + patched, err := resource.NewHelper(info.Client, info.Mapping). + WithFieldManager(o.FieldManager). + Patch(info.Namespace, info.Name, patchType, patch, nil) if err != nil { fmt.Fprintln(o.ErrOut, results.addError(err, info)) return nil @@ -645,9 +649,13 @@ func (o *EditOptions) visitToPatch(originalInfos []*resource.Info, patchVisitor func (o *EditOptions) visitToCreate(createVisitor resource.Visitor) error { err := createVisitor.Visit(func(info *resource.Info, incomingErr error) error { - if err := resource.CreateAndRefresh(info); err != nil { + obj, err := resource.NewHelper(info.Client, info.Mapping). + WithFieldManager(o.FieldManager). 
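+			// resource.CreateAndRefresh takes no field manager, so create through
+			// a Helper and refresh the info from the returned object below.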
diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/util/helpers.go b/staging/src/k8s.io/kubectl/pkg/cmd/util/helpers.go
index 323f60c2a92..406ec4dfb4c 100644
--- a/staging/src/k8s.io/kubectl/pkg/cmd/util/helpers.go
+++ b/staging/src/k8s.io/kubectl/pkg/cmd/util/helpers.go
@@ -430,10 +430,13 @@ func AddDryRunFlag(cmd *cobra.Command) {
 	cmd.Flags().Lookup("dry-run").NoOptDefVal = "unchanged"
 }
 
+func AddFieldManagerFlagVar(cmd *cobra.Command, p *string, defaultFieldManager string) {
+	cmd.Flags().StringVar(p, "field-manager", defaultFieldManager, "Name of the manager used to track field ownership.")
+}
+
 func AddServerSideApplyFlags(cmd *cobra.Command) {
 	cmd.Flags().Bool("server-side", false, "If true, apply runs in the server instead of the client.")
 	cmd.Flags().Bool("force-conflicts", false, "If true, server-side apply will force the changes against conflicts.")
-	cmd.Flags().String("field-manager", "kubectl", "Name of the manager used to track field ownership.")
 }
 
 func AddPodRunningTimeoutFlag(cmd *cobra.Command, defaultTimeout time.Duration) {
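The AddFieldManagerFlagVar helper above replaces the apply-only --field-manager string flag (previously registered in AddServerSideApplyFlags with the default "kubectl") with a shared helper that binds the flag to a command-owned variable, so every mutating command can register it with its own default. A sketch of how a command opts in; the command name and default value here are invented for illustration:

package example

import (
	"github.com/spf13/cobra"

	cmdutil "k8s.io/kubectl/pkg/cmd/util"
)

// newExampleCommand registers the shared --field-manager flag with a
// command-specific default, the pattern every command in this patch follows
// ("kubectl-set", "kubectl-taint", "kubectl-edit", and so on).
func newExampleCommand() *cobra.Command {
	var fieldManager string
	cmd := &cobra.Command{Use: "example"}
	cmdutil.AddFieldManagerFlagVar(cmd, &fieldManager, "kubectl-example")
	return cmd
}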
diff --git a/test/cmd/apply.sh b/test/cmd/apply.sh
index a2019c5075e..68b6283d4e9 100755
--- a/test/cmd/apply.sh
+++ b/test/cmd/apply.sh
@@ -34,6 +34,9 @@ run_kubectl_apply_tests() {
   kube::test::get_object_assert 'pods test-pod' "{{${labels_field:?}.name}}" 'test-pod-label'
   # Post-Condition: pod "test-pod" has configuration annotation
   grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]:?}")"
+  # pod has field manager for kubectl client-side apply
+  output_message=$(kubectl get -f hack/testdata/pod.yaml -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
+  kube::test::if_has_string "${output_message}" 'kubectl-client-side-apply'
   # Clean up
   kubectl delete pods test-pod "${kube_flags[@]:?}"
 
@@ -354,6 +357,13 @@ run_kubectl_server_side_apply_tests() {
   kubectl apply --server-side -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
   # Post-Condition: pod "test-pod" is created
   kube::test::get_object_assert 'pods test-pod' "{{${labels_field:?}.name}}" 'test-pod-label'
+  # pod has field manager for kubectl server-side apply
+  output_message=$(kubectl get -f hack/testdata/pod.yaml -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
+  kube::test::if_has_string "${output_message}" 'kubectl'
+  # pod has custom field manager
+  kubectl apply --server-side --field-manager=my-field-manager --force-conflicts -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
+  output_message=$(kubectl get -f hack/testdata/pod.yaml -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
+  kube::test::if_has_string "${output_message}" 'my-field-manager'
 
   # Clean up
   kubectl delete pods test-pod "${kube_flags[@]:?}"
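The shell assertions in these test files all read the same signal: every server-side mutation now records its manager in metadata.managedFields on the live object, so client-side apply shows up as kubectl-client-side-apply, server-side apply as kubectl (or whatever --field-manager was given), and each imperative command as its own kubectl-<verb> name. Roughly the same check as the jsonpath expression, written against client-go as an illustrative sketch (printManagers is not test code from this patch):

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// printManagers lists the manager names recorded on a pod, the same values
// the tests extract with -o=jsonpath='{.metadata.managedFields[*].manager}'.
func printManagers(ctx context.Context, client kubernetes.Interface, namespace, name string) error {
	pod, err := client.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	for _, entry := range pod.ManagedFields {
		fmt.Println(entry.Manager)
	}
	return nil
}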
diff --git a/test/cmd/apps.sh b/test/cmd/apps.sh
index af608071019..d7056eb4885 100755
--- a/test/cmd/apps.sh
+++ b/test/cmd/apps.sh
@@ -42,6 +42,9 @@ run_daemonset_tests() {
   kube::test::get_object_assert 'daemonsets bind' "{{${generation_field:?}}}" '3'
   kubectl set resources daemonsets/bind "${kube_flags[@]:?}" --limits=cpu=200m,memory=512Mi
   kube::test::get_object_assert 'daemonsets bind' "{{${generation_field:?}}}" '4'
+  # DaemonSet has field manager for kubectl set
+  output_message=$(kubectl get daemonsets bind -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
+  kube::test::if_has_string "${output_message}" 'kubectl-set'
 
   # Rollout restart should change generation
   kubectl rollout restart daemonset/bind "${kube_flags[@]:?}"
@@ -335,6 +338,10 @@ run_deployment_tests() {
   newrs="$(kubectl describe deployment nginx | grep NewReplicaSet | awk '{print $2}')"
   rs="$(kubectl get rs "${newrs}" -o yaml)"
   kube::test::if_has_string "${rs}" "deployment.kubernetes.io/revision: \"6\""
+  # Deployment has field manager for kubectl rollout
+  output_message=$(kubectl get deployment nginx -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
+  kube::test::if_has_string "${output_message}" 'kubectl-rollout'
+
   # Create second deployment
   ${SED} "s/name: nginx$/name: nginx2/" hack/testdata/deployment-revision1.yaml | kubectl create -f - "${kube_flags[@]:?}"
   # Deletion of both deployments should not be blocked
   kubectl delete deployment nginx2 "${kube_flags[@]:?}"
@@ -653,6 +660,10 @@ run_rs_tests() {
   kubectl set serviceaccount rs/frontend "${kube_flags[@]:?}" serviceaccount1
   kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '5'
 
+  # ReplicaSet has field manager for kubectl set
+  output_message=$(kubectl get rs frontend -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
+  kube::test::if_has_string "${output_message}" 'kubectl-set'
+
   ### Delete replica set with id
   # Pre-condition: frontend replica set exists
   kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" 'frontend:'
@@ -692,6 +703,10 @@ run_rs_tests() {
   # autoscale 2~3 pods, no CPU utilization specified, replica set specified by name
   kubectl autoscale rs frontend "${kube_flags[@]:?}" --min=2 --max=3
   kube::test::get_object_assert 'hpa frontend' "{{${hpa_min_field:?}}} {{${hpa_max_field:?}}} {{${hpa_cpu_field:?}}}" '2 3 80'
+  # HorizontalPodAutoscaler has field manager for kubectl autoscale
+  output_message=$(kubectl get hpa frontend -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
+  kube::test::if_has_string "${output_message}" 'kubectl-autoscale'
+
   # Clean up
   kubectl delete hpa frontend "${kube_flags[@]:?}"
   # autoscale without specifying --max should fail
   ! kubectl autoscale rs frontend "${kube_flags[@]:?}" || exit 1
diff --git a/test/cmd/core.sh b/test/cmd/core.sh
index 7b2ecb8492e..6c1fe739860 100755
--- a/test/cmd/core.sh
+++ b/test/cmd/core.sh
@@ -80,6 +80,9 @@ run_pod_tests() {
   kube::test::get_object_assert 'pod valid-pod' "{{$id_field}}" 'valid-pod'
   kube::test::get_object_assert 'pod/valid-pod' "{{$id_field}}" 'valid-pod'
   kube::test::get_object_assert 'pods/valid-pod' "{{$id_field}}" 'valid-pod'
+  # pod has field manager for kubectl create
+  output_message=$(kubectl get -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
+  kube::test::if_has_string "${output_message}" 'kubectl-create'
   # Repeat above test using jsonpath template
   kube::test::get_object_jsonpath_assert pods "{.items[*]$id_field}" 'valid-pod'
   kube::test::get_object_jsonpath_assert 'pod valid-pod' "{$id_field}" 'valid-pod'
@@ -364,6 +367,9 @@ run_pod_tests() {
   kubectl annotate pods valid-pod emptyannotation="" "${kube_flags[@]}"
   # Post-condition: valid pod contains "emptyannotation" with no value
   kube::test::get_object_assert 'pod valid-pod' "{{${annotations_field}.emptyannotation}}" ''
+  # pod has field manager for kubectl annotate
+  output_message=$(kubectl get pod valid-pod -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
+  kube::test::if_has_string "${output_message}" 'kubectl-annotate'
 
   ### Record label change
   # Pre-condition: valid-pod does not have record annotation
@@ -372,6 +378,9 @@ run_pod_tests() {
   kubectl label pods valid-pod record-change=true --record=true "${kube_flags[@]}"
   # Post-condition: valid-pod has record annotation
   kube::test::get_object_assert 'pod valid-pod' "{{range$annotations_field}}{{.}}:{{end}}" ".*--record=true.*"
+  # pod has field manager for kubectl label
+  output_message=$(kubectl get pod valid-pod -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
+  kube::test::if_has_string "${output_message}" 'kubectl-label'
 
   ### Do not record label change
   # Command
@@ -451,6 +460,11 @@ run_pod_tests() {
   # Post-condition: service named modified and rc named modified are created
   kube::test::get_object_assert service "{{range.items}}{{$id_field}}:{{end}}" 'modified:'
   kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'modified:'
+  # resources have field manager for kubectl create
+  output_message=$(kubectl get service/modified -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
+  kube::test::if_has_string "${output_message}" 'kubectl-create'
+  output_message=$(kubectl get rc/modified -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
+  kube::test::if_has_string "${output_message}" 'kubectl-create'
   # Clean up
   kubectl delete service/modified "${kube_flags[@]}"
   kubectl delete rc/modified "${kube_flags[@]}"
@@ -521,6 +535,10 @@ run_pod_tests() {
   # Post-condition: valid-pod POD has expected image
   kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'k8s.gcr.io/pause:3.2:'
 
+  # pod has field manager for kubectl patch
+  output_message=$(kubectl get pod valid-pod -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
+  kube::test::if_has_string "${output_message}" 'kubectl-patch'
+
   ## If resourceVersion is specified in the patch, it will be treated as a precondition, i.e., if the resourceVersion is different from that is stored in the server, the Patch should be rejected
   ERROR_FILE="${KUBE_TEMP}/conflict-error"
   ## If the resourceVersion is the same as the one stored in the server, the patch will be applied.
@@ -561,6 +579,10 @@ run_pod_tests() {
   # Post-condition: spec.container.name = "replaced-k8s-serve-hostname"
   kube::test::get_object_assert 'pod valid-pod' "{{(index .spec.containers 0).name}}" 'replaced-k8s-serve-hostname'
 
+  # Pod has field manager for kubectl replace
+  output_message=$(kubectl get pod valid-pod -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
+  kube::test::if_has_string "${output_message}" 'kubectl-replace'
+
   ## check replace --grace-period requires --force
   output_message=$(! kubectl replace "${kube_flags[@]}" --grace-period=1 -f /tmp/tmp-valid-pod.json 2>&1)
   kube::test::if_has_string "${output_message}" '\-\-grace-period must have \-\-force specified'
@@ -635,6 +657,9 @@ __EOF__
   grep -q 'Patch:' <<< "$(EDITOR=/tmp/tmp-editor.sh kubectl edit "${kube_flags[@]}" pods/valid-pod --output-patch=true)"
   # Post-condition: valid-pod POD has image k8s.gcr.io/serve_hostname
   kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'k8s.gcr.io/serve_hostname:'
+  # pod has field manager for kubectl edit
+  output_message=$(kubectl get pod valid-pod -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
+  kube::test::if_has_string "${output_message}" 'kubectl-edit'
   # cleaning
   rm /tmp/tmp-editor.sh
 
@@ -976,6 +1001,8 @@ run_service_tests() {
   # Show dry-run works on running selector
   kubectl set selector services redis-master role=padawan --dry-run=client -o yaml "${kube_flags[@]}"
   kubectl set selector services redis-master role=padawan --dry-run=server -o yaml "${kube_flags[@]}"
+  output_message=$(kubectl get services redis-master -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
+  kube::test::if_has_string "${output_message}" 'kubectl-set'
   ! kubectl set selector services redis-master role=padawan --local -o yaml "${kube_flags[@]}" || exit 1
   kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "redis:master:backend:"
   # --resource-version= succeeds
@@ -1111,12 +1138,18 @@ __EOF__
   # Check result
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'testmetadata:'
   kube::test::get_object_assert 'service testmetadata' "{{.metadata.annotations}}" "map\[zone-context:home\]"
+  # pod has field manager for kubectl run
+  output_message=$(kubectl get pod testmetadata -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
+  kube::test::if_has_string "${output_message}" 'kubectl-run'
 
   ### Expose pod as a new service
   # Command
   kubectl expose pod testmetadata --port=1000 --target-port=80 --type=NodePort --name=exposemetadata --overrides='{ "metadata": { "annotations": { "zone-context": "work" } } } '
   # Check result
   kube::test::get_object_assert 'service exposemetadata' "{{.metadata.annotations}}" "map\[zone-context:work\]"
+  # Service has field manager for kubectl expose
+  output_message=$(kubectl get service exposemetadata -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
+  kube::test::if_has_string "${output_message}" 'kubectl-expose'
 
   # Clean-Up
   # Command
diff --git a/test/cmd/diff.sh b/test/cmd/diff.sh
index f0bf6f7e3d3..11a47689b5d 100755
--- a/test/cmd/diff.sh
+++ b/test/cmd/diff.sh
@@ -54,7 +54,6 @@ run_kubectl_diff_tests() {
   kube::test::if_has_string "${resourceVersion}" "${initialResourceVersion}"
 
   # Test found diff with server-side apply
-  kubectl apply -f hack/testdata/pod.yaml
   output_message=$(kubectl diff -f hack/testdata/pod-changed.yaml --server-side --force-conflicts || test $? -eq 1)
   kube::test::if_has_string "${output_message}" 'k8s.gcr.io/pause:3.0'
 
@@ -65,6 +64,31 @@
   # Test that we have a return code bigger than 1 if there is an error when diffing
   kubectl diff -f hack/testdata/invalid-pod.yaml || test $? -gt 1
 
+  # Cleanup
+  kubectl delete -f hack/testdata/pod.yaml
+
+  kube::log::status "Testing kubectl diff with server-side apply"
+
+  # Test that kubectl diff --server-side works when the live object doesn't exist
+  output_message=$(! kubectl diff --server-side -f hack/testdata/pod.yaml)
+  kube::test::if_has_string "${output_message}" 'test-pod'
+  # Ensure diff --server-side only dry-runs and doesn't persist change
+  kube::test::get_object_assert 'pod' "{{range.items}}{{ if eq ${id_field:?} \\\"test-pod\\\" }}found{{end}}{{end}}:" ':'
+
+  # Server-side apply the Pod
+  kubectl apply --server-side -f hack/testdata/pod.yaml
+  kube::test::get_object_assert 'pod' "{{range.items}}{{ if eq ${id_field:?} \\\"test-pod\\\" }}found{{end}}{{end}}:" 'found:'
+
+  # Make sure that --server-side diffing the resource right after returns nothing (0 exit code).
+  kubectl diff --server-side -f hack/testdata/pod.yaml
+
+  # Make sure that for kubectl diff --server-side:
+  # 1. the exit code for diff is 1 because it found a difference
+  # 2. the difference contains the changed image
+  output_message=$(kubectl diff --server-side -f hack/testdata/pod-changed.yaml || test $? -eq 1)
+  kube::test::if_has_string "${output_message}" 'k8s.gcr.io/pause:3.0'
+
+  # Cleanup
   kubectl delete -f hack/testdata/pod.yaml
 
   set +o nounset
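The diff tests above lean on kubectl diff's exit-code contract: 0 means no differences, 1 means differences were found, and anything greater than 1 is a genuine error, which is why the assertions use `|| test $? -eq 1` rather than treating any non-zero status as failure. A sketch of a caller honoring that contract (illustrative, not part of the patch):

package example

import (
	"errors"
	"os/exec"
)

// diffChanged runs `kubectl diff -f manifest` and maps its exit code:
// 0 reports no drift, 1 reports a difference, anything else is an error.
func diffChanged(manifest string) (bool, error) {
	err := exec.Command("kubectl", "diff", "-f", manifest).Run()
	if err == nil {
		return false, nil
	}
	var exitErr *exec.ExitError
	if errors.As(err, &exitErr) && exitErr.ExitCode() == 1 {
		return true, nil
	}
	return false, err
}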
diff --git a/test/cmd/node-management.sh b/test/cmd/node-management.sh
index 1a12af304cb..7ce4341cebf 100755
--- a/test/cmd/node-management.sh
+++ b/test/cmd/node-management.sh
@@ -87,6 +87,9 @@ __EOF__
   # taint can add a taint (:)
   kubectl taint node 127.0.0.1 dedicated:PreferNoSchedule
   kube::test::get_object_assert "nodes 127.0.0.1" '{{range .spec.taints}}{{if eq .key \"dedicated\"}}{{.key}}={{.value}}:{{.effect}}{{end}}{{end}}' "dedicated=:PreferNoSchedule"
+  # Node has field manager for kubectl taint
+  output_message=$(kubectl get node 127.0.0.1 -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
+  kube::test::if_has_string "${output_message}" 'kubectl-taint'
   # Dry-run remove a taint
   kubectl taint node 127.0.0.1 --dry-run=client dedicated-
   kubectl taint node 127.0.0.1 --dry-run=server dedicated-