Set kubectl field manager names

This commit is contained in:
Julian V. Modesto 2020-03-04 22:04:01 -05:00
parent c1fe194e06
commit 360c348d0e
46 changed files with 287 additions and 29 deletions

View File

@ -46,6 +46,10 @@ type Helper struct {
// and on resources that support dry-run. If the apiserver or the resource
// does not support dry-run, then the change will be persisted to storage.
ServerDryRun bool
// FieldManager is the name associated with the actor or entity that is making
// changes.
FieldManager string
} }
// NewHelper creates a Helper from a ResourceMapping // NewHelper creates a Helper from a ResourceMapping
@ -64,6 +68,13 @@ func (m *Helper) DryRun(dryRun bool) *Helper {
return m return m
} }
// WithFieldManager sets the field manager option to indicate the actor or entity
// that is making changes in a create or update operation. An empty value means
// no field manager is sent to the server (see the guards in CreateWithOptions,
// Patch, and Replace). It returns the Helper to allow chained configuration.
func (m *Helper) WithFieldManager(fieldManager string) *Helper {
m.FieldManager = fieldManager
return m
}
func (m *Helper) Get(namespace, name string, export bool) (runtime.Object, error) { func (m *Helper) Get(namespace, name string, export bool) (runtime.Object, error) {
req := m.RESTClient.Get(). req := m.RESTClient.Get().
NamespaceIfScoped(namespace, m.NamespaceScoped). NamespaceIfScoped(namespace, m.NamespaceScoped).
@ -141,6 +152,9 @@ func (m *Helper) CreateWithOptions(namespace string, modify bool, obj runtime.Ob
if m.ServerDryRun { if m.ServerDryRun {
options.DryRun = []string{metav1.DryRunAll} options.DryRun = []string{metav1.DryRunAll}
} }
if m.FieldManager != "" {
options.FieldManager = m.FieldManager
}
if modify { if modify {
// Attempt to version the object based on client logic. // Attempt to version the object based on client logic.
version, err := metadataAccessor.ResourceVersion(obj) version, err := metadataAccessor.ResourceVersion(obj)
@ -174,6 +188,9 @@ func (m *Helper) Patch(namespace, name string, pt types.PatchType, data []byte,
if m.ServerDryRun { if m.ServerDryRun {
options.DryRun = []string{metav1.DryRunAll} options.DryRun = []string{metav1.DryRunAll}
} }
if m.FieldManager != "" {
options.FieldManager = m.FieldManager
}
return m.RESTClient.Patch(pt). return m.RESTClient.Patch(pt).
NamespaceIfScoped(namespace, m.NamespaceScoped). NamespaceIfScoped(namespace, m.NamespaceScoped).
Resource(m.Resource). Resource(m.Resource).
@ -190,6 +207,9 @@ func (m *Helper) Replace(namespace, name string, overwrite bool, obj runtime.Obj
if m.ServerDryRun { if m.ServerDryRun {
options.DryRun = []string{metav1.DryRunAll} options.DryRun = []string{metav1.DryRunAll}
} }
if m.FieldManager != "" {
options.FieldManager = m.FieldManager
}
// Attempt to version the object based on client logic. // Attempt to version the object based on client logic.
version, err := metadataAccessor.ResourceVersion(obj) version, err := metadataAccessor.ResourceVersion(obj)

View File

@ -55,6 +55,7 @@ type AnnotateOptions struct {
local bool local bool
dryRunStrategy cmdutil.DryRunStrategy dryRunStrategy cmdutil.DryRunStrategy
dryRunVerifier *resource.DryRunVerifier dryRunVerifier *resource.DryRunVerifier
fieldManager string
all bool all bool
resourceVersion string resourceVersion string
selector string selector string
@ -150,6 +151,7 @@ func NewCmdAnnotate(parent string, f cmdutil.Factory, ioStreams genericclioption
usage := "identifying the resource to update the annotation" usage := "identifying the resource to update the annotation"
cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, usage) cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, usage)
cmdutil.AddDryRunFlag(cmd) cmdutil.AddDryRunFlag(cmd)
cmdutil.AddFieldManagerFlagVar(cmd, &o.fieldManager, "kubectl-annotate")
return cmd return cmd
} }
@ -329,7 +331,8 @@ func (o AnnotateOptions) RunAnnotate() error {
} }
helper := resource. helper := resource.
NewHelper(client, mapping). NewHelper(client, mapping).
DryRun(o.dryRunStrategy == cmdutil.DryRunServer) DryRun(o.dryRunStrategy == cmdutil.DryRunServer).
WithFieldManager(o.fieldManager)
if createdPatch { if createdPatch {
outputObj, err = helper.Patch(namespace, name, types.MergePatchType, patchBytes, nil) outputObj, err = helper.Patch(namespace, name, types.MergePatchType, patchBytes, nil)

View File

@ -196,6 +196,7 @@ func NewCmdApply(baseName string, f cmdutil.Factory, ioStreams genericclioptions
cmd.Flags().MarkHidden("server-dry-run") cmd.Flags().MarkHidden("server-dry-run")
cmdutil.AddDryRunFlag(cmd) cmdutil.AddDryRunFlag(cmd)
cmdutil.AddServerSideApplyFlags(cmd) cmdutil.AddServerSideApplyFlags(cmd)
cmdutil.AddFieldManagerFlagVar(cmd, &o.FieldManager, FieldManagerClientSideApply)
// apply subcommands // apply subcommands
cmd.AddCommand(NewCmdApplyViewLastApplied(f, ioStreams)) cmd.AddCommand(NewCmdApplyViewLastApplied(f, ioStreams))
@ -223,7 +224,7 @@ func (o *ApplyOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error {
return err return err
} }
o.DryRunVerifier = resource.NewDryRunVerifier(o.DynamicClient, discoveryClient) o.DryRunVerifier = resource.NewDryRunVerifier(o.DynamicClient, discoveryClient)
o.FieldManager = cmdutil.GetFieldManagerFlag(cmd) o.FieldManager = GetApplyFieldManagerFlag(cmd, o.ServerSideApply)
if o.ForceConflicts && !o.ServerSideApply { if o.ForceConflicts && !o.ServerSideApply {
return fmt.Errorf("--force-conflicts only works with --server-side") return fmt.Errorf("--force-conflicts only works with --server-side")
@ -414,11 +415,10 @@ func (o *ApplyOptions) applyOneObject(info *resource.Info) error {
} }
options := metav1.PatchOptions{ options := metav1.PatchOptions{
Force: &o.ForceConflicts, Force: &o.ForceConflicts,
FieldManager: o.FieldManager,
} }
helper := resource.NewHelper(info.Client, info.Mapping).
helper := resource.NewHelper(info.Client, info.Mapping) WithFieldManager(o.FieldManager)
if o.DryRunStrategy == cmdutil.DryRunServer { if o.DryRunStrategy == cmdutil.DryRunServer {
if err := o.DryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err != nil { if err := o.DryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err != nil {
return err return err
@ -495,7 +495,8 @@ See http://k8s.io/docs/reference/using-api/api-concepts/#conflicts`, err)
if o.DryRunStrategy != cmdutil.DryRunClient { if o.DryRunStrategy != cmdutil.DryRunClient {
// Then create the resource and skip the three-way merge // Then create the resource and skip the three-way merge
helper := resource.NewHelper(info.Client, info.Mapping) helper := resource.NewHelper(info.Client, info.Mapping).
WithFieldManager(o.FieldManager)
if o.DryRunStrategy == cmdutil.DryRunServer { if o.DryRunStrategy == cmdutil.DryRunServer {
if err := o.DryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err != nil { if err := o.DryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err != nil {
return cmdutil.AddSourceToErr("creating", info.Source, err) return cmdutil.AddSourceToErr("creating", info.Source, err)
@ -670,3 +671,34 @@ func (o *ApplyOptions) PrintAndPrunePostProcessor() func() error {
return nil return nil
} }
} }
const (
// FieldManagerClientSideApply is the default client-side apply field manager.
//
// The default field manager is not `kubectl-apply` to distinguish from
// server-side apply.
FieldManagerClientSideApply = "kubectl-client-side-apply"
// fieldManagerServerSideApply is the default server-side apply field manager.
// It is `kubectl` rather than a name like `kubectl-server-side-apply` for
// backward compatibility: older versions of kubectl server-side apply already
// used `kubectl` as the field manager, and changing it would cause conflicts.
fieldManagerServerSideApply = "kubectl"
)
// GetApplyFieldManagerFlag returns the field manager to use for kubectl apply:
// the value of the --field-manager flag when the user explicitly set it,
// otherwise a default chosen by whether server-side apply is in effect.
//
// The client-side default is deliberately not `kubectl-apply`, so that
// client-side and server-side apply can be distinguished by field manager.
func GetApplyFieldManagerFlag(cmd *cobra.Command, serverSide bool) string {
// The field manager flag was explicitly set by the user; honor it as-is.
if cmd.Flag("field-manager").Changed {
return cmdutil.GetFlagString(cmd, "field-manager")
}
if serverSide {
return fieldManagerServerSideApply
}
return FieldManagerClientSideApply
}

View File

@ -83,6 +83,7 @@ func NewCmdApplyEditLastApplied(f cmdutil.Factory, ioStreams genericclioptions.I
cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, usage) cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, usage)
cmd.Flags().BoolVar(&o.WindowsLineEndings, "windows-line-endings", o.WindowsLineEndings, cmd.Flags().BoolVar(&o.WindowsLineEndings, "windows-line-endings", o.WindowsLineEndings,
"Defaults to the line ending native to your platform.") "Defaults to the line ending native to your platform.")
cmdutil.AddFieldManagerFlagVar(cmd, &o.FieldManager, FieldManagerClientSideApply)
return cmd return cmd
} }

View File

@ -82,7 +82,7 @@ func newPatcher(o *ApplyOptions, info *resource.Info) (*Patcher, error) {
return &Patcher{ return &Patcher{
Mapping: info.Mapping, Mapping: info.Mapping,
Helper: resource.NewHelper(info.Client, info.Mapping), Helper: resource.NewHelper(info.Client, info.Mapping).WithFieldManager(o.FieldManager),
DynamicClient: o.DynamicClient, DynamicClient: o.DynamicClient,
Overwrite: o.Overwrite, Overwrite: o.Overwrite,
BackOff: clockwork.NewRealClock(), BackOff: clockwork.NewRealClock(),

View File

@ -79,6 +79,7 @@ type AutoscaleOptions struct {
dryRunVerifier *resource.DryRunVerifier dryRunVerifier *resource.DryRunVerifier
builder *resource.Builder builder *resource.Builder
generatorFunc func(string, *meta.RESTMapping) (generate.StructuredGenerator, error) generatorFunc func(string, *meta.RESTMapping) (generate.StructuredGenerator, error)
fieldManager string
HPAClient autoscalingv1client.HorizontalPodAutoscalersGetter HPAClient autoscalingv1client.HorizontalPodAutoscalersGetter
scaleKindResolver scale.ScaleKindResolver scaleKindResolver scale.ScaleKindResolver
@ -131,6 +132,7 @@ func NewCmdAutoscale(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *
cmdutil.AddDryRunFlag(cmd) cmdutil.AddDryRunFlag(cmd)
cmdutil.AddFilenameOptionFlags(cmd, o.FilenameOptions, "identifying the resource to autoscale.") cmdutil.AddFilenameOptionFlags(cmd, o.FilenameOptions, "identifying the resource to autoscale.")
cmdutil.AddApplyAnnotationFlags(cmd) cmdutil.AddApplyAnnotationFlags(cmd)
cmdutil.AddFieldManagerFlagVar(cmd, &o.fieldManager, "kubectl-autoscale")
return cmd return cmd
} }
@ -272,6 +274,9 @@ func (o *AutoscaleOptions) Run() error {
} }
createOptions := metav1.CreateOptions{} createOptions := metav1.CreateOptions{}
if o.fieldManager != "" {
createOptions.FieldManager = o.fieldManager
}
if o.dryRunStrategy == cmdutil.DryRunServer { if o.dryRunStrategy == cmdutil.DryRunServer {
if err := o.dryRunVerifier.HasSupport(hpa.GroupVersionKind()); err != nil { if err := o.dryRunVerifier.HasSupport(hpa.GroupVersionKind()); err != nil {
return err return err

View File

@ -54,6 +54,8 @@ type CreateOptions struct {
DryRunStrategy cmdutil.DryRunStrategy DryRunStrategy cmdutil.DryRunStrategy
DryRunVerifier *resource.DryRunVerifier DryRunVerifier *resource.DryRunVerifier
fieldManager string
FilenameOptions resource.FilenameOptions FilenameOptions resource.FilenameOptions
Selector string Selector string
EditBeforeCreate bool EditBeforeCreate bool
@ -130,6 +132,7 @@ func NewCmdCreate(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cob
cmdutil.AddDryRunFlag(cmd) cmdutil.AddDryRunFlag(cmd)
cmd.Flags().StringVarP(&o.Selector, "selector", "l", o.Selector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)") cmd.Flags().StringVarP(&o.Selector, "selector", "l", o.Selector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
cmd.Flags().StringVar(&o.Raw, "raw", o.Raw, "Raw URI to POST to the server. Uses the transport specified by the kubeconfig file.") cmd.Flags().StringVar(&o.Raw, "raw", o.Raw, "Raw URI to POST to the server. Uses the transport specified by the kubeconfig file.")
cmdutil.AddFieldManagerFlagVar(cmd, &o.fieldManager, "kubectl-create")
o.PrintFlags.AddFlags(cmd) o.PrintFlags.AddFlags(cmd)
@ -233,7 +236,7 @@ func (o *CreateOptions) RunCreate(f cmdutil.Factory, cmd *cobra.Command) error {
} }
if o.EditBeforeCreate { if o.EditBeforeCreate {
return RunEditOnCreate(f, o.PrintFlags, o.RecordFlags, o.IOStreams, cmd, &o.FilenameOptions) return RunEditOnCreate(f, o.PrintFlags, o.RecordFlags, o.IOStreams, cmd, &o.FilenameOptions, o.fieldManager)
} }
schema, err := f.Validator(cmdutil.GetFlagBool(cmd, "validate")) schema, err := f.Validator(cmdutil.GetFlagBool(cmd, "validate"))
if err != nil { if err != nil {
@ -281,6 +284,7 @@ func (o *CreateOptions) RunCreate(f cmdutil.Factory, cmd *cobra.Command) error {
obj, err := resource. obj, err := resource.
NewHelper(info.Client, info.Mapping). NewHelper(info.Client, info.Mapping).
DryRun(o.DryRunStrategy == cmdutil.DryRunServer). DryRun(o.DryRunStrategy == cmdutil.DryRunServer).
WithFieldManager(o.fieldManager).
Create(info.Namespace, true, info.Object) Create(info.Namespace, true, info.Object)
if err != nil { if err != nil {
return cmdutil.AddSourceToErr("creating", info.Source, err) return cmdutil.AddSourceToErr("creating", info.Source, err)
@ -302,7 +306,7 @@ func (o *CreateOptions) RunCreate(f cmdutil.Factory, cmd *cobra.Command) error {
} }
// RunEditOnCreate performs edit on creation // RunEditOnCreate performs edit on creation
func RunEditOnCreate(f cmdutil.Factory, printFlags *genericclioptions.PrintFlags, recordFlags *genericclioptions.RecordFlags, ioStreams genericclioptions.IOStreams, cmd *cobra.Command, options *resource.FilenameOptions) error { func RunEditOnCreate(f cmdutil.Factory, printFlags *genericclioptions.PrintFlags, recordFlags *genericclioptions.RecordFlags, ioStreams genericclioptions.IOStreams, cmd *cobra.Command, options *resource.FilenameOptions, fieldManager string) error {
editOptions := editor.NewEditOptions(editor.EditBeforeCreateMode, ioStreams) editOptions := editor.NewEditOptions(editor.EditBeforeCreateMode, ioStreams)
editOptions.FilenameOptions = *options editOptions.FilenameOptions = *options
editOptions.ValidateOptions = cmdutil.ValidateOptions{ editOptions.ValidateOptions = cmdutil.ValidateOptions{
@ -311,6 +315,7 @@ func RunEditOnCreate(f cmdutil.Factory, printFlags *genericclioptions.PrintFlags
editOptions.PrintFlags = printFlags editOptions.PrintFlags = printFlags
editOptions.ApplyAnnotation = cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag) editOptions.ApplyAnnotation = cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag)
editOptions.RecordFlags = recordFlags editOptions.RecordFlags = recordFlags
editOptions.FieldManager = "kubectl-create"
err := editOptions.Complete(f, []string{}, cmd) err := editOptions.Complete(f, []string{}, cmd)
if err != nil { if err != nil {
@ -343,6 +348,7 @@ type CreateSubcommandOptions struct {
DryRunStrategy cmdutil.DryRunStrategy DryRunStrategy cmdutil.DryRunStrategy
DryRunVerifier *resource.DryRunVerifier DryRunVerifier *resource.DryRunVerifier
CreateAnnotation bool CreateAnnotation bool
FieldManager string
Namespace string Namespace string
EnforceNamespace bool EnforceNamespace bool
@ -446,6 +452,9 @@ func (o *CreateSubcommandOptions) Run() error {
o.Namespace = "" o.Namespace = ""
} }
createOptions := metav1.CreateOptions{} createOptions := metav1.CreateOptions{}
if o.FieldManager != "" {
createOptions.FieldManager = o.FieldManager
}
if o.DryRunStrategy == cmdutil.DryRunServer { if o.DryRunStrategy == cmdutil.DryRunServer {
if err := o.DryRunVerifier.HasSupport(mapping.GroupVersionKind); err != nil { if err := o.DryRunVerifier.HasSupport(mapping.GroupVersionKind); err != nil {
return err return err

View File

@ -64,6 +64,7 @@ type CreateClusterRoleOptions struct {
*CreateRoleOptions *CreateRoleOptions
NonResourceURLs []string NonResourceURLs []string
AggregationRule map[string]string AggregationRule map[string]string
FieldManager string
} }
// NewCmdCreateClusterRole initializes and returns new ClusterRoles command // NewCmdCreateClusterRole initializes and returns new ClusterRoles command
@ -95,6 +96,7 @@ func NewCmdCreateClusterRole(f cmdutil.Factory, ioStreams genericclioptions.IOSt
cmd.Flags().StringSlice("resource", []string{}, "Resource that the rule applies to") cmd.Flags().StringSlice("resource", []string{}, "Resource that the rule applies to")
cmd.Flags().StringArrayVar(&c.ResourceNames, "resource-name", c.ResourceNames, "Resource in the white list that the rule applies to, repeat this flag for multiple items") cmd.Flags().StringArrayVar(&c.ResourceNames, "resource-name", c.ResourceNames, "Resource in the white list that the rule applies to, repeat this flag for multiple items")
cmd.Flags().Var(cliflag.NewMapStringString(&c.AggregationRule), "aggregation-rule", "An aggregation label selector for combining ClusterRoles.") cmd.Flags().Var(cliflag.NewMapStringString(&c.AggregationRule), "aggregation-rule", "An aggregation label selector for combining ClusterRoles.")
cmdutil.AddFieldManagerFlagVar(cmd, &c.FieldManager, "kubectl-create")
return cmd return cmd
} }
@ -202,6 +204,9 @@ func (c *CreateClusterRoleOptions) RunCreateRole() error {
// Create ClusterRole. // Create ClusterRole.
if c.DryRunStrategy != cmdutil.DryRunClient { if c.DryRunStrategy != cmdutil.DryRunClient {
createOptions := metav1.CreateOptions{} createOptions := metav1.CreateOptions{}
if c.FieldManager != "" {
createOptions.FieldManager = c.FieldManager
}
if c.DryRunStrategy == cmdutil.DryRunServer { if c.DryRunStrategy == cmdutil.DryRunServer {
if err := c.DryRunVerifier.HasSupport(clusterRole.GroupVersionKind()); err != nil { if err := c.DryRunVerifier.HasSupport(clusterRole.GroupVersionKind()); err != nil {
return err return err

View File

@ -69,6 +69,7 @@ func NewCmdCreateClusterRoleBinding(f cmdutil.Factory, ioStreams genericclioptio
cmd.Flags().StringArray("user", []string{}, "Usernames to bind to the clusterrole") cmd.Flags().StringArray("user", []string{}, "Usernames to bind to the clusterrole")
cmd.Flags().StringArray("group", []string{}, "Groups to bind to the clusterrole") cmd.Flags().StringArray("group", []string{}, "Groups to bind to the clusterrole")
cmd.Flags().StringArray("serviceaccount", []string{}, "Service accounts to bind to the clusterrole, in the format <namespace>:<name>") cmd.Flags().StringArray("serviceaccount", []string{}, "Service accounts to bind to the clusterrole, in the format <namespace>:<name>")
cmdutil.AddFieldManagerFlagVar(cmd, &o.CreateSubcommandOptions.FieldManager, "kubectl-create")
return cmd return cmd
} }

View File

@ -90,6 +90,7 @@ func NewCmdCreateConfigMap(f cmdutil.Factory, ioStreams genericclioptions.IOStre
cmd.Flags().StringArray("from-literal", []string{}, "Specify a key and literal value to insert in configmap (i.e. mykey=somevalue)") cmd.Flags().StringArray("from-literal", []string{}, "Specify a key and literal value to insert in configmap (i.e. mykey=somevalue)")
cmd.Flags().String("from-env-file", "", "Specify the path to a file to read lines of key=val pairs to create a configmap (i.e. a Docker .env file).") cmd.Flags().String("from-env-file", "", "Specify the path to a file to read lines of key=val pairs to create a configmap (i.e. a Docker .env file).")
cmd.Flags().Bool("append-hash", false, "Append a hash of the configmap to its name.") cmd.Flags().Bool("append-hash", false, "Append a hash of the configmap to its name.")
cmdutil.AddFieldManagerFlagVar(cmd, &options.CreateSubcommandOptions.FieldManager, "kubectl-create")
return cmd return cmd
} }

View File

@ -66,6 +66,7 @@ type CreateCronJobOptions struct {
DryRunStrategy cmdutil.DryRunStrategy DryRunStrategy cmdutil.DryRunStrategy
DryRunVerifier *resource.DryRunVerifier DryRunVerifier *resource.DryRunVerifier
Builder *resource.Builder Builder *resource.Builder
FieldManager string
genericclioptions.IOStreams genericclioptions.IOStreams
} }
@ -101,6 +102,7 @@ func NewCmdCreateCronJob(f cmdutil.Factory, ioStreams genericclioptions.IOStream
cmd.Flags().StringVar(&o.Image, "image", o.Image, "Image name to run.") cmd.Flags().StringVar(&o.Image, "image", o.Image, "Image name to run.")
cmd.Flags().StringVar(&o.Schedule, "schedule", o.Schedule, "A schedule in the Cron format the job should be run with.") cmd.Flags().StringVar(&o.Schedule, "schedule", o.Schedule, "A schedule in the Cron format the job should be run with.")
cmd.Flags().StringVar(&o.Restart, "restart", o.Restart, "job's restart policy. supported values: OnFailure, Never") cmd.Flags().StringVar(&o.Restart, "restart", o.Restart, "job's restart policy. supported values: OnFailure, Never")
cmdutil.AddFieldManagerFlagVar(cmd, &o.FieldManager, "kubectl-create")
return cmd return cmd
} }
@ -174,6 +176,9 @@ func (o *CreateCronJobOptions) Run() error {
if o.DryRunStrategy != cmdutil.DryRunClient { if o.DryRunStrategy != cmdutil.DryRunClient {
createOptions := metav1.CreateOptions{} createOptions := metav1.CreateOptions{}
if o.FieldManager != "" {
createOptions.FieldManager = o.FieldManager
}
if o.DryRunStrategy == cmdutil.DryRunServer { if o.DryRunStrategy == cmdutil.DryRunServer {
if err := o.DryRunVerifier.HasSupport(cronjob.GroupVersionKind()); err != nil { if err := o.DryRunVerifier.HasSupport(cronjob.GroupVersionKind()); err != nil {
return err return err

View File

@ -69,6 +69,7 @@ func NewCmdCreateDeployment(f cmdutil.Factory, ioStreams genericclioptions.IOStr
cmdutil.AddGeneratorFlags(cmd, "") cmdutil.AddGeneratorFlags(cmd, "")
cmd.Flags().StringSlice("image", []string{}, "Image name to run.") cmd.Flags().StringSlice("image", []string{}, "Image name to run.")
cmd.MarkFlagRequired("image") cmd.MarkFlagRequired("image")
cmdutil.AddFieldManagerFlagVar(cmd, &options.CreateSubcommandOptions.FieldManager, "kubectl-create")
return cmd return cmd
} }

View File

@ -67,6 +67,7 @@ type CreateJobOptions struct {
DryRunStrategy cmdutil.DryRunStrategy DryRunStrategy cmdutil.DryRunStrategy
DryRunVerifier *resource.DryRunVerifier DryRunVerifier *resource.DryRunVerifier
Builder *resource.Builder Builder *resource.Builder
FieldManager string
genericclioptions.IOStreams genericclioptions.IOStreams
} }
@ -101,7 +102,7 @@ func NewCmdCreateJob(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *
cmdutil.AddDryRunFlag(cmd) cmdutil.AddDryRunFlag(cmd)
cmd.Flags().StringVar(&o.Image, "image", o.Image, "Image name to run.") cmd.Flags().StringVar(&o.Image, "image", o.Image, "Image name to run.")
cmd.Flags().StringVar(&o.From, "from", o.From, "The name of the resource to create a Job from (only cronjob is supported).") cmd.Flags().StringVar(&o.From, "from", o.From, "The name of the resource to create a Job from (only cronjob is supported).")
cmdutil.AddFieldManagerFlagVar(cmd, &o.FieldManager, "kubectl-create")
return cmd return cmd
} }
@ -201,6 +202,9 @@ func (o *CreateJobOptions) Run() error {
} }
if o.DryRunStrategy != cmdutil.DryRunClient { if o.DryRunStrategy != cmdutil.DryRunClient {
createOptions := metav1.CreateOptions{} createOptions := metav1.CreateOptions{}
if o.FieldManager != "" {
createOptions.FieldManager = o.FieldManager
}
if o.DryRunStrategy == cmdutil.DryRunServer { if o.DryRunStrategy == cmdutil.DryRunServer {
if err := o.DryRunVerifier.HasSupport(job.GroupVersionKind()); err != nil { if err := o.DryRunVerifier.HasSupport(job.GroupVersionKind()); err != nil {
return err return err

View File

@ -65,6 +65,7 @@ func NewCmdCreateNamespace(f cmdutil.Factory, ioStreams genericclioptions.IOStre
cmdutil.AddApplyAnnotationFlags(cmd) cmdutil.AddApplyAnnotationFlags(cmd)
cmdutil.AddValidateFlags(cmd) cmdutil.AddValidateFlags(cmd)
cmdutil.AddGeneratorFlags(cmd, generateversioned.NamespaceV1GeneratorName) cmdutil.AddGeneratorFlags(cmd, generateversioned.NamespaceV1GeneratorName)
cmdutil.AddFieldManagerFlagVar(cmd, &options.CreateSubcommandOptions.FieldManager, "kubectl-create")
return cmd return cmd
} }

View File

@ -74,6 +74,7 @@ func NewCmdCreatePodDisruptionBudget(f cmdutil.Factory, ioStreams genericcliopti
cmd.Flags().String("min-available", "", i18n.T("The minimum number or percentage of available pods this budget requires.")) cmd.Flags().String("min-available", "", i18n.T("The minimum number or percentage of available pods this budget requires."))
cmd.Flags().String("max-unavailable", "", i18n.T("The maximum number or percentage of unavailable pods this budget requires.")) cmd.Flags().String("max-unavailable", "", i18n.T("The maximum number or percentage of unavailable pods this budget requires."))
cmd.Flags().String("selector", "", i18n.T("A label selector to use for this budget. Only equality-based selector requirements are supported.")) cmd.Flags().String("selector", "", i18n.T("A label selector to use for this budget. Only equality-based selector requirements are supported."))
cmdutil.AddFieldManagerFlagVar(cmd, &options.CreateSubcommandOptions.FieldManager, "kubectl-create")
return cmd return cmd
} }

View File

@ -77,6 +77,7 @@ func NewCmdCreatePriorityClass(f cmdutil.Factory, ioStreams genericclioptions.IO
cmd.Flags().Bool("global-default", false, i18n.T("global-default specifies whether this PriorityClass should be considered as the default priority.")) cmd.Flags().Bool("global-default", false, i18n.T("global-default specifies whether this PriorityClass should be considered as the default priority."))
cmd.Flags().String("description", "", i18n.T("description is an arbitrary string that usually provides guidelines on when this priority class should be used.")) cmd.Flags().String("description", "", i18n.T("description is an arbitrary string that usually provides guidelines on when this priority class should be used."))
cmd.Flags().String("preemption-policy", "", i18n.T("preemption-policy is the policy for preempting pods with lower priority.")) cmd.Flags().String("preemption-policy", "", i18n.T("preemption-policy is the policy for preempting pods with lower priority."))
cmdutil.AddFieldManagerFlagVar(cmd, &options.CreateSubcommandOptions.FieldManager, "kubectl-create")
return cmd return cmd
} }

View File

@ -70,6 +70,7 @@ func NewCmdCreateQuota(f cmdutil.Factory, ioStreams genericclioptions.IOStreams)
cmdutil.AddGeneratorFlags(cmd, generateversioned.ResourceQuotaV1GeneratorName) cmdutil.AddGeneratorFlags(cmd, generateversioned.ResourceQuotaV1GeneratorName)
cmd.Flags().String("hard", "", i18n.T("A comma-delimited set of resource=quantity pairs that define a hard limit.")) cmd.Flags().String("hard", "", i18n.T("A comma-delimited set of resource=quantity pairs that define a hard limit."))
cmd.Flags().String("scopes", "", i18n.T("A comma-delimited set of quota scopes that must all match each object tracked by the quota.")) cmd.Flags().String("scopes", "", i18n.T("A comma-delimited set of quota scopes that must all match each object tracked by the quota."))
cmdutil.AddFieldManagerFlagVar(cmd, &options.CreateSubcommandOptions.FieldManager, "kubectl-create")
return cmd return cmd
} }

View File

@ -134,6 +134,7 @@ type CreateRoleOptions struct {
Client clientgorbacv1.RbacV1Interface Client clientgorbacv1.RbacV1Interface
Mapper meta.RESTMapper Mapper meta.RESTMapper
PrintObj func(obj runtime.Object) error PrintObj func(obj runtime.Object) error
FieldManager string
genericclioptions.IOStreams genericclioptions.IOStreams
} }
@ -172,7 +173,7 @@ func NewCmdCreateRole(f cmdutil.Factory, ioStreams genericclioptions.IOStreams)
cmd.Flags().StringSliceVar(&o.Verbs, "verb", o.Verbs, "Verb that applies to the resources contained in the rule") cmd.Flags().StringSliceVar(&o.Verbs, "verb", o.Verbs, "Verb that applies to the resources contained in the rule")
cmd.Flags().StringSlice("resource", []string{}, "Resource that the rule applies to") cmd.Flags().StringSlice("resource", []string{}, "Resource that the rule applies to")
cmd.Flags().StringArrayVar(&o.ResourceNames, "resource-name", o.ResourceNames, "Resource in the white list that the rule applies to, repeat this flag for multiple items") cmd.Flags().StringArrayVar(&o.ResourceNames, "resource-name", o.ResourceNames, "Resource in the white list that the rule applies to, repeat this flag for multiple items")
cmdutil.AddFieldManagerFlagVar(cmd, &o.FieldManager, "kubectl-create")
return cmd return cmd
} }
@ -355,6 +356,9 @@ func (o *CreateRoleOptions) RunCreateRole() error {
// Create role. // Create role.
if o.DryRunStrategy != cmdutil.DryRunClient { if o.DryRunStrategy != cmdutil.DryRunClient {
createOptions := metav1.CreateOptions{} createOptions := metav1.CreateOptions{}
if o.FieldManager != "" {
createOptions.FieldManager = o.FieldManager
}
if o.DryRunStrategy == cmdutil.DryRunServer { if o.DryRunStrategy == cmdutil.DryRunServer {
if err := o.DryRunVerifier.HasSupport(role.GroupVersionKind()); err != nil { if err := o.DryRunVerifier.HasSupport(role.GroupVersionKind()); err != nil {
return err return err

View File

@ -69,6 +69,7 @@ func NewCmdCreateRoleBinding(f cmdutil.Factory, ioStreams genericclioptions.IOSt
cmd.Flags().StringArray("user", []string{}, "Usernames to bind to the role") cmd.Flags().StringArray("user", []string{}, "Usernames to bind to the role")
cmd.Flags().StringArray("group", []string{}, "Groups to bind to the role") cmd.Flags().StringArray("group", []string{}, "Groups to bind to the role")
cmd.Flags().StringArray("serviceaccount", []string{}, "Service accounts to bind to the role, in the format <namespace>:<name>") cmd.Flags().StringArray("serviceaccount", []string{}, "Service accounts to bind to the role, in the format <namespace>:<name>")
cmdutil.AddFieldManagerFlagVar(cmd, &o.CreateSubcommandOptions.FieldManager, "kubectl-create")
return cmd return cmd
} }

View File

@ -106,6 +106,7 @@ func NewCmdCreateSecretGeneric(f cmdutil.Factory, ioStreams genericclioptions.IO
cmd.Flags().String("from-env-file", "", "Specify the path to a file to read lines of key=val pairs to create a secret (i.e. a Docker .env file).") cmd.Flags().String("from-env-file", "", "Specify the path to a file to read lines of key=val pairs to create a secret (i.e. a Docker .env file).")
cmd.Flags().String("type", "", i18n.T("The type of secret to create")) cmd.Flags().String("type", "", i18n.T("The type of secret to create"))
cmd.Flags().Bool("append-hash", false, "Append a hash of the secret to its name.") cmd.Flags().Bool("append-hash", false, "Append a hash of the secret to its name.")
cmdutil.AddFieldManagerFlagVar(cmd, &options.CreateSubcommandOptions.FieldManager, "kubectl-create")
return cmd return cmd
} }
@ -196,6 +197,7 @@ func NewCmdCreateSecretDockerRegistry(f cmdutil.Factory, ioStreams genericcliopt
cmd.Flags().String("docker-server", "https://index.docker.io/v1/", i18n.T("Server location for Docker registry")) cmd.Flags().String("docker-server", "https://index.docker.io/v1/", i18n.T("Server location for Docker registry"))
cmd.Flags().Bool("append-hash", false, "Append a hash of the secret to its name.") cmd.Flags().Bool("append-hash", false, "Append a hash of the secret to its name.")
cmd.Flags().StringSlice("from-file", []string{}, "Key files can be specified using their file path, in which case a default name will be given to them, or optionally with a name and file path, in which case the given name will be used. Specifying a directory will iterate each named file in the directory that is a valid secret key.") cmd.Flags().StringSlice("from-file", []string{}, "Key files can be specified using their file path, in which case a default name will be given to them, or optionally with a name and file path, in which case the given name will be used. Specifying a directory will iterate each named file in the directory that is a valid secret key.")
cmdutil.AddFieldManagerFlagVar(cmd, &options.CreateSubcommandOptions.FieldManager, "kubectl-create")
return cmd return cmd
} }
@ -284,6 +286,7 @@ func NewCmdCreateSecretTLS(f cmdutil.Factory, ioStreams genericclioptions.IOStre
cmd.Flags().String("cert", "", i18n.T("Path to PEM encoded public key certificate.")) cmd.Flags().String("cert", "", i18n.T("Path to PEM encoded public key certificate."))
cmd.Flags().String("key", "", i18n.T("Path to private key associated with given certificate.")) cmd.Flags().String("key", "", i18n.T("Path to private key associated with given certificate."))
cmd.Flags().Bool("append-hash", false, "Append a hash of the secret to its name.") cmd.Flags().Bool("append-hash", false, "Append a hash of the secret to its name.")
cmdutil.AddFieldManagerFlagVar(cmd, &options.CreateSubcommandOptions.FieldManager, "kubectl-create")
return cmd return cmd
} }

View File

@ -19,7 +19,7 @@ package create
import ( import (
"github.com/spf13/cobra" "github.com/spf13/cobra"
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericclioptions"
cmdutil "k8s.io/kubectl/pkg/cmd/util" cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/generate" "k8s.io/kubectl/pkg/generate"
@ -91,6 +91,7 @@ func NewCmdCreateServiceClusterIP(f cmdutil.Factory, ioStreams genericclioptions
cmdutil.AddGeneratorFlags(cmd, generateversioned.ServiceClusterIPGeneratorV1Name) cmdutil.AddGeneratorFlags(cmd, generateversioned.ServiceClusterIPGeneratorV1Name)
addPortFlags(cmd) addPortFlags(cmd)
cmd.Flags().String("clusterip", "", i18n.T("Assign your own ClusterIP or set to 'None' for a 'headless' service (no loadbalancing).")) cmd.Flags().String("clusterip", "", i18n.T("Assign your own ClusterIP or set to 'None' for a 'headless' service (no loadbalancing)."))
cmdutil.AddFieldManagerFlagVar(cmd, &options.CreateSubcommandOptions.FieldManager, "kubectl-create")
return cmd return cmd
} }
@ -164,6 +165,7 @@ func NewCmdCreateServiceNodePort(f cmdutil.Factory, ioStreams genericclioptions.
cmdutil.AddValidateFlags(cmd) cmdutil.AddValidateFlags(cmd)
cmdutil.AddGeneratorFlags(cmd, generateversioned.ServiceNodePortGeneratorV1Name) cmdutil.AddGeneratorFlags(cmd, generateversioned.ServiceNodePortGeneratorV1Name)
cmd.Flags().Int("node-port", 0, "Port used to expose the service on each node in a cluster.") cmd.Flags().Int("node-port", 0, "Port used to expose the service on each node in a cluster.")
cmdutil.AddFieldManagerFlagVar(cmd, &options.CreateSubcommandOptions.FieldManager, "kubectl-create")
addPortFlags(cmd) addPortFlags(cmd)
return cmd return cmd
} }
@ -234,6 +236,7 @@ func NewCmdCreateServiceLoadBalancer(f cmdutil.Factory, ioStreams genericcliopti
cmdutil.AddApplyAnnotationFlags(cmd) cmdutil.AddApplyAnnotationFlags(cmd)
cmdutil.AddValidateFlags(cmd) cmdutil.AddValidateFlags(cmd)
cmdutil.AddGeneratorFlags(cmd, generateversioned.ServiceLoadBalancerGeneratorV1Name) cmdutil.AddGeneratorFlags(cmd, generateversioned.ServiceLoadBalancerGeneratorV1Name)
cmdutil.AddFieldManagerFlagVar(cmd, &options.CreateSubcommandOptions.FieldManager, "kubectl-create")
addPortFlags(cmd) addPortFlags(cmd)
return cmd return cmd
} }
@ -310,6 +313,7 @@ func NewCmdCreateServiceExternalName(f cmdutil.Factory, ioStreams genericcliopti
addPortFlags(cmd) addPortFlags(cmd)
cmd.Flags().String("external-name", "", i18n.T("External name of service")) cmd.Flags().String("external-name", "", i18n.T("External name of service"))
cmd.MarkFlagRequired("external-name") cmd.MarkFlagRequired("external-name")
cmdutil.AddFieldManagerFlagVar(cmd, &options.CreateSubcommandOptions.FieldManager, "kubectl-create")
return cmd return cmd
} }

View File

@ -65,6 +65,7 @@ func NewCmdCreateServiceAccount(f cmdutil.Factory, ioStreams genericclioptions.I
cmdutil.AddApplyAnnotationFlags(cmd) cmdutil.AddApplyAnnotationFlags(cmd)
cmdutil.AddValidateFlags(cmd) cmdutil.AddValidateFlags(cmd)
cmdutil.AddGeneratorFlags(cmd, generateversioned.ServiceAccountV1GeneratorName) cmdutil.AddGeneratorFlags(cmd, generateversioned.ServiceAccountV1GeneratorName)
cmdutil.AddFieldManagerFlagVar(cmd, &options.CreateSubcommandOptions.FieldManager, "kubectl-create")
return cmd return cmd
} }

View File

@ -150,6 +150,7 @@ func NewCmdDiff(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.C
usage := "contains the configuration to diff" usage := "contains the configuration to diff"
cmdutil.AddFilenameOptionFlags(cmd, &options.FilenameOptions, usage) cmdutil.AddFilenameOptionFlags(cmd, &options.FilenameOptions, usage)
cmdutil.AddServerSideApplyFlags(cmd) cmdutil.AddServerSideApplyFlags(cmd)
cmdutil.AddFieldManagerFlagVar(cmd, &options.FieldManager, apply.FieldManagerClientSideApply)
return cmd return cmd
} }
@ -312,7 +313,9 @@ func (obj InfoObject) Live() runtime.Object {
// Returns the "merged" object, as it would look like if applied or // Returns the "merged" object, as it would look like if applied or
// created. // created.
func (obj InfoObject) Merged() (runtime.Object, error) { func (obj InfoObject) Merged() (runtime.Object, error) {
helper := resource.NewHelper(obj.Info.Client, obj.Info.Mapping).DryRun(true) helper := resource.NewHelper(obj.Info.Client, obj.Info.Mapping).
DryRun(true).
WithFieldManager(obj.FieldManager)
if obj.ServerSideApply { if obj.ServerSideApply {
data, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj.LocalObj) data, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj.LocalObj)
if err != nil { if err != nil {
@ -444,7 +447,7 @@ func (o *DiffOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error {
} }
o.ServerSideApply = cmdutil.GetServerSideApplyFlag(cmd) o.ServerSideApply = cmdutil.GetServerSideApplyFlag(cmd)
o.FieldManager = cmdutil.GetFieldManagerFlag(cmd) o.FieldManager = apply.GetApplyFieldManagerFlag(cmd, o.ServerSideApply)
o.ForceConflicts = cmdutil.GetForceConflictsFlag(cmd) o.ForceConflicts = cmdutil.GetForceConflictsFlag(cmd)
if o.ForceConflicts && !o.ServerSideApply { if o.ForceConflicts && !o.ServerSideApply {
return fmt.Errorf("--force-conflicts only works with --server-side") return fmt.Errorf("--force-conflicts only works with --server-side")

View File

@ -96,7 +96,7 @@ func NewCmdEdit(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra
cmd.Flags().BoolVarP(&o.OutputPatch, "output-patch", "", o.OutputPatch, "Output the patch if the resource is edited.") cmd.Flags().BoolVarP(&o.OutputPatch, "output-patch", "", o.OutputPatch, "Output the patch if the resource is edited.")
cmd.Flags().BoolVar(&o.WindowsLineEndings, "windows-line-endings", o.WindowsLineEndings, cmd.Flags().BoolVar(&o.WindowsLineEndings, "windows-line-endings", o.WindowsLineEndings,
"Defaults to the line ending native to your platform.") "Defaults to the line ending native to your platform.")
cmdutil.AddFieldManagerFlagVar(cmd, &o.FieldManager, "kubectl-edit")
cmdutil.AddApplyAnnotationVarFlags(cmd, &o.ApplyAnnotation) cmdutil.AddApplyAnnotationVarFlags(cmd, &o.ApplyAnnotation)
return cmd return cmd
} }

View File

@ -91,6 +91,8 @@ type ExposeServiceOptions struct {
DryRunVerifier *resource.DryRunVerifier DryRunVerifier *resource.DryRunVerifier
EnforceNamespace bool EnforceNamespace bool
fieldManager string
Generators func(string) map[string]generate.Generator Generators func(string) map[string]generate.Generator
CanBeExposed polymorphichelpers.CanBeExposedFunc CanBeExposed polymorphichelpers.CanBeExposedFunc
MapBasedSelectorForObject func(runtime.Object) (string, error) MapBasedSelectorForObject func(runtime.Object) (string, error)
@ -157,6 +159,7 @@ func NewCmdExposeService(f cmdutil.Factory, streams genericclioptions.IOStreams)
cmd.Flags().String("name", "", i18n.T("The name for the newly created object.")) cmd.Flags().String("name", "", i18n.T("The name for the newly created object."))
cmd.Flags().String("session-affinity", "", i18n.T("If non-empty, set the session affinity for the service to this; legal values: 'None', 'ClientIP'")) cmd.Flags().String("session-affinity", "", i18n.T("If non-empty, set the session affinity for the service to this; legal values: 'None', 'ClientIP'"))
cmd.Flags().String("cluster-ip", "", i18n.T("ClusterIP to be assigned to the service. Leave empty to auto-allocate, or set to 'None' to create a headless service.")) cmd.Flags().String("cluster-ip", "", i18n.T("ClusterIP to be assigned to the service. Leave empty to auto-allocate, or set to 'None' to create a headless service."))
cmdutil.AddFieldManagerFlagVar(cmd, &o.fieldManager, "kubectl-expose")
usage := "identifying the resource to expose a service" usage := "identifying the resource to expose a service"
cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, usage) cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, usage)
@ -363,6 +366,7 @@ func (o *ExposeServiceOptions) RunExpose(cmd *cobra.Command, args []string) erro
actualObject, err := resource. actualObject, err := resource.
NewHelper(client, objMapping). NewHelper(client, objMapping).
DryRun(o.DryRunStrategy == cmdutil.DryRunServer). DryRun(o.DryRunStrategy == cmdutil.DryRunServer).
WithFieldManager(o.fieldManager).
Create(o.Namespace, false, asUnstructured) Create(o.Namespace, false, asUnstructured)
if err != nil { if err != nil {
return err return err

View File

@ -62,6 +62,7 @@ type LabelOptions struct {
selector string selector string
fieldSelector string fieldSelector string
outputFormat string outputFormat string
fieldManager string
// results of arg parsing // results of arg parsing
resources []string resources []string
@ -150,6 +151,7 @@ func NewCmdLabel(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobr
usage := "identifying the resource to update the labels" usage := "identifying the resource to update the labels"
cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, usage) cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, usage)
cmdutil.AddDryRunFlag(cmd) cmdutil.AddDryRunFlag(cmd)
cmdutil.AddFieldManagerFlagVar(cmd, &o.fieldManager, "kubectl-label")
return cmd return cmd
} }
@ -334,7 +336,8 @@ func (o *LabelOptions) RunLabel() error {
return err return err
} }
helper := resource.NewHelper(client, mapping). helper := resource.NewHelper(client, mapping).
DryRun(o.dryRunStrategy == cmdutil.DryRunServer) DryRun(o.dryRunStrategy == cmdutil.DryRunServer).
WithFieldManager(o.fieldManager)
if createdPatch { if createdPatch {
outputObj, err = helper.Patch(namespace, name, types.MergePatchType, patchBytes, nil) outputObj, err = helper.Patch(namespace, name, types.MergePatchType, patchBytes, nil)

View File

@ -66,6 +66,7 @@ type PatchOptions struct {
args []string args []string
builder *resource.Builder builder *resource.Builder
unstructuredClientForMapping func(mapping *meta.RESTMapping) (resource.RESTClient, error) unstructuredClientForMapping func(mapping *meta.RESTMapping) (resource.RESTClient, error)
fieldManager string
genericclioptions.IOStreams genericclioptions.IOStreams
} }
@ -127,6 +128,7 @@ func NewCmdPatch(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobr
cmdutil.AddDryRunFlag(cmd) cmdutil.AddDryRunFlag(cmd)
cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, "identifying the resource to update") cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, "identifying the resource to update")
cmd.Flags().BoolVar(&o.Local, "local", o.Local, "If true, patch will operate on the content of the file, not the server-side resource.") cmd.Flags().BoolVar(&o.Local, "local", o.Local, "If true, patch will operate on the content of the file, not the server-side resource.")
cmdutil.AddFieldManagerFlagVar(cmd, &o.fieldManager, "kubectl-patch")
return cmd return cmd
} }
@ -238,7 +240,8 @@ func (o *PatchOptions) RunPatch() error {
helper := resource. helper := resource.
NewHelper(client, mapping). NewHelper(client, mapping).
DryRun(o.dryRunStrategy == cmdutil.DryRunServer) DryRun(o.dryRunStrategy == cmdutil.DryRunServer).
WithFieldManager(o.fieldManager)
patchedObj, err := helper.Patch(namespace, name, patchType, patchBytes, nil) patchedObj, err := helper.Patch(namespace, name, patchType, patchBytes, nil)
if err != nil { if err != nil {
return err return err

View File

@ -93,6 +93,8 @@ type ReplaceOptions struct {
Recorder genericclioptions.Recorder Recorder genericclioptions.Recorder
genericclioptions.IOStreams genericclioptions.IOStreams
fieldManager string
} }
func NewReplaceOptions(streams genericclioptions.IOStreams) *ReplaceOptions { func NewReplaceOptions(streams genericclioptions.IOStreams) *ReplaceOptions {
@ -129,6 +131,7 @@ func NewCmdReplace(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobr
cmdutil.AddDryRunFlag(cmd) cmdutil.AddDryRunFlag(cmd)
cmd.Flags().StringVar(&o.Raw, "raw", o.Raw, "Raw URI to PUT to the server. Uses the transport specified by the kubeconfig file.") cmd.Flags().StringVar(&o.Raw, "raw", o.Raw, "Raw URI to PUT to the server. Uses the transport specified by the kubeconfig file.")
cmdutil.AddFieldManagerFlagVar(cmd, &o.fieldManager, "kubectl-replace")
return cmd return cmd
} }
@ -292,6 +295,7 @@ func (o *ReplaceOptions) Run(f cmdutil.Factory) error {
obj, err := resource. obj, err := resource.
NewHelper(info.Client, info.Mapping). NewHelper(info.Client, info.Mapping).
DryRun(o.DryRunStrategy == cmdutil.DryRunServer). DryRun(o.DryRunStrategy == cmdutil.DryRunServer).
WithFieldManager(o.fieldManager).
Replace(info.Namespace, info.Name, true, info.Object) Replace(info.Namespace, info.Name, true, info.Object)
if err != nil { if err != nil {
return cmdutil.AddSourceToErr("replacing", info.Source, err) return cmdutil.AddSourceToErr("replacing", info.Source, err)
@ -382,7 +386,9 @@ func (o *ReplaceOptions) forceReplace() error {
klog.V(4).Infof("error recording current command: %v", err) klog.V(4).Infof("error recording current command: %v", err)
} }
obj, err := resource.NewHelper(info.Client, info.Mapping).Create(info.Namespace, true, info.Object) obj, err := resource.NewHelper(info.Client, info.Mapping).
WithFieldManager(o.fieldManager).
Create(info.Namespace, true, info.Object)
if err != nil { if err != nil {
return err return err
} }

View File

@ -48,6 +48,8 @@ type PauseOptions struct {
resource.FilenameOptions resource.FilenameOptions
genericclioptions.IOStreams genericclioptions.IOStreams
fieldManager string
} }
var ( var (
@ -92,6 +94,7 @@ func NewCmdRolloutPause(f cmdutil.Factory, streams genericclioptions.IOStreams)
usage := "identifying the resource to get from a server." usage := "identifying the resource to get from a server."
cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, usage) cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, usage)
cmdutil.AddFieldManagerFlagVar(cmd, &o.fieldManager, "kubectl-rollout")
return cmd return cmd
} }
@ -173,7 +176,9 @@ func (o *PauseOptions) RunPause() error {
continue continue
} }
obj, err := resource.NewHelper(info.Client, info.Mapping).Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil) obj, err := resource.NewHelper(info.Client, info.Mapping).
WithFieldManager(o.fieldManager).
Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil)
if err != nil { if err != nil {
allErrs = append(allErrs, fmt.Errorf("failed to patch: %v", err)) allErrs = append(allErrs, fmt.Errorf("failed to patch: %v", err))
continue continue

View File

@ -48,6 +48,8 @@ type RestartOptions struct {
resource.FilenameOptions resource.FilenameOptions
genericclioptions.IOStreams genericclioptions.IOStreams
fieldManager string
} }
var ( var (
@ -94,6 +96,7 @@ func NewCmdRolloutRestart(f cmdutil.Factory, streams genericclioptions.IOStreams
usage := "identifying the resource to get from a server." usage := "identifying the resource to get from a server."
cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, usage) cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, usage)
cmdutil.AddFieldManagerFlagVar(cmd, &o.fieldManager, "kubectl-rollout")
o.PrintFlags.AddFlags(cmd) o.PrintFlags.AddFlags(cmd)
return cmd return cmd
} }
@ -169,7 +172,9 @@ func (o RestartOptions) RunRestart() error {
allErrs = append(allErrs, fmt.Errorf("failed to create patch for %v: empty patch", info.Name)) allErrs = append(allErrs, fmt.Errorf("failed to create patch for %v: empty patch", info.Name))
} }
obj, err := resource.NewHelper(info.Client, info.Mapping).Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil) obj, err := resource.NewHelper(info.Client, info.Mapping).
WithFieldManager(o.fieldManager).
Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil)
if err != nil { if err != nil {
allErrs = append(allErrs, fmt.Errorf("failed to patch: %v", err)) allErrs = append(allErrs, fmt.Errorf("failed to patch: %v", err))
continue continue

View File

@ -49,6 +49,8 @@ type ResumeOptions struct {
resource.FilenameOptions resource.FilenameOptions
genericclioptions.IOStreams genericclioptions.IOStreams
fieldManager string
} }
var ( var (
@ -94,6 +96,7 @@ func NewCmdRolloutResume(f cmdutil.Factory, streams genericclioptions.IOStreams)
usage := "identifying the resource to get from a server." usage := "identifying the resource to get from a server."
cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, usage) cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, usage)
cmdutil.AddFieldManagerFlagVar(cmd, &o.fieldManager, "kubectl-rollout")
o.PrintFlags.AddFlags(cmd) o.PrintFlags.AddFlags(cmd)
return cmd return cmd
} }
@ -177,7 +180,9 @@ func (o ResumeOptions) RunResume() error {
continue continue
} }
obj, err := resource.NewHelper(info.Client, info.Mapping).Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil) obj, err := resource.NewHelper(info.Client, info.Mapping).
WithFieldManager(o.fieldManager).
Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil)
if err != nil { if err != nil {
allErrs = append(allErrs, fmt.Errorf("failed to patch: %v", err)) allErrs = append(allErrs, fmt.Errorf("failed to patch: %v", err))
continue continue

View File

@ -123,6 +123,7 @@ type RunOptions struct {
Quiet bool Quiet bool
Schedule string Schedule string
TTY bool TTY bool
fieldManager string
genericclioptions.IOStreams genericclioptions.IOStreams
} }
@ -198,6 +199,7 @@ func addRunFlags(cmd *cobra.Command, opt *RunOptions) {
cmd.Flags().BoolVar(&opt.Quiet, "quiet", opt.Quiet, "If true, suppress prompt messages.") cmd.Flags().BoolVar(&opt.Quiet, "quiet", opt.Quiet, "If true, suppress prompt messages.")
cmd.Flags().StringVar(&opt.Schedule, "schedule", opt.Schedule, i18n.T("A schedule in the Cron format the job should be run with.")) cmd.Flags().StringVar(&opt.Schedule, "schedule", opt.Schedule, i18n.T("A schedule in the Cron format the job should be run with."))
cmd.Flags().MarkDeprecated("schedule", "has no effect and will be removed in the future.") cmd.Flags().MarkDeprecated("schedule", "has no effect and will be removed in the future.")
cmdutil.AddFieldManagerFlagVar(cmd, &opt.fieldManager, "kubectl-run")
} }
func (o *RunOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error { func (o *RunOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error {
@ -662,6 +664,7 @@ func (o *RunOptions) createGeneratedObject(f cmdutil.Factory, cmd *cobra.Command
actualObj, err = resource. actualObj, err = resource.
NewHelper(client, mapping). NewHelper(client, mapping).
DryRun(o.DryRunStrategy == cmdutil.DryRunServer). DryRun(o.DryRunStrategy == cmdutil.DryRunServer).
WithFieldManager(o.fieldManager).
Create(namespace, false, obj) Create(namespace, false, obj)
if err != nil { if err != nil {
return nil, err return nil, err

View File

@ -25,7 +25,7 @@ import (
"github.com/spf13/cobra" "github.com/spf13/cobra"
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/api/meta" meta "k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
@ -113,6 +113,7 @@ type EnvOptions struct {
From string From string
Prefix string Prefix string
Keys []string Keys []string
fieldManager string
PrintObj printers.ResourcePrinterFunc PrintObj printers.ResourcePrinterFunc
@ -171,6 +172,7 @@ func NewCmdEnv(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Co
cmd.Flags().BoolVar(&o.Local, "local", o.Local, "If true, set env will NOT contact api-server but run locally.") cmd.Flags().BoolVar(&o.Local, "local", o.Local, "If true, set env will NOT contact api-server but run locally.")
cmd.Flags().BoolVar(&o.All, "all", o.All, "If true, select all resources in the namespace of the specified resource types") cmd.Flags().BoolVar(&o.All, "all", o.All, "If true, select all resources in the namespace of the specified resource types")
cmd.Flags().BoolVar(&o.Overwrite, "overwrite", o.Overwrite, "If true, allow environment to be overwritten, otherwise reject updates that overwrite existing environment.") cmd.Flags().BoolVar(&o.Overwrite, "overwrite", o.Overwrite, "If true, allow environment to be overwritten, otherwise reject updates that overwrite existing environment.")
cmdutil.AddFieldManagerFlagVar(cmd, &o.fieldManager, "kubectl-set")
o.PrintFlags.AddFlags(cmd) o.PrintFlags.AddFlags(cmd)
@ -512,6 +514,7 @@ func (o *EnvOptions) RunEnv() error {
actual, err := resource. actual, err := resource.
NewHelper(info.Client, info.Mapping). NewHelper(info.Client, info.Mapping).
DryRun(o.dryRunStrategy == cmdutil.DryRunServer). DryRun(o.dryRunStrategy == cmdutil.DryRunServer).
WithFieldManager(o.fieldManager).
Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil) Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil)
if err != nil { if err != nil {
allErrs = append(allErrs, fmt.Errorf("failed to patch env update to pod template: %v", err)) allErrs = append(allErrs, fmt.Errorf("failed to patch env update to pod template: %v", err))

View File

@ -52,6 +52,7 @@ type SetImageOptions struct {
Output string Output string
Local bool Local bool
ResolveImage ImageResolver ResolveImage ImageResolver
fieldManager string
PrintObj printers.ResourcePrinterFunc PrintObj printers.ResourcePrinterFunc
Recorder genericclioptions.Recorder Recorder genericclioptions.Recorder
@ -125,6 +126,7 @@ func NewCmdImage(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.
cmd.Flags().StringVarP(&o.Selector, "selector", "l", o.Selector, "Selector (label query) to filter on, not including uninitialized ones, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)") cmd.Flags().StringVarP(&o.Selector, "selector", "l", o.Selector, "Selector (label query) to filter on, not including uninitialized ones, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
cmd.Flags().BoolVar(&o.Local, "local", o.Local, "If true, set image will NOT contact api-server but run locally.") cmd.Flags().BoolVar(&o.Local, "local", o.Local, "If true, set image will NOT contact api-server but run locally.")
cmdutil.AddDryRunFlag(cmd) cmdutil.AddDryRunFlag(cmd)
cmdutil.AddFieldManagerFlagVar(cmd, &o.fieldManager, "kubectl-set")
return cmd return cmd
} }
@ -286,6 +288,7 @@ func (o *SetImageOptions) Run() error {
actual, err := resource. actual, err := resource.
NewHelper(info.Client, info.Mapping). NewHelper(info.Client, info.Mapping).
DryRun(o.DryRunStrategy == cmdutil.DryRunServer). DryRun(o.DryRunStrategy == cmdutil.DryRunServer).
WithFieldManager(o.fieldManager).
Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil) Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil)
if err != nil { if err != nil {
allErrs = append(allErrs, fmt.Errorf("failed to patch image update to pod template: %v", err)) allErrs = append(allErrs, fmt.Errorf("failed to patch image update to pod template: %v", err))

View File

@ -73,6 +73,7 @@ type SetResourcesOptions struct {
Output string Output string
All bool All bool
Local bool Local bool
fieldManager string
DryRunStrategy cmdutil.DryRunStrategy DryRunStrategy cmdutil.DryRunStrategy
@ -136,6 +137,7 @@ func NewCmdResources(f cmdutil.Factory, streams genericclioptions.IOStreams) *co
cmdutil.AddDryRunFlag(cmd) cmdutil.AddDryRunFlag(cmd)
cmd.Flags().StringVar(&o.Limits, "limits", o.Limits, "The resource requirement requests for this container. For example, 'cpu=100m,memory=256Mi'. Note that server side components may assign requests depending on the server configuration, such as limit ranges.") cmd.Flags().StringVar(&o.Limits, "limits", o.Limits, "The resource requirement requests for this container. For example, 'cpu=100m,memory=256Mi'. Note that server side components may assign requests depending on the server configuration, such as limit ranges.")
cmd.Flags().StringVar(&o.Requests, "requests", o.Requests, "The resource requirement requests for this container. For example, 'cpu=100m,memory=256Mi'. Note that server side components may assign requests depending on the server configuration, such as limit ranges.") cmd.Flags().StringVar(&o.Requests, "requests", o.Requests, "The resource requirement requests for this container. For example, 'cpu=100m,memory=256Mi'. Note that server side components may assign requests depending on the server configuration, such as limit ranges.")
cmdutil.AddFieldManagerFlagVar(cmd, &o.fieldManager, "kubectl-set")
return cmd return cmd
} }
@ -301,6 +303,7 @@ func (o *SetResourcesOptions) Run() error {
actual, err := resource. actual, err := resource.
NewHelper(info.Client, info.Mapping). NewHelper(info.Client, info.Mapping).
DryRun(o.DryRunStrategy == cmdutil.DryRunServer). DryRun(o.DryRunStrategy == cmdutil.DryRunServer).
WithFieldManager(o.fieldManager).
Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil) Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil)
if err != nil { if err != nil {
allErrs = append(allErrs, fmt.Errorf("failed to patch resources update to pod template %v", err)) allErrs = append(allErrs, fmt.Errorf("failed to patch resources update to pod template %v", err))

View File

@ -46,6 +46,7 @@ type SetSelectorOptions struct {
RecordFlags *genericclioptions.RecordFlags RecordFlags *genericclioptions.RecordFlags
dryRunStrategy cmdutil.DryRunStrategy dryRunStrategy cmdutil.DryRunStrategy
dryRunVerifier *resource.DryRunVerifier dryRunVerifier *resource.DryRunVerifier
fieldManager string
// set by args // set by args
resources []string resources []string
@ -113,6 +114,7 @@ func NewCmdSelector(f cmdutil.Factory, streams genericclioptions.IOStreams) *cob
o.ResourceBuilderFlags.AddFlags(cmd.Flags()) o.ResourceBuilderFlags.AddFlags(cmd.Flags())
o.PrintFlags.AddFlags(cmd) o.PrintFlags.AddFlags(cmd)
o.RecordFlags.AddFlags(cmd) o.RecordFlags.AddFlags(cmd)
cmdutil.AddFieldManagerFlagVar(cmd, &o.fieldManager, "kubectl-set")
cmd.Flags().StringVarP(&o.resourceVersion, "resource-version", "", o.resourceVersion, "If non-empty, the selectors update will only succeed if this is the current resource-version for the object. Only valid when specifying a single resource.") cmd.Flags().StringVarP(&o.resourceVersion, "resource-version", "", o.resourceVersion, "If non-empty, the selectors update will only succeed if this is the current resource-version for the object. Only valid when specifying a single resource.")
cmdutil.AddDryRunFlag(cmd) cmdutil.AddDryRunFlag(cmd)
@ -227,6 +229,7 @@ func (o *SetSelectorOptions) RunSelector() error {
actual, err := resource. actual, err := resource.
NewHelper(info.Client, info.Mapping). NewHelper(info.Client, info.Mapping).
DryRun(o.dryRunStrategy == cmdutil.DryRunServer). DryRun(o.dryRunStrategy == cmdutil.DryRunServer).
WithFieldManager(o.fieldManager).
Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil) Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil)
if err != nil { if err != nil {
return err return err

View File

@ -71,6 +71,7 @@ type SetServiceAccountOptions struct {
updatePodSpecForObject polymorphichelpers.UpdatePodSpecForObjectFunc updatePodSpecForObject polymorphichelpers.UpdatePodSpecForObjectFunc
infos []*resource.Info infos []*resource.Info
serviceAccountName string serviceAccountName string
fieldManager string
PrintObj printers.ResourcePrinterFunc PrintObj printers.ResourcePrinterFunc
Recorder genericclioptions.Recorder Recorder genericclioptions.Recorder
@ -115,6 +116,7 @@ func NewCmdServiceAccount(f cmdutil.Factory, streams genericclioptions.IOStreams
cmd.Flags().BoolVar(&o.all, "all", o.all, "Select all resources, including uninitialized ones, in the namespace of the specified resource types") cmd.Flags().BoolVar(&o.all, "all", o.all, "Select all resources, including uninitialized ones, in the namespace of the specified resource types")
cmd.Flags().BoolVar(&o.local, "local", o.local, "If true, set serviceaccount will NOT contact api-server but run locally.") cmd.Flags().BoolVar(&o.local, "local", o.local, "If true, set serviceaccount will NOT contact api-server but run locally.")
cmdutil.AddDryRunFlag(cmd) cmdutil.AddDryRunFlag(cmd)
cmdutil.AddFieldManagerFlagVar(cmd, &o.fieldManager, "kubectl-set")
return cmd return cmd
} }
@ -224,6 +226,7 @@ func (o *SetServiceAccountOptions) Run() error {
actual, err := resource. actual, err := resource.
NewHelper(info.Client, info.Mapping). NewHelper(info.Client, info.Mapping).
DryRun(o.dryRunStrategy == cmdutil.DryRunServer). DryRun(o.dryRunStrategy == cmdutil.DryRunServer).
WithFieldManager(o.fieldManager).
Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil) Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil)
if err != nil { if err != nil {
patchErrs = append(patchErrs, fmt.Errorf("failed to patch ServiceAccountName %v", err)) patchErrs = append(patchErrs, fmt.Errorf("failed to patch ServiceAccountName %v", err))

View File

@ -68,6 +68,7 @@ type SubjectOptions struct {
DryRunStrategy cmdutil.DryRunStrategy DryRunStrategy cmdutil.DryRunStrategy
DryRunVerifier *resource.DryRunVerifier DryRunVerifier *resource.DryRunVerifier
Local bool Local bool
fieldManager string
Users []string Users []string
Groups []string Groups []string
@ -115,6 +116,7 @@ func NewCmdSubject(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobr
cmd.Flags().StringArrayVar(&o.Users, "user", o.Users, "Usernames to bind to the role") cmd.Flags().StringArrayVar(&o.Users, "user", o.Users, "Usernames to bind to the role")
cmd.Flags().StringArrayVar(&o.Groups, "group", o.Groups, "Groups to bind to the role") cmd.Flags().StringArrayVar(&o.Groups, "group", o.Groups, "Groups to bind to the role")
cmd.Flags().StringArrayVar(&o.ServiceAccounts, "serviceaccount", o.ServiceAccounts, "Service accounts to bind to the role") cmd.Flags().StringArrayVar(&o.ServiceAccounts, "serviceaccount", o.ServiceAccounts, "Service accounts to bind to the role")
cmdutil.AddFieldManagerFlagVar(cmd, &o.fieldManager, "kubectl-set")
return cmd return cmd
} }
@ -281,6 +283,7 @@ func (o *SubjectOptions) Run(fn updateSubjects) error {
actual, err := resource. actual, err := resource.
NewHelper(info.Client, info.Mapping). NewHelper(info.Client, info.Mapping).
DryRun(o.DryRunStrategy == cmdutil.DryRunServer). DryRun(o.DryRunStrategy == cmdutil.DryRunServer).
WithFieldManager(o.fieldManager).
Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil) Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil)
if err != nil { if err != nil {
allErrs = append(allErrs, fmt.Errorf("failed to patch subjects to rolebinding: %v", err)) allErrs = append(allErrs, fmt.Errorf("failed to patch subjects to rolebinding: %v", err))

View File

@ -56,6 +56,7 @@ type TaintOptions struct {
selector string selector string
overwrite bool overwrite bool
all bool all bool
fieldManager string
ClientForMapping func(*meta.RESTMapping) (resource.RESTClient, error) ClientForMapping func(*meta.RESTMapping) (resource.RESTClient, error)
@ -122,6 +123,7 @@ func NewCmdTaint(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.
cmd.Flags().StringVarP(&options.selector, "selector", "l", options.selector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)") cmd.Flags().StringVarP(&options.selector, "selector", "l", options.selector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
cmd.Flags().BoolVar(&options.overwrite, "overwrite", options.overwrite, "If true, allow taints to be overwritten, otherwise reject taint updates that overwrite existing taints.") cmd.Flags().BoolVar(&options.overwrite, "overwrite", options.overwrite, "If true, allow taints to be overwritten, otherwise reject taint updates that overwrite existing taints.")
cmd.Flags().BoolVar(&options.all, "all", options.all, "Select all nodes in the cluster") cmd.Flags().BoolVar(&options.all, "all", options.all, "Select all nodes in the cluster")
cmdutil.AddFieldManagerFlagVar(cmd, &options.fieldManager, "kubectl-taint")
return cmd return cmd
} }
@ -339,6 +341,7 @@ func (o TaintOptions) RunTaint() error {
} }
helper := resource. helper := resource.
NewHelper(client, mapping). NewHelper(client, mapping).
WithFieldManager(o.fieldManager).
DryRun(o.DryRunStrategy == cmdutil.DryRunServer) DryRun(o.DryRunStrategy == cmdutil.DryRunServer)
var outputObj runtime.Object var outputObj runtime.Object

View File

@ -28,7 +28,7 @@ import (
goruntime "runtime" goruntime "runtime"
"strings" "strings"
"github.com/evanphx/json-patch" jsonpatch "github.com/evanphx/json-patch"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"k8s.io/klog" "k8s.io/klog"
@ -78,6 +78,8 @@ type EditOptions struct {
f cmdutil.Factory f cmdutil.Factory
editPrinterOptions *editPrinterOptions editPrinterOptions *editPrinterOptions
updatedResultGetter func(data []byte) *resource.Result updatedResultGetter func(data []byte) *resource.Result
FieldManager string
} }
// NewEditOptions returns an initialized EditOptions instance // NewEditOptions returns an initialized EditOptions instance
@ -498,7 +500,7 @@ func (o *EditOptions) annotationPatch(update *resource.Info) error {
if err != nil { if err != nil {
return err return err
} }
helper := resource.NewHelper(client, mapping) helper := resource.NewHelper(client, mapping).WithFieldManager(o.FieldManager)
_, err = helper.Patch(o.CmdNamespace, update.Name, patchType, patch, nil) _, err = helper.Patch(o.CmdNamespace, update.Name, patchType, patch, nil)
if err != nil { if err != nil {
return err return err
@ -628,7 +630,9 @@ func (o *EditOptions) visitToPatch(originalInfos []*resource.Info, patchVisitor
fmt.Fprintf(o.Out, "Patch: %s\n", string(patch)) fmt.Fprintf(o.Out, "Patch: %s\n", string(patch))
} }
patched, err := resource.NewHelper(info.Client, info.Mapping).Patch(info.Namespace, info.Name, patchType, patch, nil) patched, err := resource.NewHelper(info.Client, info.Mapping).
WithFieldManager(o.FieldManager).
Patch(info.Namespace, info.Name, patchType, patch, nil)
if err != nil { if err != nil {
fmt.Fprintln(o.ErrOut, results.addError(err, info)) fmt.Fprintln(o.ErrOut, results.addError(err, info))
return nil return nil
@ -645,9 +649,13 @@ func (o *EditOptions) visitToPatch(originalInfos []*resource.Info, patchVisitor
func (o *EditOptions) visitToCreate(createVisitor resource.Visitor) error { func (o *EditOptions) visitToCreate(createVisitor resource.Visitor) error {
err := createVisitor.Visit(func(info *resource.Info, incomingErr error) error { err := createVisitor.Visit(func(info *resource.Info, incomingErr error) error {
if err := resource.CreateAndRefresh(info); err != nil { obj, err := resource.NewHelper(info.Client, info.Mapping).
WithFieldManager(o.FieldManager).
Create(info.Namespace, true, info.Object)
if err != nil {
return err return err
} }
info.Refresh(obj, true)
printer, err := o.ToPrinter("created") printer, err := o.ToPrinter("created")
if err != nil { if err != nil {
return err return err

View File

@ -430,10 +430,13 @@ func AddDryRunFlag(cmd *cobra.Command) {
cmd.Flags().Lookup("dry-run").NoOptDefVal = "unchanged" cmd.Flags().Lookup("dry-run").NoOptDefVal = "unchanged"
} }
func AddFieldManagerFlagVar(cmd *cobra.Command, p *string, defaultFieldManager string) {
cmd.Flags().StringVar(p, "field-manager", defaultFieldManager, "Name of the manager used to track field ownership.")
}
func AddServerSideApplyFlags(cmd *cobra.Command) { func AddServerSideApplyFlags(cmd *cobra.Command) {
cmd.Flags().Bool("server-side", false, "If true, apply runs in the server instead of the client.") cmd.Flags().Bool("server-side", false, "If true, apply runs in the server instead of the client.")
cmd.Flags().Bool("force-conflicts", false, "If true, server-side apply will force the changes against conflicts.") cmd.Flags().Bool("force-conflicts", false, "If true, server-side apply will force the changes against conflicts.")
cmd.Flags().String("field-manager", "kubectl", "Name of the manager used to track field ownership.")
} }
func AddPodRunningTimeoutFlag(cmd *cobra.Command, defaultTimeout time.Duration) { func AddPodRunningTimeoutFlag(cmd *cobra.Command, defaultTimeout time.Duration) {

View File

@ -34,6 +34,9 @@ run_kubectl_apply_tests() {
kube::test::get_object_assert 'pods test-pod' "{{${labels_field:?}.name}}" 'test-pod-label' kube::test::get_object_assert 'pods test-pod' "{{${labels_field:?}.name}}" 'test-pod-label'
# Post-Condition: pod "test-pod" has configuration annotation # Post-Condition: pod "test-pod" has configuration annotation
grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]:?}")" grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]:?}")"
# pod has field manager for kubectl client-side apply
output_message=$(kubectl get -f hack/testdata/pod.yaml -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
kube::test::if_has_string "${output_message}" 'kubectl-client-side-apply'
# Clean up # Clean up
kubectl delete pods test-pod "${kube_flags[@]:?}" kubectl delete pods test-pod "${kube_flags[@]:?}"
@ -354,6 +357,13 @@ run_kubectl_server_side_apply_tests() {
kubectl apply --server-side -f hack/testdata/pod.yaml "${kube_flags[@]:?}" kubectl apply --server-side -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
# Post-Condition: pod "test-pod" is created # Post-Condition: pod "test-pod" is created
kube::test::get_object_assert 'pods test-pod' "{{${labels_field:?}.name}}" 'test-pod-label' kube::test::get_object_assert 'pods test-pod' "{{${labels_field:?}.name}}" 'test-pod-label'
# pod has field manager for kubectl server-side apply
output_message=$(kubectl get -f hack/testdata/pod.yaml -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
kube::test::if_has_string "${output_message}" 'kubectl'
# pod has custom field manager
kubectl apply --server-side --field-manager=my-field-manager --force-conflicts -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
output_message=$(kubectl get -f hack/testdata/pod.yaml -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
kube::test::if_has_string "${output_message}" 'my-field-manager'
# Clean up # Clean up
kubectl delete pods test-pod "${kube_flags[@]:?}" kubectl delete pods test-pod "${kube_flags[@]:?}"

View File

@ -42,6 +42,9 @@ run_daemonset_tests() {
kube::test::get_object_assert 'daemonsets bind' "{{${generation_field:?}}}" '3' kube::test::get_object_assert 'daemonsets bind' "{{${generation_field:?}}}" '3'
kubectl set resources daemonsets/bind "${kube_flags[@]:?}" --limits=cpu=200m,memory=512Mi kubectl set resources daemonsets/bind "${kube_flags[@]:?}" --limits=cpu=200m,memory=512Mi
kube::test::get_object_assert 'daemonsets bind' "{{${generation_field:?}}}" '4' kube::test::get_object_assert 'daemonsets bind' "{{${generation_field:?}}}" '4'
# pod has field for kubectl set field manager
output_message=$(kubectl get daemonsets bind -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
kube::test::if_has_string "${output_message}" 'kubectl-set'
# Rollout restart should change generation # Rollout restart should change generation
kubectl rollout restart daemonset/bind "${kube_flags[@]:?}" kubectl rollout restart daemonset/bind "${kube_flags[@]:?}"
@ -335,6 +338,10 @@ run_deployment_tests() {
newrs="$(kubectl describe deployment nginx | grep NewReplicaSet | awk '{print $2}')" newrs="$(kubectl describe deployment nginx | grep NewReplicaSet | awk '{print $2}')"
rs="$(kubectl get rs "${newrs}" -o yaml)" rs="$(kubectl get rs "${newrs}" -o yaml)"
kube::test::if_has_string "${rs}" "deployment.kubernetes.io/revision: \"6\"" kube::test::if_has_string "${rs}" "deployment.kubernetes.io/revision: \"6\""
# Deployment has field for kubectl rollout field manager
output_message=$(kubectl get deployment nginx -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
kube::test::if_has_string "${output_message}" 'kubectl-rollout'
# Create second deployment
${SED} "s/name: nginx$/name: nginx2/" hack/testdata/deployment-revision1.yaml | kubectl create -f - "${kube_flags[@]:?}" ${SED} "s/name: nginx$/name: nginx2/" hack/testdata/deployment-revision1.yaml | kubectl create -f - "${kube_flags[@]:?}"
# Deletion of both deployments should not be blocked # Deletion of both deployments should not be blocked
kubectl delete deployment nginx2 "${kube_flags[@]:?}" kubectl delete deployment nginx2 "${kube_flags[@]:?}"
@ -653,6 +660,10 @@ run_rs_tests() {
kubectl set serviceaccount rs/frontend "${kube_flags[@]:?}" serviceaccount1 kubectl set serviceaccount rs/frontend "${kube_flags[@]:?}" serviceaccount1
kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '5' kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '5'
# RS has field for kubectl set field manager
output_message=$(kubectl get rs frontend -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
kube::test::if_has_string "${output_message}" 'kubectl-set'
### Delete replica set with id ### Delete replica set with id
# Pre-condition: frontend replica set exists # Pre-condition: frontend replica set exists
kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" 'frontend:' kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" 'frontend:'
@ -692,6 +703,10 @@ run_rs_tests() {
# autoscale 2~3 pods, no CPU utilization specified, replica set specified by name # autoscale 2~3 pods, no CPU utilization specified, replica set specified by name
kubectl autoscale rs frontend "${kube_flags[@]:?}" --min=2 --max=3 kubectl autoscale rs frontend "${kube_flags[@]:?}" --min=2 --max=3
kube::test::get_object_assert 'hpa frontend' "{{${hpa_min_field:?}}} {{${hpa_max_field:?}}} {{${hpa_cpu_field:?}}}" '2 3 80' kube::test::get_object_assert 'hpa frontend' "{{${hpa_min_field:?}}} {{${hpa_max_field:?}}} {{${hpa_cpu_field:?}}}" '2 3 80'
# HorizontalPodAutoscaler has field for kubectl autoscale field manager
output_message=$(kubectl get hpa frontend -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
kube::test::if_has_string "${output_message}" 'kubectl-autoscale'
# Clean up
kubectl delete hpa frontend "${kube_flags[@]:?}" kubectl delete hpa frontend "${kube_flags[@]:?}"
# autoscale without specifying --max should fail # autoscale without specifying --max should fail
! kubectl autoscale rs frontend "${kube_flags[@]:?}" || exit 1 ! kubectl autoscale rs frontend "${kube_flags[@]:?}" || exit 1

View File

@ -80,6 +80,9 @@ run_pod_tests() {
kube::test::get_object_assert 'pod valid-pod' "{{$id_field}}" 'valid-pod' kube::test::get_object_assert 'pod valid-pod' "{{$id_field}}" 'valid-pod'
kube::test::get_object_assert 'pod/valid-pod' "{{$id_field}}" 'valid-pod' kube::test::get_object_assert 'pod/valid-pod' "{{$id_field}}" 'valid-pod'
kube::test::get_object_assert 'pods/valid-pod' "{{$id_field}}" 'valid-pod' kube::test::get_object_assert 'pods/valid-pod' "{{$id_field}}" 'valid-pod'
# pod has field manager for kubectl create
output_message=$(kubectl get -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
kube::test::if_has_string "${output_message}" 'kubectl-create'
# Repeat above test using jsonpath template # Repeat above test using jsonpath template
kube::test::get_object_jsonpath_assert pods "{.items[*]$id_field}" 'valid-pod' kube::test::get_object_jsonpath_assert pods "{.items[*]$id_field}" 'valid-pod'
kube::test::get_object_jsonpath_assert 'pod valid-pod' "{$id_field}" 'valid-pod' kube::test::get_object_jsonpath_assert 'pod valid-pod' "{$id_field}" 'valid-pod'
@ -364,6 +367,9 @@ run_pod_tests() {
kubectl annotate pods valid-pod emptyannotation="" "${kube_flags[@]}" kubectl annotate pods valid-pod emptyannotation="" "${kube_flags[@]}"
# Post-condition: valid pod contains "emptyannotation" with no value # Post-condition: valid pod contains "emptyannotation" with no value
kube::test::get_object_assert 'pod valid-pod' "{{${annotations_field}.emptyannotation}}" '' kube::test::get_object_assert 'pod valid-pod' "{{${annotations_field}.emptyannotation}}" ''
# pod has field for kubectl annotate field manager
output_message=$(kubectl get pod valid-pod -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
kube::test::if_has_string "${output_message}" 'kubectl-annotate'
### Record label change ### Record label change
# Pre-condition: valid-pod does not have record annotation # Pre-condition: valid-pod does not have record annotation
@ -372,6 +378,9 @@ run_pod_tests() {
kubectl label pods valid-pod record-change=true --record=true "${kube_flags[@]}" kubectl label pods valid-pod record-change=true --record=true "${kube_flags[@]}"
# Post-condition: valid-pod has record annotation # Post-condition: valid-pod has record annotation
kube::test::get_object_assert 'pod valid-pod' "{{range$annotations_field}}{{.}}:{{end}}" ".*--record=true.*" kube::test::get_object_assert 'pod valid-pod' "{{range$annotations_field}}{{.}}:{{end}}" ".*--record=true.*"
# pod has field for kubectl label field manager
output_message=$(kubectl get pod valid-pod -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
kube::test::if_has_string "${output_message}" 'kubectl-label'
### Do not record label change ### Do not record label change
# Command # Command
@ -451,6 +460,11 @@ run_pod_tests() {
# Post-condition: service named modified and rc named modified are created # Post-condition: service named modified and rc named modified are created
kube::test::get_object_assert service "{{range.items}}{{$id_field}}:{{end}}" 'modified:' kube::test::get_object_assert service "{{range.items}}{{$id_field}}:{{end}}" 'modified:'
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'modified:' kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'modified:'
# resources have field manager for kubectl create
output_message=$(kubectl get service/modified -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
kube::test::if_has_string "${output_message}" 'kubectl-create'
output_message=$(kubectl get rc/modified -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
kube::test::if_has_string "${output_message}" 'kubectl-create'
# Clean up # Clean up
kubectl delete service/modified "${kube_flags[@]}" kubectl delete service/modified "${kube_flags[@]}"
kubectl delete rc/modified "${kube_flags[@]}" kubectl delete rc/modified "${kube_flags[@]}"
@ -521,6 +535,10 @@ run_pod_tests() {
# Post-condition: valid-pod POD has expected image # Post-condition: valid-pod POD has expected image
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'k8s.gcr.io/pause:3.2:' kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'k8s.gcr.io/pause:3.2:'
# pod has field for kubectl patch field manager
output_message=$(kubectl get pod valid-pod -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
kube::test::if_has_string "${output_message}" 'kubectl-patch'
## If resourceVersion is specified in the patch, it will be treated as a precondition, i.e., if the resourceVersion is different from that is stored in the server, the Patch should be rejected ## If resourceVersion is specified in the patch, it will be treated as a precondition, i.e., if the resourceVersion is different from that is stored in the server, the Patch should be rejected
ERROR_FILE="${KUBE_TEMP}/conflict-error" ERROR_FILE="${KUBE_TEMP}/conflict-error"
## If the resourceVersion is the same as the one stored in the server, the patch will be applied. ## If the resourceVersion is the same as the one stored in the server, the patch will be applied.
@ -561,6 +579,10 @@ run_pod_tests() {
# Post-condition: spec.container.name = "replaced-k8s-serve-hostname" # Post-condition: spec.container.name = "replaced-k8s-serve-hostname"
kube::test::get_object_assert 'pod valid-pod' "{{(index .spec.containers 0).name}}" 'replaced-k8s-serve-hostname' kube::test::get_object_assert 'pod valid-pod' "{{(index .spec.containers 0).name}}" 'replaced-k8s-serve-hostname'
# Pod has field manager for kubectl replace
output_message=$(kubectl get pod valid-pod -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
kube::test::if_has_string "${output_message}" 'kubectl-replace'
## check replace --grace-period requires --force ## check replace --grace-period requires --force
output_message=$(! kubectl replace "${kube_flags[@]}" --grace-period=1 -f /tmp/tmp-valid-pod.json 2>&1) output_message=$(! kubectl replace "${kube_flags[@]}" --grace-period=1 -f /tmp/tmp-valid-pod.json 2>&1)
kube::test::if_has_string "${output_message}" '\-\-grace-period must have \-\-force specified' kube::test::if_has_string "${output_message}" '\-\-grace-period must have \-\-force specified'
@ -635,6 +657,9 @@ __EOF__
grep -q 'Patch:' <<< "$(EDITOR=/tmp/tmp-editor.sh kubectl edit "${kube_flags[@]}" pods/valid-pod --output-patch=true)" grep -q 'Patch:' <<< "$(EDITOR=/tmp/tmp-editor.sh kubectl edit "${kube_flags[@]}" pods/valid-pod --output-patch=true)"
# Post-condition: valid-pod POD has image k8s.gcr.io/serve_hostname # Post-condition: valid-pod POD has image k8s.gcr.io/serve_hostname
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'k8s.gcr.io/serve_hostname:' kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'k8s.gcr.io/serve_hostname:'
# pod has field for kubectl edit field manager
output_message=$(kubectl get pod valid-pod -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
kube::test::if_has_string "${output_message}" 'kubectl-edit'
# cleaning # cleaning
rm /tmp/tmp-editor.sh rm /tmp/tmp-editor.sh
@ -976,6 +1001,8 @@ run_service_tests() {
# Show dry-run works on running selector # Show dry-run works on running selector
kubectl set selector services redis-master role=padawan --dry-run=client -o yaml "${kube_flags[@]}" kubectl set selector services redis-master role=padawan --dry-run=client -o yaml "${kube_flags[@]}"
kubectl set selector services redis-master role=padawan --dry-run=server -o yaml "${kube_flags[@]}" kubectl set selector services redis-master role=padawan --dry-run=server -o yaml "${kube_flags[@]}"
output_message=$(kubectl get services redis-master -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
kube::test::if_has_string "${output_message}" 'kubectl-set'
! kubectl set selector services redis-master role=padawan --local -o yaml "${kube_flags[@]}" || exit 1 ! kubectl set selector services redis-master role=padawan --local -o yaml "${kube_flags[@]}" || exit 1
kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "redis:master:backend:" kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "redis:master:backend:"
# --resource-version=<current-resource-version> succeeds # --resource-version=<current-resource-version> succeeds
@ -1111,12 +1138,18 @@ __EOF__
# Check result # Check result
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'testmetadata:' kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'testmetadata:'
kube::test::get_object_assert 'service testmetadata' "{{.metadata.annotations}}" "map\[zone-context:home\]" kube::test::get_object_assert 'service testmetadata' "{{.metadata.annotations}}" "map\[zone-context:home\]"
# pod has field for kubectl run field manager
output_message=$(kubectl get pod testmetadata -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
kube::test::if_has_string "${output_message}" 'kubectl-run'
### Expose pod as a new service ### Expose pod as a new service
# Command # Command
kubectl expose pod testmetadata --port=1000 --target-port=80 --type=NodePort --name=exposemetadata --overrides='{ "metadata": { "annotations": { "zone-context": "work" } } } ' kubectl expose pod testmetadata --port=1000 --target-port=80 --type=NodePort --name=exposemetadata --overrides='{ "metadata": { "annotations": { "zone-context": "work" } } } '
# Check result # Check result
kube::test::get_object_assert 'service exposemetadata' "{{.metadata.annotations}}" "map\[zone-context:work\]" kube::test::get_object_assert 'service exposemetadata' "{{.metadata.annotations}}" "map\[zone-context:work\]"
# Service has field manager for kubectl expose
output_message=$(kubectl get service exposemetadata -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
kube::test::if_has_string "${output_message}" 'kubectl-expose'
# Clean-Up # Clean-Up
# Command # Command

View File

@ -54,7 +54,6 @@ run_kubectl_diff_tests() {
kube::test::if_has_string "${resourceVersion}" "${initialResourceVersion}" kube::test::if_has_string "${resourceVersion}" "${initialResourceVersion}"
# Test found diff with server-side apply # Test found diff with server-side apply
kubectl apply -f hack/testdata/pod.yaml
output_message=$(kubectl diff -f hack/testdata/pod-changed.yaml --server-side --force-conflicts || test $? -eq 1) output_message=$(kubectl diff -f hack/testdata/pod-changed.yaml --server-side --force-conflicts || test $? -eq 1)
kube::test::if_has_string "${output_message}" 'k8s.gcr.io/pause:3.0' kube::test::if_has_string "${output_message}" 'k8s.gcr.io/pause:3.0'
@ -65,6 +64,31 @@ run_kubectl_diff_tests() {
# Test that we have a return code bigger than 1 if there is an error when diffing # Test that we have a return code bigger than 1 if there is an error when diffing
kubectl diff -f hack/testdata/invalid-pod.yaml || test $? -gt 1 kubectl diff -f hack/testdata/invalid-pod.yaml || test $? -gt 1
# Cleanup
kubectl delete -f hack/testdata/pod.yaml
kube::log::status "Testing kubectl diff with server-side apply"
# Test that kubectl diff --server-side works when the live object doesn't exist
output_message=$(! kubectl diff --server-side -f hack/testdata/pod.yaml)
kube::test::if_has_string "${output_message}" 'test-pod'
# Ensure diff --server-side only dry-runs and doesn't persist change
kube::test::get_object_assert 'pod' "{{range.items}}{{ if eq ${id_field:?} \\\"test-pod\\\" }}found{{end}}{{end}}:" ':'
# Server-side apply the Pod
kubectl apply --server-side -f hack/testdata/pod.yaml
kube::test::get_object_assert 'pod' "{{range.items}}{{ if eq ${id_field:?} \\\"test-pod\\\" }}found{{end}}{{end}}:" 'found:'
# Make sure that --server-side diffing the resource right after returns nothing (0 exit code).
kubectl diff --server-side -f hack/testdata/pod.yaml
# Make sure that for kubectl diff --server-side:
# 1. the exit code for diff is 1 because it found a difference
# 2. the difference contains the changed image
output_message=$(kubectl diff --server-side -f hack/testdata/pod-changed.yaml || test $? -eq 1)
kube::test::if_has_string "${output_message}" 'k8s.gcr.io/pause:3.0'
# Cleanup
kubectl delete -f hack/testdata/pod.yaml kubectl delete -f hack/testdata/pod.yaml
set +o nounset set +o nounset

View File

@ -87,6 +87,9 @@ __EOF__
# taint can add a taint (<key>:<effect>) # taint can add a taint (<key>:<effect>)
kubectl taint node 127.0.0.1 dedicated:PreferNoSchedule kubectl taint node 127.0.0.1 dedicated:PreferNoSchedule
kube::test::get_object_assert "nodes 127.0.0.1" '{{range .spec.taints}}{{if eq .key \"dedicated\"}}{{.key}}={{.value}}:{{.effect}}{{end}}{{end}}' "dedicated=<no value>:PreferNoSchedule" kube::test::get_object_assert "nodes 127.0.0.1" '{{range .spec.taints}}{{if eq .key \"dedicated\"}}{{.key}}={{.value}}:{{.effect}}{{end}}{{end}}' "dedicated=<no value>:PreferNoSchedule"
# Node has field manager for kubectl taint
output_message=$(kubectl get node 127.0.0.1 -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
kube::test::if_has_string "${output_message}" 'kubectl-taint'
# Dry-run remove a taint # Dry-run remove a taint
kubectl taint node 127.0.0.1 --dry-run=client dedicated- kubectl taint node 127.0.0.1 --dry-run=client dedicated-
kubectl taint node 127.0.0.1 --dry-run=server dedicated- kubectl taint node 127.0.0.1 --dry-run=server dedicated-