Merge pull request #52440 from juanvallejo/jvallejo/add-dry-run-flag-kubectl-drain
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md

add --dry-run option to kubectl drain

**Release note**:
```release-note
Added --dry-run option to `kubectl drain`
```

Adds a `--dry-run` flag to `kubectl <cordon, uncordon, drain>`.

@fabianofranz @kubernetes/sig-cli-misc
This commit is contained in: commit 3f1a2e43b4
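For context, a minimal usage sketch of the behaviour this change adds; `127.0.0.1` is simply the node name used by the test suite below, and the output wording is indicative rather than verbatim:

```sh
# Cordon with --dry-run: reports the node as cordoned but does not patch it,
# so .spec.unschedulable stays unset.
kubectl cordon 127.0.0.1 --dry-run

# Drain with --dry-run: reports the node as drained, but evicts no pods and
# leaves the node schedulable.
kubectl drain 127.0.0.1 --dry-run

# Uncordon with --dry-run: a no-op that reports "already uncordoned" when the
# node is already schedulable.
kubectl uncordon 127.0.0.1 --dry-run
```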
@@ -4214,35 +4214,57 @@ run_certificates_tests() {
 }
 
 run_cluster_management_tests() {
   set -o nounset
   set -o errexit
 
   kube::log::status "Testing cluster-management commands"
 
   kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'
 
+  ### kubectl cordon update with --dry-run does not mark node unschedulable
+  # Pre-condition: node is schedulable
+  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
+  kubectl cordon "127.0.0.1" --dry-run
+  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
+
+  ### kubectl drain update with --dry-run does not mark node unschedulable
+  # Pre-condition: node is schedulable
+  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
+  kubectl drain "127.0.0.1" --dry-run
+  # Post-condition: node still exists, node is still schedulable
+  kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'
+  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
+
+  ### kubectl uncordon update with --dry-run is a no-op
+  # Pre-condition: node is already schedulable
+  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
+  response=$(kubectl uncordon "127.0.0.1" --dry-run)
+  kube::test::if_has_string "${response}" 'already uncordoned'
+  # Post-condition: node is still schedulable
+  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
+
   ### kubectl drain command fails when both --selector and a node argument are given
   # Pre-condition: node exists and contains label test=label
   kubectl label node "127.0.0.1" "test=label"
   kube::test::get_object_assert "nodes 127.0.0.1" '{{.metadata.labels.test}}' 'label'
   response=$(! kubectl drain "127.0.0.1" --selector test=label 2>&1)
   kube::test::if_has_string "${response}" 'cannot specify both a node name'
 
   ### kubectl cordon command fails when no arguments are passed
   # Pre-condition: node exists
   response=$(! kubectl cordon 2>&1)
   kube::test::if_has_string "${response}" 'error\: USAGE\: cordon NODE'
 
   ### kubectl cordon selects all nodes with an empty --selector=
   # Pre-condition: node "127.0.0.1" is uncordoned
   kubectl uncordon "127.0.0.1"
   response=$(kubectl cordon --selector=)
   kube::test::if_has_string "${response}" 'node "127.0.0.1" cordoned'
   # Post-condition: node "127.0.0.1" is cordoned
   kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" 'true'
 
   set +o nounset
   set +o errexit
 }
 
 run_plugins_tests() {
@@ -4835,12 +4857,12 @@ runTests() {
     record_command run_certificates_tests
   fi
 
   ######################
   # Cluster Management #
   ######################
   if kube::test::if_supports_resource "${nodes}" ; then
     record_command run_cluster_management_tests
   fi
 
   ###########
   # Plugins #
@@ -55,6 +55,7 @@ type DrainOptions struct {
     restClient         *restclient.RESTClient
     Factory            cmdutil.Factory
     Force              bool
+    DryRun             bool
     GracePeriodSeconds int
     IgnoreDaemonsets   bool
     Timeout            time.Duration
@@ -113,6 +114,7 @@ func NewCmdCordon(f cmdutil.Factory, out io.Writer) *cobra.Command {
         },
     }
     cmd.Flags().StringVarP(&options.Selector, "selector", "l", options.Selector, "Selector (label query) to filter on")
+    cmdutil.AddDryRunFlag(cmd)
     return cmd
 }
 
@@ -139,6 +141,7 @@ func NewCmdUncordon(f cmdutil.Factory, out io.Writer) *cobra.Command {
         },
     }
     cmd.Flags().StringVarP(&options.Selector, "selector", "l", options.Selector, "Selector (label query) to filter on")
+    cmdutil.AddDryRunFlag(cmd)
     return cmd
 }
 
@@ -195,6 +198,7 @@ func NewCmdDrain(f cmdutil.Factory, out, errOut io.Writer) *cobra.Command {
     cmd.Flags().IntVar(&options.GracePeriodSeconds, "grace-period", -1, "Period of time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used.")
     cmd.Flags().DurationVar(&options.Timeout, "timeout", 0, "The length of time to wait before giving up, zero means infinite")
     cmd.Flags().StringVarP(&options.Selector, "selector", "l", options.Selector, "Selector (label query) to filter on")
+    cmdutil.AddDryRunFlag(cmd)
     return cmd
 }
 
@@ -214,6 +218,8 @@ func (o *DrainOptions) SetupDrain(cmd *cobra.Command, args []string) error {
         return cmdutil.UsageErrorf(cmd, fmt.Sprintf("USAGE: %s [flags]", cmd.Use))
     }
 
+    o.DryRun = cmdutil.GetFlagBool(cmd, "dry-run")
+
     if o.client, err = o.Factory.ClientSet(); err != nil {
         return err
     }
@@ -269,10 +275,13 @@ func (o *DrainOptions) RunDrain() error {
     var fatal error
 
     for _, info := range o.nodeInfos {
-        err := o.deleteOrEvictPodsSimple(info)
-        if err == nil {
+        var err error
+        if !o.DryRun {
+            err = o.deleteOrEvictPodsSimple(info)
+        }
+        if err == nil || o.DryRun {
             drainedNodes.Insert(info.Name)
-            cmdutil.PrintSuccess(o.mapper, false, o.Out, "node", info.Name, false, "drained")
+            cmdutil.PrintSuccess(o.mapper, false, o.Out, "node", info.Name, o.DryRun, "drained")
         } else {
             fmt.Fprintf(o.ErrOut, "error: unable to drain node %q, aborting command...\n\n", info.Name)
             remainingNodes := []string{}
@@ -697,29 +706,31 @@ func (o *DrainOptions) RunCordonOrUncordon(desired bool) error {
             }
             unsched := node.Spec.Unschedulable
             if unsched == desired {
-                cmdutil.PrintSuccess(o.mapper, false, o.Out, nodeInfo.Mapping.Resource, nodeInfo.Name, false, already(desired))
+                cmdutil.PrintSuccess(o.mapper, false, o.Out, nodeInfo.Mapping.Resource, nodeInfo.Name, o.DryRun, already(desired))
             } else {
-                helper := resource.NewHelper(o.restClient, nodeInfo.Mapping)
-                node.Spec.Unschedulable = desired
-                newData, err := json.Marshal(obj)
-                if err != nil {
-                    fmt.Fprintf(o.ErrOut, "error: unable to %s node %q: %v", cordonOrUncordon, nodeInfo.Name, err)
-                    continue
-                }
-                patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, obj)
-                if err != nil {
-                    fmt.Printf("error: unable to %s node %q: %v", cordonOrUncordon, nodeInfo.Name, err)
-                    continue
-                }
-                _, err = helper.Patch(cmdNamespace, nodeInfo.Name, types.StrategicMergePatchType, patchBytes)
-                if err != nil {
-                    fmt.Printf("error: unable to %s node %q: %v", cordonOrUncordon, nodeInfo.Name, err)
-                    continue
-                }
-                cmdutil.PrintSuccess(o.mapper, false, o.Out, nodeInfo.Mapping.Resource, nodeInfo.Name, false, changed(desired))
+                if !o.DryRun {
+                    helper := resource.NewHelper(o.restClient, nodeInfo.Mapping)
+                    node.Spec.Unschedulable = desired
+                    newData, err := json.Marshal(obj)
+                    if err != nil {
+                        fmt.Fprintf(o.ErrOut, "error: unable to %s node %q: %v", cordonOrUncordon, nodeInfo.Name, err)
+                        continue
+                    }
+                    patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, obj)
+                    if err != nil {
+                        fmt.Printf("error: unable to %s node %q: %v", cordonOrUncordon, nodeInfo.Name, err)
+                        continue
+                    }
+                    _, err = helper.Patch(cmdNamespace, nodeInfo.Name, types.StrategicMergePatchType, patchBytes)
+                    if err != nil {
+                        fmt.Printf("error: unable to %s node %q: %v", cordonOrUncordon, nodeInfo.Name, err)
+                        continue
+                    }
+                }
+                cmdutil.PrintSuccess(o.mapper, false, o.Out, nodeInfo.Mapping.Resource, nodeInfo.Name, o.DryRun, changed(desired))
             }
         } else {
-            cmdutil.PrintSuccess(o.mapper, false, o.Out, nodeInfo.Mapping.Resource, nodeInfo.Name, false, "skipped")
+            cmdutil.PrintSuccess(o.mapper, false, o.Out, nodeInfo.Mapping.Resource, nodeInfo.Name, o.DryRun, "skipped")
         }
     }