From 5200ff86d0983e42c3d2d5c0d830e96495516347 Mon Sep 17 00:00:00 2001
From: Katrina Verey
Date: Fri, 19 Mar 2021 08:32:37 -0700
Subject: [PATCH] Add --chunk-size flag to kubectl drain

---
 .../src/k8s.io/kubectl/pkg/cmd/drain/drain.go |  3 +++
 .../kubectl/pkg/cmd/drain/drain_test.go       |  1 +
 staging/src/k8s.io/kubectl/pkg/drain/drain.go | 20 ++++++++++++--
 test/cmd/node-management.sh                   | 26 +++++++++++++++++++
 4 files changed, 48 insertions(+), 2 deletions(-)

diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go b/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go
index 556fa3afa39..c2a13a078e8 100644
--- a/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go
+++ b/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go
@@ -148,6 +148,7 @@ func NewDrainCmdOptions(f cmdutil.Factory, ioStreams genericclioptions.IOStreams
 			GracePeriodSeconds: -1,
 			Out:                ioStreams.Out,
 			ErrOut:             ioStreams.ErrOut,
+			ChunkSize:          cmdutil.DefaultChunkSize,
 		},
 	}
 	o.drainer.OnPodDeletedOrEvicted = o.onPodDeletedOrEvicted
@@ -198,6 +199,7 @@ func NewCmdDrain(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobr
 	cmd.Flags().BoolVar(&o.drainer.DisableEviction, "disable-eviction", o.drainer.DisableEviction, "Force drain to use delete, even if eviction is supported. This will bypass checking PodDisruptionBudgets, use with caution.")
 	cmd.Flags().IntVar(&o.drainer.SkipWaitForDeleteTimeoutSeconds, "skip-wait-for-delete-timeout", o.drainer.SkipWaitForDeleteTimeoutSeconds, "If pod DeletionTimestamp older than N seconds, skip waiting for the pod. Seconds must be greater than 0 to skip.")
 
+	cmdutil.AddChunkSizeFlag(cmd, &o.drainer.ChunkSize)
 	cmdutil.AddDryRunFlag(cmd)
 	return cmd
 }
@@ -256,6 +258,7 @@ func (o *DrainCmdOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args [
 	builder := f.NewBuilder().
 		WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...).
 		NamespaceParam(o.Namespace).DefaultNamespace().
+		RequestChunksOf(o.drainer.ChunkSize).
 		ResourceNames("nodes", args...).
 		SingleResourceType().
 		Flatten()
diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain_test.go b/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain_test.go
index adb2b29ea06..34568342b7b 100644
--- a/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain_test.go
+++ b/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain_test.go
@@ -810,6 +810,7 @@ func TestDrain(t *testing.T) {
 				}
 				getParams := make(url.Values)
 				getParams["fieldSelector"] = []string{"spec.nodeName=node"}
+				getParams["limit"] = []string{"500"}
 				if !reflect.DeepEqual(getParams, values) {
 					t.Fatalf("%s: expected:\n%v\nsaw:\n%v\n", test.description, getParams, values)
 				}
diff --git a/staging/src/k8s.io/kubectl/pkg/drain/drain.go b/staging/src/k8s.io/kubectl/pkg/drain/drain.go
index 3428e91e38b..26585814750 100644
--- a/staging/src/k8s.io/kubectl/pkg/drain/drain.go
+++ b/staging/src/k8s.io/kubectl/pkg/drain/drain.go
@@ -29,6 +29,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/cli-runtime/pkg/resource"
@@ -61,6 +62,7 @@ type Helper struct {
 	DeleteEmptyDirData  bool
 	Selector            string
 	PodSelector         string
+	ChunkSize           int64
 
 	// DisableEviction forces drain to use delete rather than evict
 	DisableEviction bool
@@ -189,9 +191,23 @@ func (d *Helper) GetPodsForDeletion(nodeName string) (*PodDeleteList, []error) {
 		return nil, []error{err}
 	}
 
-	podList, err := d.Client.CoreV1().Pods(metav1.NamespaceAll).List(d.getContext(), metav1.ListOptions{
+	podList := &corev1.PodList{}
+	initialOpts := &metav1.ListOptions{
 		LabelSelector: labelSelector.String(),
-		FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName}).String()})
+		FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName}).String(),
+		Limit:         d.ChunkSize,
+	}
+
+	err = resource.FollowContinue(initialOpts, func(options metav1.ListOptions) (runtime.Object, error) {
+		newPods, err := d.Client.CoreV1().Pods(metav1.NamespaceAll).List(d.getContext(), options)
+		if err != nil {
+			podR := corev1.SchemeGroupVersion.WithResource(corev1.ResourcePods.String())
+			return nil, resource.EnhanceListError(err, options, podR.String())
+		}
+		podList.Items = append(podList.Items, newPods.Items...)
+		return newPods, nil
+	})
+
 	if err != nil {
 		return nil, []error{err}
 	}
diff --git a/test/cmd/node-management.sh b/test/cmd/node-management.sh
index 786c0c113dd..d0f6f39b2bf 100755
--- a/test/cmd/node-management.sh
+++ b/test/cmd/node-management.sh
@@ -167,6 +167,32 @@ run_cluster_management_tests() {
   response=$(! kubectl drain "127.0.0.1" --selector test=label 2>&1)
   kube::test::if_has_string "${response}" 'cannot specify both a node name'
 
+  ### Test kubectl drain chunk size
+  # Pre-condition: node exists and contains label test=label
+  kube::test::get_object_assert "nodes 127.0.0.1" '{{.metadata.labels.test}}' 'label'
+  # Pre-condition: node is schedulable
+  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
+  # Pre-condition: test-pod-1 and test-pod-2 exist
+  kube::test::get_object_assert "pods" "{{range .items}}{{.metadata.name}},{{end}}" 'test-pod-1,test-pod-2,'
+  # command - need to use force because pods are unmanaged, dry run (or skip-wait) because node is unready
+  output_message=$(kubectl --v=6 drain --force --pod-selector type=test-pod --selector test=label --chunk-size=1 --dry-run=client 2>&1 "${kube_flags[@]}")
+  # Post-condition: Check if we get a limit on node, and both limit and continue on pods
+  kube::test::if_has_string "${output_message}" "/v1/nodes?labelSelector=test%3Dlabel&limit=1 200 OK"
+  kube::test::if_has_string "${output_message}" "/v1/pods?fieldSelector=spec.nodeName%3D127.0.0.1&labelSelector=type%3Dtest-pod&limit=1 200 OK"
+  kube::test::if_has_string "${output_message}" "/v1/pods?continue=.*&fieldSelector=spec.nodeName%3D127.0.0.1&labelSelector=type%3Dtest-pod&limit=1 200 OK"
+  # Post-condition: Check we evict multiple pages worth of pods
+  kube::test::if_has_string "${output_message}" "evicting pod .*/test-pod-1"
+  kube::test::if_has_string "${output_message}" "evicting pod .*/test-pod-2"
+  # Post-condition: node is schedulable
+  kubectl uncordon "127.0.0.1"
+  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
+
+  ### Test kubectl drain chunk size defaults to 500
+  output_message=$(kubectl --v=6 drain --force --selector test=label --dry-run=client 2>&1 "${kube_flags[@]}")
+  # Post-condition: Check if we get a limit
+  kube::test::if_has_string "${output_message}" "/v1/nodes?labelSelector=test%3Dlabel&limit=500 200 OK"
+  kube::test::if_has_string "${output_message}" "/v1/pods?fieldSelector=spec.nodeName%3D127.0.0.1&limit=500 200 OK"
+
   ### kubectl cordon command fails when no arguments are passed
   # Pre-condition: node exists
   response=$(! kubectl cordon 2>&1)