From 222e2c5ef897f4d47831b8d822cab09c000037c2 Mon Sep 17 00:00:00 2001
From: Katrina Verey
Date: Wed, 10 Mar 2021 12:21:25 -0800
Subject: [PATCH 1/4] Extract DefaultChunkSize constant

---
 staging/src/k8s.io/kubectl/pkg/cmd/get/get.go      | 2 +-
 staging/src/k8s.io/kubectl/pkg/cmd/util/helpers.go | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/get/get.go b/staging/src/k8s.io/kubectl/pkg/cmd/get/get.go
index 64dfe2a7fb0..7f2d9a62561 100644
--- a/staging/src/k8s.io/kubectl/pkg/cmd/get/get.go
+++ b/staging/src/k8s.io/kubectl/pkg/cmd/get/get.go
@@ -145,7 +145,7 @@ func NewGetOptions(parent string, streams genericclioptions.IOStreams) *GetOptio
 		CmdParent:   parent,
 		IOStreams:   streams,
-		ChunkSize:   500,
+		ChunkSize:   cmdutil.DefaultChunkSize,
 		ServerPrint: true,
 	}
 }
diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/util/helpers.go b/staging/src/k8s.io/kubectl/pkg/cmd/util/helpers.go
index 8f74d66b8a6..8fadbf83439 100644
--- a/staging/src/k8s.io/kubectl/pkg/cmd/util/helpers.go
+++ b/staging/src/k8s.io/kubectl/pkg/cmd/util/helpers.go
@@ -50,6 +50,7 @@ import (
 const (
 	ApplyAnnotationsFlag = "save-config"
 	DefaultErrorExitCode = 1
+	DefaultChunkSize     = 500
 )
 
 type debugError interface {

From f8097c6ceeb16461861c74d4fc5dd0bcd36ebeaf Mon Sep 17 00:00:00 2001
From: Katrina Verey
Date: Fri, 19 Mar 2021 08:30:49 -0700
Subject: [PATCH 2/4] Create helper for adding chunk-size flag to commands

---
 staging/src/k8s.io/kubectl/pkg/cmd/get/get.go      | 2 +-
 staging/src/k8s.io/kubectl/pkg/cmd/util/helpers.go | 5 +++++
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/get/get.go b/staging/src/k8s.io/kubectl/pkg/cmd/get/get.go
index 7f2d9a62561..4f1f5a8c4ee 100644
--- a/staging/src/k8s.io/kubectl/pkg/cmd/get/get.go
+++ b/staging/src/k8s.io/kubectl/pkg/cmd/get/get.go
@@ -175,7 +175,6 @@ func NewCmdGet(parent string, f cmdutil.Factory, streams genericclioptions.IOStr
 	cmd.Flags().BoolVarP(&o.Watch, "watch", "w", o.Watch, "After listing/getting the requested object, watch for changes. Uninitialized objects are excluded if no object name is provided.")
 	cmd.Flags().BoolVar(&o.WatchOnly, "watch-only", o.WatchOnly, "Watch for changes to the requested object(s), without listing/getting first.")
 	cmd.Flags().BoolVar(&o.OutputWatchEvents, "output-watch-events", o.OutputWatchEvents, "Output watch event objects when --watch or --watch-only is used. Existing objects are output as initial ADDED events.")
-	cmd.Flags().Int64Var(&o.ChunkSize, "chunk-size", o.ChunkSize, "Return large lists in chunks rather than all at once. Pass 0 to disable. This flag is beta and may change in the future.")
 	cmd.Flags().BoolVar(&o.IgnoreNotFound, "ignore-not-found", o.IgnoreNotFound, "If the requested object does not exist the command will return exit code 0.")
 	cmd.Flags().StringVarP(&o.LabelSelector, "selector", "l", o.LabelSelector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
 	cmd.Flags().StringVar(&o.FieldSelector, "field-selector", o.FieldSelector, "Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type.")
@@ -183,6 +182,7 @@ func NewCmdGet(parent string, f cmdutil.Factory, streams genericclioptions.IOStr
 	addOpenAPIPrintColumnFlags(cmd, o)
 	addServerPrintColumnFlags(cmd, o)
 	cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, "identifying the resource to get from a server.")
+	cmdutil.AddChunkSizeFlag(cmd, &o.ChunkSize)
 	return cmd
 }
diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/util/helpers.go b/staging/src/k8s.io/kubectl/pkg/cmd/util/helpers.go
index 8fadbf83439..40746f7d098 100644
--- a/staging/src/k8s.io/kubectl/pkg/cmd/util/helpers.go
+++ b/staging/src/k8s.io/kubectl/pkg/cmd/util/helpers.go
@@ -464,6 +464,11 @@ func AddGeneratorFlags(cmd *cobra.Command, defaultGenerator string) {
 	AddDryRunFlag(cmd)
 }
 
+func AddChunkSizeFlag(cmd *cobra.Command, value *int64) {
+	cmd.Flags().Int64Var(value, "chunk-size", *value,
+		"Return large lists in chunks rather than all at once. Pass 0 to disable. This flag is beta and may change in the future.")
+}
+
 type ValidateOptions struct {
 	EnableValidation bool
 }

From c56be1fa9f1a0329c08cd0e2f9fc46dd903fc9ca Mon Sep 17 00:00:00 2001
From: Katrina Verey
Date: Fri, 19 Mar 2021 08:31:55 -0700
Subject: [PATCH 3/4] Extract helper for continue handling

---
 .../k8s.io/cli-runtime/pkg/resource/helper.go |  51 ++++++
 .../cli-runtime/pkg/resource/helper_test.go   | 173 ++++++++++++++++++
 .../cli-runtime/pkg/resource/selector.go      |  53 ++----
 3 files changed, 239 insertions(+), 38 deletions(-)

diff --git a/staging/src/k8s.io/cli-runtime/pkg/resource/helper.go b/staging/src/k8s.io/cli-runtime/pkg/resource/helper.go
index 0132759d022..684802e8857 100644
--- a/staging/src/k8s.io/cli-runtime/pkg/resource/helper.go
+++ b/staging/src/k8s.io/cli-runtime/pkg/resource/helper.go
@@ -18,9 +18,12 @@ package resource
 
 import (
 	"context"
+	"fmt"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
@@ -90,6 +93,54 @@ func (m *Helper) List(namespace, apiVersion string, options *metav1.ListOptions)
 	return req.Do(context.TODO()).Get()
 }
 
+// FollowContinue handles the continue parameter returned by the API server when using list
+// chunking. To take advantage of this, the initial ListOptions provided by the consumer
+// should include a non-zero Limit parameter.
+func FollowContinue(initialOpts *metav1.ListOptions,
+	listFunc func(metav1.ListOptions) (runtime.Object, error)) error {
+	opts := initialOpts
+	for {
+		list, err := listFunc(*opts)
+		if err != nil {
+			return err
+		}
+		nextContinueToken, _ := metadataAccessor.Continue(list)
+		if len(nextContinueToken) == 0 {
+			return nil
+		}
+		opts.Continue = nextContinueToken
+	}
+}
+
+// EnhanceListError augments errors typically returned by List operations with additional
+// context, making sure to retain the StatusError type when applicable.
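+//
+// A minimal usage sketch (the "pods" subject string is illustrative; any
+// human-readable description of the listed resource works):
+//
+//	list, err := helper.List(namespace, "v1", &opts)
+//	if err != nil {
+//		return EnhanceListError(err, opts, "pods")
+//	}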
+func EnhanceListError(err error, opts metav1.ListOptions, subj string) error {
+	if apierrors.IsResourceExpired(err) {
+		return err
+	}
+	if apierrors.IsBadRequest(err) || apierrors.IsNotFound(err) {
+		if se, ok := err.(*apierrors.StatusError); ok {
+			// modify the message without hiding this is an API error
+			if len(opts.LabelSelector) == 0 && len(opts.FieldSelector) == 0 {
+				se.ErrStatus.Message = fmt.Sprintf("Unable to list %q: %v", subj,
+					se.ErrStatus.Message)
+			} else {
+				se.ErrStatus.Message = fmt.Sprintf(
+					"Unable to find %q that match label selector %q, field selector %q: %v", subj,
+					opts.LabelSelector,
+					opts.FieldSelector, se.ErrStatus.Message)
+			}
+			return se
+		}
+		if len(opts.LabelSelector) == 0 && len(opts.FieldSelector) == 0 {
+			return fmt.Errorf("Unable to list %q: %v", subj, err)
+		}
+		return fmt.Errorf("Unable to find %q that match label selector %q, field selector %q: %v",
+			subj, opts.LabelSelector, opts.FieldSelector, err)
+	}
+	return err
+}
+
 func (m *Helper) Watch(namespace, apiVersion string, options *metav1.ListOptions) (watch.Interface, error) {
 	options.Watch = true
 	return m.RESTClient.Get().
diff --git a/staging/src/k8s.io/cli-runtime/pkg/resource/helper_test.go b/staging/src/k8s.io/cli-runtime/pkg/resource/helper_test.go
index 3c32b8e9d07..3c5198aa4cb 100644
--- a/staging/src/k8s.io/cli-runtime/pkg/resource/helper_test.go
+++ b/staging/src/k8s.io/cli-runtime/pkg/resource/helper_test.go
@@ -19,6 +19,7 @@ package resource
 import (
 	"bytes"
 	"errors"
+	"fmt"
 	"io"
 	"io/ioutil"
 	"net/http"
@@ -26,6 +27,7 @@ import (
 	"strings"
 	"testing"
 
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -628,3 +630,174 @@ func TestHelperReplace(t *testing.T) {
 		})
 	}
 }
+
+func TestEnhanceListError(t *testing.T) {
+	podGVR := corev1.SchemeGroupVersion.WithResource(corev1.ResourcePods.String())
+	podSubject := podGVR.String()
+	tests := []struct {
+		name string
+		err  error
+		opts metav1.ListOptions
+		subj string
+
+		expectedErr     string
+		expectStatusErr bool
+	}{
+		{
+			name:            "leaves resource expired error as is",
+			err:             apierrors.NewResourceExpired("resourceversion too old"),
+			opts:            metav1.ListOptions{},
+			subj:            podSubject,
+			expectedErr:     "resourceversion too old",
+			expectStatusErr: true,
+		}, {
+			name:            "leaves unrecognized error as is",
+			err:             errors.New("something went wrong"),
+			opts:            metav1.ListOptions{},
+			subj:            podSubject,
+			expectedErr:     "something went wrong",
+			expectStatusErr: false,
+		}, {
+			name:            "bad request StatusError without selectors",
+			err:             apierrors.NewBadRequest("request is invalid"),
+			opts:            metav1.ListOptions{},
+			subj:            podSubject,
+			expectedErr:     "Unable to list \"/v1, Resource=pods\": request is invalid",
+			expectStatusErr: true,
+		}, {
+			name: "bad request StatusError with selectors",
+			err:  apierrors.NewBadRequest("request is invalid"),
+			opts: metav1.ListOptions{
+				LabelSelector: "a=b",
+				FieldSelector: ".spec.nodeName=foo",
+			},
+			subj:            podSubject,
+			expectedErr:     "Unable to find \"/v1, Resource=pods\" that match label selector \"a=b\", field selector \".spec.nodeName=foo\": request is invalid",
+			expectStatusErr: true,
+		}, {
+			name:            "not found without selectors",
+			err:             apierrors.NewNotFound(podGVR.GroupResource(), "foo"),
+			opts:            metav1.ListOptions{},
+			subj:            podSubject,
+			expectedErr:     "Unable to list \"/v1, Resource=pods\": pods \"foo\" not found",
+			expectStatusErr: true,
+		}, {
+			name: "not found StatusError with selectors",
+			err:  apierrors.NewNotFound(podGVR.GroupResource(), "foo"),
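+			// Selectors below switch the message to the "Unable to find ...
+			// that match" form while keeping the StatusError type.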
+			opts: metav1.ListOptions{
+				LabelSelector: "a=b",
+				FieldSelector: ".spec.nodeName=foo",
+			},
+			subj:            podSubject,
+			expectedErr:     "Unable to find \"/v1, Resource=pods\" that match label selector \"a=b\", field selector \".spec.nodeName=foo\": pods \"foo\" not found",
+			expectStatusErr: true,
+		}, {
+			name: "non StatusError without selectors",
+			err: fmt.Errorf("extra info: %w", apierrors.NewNotFound(podGVR.GroupResource(),
+				"foo")),
+			opts:            metav1.ListOptions{},
+			subj:            podSubject,
+			expectedErr:     "Unable to list \"/v1, Resource=pods\": extra info: pods \"foo\" not found",
+			expectStatusErr: false,
+		}, {
+			name: "non StatusError with selectors",
+			err:  fmt.Errorf("extra info: %w", apierrors.NewNotFound(podGVR.GroupResource(), "foo")),
+			opts: metav1.ListOptions{
+				LabelSelector: "a=b",
+				FieldSelector: ".spec.nodeName=foo",
+			},
+			subj: podSubject,
+			expectedErr: "Unable to find \"/v1, " +
+				"Resource=pods\" that match label selector \"a=b\", " +
+				"field selector \".spec.nodeName=foo\": extra info: pods \"foo\" not found",
+			expectStatusErr: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			err := EnhanceListError(tt.err, tt.opts, tt.subj)
+			if err == nil {
+				t.Errorf("EnhanceListError did not return an error")
+			}
+			if err.Error() != tt.expectedErr {
+				t.Errorf("EnhanceListError() error = %q, expectedErr %q", err, tt.expectedErr)
+			}
+			if tt.expectStatusErr {
+				if _, ok := err.(*apierrors.StatusError); !ok {
+					t.Errorf("EnhanceListError incorrectly returned a non-StatusError: %v", err)
+				}
+			}
+		})
+	}
+}
+
+func TestFollowContinue(t *testing.T) {
+	var continueTokens []string
+	tests := []struct {
+		name        string
+		initialOpts *metav1.ListOptions
+		tokensSeen  []string
+		listFunc    func(metav1.ListOptions) (runtime.Object, error)
+
+		expectedTokens []string
+		wantErr        string
+	}{
+		{
+			name:        "updates list options with continue token until list finished",
+			initialOpts: &metav1.ListOptions{},
+			listFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+				continueTokens = append(continueTokens, options.Continue)
+				obj := corev1.PodList{}
+				switch options.Continue {
+				case "":
+					metadataAccessor.SetContinue(&obj, "abc")
+				case "abc":
+					metadataAccessor.SetContinue(&obj, "def")
+				case "def":
+					metadataAccessor.SetKind(&obj, "ListComplete")
+				}
+				return &obj, nil
+			},
+			expectedTokens: []string{"", "abc", "def"},
+		},
+		{
+			name:        "stops looping if listFunc returns an error",
+			initialOpts: &metav1.ListOptions{},
+			listFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+				continueTokens = append(continueTokens, options.Continue)
+				obj := corev1.PodList{}
+				switch options.Continue {
+				case "":
+					metadataAccessor.SetContinue(&obj, "abc")
+				case "abc":
+					return nil, fmt.Errorf("err from list func")
+				case "def":
+					metadataAccessor.SetKind(&obj, "ListComplete")
+				}
+				return &obj, nil
+			},
+			expectedTokens: []string{"", "abc"},
+			wantErr:        "err from list func",
+		},
+	}
+	for _, tt := range tests {
+		continueTokens = []string{}
+		t.Run(tt.name, func(t *testing.T) {
+			err := FollowContinue(tt.initialOpts, tt.listFunc)
+			if tt.wantErr != "" {
+				if err == nil {
+					t.Fatalf("FollowContinue was expected to return an error and did not")
+				} else if err.Error() != tt.wantErr {
+					t.Fatalf("wanted error %q, got %q", tt.wantErr, err.Error())
+				}
+			} else {
+				if err != nil {
+					t.Errorf("FollowContinue failed: %v", err)
+				}
+				if !reflect.DeepEqual(continueTokens, tt.expectedTokens) {
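+					// FollowContinue should pass the zero-value token on the
+					// first call and each server-returned token in order.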
t.Errorf("got token list %q, wanted %q", continueTokens, tt.expectedTokens) + } + } + }) + } +} diff --git a/staging/src/k8s.io/cli-runtime/pkg/resource/selector.go b/staging/src/k8s.io/cli-runtime/pkg/resource/selector.go index d29dfc46c39..2a283d4e060 100644 --- a/staging/src/k8s.io/cli-runtime/pkg/resource/selector.go +++ b/staging/src/k8s.io/cli-runtime/pkg/resource/selector.go @@ -17,11 +17,9 @@ limitations under the License. package resource import ( - "fmt" - - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" ) @@ -49,41 +47,23 @@ func NewSelector(client RESTClient, mapping *meta.RESTMapping, namespace, labelS // Visit implements Visitor and uses request chunking by default. func (r *Selector) Visit(fn VisitorFunc) error { - var continueToken string - for { - list, err := NewHelper(r.Client, r.Mapping).List( + helper := NewHelper(r.Client, r.Mapping) + initialOpts := metav1.ListOptions{ + LabelSelector: r.LabelSelector, + FieldSelector: r.FieldSelector, + Limit: r.LimitChunks, + } + return FollowContinue(&initialOpts, func(options metav1.ListOptions) (runtime.Object, error) { + list, err := helper.List( r.Namespace, r.ResourceMapping().GroupVersionKind.GroupVersion().String(), - &metav1.ListOptions{ - LabelSelector: r.LabelSelector, - FieldSelector: r.FieldSelector, - Limit: r.LimitChunks, - Continue: continueToken, - }, + &options, ) if err != nil { - if errors.IsResourceExpired(err) { - return err - } - if errors.IsBadRequest(err) || errors.IsNotFound(err) { - if se, ok := err.(*errors.StatusError); ok { - // modify the message without hiding this is an API error - if len(r.LabelSelector) == 0 && len(r.FieldSelector) == 0 { - se.ErrStatus.Message = fmt.Sprintf("Unable to list %q: %v", r.Mapping.Resource, se.ErrStatus.Message) - } else { - se.ErrStatus.Message = fmt.Sprintf("Unable to find %q that match label selector %q, field selector %q: %v", r.Mapping.Resource, r.LabelSelector, r.FieldSelector, se.ErrStatus.Message) - } - return se - } - if len(r.LabelSelector) == 0 && len(r.FieldSelector) == 0 { - return fmt.Errorf("Unable to list %q: %v", r.Mapping.Resource, err) - } - return fmt.Errorf("Unable to find %q that match label selector %q, field selector %q: %v", r.Mapping.Resource, r.LabelSelector, r.FieldSelector, err) - } - return err + return nil, EnhanceListError(err, options, r.Mapping.Resource.String()) } resourceVersion, _ := metadataAccessor.ResourceVersion(list) - nextContinueToken, _ := metadataAccessor.Continue(list) + info := &Info{ Client: r.Client, Mapping: r.Mapping, @@ -95,13 +75,10 @@ func (r *Selector) Visit(fn VisitorFunc) error { } if err := fn(info, nil); err != nil { - return err + return nil, err } - if len(nextContinueToken) == 0 { - return nil - } - continueToken = nextContinueToken - } + return list, nil + }) } func (r *Selector) Watch(resourceVersion string) (watch.Interface, error) { From 5200ff86d0983e42c3d2d5c0d830e96495516347 Mon Sep 17 00:00:00 2001 From: Katrina Verey Date: Fri, 19 Mar 2021 08:32:37 -0700 Subject: [PATCH 4/4] Add --chunk-size flag to kubectl drain --- .../src/k8s.io/kubectl/pkg/cmd/drain/drain.go | 3 +++ .../kubectl/pkg/cmd/drain/drain_test.go | 1 + staging/src/k8s.io/kubectl/pkg/drain/drain.go | 20 ++++++++++++-- test/cmd/node-management.sh | 26 +++++++++++++++++++ 4 files changed, 48 insertions(+), 2 deletions(-) diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go 
diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go b/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go
index 556fa3afa39..c2a13a078e8 100644
--- a/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go
+++ b/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go
@@ -148,6 +148,7 @@ func NewDrainCmdOptions(f cmdutil.Factory, ioStreams genericclioptions.IOStreams
 			GracePeriodSeconds: -1,
 			Out:                ioStreams.Out,
 			ErrOut:             ioStreams.ErrOut,
+			ChunkSize:          cmdutil.DefaultChunkSize,
 		},
 	}
 	o.drainer.OnPodDeletedOrEvicted = o.onPodDeletedOrEvicted
@@ -198,6 +199,7 @@ func NewCmdDrain(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobr
 	cmd.Flags().BoolVar(&o.drainer.DisableEviction, "disable-eviction", o.drainer.DisableEviction, "Force drain to use delete, even if eviction is supported. This will bypass checking PodDisruptionBudgets, use with caution.")
 	cmd.Flags().IntVar(&o.drainer.SkipWaitForDeleteTimeoutSeconds, "skip-wait-for-delete-timeout", o.drainer.SkipWaitForDeleteTimeoutSeconds, "If pod DeletionTimestamp older than N seconds, skip waiting for the pod. Seconds must be greater than 0 to skip.")
+	cmdutil.AddChunkSizeFlag(cmd, &o.drainer.ChunkSize)
 	cmdutil.AddDryRunFlag(cmd)
 	return cmd
 }
@@ -256,6 +258,7 @@ func (o *DrainCmdOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args [
 	builder := f.NewBuilder().
 		WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...).
 		NamespaceParam(o.Namespace).DefaultNamespace().
+		RequestChunksOf(o.drainer.ChunkSize).
 		ResourceNames("nodes", args...).
 		SingleResourceType().
 		Flatten()
diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain_test.go b/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain_test.go
index adb2b29ea06..34568342b7b 100644
--- a/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain_test.go
+++ b/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain_test.go
@@ -810,6 +810,7 @@ func TestDrain(t *testing.T) {
 			}
 			getParams := make(url.Values)
 			getParams["fieldSelector"] = []string{"spec.nodeName=node"}
+			getParams["limit"] = []string{"500"}
 			if !reflect.DeepEqual(getParams, values) {
 				t.Fatalf("%s: expected:\n%v\nsaw:\n%v\n", test.description, getParams, values)
 			}
diff --git a/staging/src/k8s.io/kubectl/pkg/drain/drain.go b/staging/src/k8s.io/kubectl/pkg/drain/drain.go
index 3428e91e38b..26585814750 100644
--- a/staging/src/k8s.io/kubectl/pkg/drain/drain.go
+++ b/staging/src/k8s.io/kubectl/pkg/drain/drain.go
@@ -29,6 +29,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/cli-runtime/pkg/resource"
@@ -61,6 +62,7 @@ type Helper struct {
 	DeleteEmptyDirData bool
 	Selector           string
 	PodSelector        string
+	ChunkSize          int64
 
 	// DisableEviction forces drain to use delete rather than evict
 	DisableEviction bool
@@ -189,9 +191,23 @@ func (d *Helper) GetPodsForDeletion(nodeName string) (*PodDeleteList, []error) {
 		return nil, []error{err}
 	}
 
-	podList, err := d.Client.CoreV1().Pods(metav1.NamespaceAll).List(d.getContext(), metav1.ListOptions{
+	podList := &corev1.PodList{}
+	initialOpts := &metav1.ListOptions{
 		LabelSelector: labelSelector.String(),
-		FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName}).String()})
+		FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName}).String(),
+		Limit:         d.ChunkSize,
+	}
+
+	err = resource.FollowContinue(initialOpts, func(options metav1.ListOptions) (runtime.Object, error) {
+		newPods, err := d.Client.CoreV1().Pods(metav1.NamespaceAll).List(d.getContext(), options)
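+		// Each successful call yields one page of pods; FollowContinue reads the
+		// continue token from the returned object to decide whether to fetch more.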
+		if err != nil {
+			podR := corev1.SchemeGroupVersion.WithResource(corev1.ResourcePods.String())
+			return nil, resource.EnhanceListError(err, options, podR.String())
+		}
+		podList.Items = append(podList.Items, newPods.Items...)
+		return newPods, nil
+	})
+
 	if err != nil {
 		return nil, []error{err}
 	}
diff --git a/test/cmd/node-management.sh b/test/cmd/node-management.sh
index 786c0c113dd..d0f6f39b2bf 100755
--- a/test/cmd/node-management.sh
+++ b/test/cmd/node-management.sh
@@ -167,6 +167,32 @@ run_cluster_management_tests() {
   response=$(! kubectl drain "127.0.0.1" --selector test=label 2>&1)
   kube::test::if_has_string "${response}" 'cannot specify both a node name'
 
+  ### Test kubectl drain chunk size
+  # Pre-condition: node exists and contains label test=label
+  kube::test::get_object_assert "nodes 127.0.0.1" '{{.metadata.labels.test}}' 'label'
+  # Pre-condition: node is schedulable
+  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
+  # Pre-condition: test-pod-1 and test-pod-2 exist
+  kube::test::get_object_assert "pods" "{{range .items}}{{.metadata.name}},{{end}}" 'test-pod-1,test-pod-2,'
+  # command - need to use force because pods are unmanaged, dry run (or skip-wait) because node is unready
+  output_message=$(kubectl --v=6 drain --force --pod-selector type=test-pod --selector test=label --chunk-size=1 --dry-run=client 2>&1 "${kube_flags[@]}")
+  # Post-condition: Check if we get a limit on node, and both limit and continue on pods
+  kube::test::if_has_string "${output_message}" "/v1/nodes?labelSelector=test%3Dlabel&limit=1 200 OK"
+  kube::test::if_has_string "${output_message}" "/v1/pods?fieldSelector=spec.nodeName%3D127.0.0.1&labelSelector=type%3Dtest-pod&limit=1 200 OK"
+  kube::test::if_has_string "${output_message}" "/v1/pods?continue=.*&fieldSelector=spec.nodeName%3D127.0.0.1&labelSelector=type%3Dtest-pod&limit=1 200 OK"
+  # Post-condition: Check we evict multiple pages worth of pods
+  kube::test::if_has_string "${output_message}" "evicting pod .*/test-pod-1"
+  kube::test::if_has_string "${output_message}" "evicting pod .*/test-pod-2"
+  # Post-condition: node is schedulable
+  kubectl uncordon "127.0.0.1"
+  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
+
+  ### Test kubectl drain chunk size defaults to 500
+  output_message=$(kubectl --v=6 drain --force --selector test=label --dry-run=client 2>&1 "${kube_flags[@]}")
+  # Post-condition: Check if we get a limit
+  kube::test::if_has_string "${output_message}" "/v1/nodes?labelSelector=test%3Dlabel&limit=500 200 OK"
+  kube::test::if_has_string "${output_message}" "/v1/pods?fieldSelector=spec.nodeName%3D127.0.0.1&limit=500 200 OK"
+
   ### kubectl cordon command fails when no arguments are passed
   # Pre-condition: node exists
   response=$(! kubectl cordon 2>&1)