Merge pull request #100148 from KnVerey/kubectl-drain-chunk-size
Add --chunk-size flag to kubectl drain
commit d7355278b3
@@ -18,9 +18,12 @@ package resource

import (
	"context"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
@@ -90,6 +93,54 @@ func (m *Helper) List(namespace, apiVersion string, options *metav1.ListOptions)
	return req.Do(context.TODO()).Get()
}

// FollowContinue handles the continue parameter returned by the API server when using list
// chunking. To take advantage of this, the initial ListOptions provided by the consumer
// should include a non-zero Limit parameter.
func FollowContinue(initialOpts *metav1.ListOptions,
	listFunc func(metav1.ListOptions) (runtime.Object, error)) error {
	opts := initialOpts
	for {
		list, err := listFunc(*opts)
		if err != nil {
			return err
		}
		nextContinueToken, _ := metadataAccessor.Continue(list)
		if len(nextContinueToken) == 0 {
			return nil
		}
		opts.Continue = nextContinueToken
	}
}

// EnhanceListError augments errors typically returned by List operations with additional context,
// making sure to retain the StatusError type when applicable.
func EnhanceListError(err error, opts metav1.ListOptions, subj string) error {
	if apierrors.IsResourceExpired(err) {
		return err
	}
	if apierrors.IsBadRequest(err) || apierrors.IsNotFound(err) {
		if se, ok := err.(*apierrors.StatusError); ok {
			// modify the message without hiding this is an API error
			if len(opts.LabelSelector) == 0 && len(opts.FieldSelector) == 0 {
				se.ErrStatus.Message = fmt.Sprintf("Unable to list %q: %v", subj,
					se.ErrStatus.Message)
			} else {
				se.ErrStatus.Message = fmt.Sprintf(
					"Unable to find %q that match label selector %q, field selector %q: %v", subj,
					opts.LabelSelector,
					opts.FieldSelector, se.ErrStatus.Message)
			}
			return se
		}
		if len(opts.LabelSelector) == 0 && len(opts.FieldSelector) == 0 {
			return fmt.Errorf("Unable to list %q: %v", subj, err)
		}
		return fmt.Errorf("Unable to find %q that match label selector %q, field selector %q: %v",
			subj, opts.LabelSelector, opts.FieldSelector, err)
	}
	return err
}

func (m *Helper) Watch(namespace, apiVersion string, options *metav1.ListOptions) (watch.Interface, error) {
	options.Watch = true
	return m.RESTClient.Get().
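The new FollowContinue helper reduces chunked listing to a single loop: the caller seeds ListOptions with a non-zero Limit and supplies a closure that performs one List call, and the helper keeps re-invoking the closure with the server's continue token until none is returned. A minimal consumer sketch, under assumptions not in this diff (an already-constructed kubernetes.Interface named cs, an illustrative package and function name, an arbitrary chunk size of 100), pairing FollowContinue with EnhanceListError the same way the kubectl changes below do:

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/cli-runtime/pkg/resource"
	"k8s.io/client-go/kubernetes"
)

// listPodsInChunks pages through all pods 100 at a time. The clientset cs is
// assumed to be built elsewhere; the chunk size and names are illustrative only.
func listPodsInChunks(cs kubernetes.Interface) error {
	initialOpts := &metav1.ListOptions{Limit: 100}
	return resource.FollowContinue(initialOpts,
		func(options metav1.ListOptions) (runtime.Object, error) {
			// One page per call; FollowContinue feeds the continue token back in.
			pods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), options)
			if err != nil {
				// Keep the StatusError type but say which resource was being listed.
				return nil, resource.EnhanceListError(err, options, "pods")
			}
			fmt.Printf("received %d pods in this chunk\n", len(pods.Items))
			return pods, nil
		})
}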
@@ -19,6 +19,7 @@ package resource
import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
@@ -26,6 +27,7 @@ import (
	"strings"
	"testing"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
@@ -628,3 +630,174 @@ func TestHelperReplace(t *testing.T) {
		})
	}
}

func TestEnhanceListError(t *testing.T) {
	podGVR := corev1.SchemeGroupVersion.WithResource(corev1.ResourcePods.String())
	podSubject := podGVR.String()
	tests := []struct {
		name string
		err  error
		opts metav1.ListOptions
		subj string

		expectedErr     string
		expectStatusErr bool
	}{
		{
			name:            "leaves resource expired error as is",
			err:             apierrors.NewResourceExpired("resourceversion too old"),
			opts:            metav1.ListOptions{},
			subj:            podSubject,
			expectedErr:     "resourceversion too old",
			expectStatusErr: true,
		}, {
			name:            "leaves unrecognized error as is",
			err:             errors.New("something went wrong"),
			opts:            metav1.ListOptions{},
			subj:            podSubject,
			expectedErr:     "something went wrong",
			expectStatusErr: false,
		}, {
			name:            "bad request StatusError without selectors",
			err:             apierrors.NewBadRequest("request is invalid"),
			opts:            metav1.ListOptions{},
			subj:            podSubject,
			expectedErr:     "Unable to list \"/v1, Resource=pods\": request is invalid",
			expectStatusErr: true,
		}, {
			name: "bad request StatusError with selectors",
			err:  apierrors.NewBadRequest("request is invalid"),
			opts: metav1.ListOptions{
				LabelSelector: "a=b",
				FieldSelector: ".spec.nodeName=foo",
			},
			subj:            podSubject,
			expectedErr:     "Unable to find \"/v1, Resource=pods\" that match label selector \"a=b\", field selector \".spec.nodeName=foo\": request is invalid",
			expectStatusErr: true,
		}, {
			name:            "not found without selectors",
			err:             apierrors.NewNotFound(podGVR.GroupResource(), "foo"),
			opts:            metav1.ListOptions{},
			subj:            podSubject,
			expectedErr:     "Unable to list \"/v1, Resource=pods\": pods \"foo\" not found",
			expectStatusErr: true,
		}, {
			name: "not found StatusError with selectors",
			err:  apierrors.NewNotFound(podGVR.GroupResource(), "foo"),
			opts: metav1.ListOptions{
				LabelSelector: "a=b",
				FieldSelector: ".spec.nodeName=foo",
			},
			subj:            podSubject,
			expectedErr:     "Unable to find \"/v1, Resource=pods\" that match label selector \"a=b\", field selector \".spec.nodeName=foo\": pods \"foo\" not found",
			expectStatusErr: true,
		}, {
			name: "non StatusError without selectors",
			err: fmt.Errorf("extra info: %w", apierrors.NewNotFound(podGVR.GroupResource(),
				"foo")),
			opts:            metav1.ListOptions{},
			subj:            podSubject,
			expectedErr:     "Unable to list \"/v1, Resource=pods\": extra info: pods \"foo\" not found",
			expectStatusErr: false,
		}, {
			name: "non StatusError with selectors",
			err:  fmt.Errorf("extra info: %w", apierrors.NewNotFound(podGVR.GroupResource(), "foo")),
			opts: metav1.ListOptions{
				LabelSelector: "a=b",
				FieldSelector: ".spec.nodeName=foo",
			},
			subj: podSubject,
			expectedErr: "Unable to find \"/v1, " +
				"Resource=pods\" that match label selector \"a=b\", " +
				"field selector \".spec.nodeName=foo\": extra info: pods \"foo\" not found",
			expectStatusErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := EnhanceListError(tt.err, tt.opts, tt.subj)
			if err == nil {
				t.Errorf("EnhanceListError did not return an error")
			}
			if err.Error() != tt.expectedErr {
				t.Errorf("EnhanceListError() error = %q, expectedErr %q", err, tt.expectedErr)
			}
			if tt.expectStatusErr {
				if _, ok := err.(*apierrors.StatusError); !ok {
					t.Errorf("EnhanceListError incorrectly returned a non-StatusError: %v", err)
				}
			}
		})
	}
}

func TestFollowContinue(t *testing.T) {
	var continueTokens []string
	tests := []struct {
		name        string
		initialOpts *metav1.ListOptions
		tokensSeen  []string
		listFunc    func(metav1.ListOptions) (runtime.Object, error)

		expectedTokens []string
		wantErr        string
	}{
		{
			name:        "updates list options with continue token until list finished",
			initialOpts: &metav1.ListOptions{},
			listFunc: func(options metav1.ListOptions) (runtime.Object, error) {
				continueTokens = append(continueTokens, options.Continue)
				obj := corev1.PodList{}
				switch options.Continue {
				case "":
					metadataAccessor.SetContinue(&obj, "abc")
				case "abc":
					metadataAccessor.SetContinue(&obj, "def")
				case "def":
					metadataAccessor.SetKind(&obj, "ListComplete")
				}
				return &obj, nil
			},
			expectedTokens: []string{"", "abc", "def"},
		},
		{
			name:        "stops looping if listFunc returns an error",
			initialOpts: &metav1.ListOptions{},
			listFunc: func(options metav1.ListOptions) (runtime.Object, error) {
				continueTokens = append(continueTokens, options.Continue)
				obj := corev1.PodList{}
				switch options.Continue {
				case "":
					metadataAccessor.SetContinue(&obj, "abc")
				case "abc":
					return nil, fmt.Errorf("err from list func")
				case "def":
					metadataAccessor.SetKind(&obj, "ListComplete")
				}
				return &obj, nil
			},
			expectedTokens: []string{"", "abc"},
			wantErr:        "err from list func",
		},
	}
	for _, tt := range tests {
		continueTokens = []string{}
		t.Run(tt.name, func(t *testing.T) {
			err := FollowContinue(tt.initialOpts, tt.listFunc)
			if tt.wantErr != "" {
				if err == nil {
					t.Fatalf("FollowContinue was expected to return an error and did not")
				} else if err.Error() != tt.wantErr {
					t.Fatalf("wanted error %q, got %q", tt.wantErr, err.Error())
				}
			} else {
				if err != nil {
					t.Errorf("FollowContinue failed: %v", err)
				}
				if !reflect.DeepEqual(continueTokens, tt.expectedTokens) {
					t.Errorf("got token list %q, wanted %q", continueTokens, tt.expectedTokens)
				}
			}
		})
	}
}
@@ -17,11 +17,9 @@ limitations under the License.
package resource

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
)

@@ -49,41 +47,23 @@ func NewSelector(client RESTClient, mapping *meta.RESTMapping, namespace, labelS

// Visit implements Visitor and uses request chunking by default.
func (r *Selector) Visit(fn VisitorFunc) error {
	var continueToken string
	for {
		list, err := NewHelper(r.Client, r.Mapping).List(
	helper := NewHelper(r.Client, r.Mapping)
	initialOpts := metav1.ListOptions{
		LabelSelector: r.LabelSelector,
		FieldSelector: r.FieldSelector,
		Limit:         r.LimitChunks,
	}
	return FollowContinue(&initialOpts, func(options metav1.ListOptions) (runtime.Object, error) {
		list, err := helper.List(
			r.Namespace,
			r.ResourceMapping().GroupVersionKind.GroupVersion().String(),
			&metav1.ListOptions{
				LabelSelector: r.LabelSelector,
				FieldSelector: r.FieldSelector,
				Limit:         r.LimitChunks,
				Continue:      continueToken,
			},
			&options,
		)
		if err != nil {
			if errors.IsResourceExpired(err) {
				return err
			}
			if errors.IsBadRequest(err) || errors.IsNotFound(err) {
				if se, ok := err.(*errors.StatusError); ok {
					// modify the message without hiding this is an API error
					if len(r.LabelSelector) == 0 && len(r.FieldSelector) == 0 {
						se.ErrStatus.Message = fmt.Sprintf("Unable to list %q: %v", r.Mapping.Resource, se.ErrStatus.Message)
					} else {
						se.ErrStatus.Message = fmt.Sprintf("Unable to find %q that match label selector %q, field selector %q: %v", r.Mapping.Resource, r.LabelSelector, r.FieldSelector, se.ErrStatus.Message)
					}
					return se
				}
				if len(r.LabelSelector) == 0 && len(r.FieldSelector) == 0 {
					return fmt.Errorf("Unable to list %q: %v", r.Mapping.Resource, err)
				}
				return fmt.Errorf("Unable to find %q that match label selector %q, field selector %q: %v", r.Mapping.Resource, r.LabelSelector, r.FieldSelector, err)
			}
			return err
			return nil, EnhanceListError(err, options, r.Mapping.Resource.String())
		}
		resourceVersion, _ := metadataAccessor.ResourceVersion(list)
		nextContinueToken, _ := metadataAccessor.Continue(list)

		info := &Info{
			Client:  r.Client,
			Mapping: r.Mapping,
@@ -95,13 +75,10 @@ func (r *Selector) Visit(fn VisitorFunc) error {
		}

		if err := fn(info, nil); err != nil {
			return err
			return nil, err
		}
		if len(nextContinueToken) == 0 {
			return nil
		}
		continueToken = nextContinueToken
	}
	return list, nil
	})
}

func (r *Selector) Watch(resourceVersion string) (watch.Interface, error) {
@@ -148,6 +148,7 @@ func NewDrainCmdOptions(f cmdutil.Factory, ioStreams genericclioptions.IOStreams
			GracePeriodSeconds: -1,
			Out:                ioStreams.Out,
			ErrOut:             ioStreams.ErrOut,
			ChunkSize:          cmdutil.DefaultChunkSize,
		},
	}
	o.drainer.OnPodDeletedOrEvicted = o.onPodDeletedOrEvicted
@@ -198,6 +199,7 @@ func NewCmdDrain(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobr
	cmd.Flags().BoolVar(&o.drainer.DisableEviction, "disable-eviction", o.drainer.DisableEviction, "Force drain to use delete, even if eviction is supported. This will bypass checking PodDisruptionBudgets, use with caution.")
	cmd.Flags().IntVar(&o.drainer.SkipWaitForDeleteTimeoutSeconds, "skip-wait-for-delete-timeout", o.drainer.SkipWaitForDeleteTimeoutSeconds, "If pod DeletionTimestamp older than N seconds, skip waiting for the pod. Seconds must be greater than 0 to skip.")

	cmdutil.AddChunkSizeFlag(cmd, &o.drainer.ChunkSize)
	cmdutil.AddDryRunFlag(cmd)
	return cmd
}
@@ -256,6 +258,7 @@ func (o *DrainCmdOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args [
	builder := f.NewBuilder().
		WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...).
		NamespaceParam(o.Namespace).DefaultNamespace().
		RequestChunksOf(o.drainer.ChunkSize).
		ResourceNames("nodes", args...).
		SingleResourceType().
		Flatten()
@@ -810,6 +810,7 @@ func TestDrain(t *testing.T) {
				}
				getParams := make(url.Values)
				getParams["fieldSelector"] = []string{"spec.nodeName=node"}
				getParams["limit"] = []string{"500"}
				if !reflect.DeepEqual(getParams, values) {
					t.Fatalf("%s: expected:\n%v\nsaw:\n%v\n", test.description, getParams, values)
				}
@@ -145,7 +145,7 @@ func NewGetOptions(parent string, streams genericclioptions.IOStreams) *GetOptio
		CmdParent: parent,

		IOStreams:   streams,
		ChunkSize:   500,
		ChunkSize:   cmdutil.DefaultChunkSize,
		ServerPrint: true,
	}
}
@@ -175,7 +175,6 @@ func NewCmdGet(parent string, f cmdutil.Factory, streams genericclioptions.IOStr
	cmd.Flags().BoolVarP(&o.Watch, "watch", "w", o.Watch, "After listing/getting the requested object, watch for changes. Uninitialized objects are excluded if no object name is provided.")
	cmd.Flags().BoolVar(&o.WatchOnly, "watch-only", o.WatchOnly, "Watch for changes to the requested object(s), without listing/getting first.")
	cmd.Flags().BoolVar(&o.OutputWatchEvents, "output-watch-events", o.OutputWatchEvents, "Output watch event objects when --watch or --watch-only is used. Existing objects are output as initial ADDED events.")
	cmd.Flags().Int64Var(&o.ChunkSize, "chunk-size", o.ChunkSize, "Return large lists in chunks rather than all at once. Pass 0 to disable. This flag is beta and may change in the future.")
	cmd.Flags().BoolVar(&o.IgnoreNotFound, "ignore-not-found", o.IgnoreNotFound, "If the requested object does not exist the command will return exit code 0.")
	cmd.Flags().StringVarP(&o.LabelSelector, "selector", "l", o.LabelSelector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
	cmd.Flags().StringVar(&o.FieldSelector, "field-selector", o.FieldSelector, "Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type.")
@@ -183,6 +182,7 @@ func NewCmdGet(parent string, f cmdutil.Factory, streams genericclioptions.IOStr
	addOpenAPIPrintColumnFlags(cmd, o)
	addServerPrintColumnFlags(cmd, o)
	cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, "identifying the resource to get from a server.")
	cmdutil.AddChunkSizeFlag(cmd, &o.ChunkSize)
	return cmd
}
@@ -50,6 +50,7 @@ import (
const (
	ApplyAnnotationsFlag = "save-config"
	DefaultErrorExitCode = 1
	DefaultChunkSize     = 500
)

type debugError interface {
@@ -463,6 +464,11 @@ func AddGeneratorFlags(cmd *cobra.Command, defaultGenerator string) {
	AddDryRunFlag(cmd)
}

func AddChunkSizeFlag(cmd *cobra.Command, value *int64) {
	cmd.Flags().Int64Var(value, "chunk-size", *value,
		"Return large lists in chunks rather than all at once. Pass 0 to disable. This flag is beta and may change in the future.")
}

type ValidateOptions struct {
	EnableValidation bool
}
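AddChunkSizeFlag centralizes registration of the --chunk-size flag so every list-backed command picks up DefaultChunkSize (500) and the same help text. A sketch of how a hypothetical subcommand would wire it up, mirroring what NewCmdDrain and NewCmdGet do above; the command name and options struct here are illustrative, not part of this change:

package example

import (
	"github.com/spf13/cobra"

	cmdutil "k8s.io/kubectl/pkg/cmd/util"
)

// exampleOptions stands in for a real command's options struct.
type exampleOptions struct {
	ChunkSize int64
}

// newCmdExample shows only the wiring: seed the shared default, then register the flag.
func newCmdExample() *cobra.Command {
	o := &exampleOptions{ChunkSize: cmdutil.DefaultChunkSize}
	cmd := &cobra.Command{Use: "example"}
	// Adds --chunk-size bound to o.ChunkSize, defaulting to 500.
	cmdutil.AddChunkSizeFlag(cmd, &o.ChunkSize)
	return cmd
}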
@@ -29,6 +29,7 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/cli-runtime/pkg/resource"
@@ -61,6 +62,7 @@ type Helper struct {
	DeleteEmptyDirData bool
	Selector           string
	PodSelector        string
	ChunkSize          int64

	// DisableEviction forces drain to use delete rather than evict
	DisableEviction bool
@@ -189,9 +191,23 @@ func (d *Helper) GetPodsForDeletion(nodeName string) (*PodDeleteList, []error) {
		return nil, []error{err}
	}

	podList, err := d.Client.CoreV1().Pods(metav1.NamespaceAll).List(d.getContext(), metav1.ListOptions{
	podList := &corev1.PodList{}
	initialOpts := &metav1.ListOptions{
		LabelSelector: labelSelector.String(),
		FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName}).String()})
		FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName}).String(),
		Limit:         d.ChunkSize,
	}

	err = resource.FollowContinue(initialOpts, func(options metav1.ListOptions) (runtime.Object, error) {
		newPods, err := d.Client.CoreV1().Pods(metav1.NamespaceAll).List(d.getContext(), options)
		if err != nil {
			podR := corev1.SchemeGroupVersion.WithResource(corev1.ResourcePods.String())
			return nil, resource.EnhanceListError(err, options, podR.String())
		}
		podList.Items = append(podList.Items, newPods.Items...)
		return newPods, nil
	})

	if err != nil {
		return nil, []error{err}
	}
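Because paging now happens inside GetPodsForDeletion, a library consumer of the drain Helper only needs to set ChunkSize (the kubectl command seeds it with cmdutil.DefaultChunkSize). A rough sketch of such a caller; the clientset, node name, wrapper function, and use of list.Pods() are assumptions for illustration, not something this PR prescribes:

package example

import (
	"context"
	"fmt"
	"os"

	"k8s.io/client-go/kubernetes"
	"k8s.io/kubectl/pkg/drain"
)

// podsToDelete reports how many pods draining nodeName would remove,
// listing them in chunks of 500 via the new ChunkSize field.
func podsToDelete(cs kubernetes.Interface, nodeName string) error {
	helper := &drain.Helper{
		Ctx:       context.TODO(),
		Client:    cs,
		ChunkSize: 500, // pods are listed 500 at a time via FollowContinue
		Out:       os.Stdout,
		ErrOut:    os.Stderr,
	}
	list, errs := helper.GetPodsForDeletion(nodeName)
	if len(errs) > 0 {
		return errs[0]
	}
	fmt.Printf("%d pods would be deleted or evicted\n", len(list.Pods()))
	return nil
}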
@@ -167,6 +167,32 @@ run_cluster_management_tests() {
  response=$(! kubectl drain "127.0.0.1" --selector test=label 2>&1)
  kube::test::if_has_string "${response}" 'cannot specify both a node name'

  ### Test kubectl drain chunk size
  # Pre-condition: node exists and contains label test=label
  kube::test::get_object_assert "nodes 127.0.0.1" '{{.metadata.labels.test}}' 'label'
  # Pre-condition: node is schedulable
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
  # Pre-condition: test-pod-1 and test-pod-2 exist
  kube::test::get_object_assert "pods" "{{range .items}}{{.metadata.name}},{{end}}" 'test-pod-1,test-pod-2,'
  # command - need to use force because pods are unmanaged, dry run (or skip-wait) because node is unready
  output_message=$(kubectl --v=6 drain --force --pod-selector type=test-pod --selector test=label --chunk-size=1 --dry-run=client 2>&1 "${kube_flags[@]}")
  # Post-condition: Check if we get a limit on node, and both limit and continue on pods
  kube::test::if_has_string "${output_message}" "/v1/nodes?labelSelector=test%3Dlabel&limit=1 200 OK"
  kube::test::if_has_string "${output_message}" "/v1/pods?fieldSelector=spec.nodeName%3D127.0.0.1&labelSelector=type%3Dtest-pod&limit=1 200 OK"
  kube::test::if_has_string "${output_message}" "/v1/pods?continue=.*&fieldSelector=spec.nodeName%3D127.0.0.1&labelSelector=type%3Dtest-pod&limit=1 200 OK"
  # Post-condition: Check we evict multiple pages worth of pods
  kube::test::if_has_string "${output_message}" "evicting pod .*/test-pod-1"
  kube::test::if_has_string "${output_message}" "evicting pod .*/test-pod-2"
  # Post-condition: node is schedulable
  kubectl uncordon "127.0.0.1"
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'

  ### Test kubectl drain chunk size defaults to 500
  output_message=$(kubectl --v=6 drain --force --selector test=label --dry-run=client 2>&1 "${kube_flags[@]}")
  # Post-condition: Check if we get a limit
  kube::test::if_has_string "${output_message}" "/v1/nodes?labelSelector=test%3Dlabel&limit=500 200 OK"
  kube::test::if_has_string "${output_message}" "/v1/pods?fieldSelector=spec.nodeName%3D127.0.0.1&limit=500 200 OK"

  ### kubectl cordon command fails when no arguments are passed
  # Pre-condition: node exists
  response=$(! kubectl cordon 2>&1)