Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-21 02:41:25 +00:00
Add --chunk-size flag to kubectl drain

commit 5200ff86d0 (parent c56be1fa9f)
@@ -148,6 +148,7 @@ func NewDrainCmdOptions(f cmdutil.Factory, ioStreams genericclioptions.IOStreams
             GracePeriodSeconds: -1,
             Out:                ioStreams.Out,
             ErrOut:             ioStreams.ErrOut,
+            ChunkSize:          cmdutil.DefaultChunkSize,
         },
     }
     o.drainer.OnPodDeletedOrEvicted = o.onPodDeletedOrEvicted

@@ -198,6 +199,7 @@ func NewCmdDrain(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobr
     cmd.Flags().BoolVar(&o.drainer.DisableEviction, "disable-eviction", o.drainer.DisableEviction, "Force drain to use delete, even if eviction is supported. This will bypass checking PodDisruptionBudgets, use with caution.")
     cmd.Flags().IntVar(&o.drainer.SkipWaitForDeleteTimeoutSeconds, "skip-wait-for-delete-timeout", o.drainer.SkipWaitForDeleteTimeoutSeconds, "If pod DeletionTimestamp older than N seconds, skip waiting for the pod. Seconds must be greater than 0 to skip.")
 
+    cmdutil.AddChunkSizeFlag(cmd, &o.drainer.ChunkSize)
     cmdutil.AddDryRunFlag(cmd)
     return cmd
 }

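A note on the helper used on the new line above: cmdutil.AddChunkSizeFlag itself is not part of this diff. The sketch below is only an assumption of the kind of helper it is, registering an int64 --chunk-size flag bound to the drainer's ChunkSize value; the function name and help text are illustrative, not kubectl's actual implementation.

package main

import (
    "fmt"

    "github.com/spf13/cobra"
)

// addChunkSizeFlag registers an int64 --chunk-size flag on cmd, defaulting to
// the current value of *value. This is a sketch of what a helper such as
// cmdutil.AddChunkSizeFlag is assumed to do; the help text is illustrative.
func addChunkSizeFlag(cmd *cobra.Command, value *int64) {
    cmd.Flags().Int64Var(value, "chunk-size", *value,
        "Return large lists in chunks rather than all at once. Pass 0 to disable.")
}

func main() {
    chunkSize := int64(500) // mirrors the 500 default exercised by the tests in this diff
    cmd := &cobra.Command{
        Use: "drain-demo",
        Run: func(cmd *cobra.Command, args []string) {
            fmt.Println("chunk size:", chunkSize)
        },
    }
    addChunkSizeFlag(cmd, &chunkSize)
    _ = cmd.Execute()
}
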
@@ -256,6 +258,7 @@ func (o *DrainCmdOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args [
     builder := f.NewBuilder().
         WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...).
         NamespaceParam(o.Namespace).DefaultNamespace().
+        RequestChunksOf(o.drainer.ChunkSize).
         ResourceNames("nodes", args...).
         SingleResourceType().
         Flatten()

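For context, RequestChunksOf(o.drainer.ChunkSize) makes the resource builder page through the node list using the list API's limit/continue mechanism instead of a single unbounded request. The standalone sketch below shows roughly what that amounts to in raw client-go calls; it assumes an already constructed clientset, and listNodesInChunks is an illustrative name rather than anything defined in this change.

package main

import (
    "context"
    "fmt"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

// listNodesInChunks pages through nodes matching labelSelector, asking the
// API server for at most chunkSize items per request and following the
// continue token until the list is exhausted.
func listNodesInChunks(ctx context.Context, client kubernetes.Interface, labelSelector string, chunkSize int64) ([]corev1.Node, error) {
    var nodes []corev1.Node
    opts := metav1.ListOptions{
        LabelSelector: labelSelector,
        Limit:         chunkSize,
    }
    for {
        page, err := client.CoreV1().Nodes().List(ctx, opts)
        if err != nil {
            return nil, err
        }
        nodes = append(nodes, page.Items...)
        if page.Continue == "" {
            return nodes, nil
        }
        opts.Continue = page.Continue
    }
}

func main() {
    // Placeholder entry point; wire up a real kubernetes.Clientset to use
    // listNodesInChunks against a cluster.
    fmt.Println("chunked node list sketch")
}
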
@@ -810,6 +810,7 @@ func TestDrain(t *testing.T) {
             }
             getParams := make(url.Values)
             getParams["fieldSelector"] = []string{"spec.nodeName=node"}
+            getParams["limit"] = []string{"500"}
             if !reflect.DeepEqual(getParams, values) {
                 t.Fatalf("%s: expected:\n%v\nsaw:\n%v\n", test.description, getParams, values)
             }

@@ -29,6 +29,7 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/fields"
     "k8s.io/apimachinery/pkg/labels"
+    "k8s.io/apimachinery/pkg/runtime"
     utilerrors "k8s.io/apimachinery/pkg/util/errors"
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/cli-runtime/pkg/resource"

@@ -61,6 +62,7 @@ type Helper struct {
     DeleteEmptyDirData bool
     Selector           string
     PodSelector        string
+    ChunkSize          int64
 
     // DisableEviction forces drain to use delete rather than evict
     DisableEviction bool

@@ -189,9 +191,23 @@ func (d *Helper) GetPodsForDeletion(nodeName string) (*PodDeleteList, []error) {
         return nil, []error{err}
     }
 
-    podList, err := d.Client.CoreV1().Pods(metav1.NamespaceAll).List(d.getContext(), metav1.ListOptions{
+    podList := &corev1.PodList{}
+    initialOpts := &metav1.ListOptions{
         LabelSelector: labelSelector.String(),
-        FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName}).String()})
+        FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName}).String(),
+        Limit:         d.ChunkSize,
+    }
+
+    err = resource.FollowContinue(initialOpts, func(options metav1.ListOptions) (runtime.Object, error) {
+        newPods, err := d.Client.CoreV1().Pods(metav1.NamespaceAll).List(d.getContext(), options)
+        if err != nil {
+            podR := corev1.SchemeGroupVersion.WithResource(corev1.ResourcePods.String())
+            return nil, resource.EnhanceListError(err, options, podR.String())
+        }
+        podList.Items = append(podList.Items, newPods.Items...)
+        return newPods, nil
+    })
+
     if err != nil {
         return nil, []error{err}
     }

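The pagination in the hunk above is driven by resource.FollowContinue from k8s.io/cli-runtime/pkg/resource, which is not defined in this diff. As a rough mental model only (an assumption about its behavior, not the actual cli-runtime source), it can be pictured like the sketch below: call the supplied list function, then keep re-calling it with each page's continue token until the server stops returning one.

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
)

// followContinue is a simplified model of what a helper like
// resource.FollowContinue is assumed to do: invoke listFunc with initialOpts,
// then keep re-invoking it with the continue token from each returned page
// until no token is left. Error handling in the real helper may differ.
func followContinue(initialOpts *metav1.ListOptions, listFunc func(metav1.ListOptions) (runtime.Object, error)) error {
    opts := *initialOpts
    for {
        page, err := listFunc(opts)
        if err != nil {
            return err
        }
        listMeta, err := meta.ListAccessor(page)
        if err != nil {
            return err
        }
        token := listMeta.GetContinue()
        if token == "" {
            return nil
        }
        opts.Continue = token
    }
}

func main() {
    // Placeholder entry point; in GetPodsForDeletion the callback passed to
    // the real helper performs the Pods().List call for each page.
    fmt.Println("followContinue sketch")
}
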
@@ -167,6 +167,32 @@ run_cluster_management_tests() {
   response=$(! kubectl drain "127.0.0.1" --selector test=label 2>&1)
   kube::test::if_has_string "${response}" 'cannot specify both a node name'
 
+  ### Test kubectl drain chunk size
+  # Pre-condition: node exists and contains label test=label
+  kube::test::get_object_assert "nodes 127.0.0.1" '{{.metadata.labels.test}}' 'label'
+  # Pre-condition: node is schedulable
+  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
+  # Pre-condition: test-pod-1 and test-pod-2 exist
+  kube::test::get_object_assert "pods" "{{range .items}}{{.metadata.name}},{{end}}" 'test-pod-1,test-pod-2,'
+  # command - need to use force because pods are unmanaged, dry run (or skip-wait) because node is unready
+  output_message=$(kubectl --v=6 drain --force --pod-selector type=test-pod --selector test=label --chunk-size=1 --dry-run=client 2>&1 "${kube_flags[@]}")
+  # Post-condition: Check if we get a limit on node, and both limit and continue on pods
+  kube::test::if_has_string "${output_message}" "/v1/nodes?labelSelector=test%3Dlabel&limit=1 200 OK"
+  kube::test::if_has_string "${output_message}" "/v1/pods?fieldSelector=spec.nodeName%3D127.0.0.1&labelSelector=type%3Dtest-pod&limit=1 200 OK"
+  kube::test::if_has_string "${output_message}" "/v1/pods?continue=.*&fieldSelector=spec.nodeName%3D127.0.0.1&labelSelector=type%3Dtest-pod&limit=1 200 OK"
+  # Post-condition: Check we evict multiple pages worth of pods
+  kube::test::if_has_string "${output_message}" "evicting pod .*/test-pod-1"
+  kube::test::if_has_string "${output_message}" "evicting pod .*/test-pod-2"
+  # Post-condition: node is schedulable
+  kubectl uncordon "127.0.0.1"
+  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
+
+  ### Test kubectl drain chunk size defaults to 500
+  output_message=$(kubectl --v=6 drain --force --selector test=label --dry-run=client 2>&1 "${kube_flags[@]}")
+  # Post-condition: Check if we get a limit
+  kube::test::if_has_string "${output_message}" "/v1/nodes?labelSelector=test%3Dlabel&limit=500 200 OK"
+  kube::test::if_has_string "${output_message}" "/v1/pods?fieldSelector=spec.nodeName%3D127.0.0.1&limit=500 200 OK"
+
   ### kubectl cordon command fails when no arguments are passed
   # Pre-condition: node exists
   response=$(! kubectl cordon 2>&1)