add --pod-selector opt kubectl drain
parent 722cb7a758
commit 151398e961
@@ -4300,6 +4300,51 @@ run_cluster_management_tests() {
   kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'

+  # create test pods we can work with
+  kubectl create -f - "${kube_flags[@]}" << __EOF__
+{
+  "kind": "Pod",
+  "apiVersion": "v1",
+  "metadata": {
+    "name": "test-pod-1",
+    "labels": {
+      "e": "f"
+    }
+  },
+  "spec": {
+    "containers": [
+      {
+        "name": "container-1",
+        "resources": {},
+        "image": "test-image"
+      }
+    ]
+  }
+}
+__EOF__
+
+  kubectl create -f - "${kube_flags[@]}" << __EOF__
+{
+  "kind": "Pod",
+  "apiVersion": "v1",
+  "metadata": {
+    "name": "test-pod-2",
+    "labels": {
+      "c": "d"
+    }
+  },
+  "spec": {
+    "containers": [
+      {
+        "name": "container-1",
+        "resources": {},
+        "image": "test-image"
+      }
+    ]
+  }
+}
+__EOF__
+
   ### kubectl cordon update with --dry-run does not mark node unschedulable
   # Pre-condition: node is schedulable
   kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
@@ -4314,6 +4359,20 @@ run_cluster_management_tests() {
   kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'
   kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'

+  ### kubectl drain with --pod-selector only evicts pods that match the given selector
+  # Pre-condition: node is schedulable
+  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
+  # Pre-condition: test-pod-1 and test-pod-2 exist
+  kube::test::get_object_assert "pods" "{{range .items}}{{.metadata.name}},{{end}}" 'test-pod-1,test-pod-2,'
+  kubectl drain "127.0.0.1" --pod-selector 'e in (f)'
+  # only "test-pod-1" should have been matched and deleted - test-pod-2 should still exist
+  kube::test::get_object_assert "pods/test-pod-2" "{{.metadata.name}}" 'test-pod-2'
+  # delete pod no longer in use
+  kubectl delete pod/test-pod-2
+  # Post-condition: node is schedulable
+  kubectl uncordon "127.0.0.1"
+  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
+
   ### kubectl uncordon update with --dry-run is a no-op
   # Pre-condition: node is already schedulable
   kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
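The test above relies on standard set-based label selector semantics: 'e in (f)' matches test-pod-1 (labeled e=f) but not test-pod-2 (labeled c=d), which is why only the first pod is evicted. A minimal standalone sketch of that matching, not part of the commit, using the k8s.io/apimachinery/pkg/labels package that the Go changes below import:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Parse the same set-based selector the test passes to --pod-selector.
	selector, err := labels.Parse("e in (f)")
	if err != nil {
		panic(err)
	}

	// test-pod-1 carries e=f, test-pod-2 carries c=d.
	fmt.Println(selector.Matches(labels.Set{"e": "f"})) // true: drained
	fmt.Println(selector.Matches(labels.Set{"c": "d"})) // false: left in place
}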
@@ -33,6 +33,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/json"
@@ -61,6 +62,7 @@ type DrainOptions struct {
 	backOff            clockwork.Clock
 	DeleteLocalData    bool
 	Selector           string
+	PodSelector        string
 	mapper             meta.RESTMapper
 	nodeInfos          []*resource.Info
 	Out                io.Writer
@@ -197,6 +199,8 @@ func NewCmdDrain(f cmdutil.Factory, out, errOut io.Writer) *cobra.Command {
 	cmd.Flags().IntVar(&options.GracePeriodSeconds, "grace-period", -1, "Period of time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used.")
 	cmd.Flags().DurationVar(&options.Timeout, "timeout", 0, "The length of time to wait before giving up, zero means infinite")
 	cmd.Flags().StringVarP(&options.Selector, "selector", "l", options.Selector, "Selector (label query) to filter on")
+	cmd.Flags().StringVarP(&options.PodSelector, "pod-selector", "", options.PodSelector, "Label selector to filter pods on the node")
+
 	cmdutil.AddDryRunFlag(cmd)
 	return cmd
 }
@@ -223,6 +227,12 @@ func (o *DrainOptions) SetupDrain(cmd *cobra.Command, args []string) error {
 		return err
 	}

+	if len(o.PodSelector) > 0 {
+		if _, err := labels.Parse(o.PodSelector); err != nil {
+			return errors.New("--pod-selector=<pod_selector> must be a valid label selector")
+		}
+	}
+
 	o.restClient, err = o.Factory.RESTClient()
 	if err != nil {
 		return err
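The new check in SetupDrain above only parses the selector in order to fail fast on malformed input; the parsed value is discarded and the selector is re-parsed later in getPodsForDeletion. A small illustration, not from the commit, of what that up-front check accepts and rejects:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// A well-formed selector parses cleanly, so drain proceeds.
	if _, err := labels.Parse("e in (f)"); err == nil {
		fmt.Println("valid selector accepted")
	}

	// A malformed selector is rejected before any node is cordoned or any pod evicted.
	if _, err := labels.Parse("e in f)"); err != nil {
		fmt.Println("invalid selector rejected:", err)
	}
}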
@@ -455,7 +465,13 @@ func (ps podStatuses) Message() string {
 // getPodsForDeletion receives resource info for a node, and returns all the pods from the given node that we
 // are planning on deleting. If there are any pods preventing us from deleting, we return that list in an error.
 func (o *DrainOptions) getPodsForDeletion(nodeInfo *resource.Info) (pods []corev1.Pod, err error) {
+	labelSelector, err := labels.Parse(o.PodSelector)
+	if err != nil {
+		return pods, err
+	}
+
 	podList, err := o.client.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{
+		LabelSelector: labelSelector.String(),
 		FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeInfo.Name}).String()})
 	if err != nil {
 		return pods, err
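getPodsForDeletion now combines the label selector built from --pod-selector with the existing spec.nodeName field selector, so a single list call returns only the matching pods on the node being drained. A hypothetical standalone helper sketching that query (not part of the commit, and written against the older client-go List signature used in the diff, which takes no context argument):

package drainsketch

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/kubernetes"
)

// listDrainCandidates mirrors the query getPodsForDeletion builds: label-filter
// the pods and keep only those scheduled onto the node being drained.
func listDrainCandidates(client kubernetes.Interface, nodeName, podSelector string) ([]corev1.Pod, error) {
	// An empty --pod-selector parses to an empty selector whose String() is "",
	// which the API server treats as "match every pod", so the default drain
	// behavior is unchanged.
	labelSelector, err := labels.Parse(podSelector)
	if err != nil {
		return nil, err
	}

	podList, err := client.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{
		LabelSelector: labelSelector.String(),
		FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName}).String(),
	})
	if err != nil {
		return nil, err
	}
	return podList.Items, nil
}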