Add support for Job-managed pods to drain.

Matt Liggett 2016-01-15 17:14:22 -08:00
parent 146a9e6075
commit d4b02466b7
4 changed files with 61 additions and 18 deletions

docs/man/man1/kubectl_drain.1

@@ -19,8 +19,8 @@ Drain node in preparation for maintenance.
 The given node will be marked unschedulable to prevent new pods from arriving.
 Then drain deletes all pods except mirror pods (which cannot be deleted through
 the API server). If there are any pods that are neither mirror pods nor
-managed by a ReplicationController or DaemonSet, then drain will not delete any
-pods unless you use \-\-force.
+managed by a ReplicationController, Job, or DaemonSet, then drain will not
+delete any pods unless you use \-\-force.
 .PP
 When you are ready to put the node back into service, use kubectl uncordon, which
@@ -30,7 +30,7 @@ will make the node schedulable again.
 .SH OPTIONS
 .PP
 \fB\-\-force\fP=false
-Continue even if there are pods not managed by a ReplicationController or DaemonSet.
+Continue even if there are pods not managed by a ReplicationController, Job, or DaemonSet.
 .PP
 \fB\-\-grace\-period\fP=\-1
@@ -136,10 +136,10 @@ will make the node schedulable again.
 .RS
 .nf
-# Drain node "foo", even if there are pods not managed by a ReplicationController or DaemonSet on it.
+# Drain node "foo", even if there are pods not managed by a ReplicationController, Job, or DaemonSet on it.
 $ kubectl drain foo \-\-force
-# As above, but abort if there are pods not managed by a ReplicationController or DaemonSet, and use a grace period of 15 minutes.
+# As above, but abort if there are pods not managed by a ReplicationController, Job, or DaemonSet, and use a grace period of 15 minutes.
 $ kubectl drain foo \-\-grace\-period=900
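
The `--grace-period` semantics documented above reduce to a simple fallback rule: a negative flag value means "no override", so each pod's own termination grace period applies; any non-negative value wins. A self-contained sketch of that rule, where `effectiveGracePeriod` is a hypothetical helper for illustration, not part of kubectl:

```go
package main

import "fmt"

// effectiveGracePeriod models the documented --grace-period semantics:
// a negative flag value defers to the pod's own
// terminationGracePeriodSeconds; any non-negative value overrides it.
func effectiveGracePeriod(flagSeconds int, podDefaultSeconds int64) int64 {
	if flagSeconds < 0 {
		return podDefaultSeconds // "the default value specified in the pod"
	}
	return int64(flagSeconds)
}

func main() {
	fmt.Println(effectiveGracePeriod(-1, 30))  // 30: pod default applies
	fmt.Println(effectiveGracePeriod(900, 30)) // 900: flag wins, as in the example
}
```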

docs/user-guide/kubectl/kubectl_drain.md

@@ -39,8 +39,8 @@ Drain node in preparation for maintenance.
 The given node will be marked unschedulable to prevent new pods from arriving.
 Then drain deletes all pods except mirror pods (which cannot be deleted through
 the API server). If there are any pods that are neither mirror pods nor
-managed by a ReplicationController or DaemonSet, then drain will not delete any
-pods unless you use --force.
+managed by a ReplicationController, Job, or DaemonSet, then drain will not
+delete any pods unless you use --force.
 When you are ready to put the node back into service, use kubectl uncordon, which
 will make the node schedulable again.
@@ -53,10 +53,10 @@ kubectl drain NODE
 ### Examples
 ```
-# Drain node "foo", even if there are pods not managed by a ReplicationController or DaemonSet on it.
+# Drain node "foo", even if there are pods not managed by a ReplicationController, Job, or DaemonSet on it.
 $ kubectl drain foo --force
-# As above, but abort if there are pods not managed by a ReplicationController or DaemonSet, and use a grace period of 15 minutes.
+# As above, but abort if there are pods not managed by a ReplicationController, Job, or DaemonSet, and use a grace period of 15 minutes.
 $ kubectl drain foo --grace-period=900
 ```
@@ -64,7 +64,7 @@ $ kubectl drain foo --grace-period=900
 ### Options
 ```
---force[=false]: Continue even if there are pods not managed by a ReplicationController or DaemonSet.
+--force[=false]: Continue even if there are pods not managed by a ReplicationController, Job, or DaemonSet.
 --grace-period=-1: Period of time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used.
 ```
@@ -100,7 +100,7 @@ $ kubectl drain foo --grace-period=900
 * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager
-###### Auto generated by spf13/cobra on 6-Jan-2016
+###### Auto generated by spf13/cobra on 28-Jan-2016
 <!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
 [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_drain.md?pixel)]()

pkg/kubectl/cmd/drain.go

@@ -99,16 +99,16 @@ const (
 The given node will be marked unschedulable to prevent new pods from arriving.
 Then drain deletes all pods except mirror pods (which cannot be deleted through
 the API server). If there are any pods that are neither mirror pods nor
-managed by a ReplicationController or DaemonSet, then drain will not delete any
-pods unless you use --force.
+managed by a ReplicationController, Job, or DaemonSet, then drain will not
+delete any pods unless you use --force.
 When you are ready to put the node back into service, use kubectl uncordon, which
 will make the node schedulable again.
 `
-    drain_example = `# Drain node "foo", even if there are pods not managed by a ReplicationController or DaemonSet on it.
+    drain_example = `# Drain node "foo", even if there are pods not managed by a ReplicationController, Job, or DaemonSet on it.
 $ kubectl drain foo --force
-# As above, but abort if there are pods not managed by a ReplicationController or DaemonSet, and use a grace period of 15 minutes.
+# As above, but abort if there are pods not managed by a ReplicationController, Job, or DaemonSet, and use a grace period of 15 minutes.
 $ kubectl drain foo --grace-period=900
 `
 )
@@ -126,7 +126,7 @@ func NewCmdDrain(f *cmdutil.Factory, out io.Writer) *cobra.Command {
             cmdutil.CheckErr(options.RunDrain())
         },
     }
-    cmd.Flags().BoolVar(&options.Force, "force", false, "Continue even if there are pods not managed by a ReplicationController or DaemonSet.")
+    cmd.Flags().BoolVar(&options.Force, "force", false, "Continue even if there are pods not managed by a ReplicationController, Job, or DaemonSet.")
     cmd.Flags().IntVar(&options.GracePeriodSeconds, "grace-period", -1, "Period of time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used.")
     return cmd
 }
@@ -229,6 +229,15 @@ func (o *DrainOptions) getPodsForDeletion() ([]api.Pod, error) {
                 if err == nil && ds != nil {
                     replicated = true
                 }
+            } else if sr.Reference.Kind == "Job" {
+                job, err := o.client.Jobs(sr.Reference.Namespace).Get(sr.Reference.Name)
+                // Assume the only reason for an error is because the Job is
+                // gone/missing, not for any other cause.  TODO(mml): something more
+                // sophisticated than this
+                if err == nil && job != nil {
+                    replicated = true
+                }
             }
         }
         if replicated || o.Force {
@@ -242,9 +251,9 @@ func (o *DrainOptions) getPodsForDeletion() ([]api.Pod, error) {
     if len(unreplicatedPodNames) > 0 {
         joined := strings.Join(unreplicatedPodNames, ", ")
         if !o.Force {
-            return pods, fmt.Errorf("refusing to continue due to pods managed by neither a ReplicationController nor a DaemonSet: %s (use --force to override)", joined)
+            return pods, fmt.Errorf("refusing to continue due to pods managed by neither a ReplicationController, nor a Job, nor a DaemonSet: %s (use --force to override)", joined)
         }
-        fmt.Fprintf(o.out, "WARNING: About to delete these pods managed by neither a ReplicationController nor a DaemonSet: %s\n", joined)
+        fmt.Fprintf(o.out, "WARNING: About to delete these pods managed by neither a ReplicationController, nor a Job, nor a DaemonSet: %s\n", joined)
     }
     return pods, nil
 }
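
The new `Job` branch above keys off the pod's creator reference: a controller-managed pod carries a `kubernetes.io/created-by` annotation (`controller.CreatedByAnnotation`) whose value is a JSON-serialized reference to the creating object, and drain dispatches on its `Kind`. A self-contained sketch of that decode-and-dispatch step; the two struct definitions are minimal stand-ins for the real `api` types, and the annotation value's exact shape here is assumed for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Minimal stand-ins for the api types used in the hunk above; the real
// definitions live in Kubernetes' api package and carry more fields.
type ObjectReference struct {
	Kind      string `json:"kind"`
	Namespace string `json:"namespace"`
	Name      string `json:"name"`
}

type SerializedReference struct {
	Reference ObjectReference `json:"reference"`
}

func main() {
	// Example "kubernetes.io/created-by" value for a Job-managed pod.
	createdBy := `{"reference":{"kind":"Job","namespace":"default","name":"job"}}`

	var sr SerializedReference
	if err := json.Unmarshal([]byte(createdBy), &sr); err != nil {
		panic(err)
	}
	// drain dispatches on Kind; this commit adds the "Job" case.
	switch sr.Reference.Kind {
	case "ReplicationController", "DaemonSet", "Job":
		fmt.Println("replicated:", sr.Reference.Kind, sr.Reference.Name)
	default:
		fmt.Println("unreplicated pod: deleted only with --force")
	}
}
```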

pkg/kubectl/cmd/drain_test.go

@@ -262,6 +262,28 @@ func TestDrain(t *testing.T) {
         },
     }
+    job := extensions.Job{
+        ObjectMeta: api.ObjectMeta{
+            Name:              "job",
+            Namespace:         "default",
+            CreationTimestamp: unversioned.Time{time.Now()},
+            SelfLink:          "/apis/extensions/v1beta1/namespaces/default/jobs/job",
+        },
+        Spec: extensions.JobSpec{
+            Selector: &extensions.LabelSelector{MatchLabels: labels},
+        },
+    }
+    job_pod := api.Pod{
+        ObjectMeta: api.ObjectMeta{
+            Name:              "bar",
+            Namespace:         "default",
+            CreationTimestamp: unversioned.Time{time.Now()},
+            Labels:            labels,
+            Annotations:       map[string]string{controller.CreatedByAnnotation: refJson(t, &job)},
+        },
+    }
     naked_pod := api.Pod{
         ObjectMeta: api.ObjectMeta{
             Name: "bar",
@@ -304,6 +326,16 @@ func TestDrain(t *testing.T) {
             expectFatal:  false,
             expectDelete: true,
         },
+        {
+            description:  "Job-managed pod",
+            node:         node,
+            expected:     cordoned_node,
+            pods:         []api.Pod{job_pod},
+            rcs:          []api.ReplicationController{rc},
+            args:         []string{"node"},
+            expectFatal:  false,
+            expectDelete: true,
+        },
         {
             description: "naked pod",
             node:        node,
@@ -352,6 +384,8 @@ func TestDrain(t *testing.T) {
                 return &http.Response{StatusCode: 200, Body: objBody(codec, &test.rcs[0])}, nil
             case m.isFor("GET", "/namespaces/default/daemonsets/ds"):
                 return &http.Response{StatusCode: 200, Body: objBody(testapi.Extensions.Codec(), &ds)}, nil
+            case m.isFor("GET", "/namespaces/default/jobs/job"):
+                return &http.Response{StatusCode: 200, Body: objBody(testapi.Extensions.Codec(), &job)}, nil
             case m.isFor("GET", "/pods"):
                 values, err := url.ParseQuery(req.URL.RawQuery)
                 if err != nil {
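
`refJson` in the `job_pod` fixture is a test helper that isn't part of this diff; presumably it serializes a reference to the given object into the same JSON form the controller writes under `CreatedByAnnotation`. A self-contained approximation under that assumption (`makeCreatedByAnnotation` and the struct shapes are illustrative stand-ins, not the real helper or api definitions):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Minimal stand-ins for the api types; the real ones carry more fields
// (apiVersion, uid, and so on).
type ObjectReference struct {
	Kind      string `json:"kind"`
	Namespace string `json:"namespace"`
	Name      string `json:"name"`
}

type SerializedReference struct {
	Reference ObjectReference `json:"reference"`
}

// makeCreatedByAnnotation is a hypothetical stand-in for the test's refJson
// helper: it produces the annotation value a fixture pod would carry.
func makeCreatedByAnnotation(kind, namespace, name string) (string, error) {
	b, err := json.Marshal(SerializedReference{
		Reference: ObjectReference{Kind: kind, Namespace: namespace, Name: name},
	})
	return string(b), err
}

func main() {
	ann, err := makeCreatedByAnnotation("Job", "default", "job")
	if err != nil {
		panic(err)
	}
	// Roughly the value job_pod stores under controller.CreatedByAnnotation,
	// which getPodsForDeletion later decodes to find the owning Job.
	fmt.Println(ann)
}
```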