Merge pull request #20103 from mml/drain-followup

Auto commit by PR queue bot
k8s-merge-robot 2016-01-30 06:25:06 -08:00
commit 34f4a03f62
4 changed files with 61 additions and 18 deletions

File 1/4: kubectl drain man page

@@ -19,8 +19,8 @@ Drain node in preparation for maintenance.
The given node will be marked unschedulable to prevent new pods from arriving.
Then drain deletes all pods except mirror pods (which cannot be deleted through
the API server). If there are any pods that are neither mirror pods nor
-managed by a ReplicationController or DaemonSet, then drain will not delete any
-pods unless you use \-\-force.
+managed by a ReplicationController, Job, or DaemonSet, then drain will not
+delete any pods unless you use \-\-force.
.PP
When you are ready to put the node back into service, use kubectl uncordon, which
@@ -30,7 +30,7 @@ will make the node schedulable again.
.SH OPTIONS
.PP
\fB\-\-force\fP=false
-Continue even if there are pods not managed by a ReplicationController or DaemonSet.
+Continue even if there are pods not managed by a ReplicationController, Job, or DaemonSet.
.PP
\fB\-\-grace\-period\fP=\-1
@@ -136,10 +136,10 @@ will make the node schedulable again.
.RS
.nf
-# Drain node "foo", even if there are pods not managed by a ReplicationController or DaemonSet on it.
+# Drain node "foo", even if there are pods not managed by a ReplicationController, Job, or DaemonSet on it.
$ kubectl drain foo \-\-force
-# As above, but abort if there are pods not managed by a ReplicationController or DaemonSet, and use a grace period of 15 minutes.
+# As above, but abort if there are pods not managed by a ReplicationController, Job, or DaemonSet, and use a grace period of 15 minutes.
$ kubectl drain foo \-\-grace\-period=900

File 2/4: docs/user-guide/kubectl/kubectl_drain.md

@@ -39,8 +39,8 @@ Drain node in preparation for maintenance.
The given node will be marked unschedulable to prevent new pods from arriving.
Then drain deletes all pods except mirror pods (which cannot be deleted through
the API server). If there are any pods that are neither mirror pods nor
-managed by a ReplicationController or DaemonSet, then drain will not delete any
-pods unless you use --force.
+managed by a ReplicationController, Job, or DaemonSet, then drain will not
+delete any pods unless you use --force.
When you are ready to put the node back into service, use kubectl uncordon, which
will make the node schedulable again.
@@ -53,10 +53,10 @@ kubectl drain NODE
### Examples
```
-# Drain node "foo", even if there are pods not managed by a ReplicationController or DaemonSet on it.
+# Drain node "foo", even if there are pods not managed by a ReplicationController, Job, or DaemonSet on it.
$ kubectl drain foo --force
-# As above, but abort if there are pods not managed by a ReplicationController or DaemonSet, and use a grace period of 15 minutes.
+# As above, but abort if there are pods not managed by a ReplicationController, Job, or DaemonSet, and use a grace period of 15 minutes.
$ kubectl drain foo --grace-period=900
```
@@ -64,7 +64,7 @@ $ kubectl drain foo --grace-period=900
### Options
```
-  --force[=false]: Continue even if there are pods not managed by a ReplicationController or DaemonSet.
+  --force[=false]: Continue even if there are pods not managed by a ReplicationController, Job, or DaemonSet.
--grace-period=-1: Period of time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used.
```
@@ -100,7 +100,7 @@ $ kubectl drain foo --grace-period=900
* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager
-###### Auto generated by spf13/cobra on 6-Jan-2016
+###### Auto generated by spf13/cobra on 28-Jan-2016
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_drain.md?pixel)]()

File 3/4: kubectl drain implementation (Go)

@@ -99,16 +99,16 @@ const (
The given node will be marked unschedulable to prevent new pods from arriving.
Then drain deletes all pods except mirror pods (which cannot be deleted through
the API server). If there are any pods that are neither mirror pods nor
-managed by a ReplicationController or DaemonSet, then drain will not delete any
-pods unless you use --force.
+managed by a ReplicationController, Job, or DaemonSet, then drain will not
+delete any pods unless you use --force.
When you are ready to put the node back into service, use kubectl uncordon, which
will make the node schedulable again.
`
-	drain_example = `# Drain node "foo", even if there are pods not managed by a ReplicationController or DaemonSet on it.
+	drain_example = `# Drain node "foo", even if there are pods not managed by a ReplicationController, Job, or DaemonSet on it.
$ kubectl drain foo --force
-# As above, but abort if there are pods not managed by a ReplicationController or DaemonSet, and use a grace period of 15 minutes.
+# As above, but abort if there are pods not managed by a ReplicationController, Job, or DaemonSet, and use a grace period of 15 minutes.
$ kubectl drain foo --grace-period=900
`
)
@@ -126,7 +126,7 @@ func NewCmdDrain(f *cmdutil.Factory, out io.Writer) *cobra.Command {
cmdutil.CheckErr(options.RunDrain())
},
}
-	cmd.Flags().BoolVar(&options.Force, "force", false, "Continue even if there are pods not managed by a ReplicationController or DaemonSet.")
+	cmd.Flags().BoolVar(&options.Force, "force", false, "Continue even if there are pods not managed by a ReplicationController, Job, or DaemonSet.")
cmd.Flags().IntVar(&options.GracePeriodSeconds, "grace-period", -1, "Period of time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used.")
return cmd
}
@@ -229,6 +229,15 @@ func (o *DrainOptions) getPodsForDeletion() ([]api.Pod, error) {
if err == nil && ds != nil {
replicated = true
}
+		} else if sr.Reference.Kind == "Job" {
+			job, err := o.client.Jobs(sr.Reference.Namespace).Get(sr.Reference.Name)
+			// Assume the only reason for an error is because the Job is
+			// gone/missing, not for any other cause. TODO(mml): something more
+			// sophisticated than this
+			if err == nil && job != nil {
+				replicated = true
+			}
+		}
}
if replicated || o.Force {
@@ -242,9 +251,9 @@ func (o *DrainOptions) getPodsForDeletion() ([]api.Pod, error) {
if len(unreplicatedPodNames) > 0 {
joined := strings.Join(unreplicatedPodNames, ", ")
if !o.Force {
-			return pods, fmt.Errorf("refusing to continue due to pods managed by neither a ReplicationController nor a DaemonSet: %s (use --force to override)", joined)
+			return pods, fmt.Errorf("refusing to continue due to pods managed by neither a ReplicationController, nor a Job, nor a DaemonSet: %s (use --force to override)", joined)
}
-		fmt.Fprintf(o.out, "WARNING: About to delete these pods managed by neither a ReplicationController nor a DaemonSet: %s\n", joined)
+		fmt.Fprintf(o.out, "WARNING: About to delete these pods managed by neither a ReplicationController, nor a Job, nor a DaemonSet: %s\n", joined)
}
return pods, nil
}
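
Context for the hunk above: drain decides whether a pod is "replicated" by decoding the serialized creator reference stored in the pod's `kubernetes.io/created-by` annotation and dispatching on its `Kind`; this PR adds `"Job"` to that dispatch. A minimal standalone sketch of the mechanism, using simplified stand-in types rather than the real Kubernetes API structs; the live lookup of the controller object (which the real code performs, treating a lookup error as "controller gone") is elided:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Simplified stand-ins for the API types involved; the real definitions
// live in the Kubernetes API packages.
type ObjectReference struct {
	Kind      string `json:"kind"`
	Namespace string `json:"namespace"`
	Name      string `json:"name"`
}

type SerializedReference struct {
	Reference ObjectReference `json:"reference"`
}

// createdByAnnotation mirrors controller.CreatedByAnnotation.
const createdByAnnotation = "kubernetes.io/created-by"

// kindIsReplicated reports whether a pod's creator annotation names one of
// the controller kinds drain recognizes after this PR.
func kindIsReplicated(annotations map[string]string) bool {
	raw, ok := annotations[createdByAnnotation]
	if !ok {
		return false // a "naked" pod: no creator reference at all
	}
	var sr SerializedReference
	if err := json.Unmarshal([]byte(raw), &sr); err != nil {
		return false
	}
	switch sr.Reference.Kind {
	case "ReplicationController", "DaemonSet", "Job":
		return true
	}
	return false
}

func main() {
	ann := map[string]string{
		createdByAnnotation: `{"reference":{"kind":"Job","namespace":"default","name":"job"}}`,
	}
	fmt.Println(kindIsReplicated(ann)) // true
}
```

Pods that fail this check are the "unreplicated" ones the error and warning above refer to, and are only deleted under --force.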

File 4/4: kubectl drain tests (Go)

@@ -262,6 +262,28 @@ func TestDrain(t *testing.T) {
},
}
+	job := extensions.Job{
+		ObjectMeta: api.ObjectMeta{
+			Name:              "job",
+			Namespace:         "default",
+			CreationTimestamp: unversioned.Time{time.Now()},
+			SelfLink:          "/apis/extensions/v1beta1/namespaces/default/jobs/job",
+		},
+		Spec: extensions.JobSpec{
+			Selector: &extensions.LabelSelector{MatchLabels: labels},
+		},
+	}
+
+	job_pod := api.Pod{
+		ObjectMeta: api.ObjectMeta{
+			Name:              "bar",
+			Namespace:         "default",
+			CreationTimestamp: unversioned.Time{time.Now()},
+			Labels:            labels,
+			Annotations:       map[string]string{controller.CreatedByAnnotation: refJson(t, &job)},
+		},
+	}
naked_pod := api.Pod{
ObjectMeta: api.ObjectMeta{
Name: "bar",
@@ -304,6 +326,16 @@ func TestDrain(t *testing.T) {
expectFatal: false,
expectDelete: true,
},
+		{
+			description:  "Job-managed pod",
+			node:         node,
+			expected:     cordoned_node,
+			pods:         []api.Pod{job_pod},
+			rcs:          []api.ReplicationController{rc},
+			args:         []string{"node"},
+			expectFatal:  false,
+			expectDelete: true,
+		},
{
description: "naked pod",
node: node,
@@ -352,6 +384,8 @@ func TestDrain(t *testing.T) {
return &http.Response{StatusCode: 200, Body: objBody(codec, &test.rcs[0])}, nil
case m.isFor("GET", "/namespaces/default/daemonsets/ds"):
return &http.Response{StatusCode: 200, Body: objBody(testapi.Extensions.Codec(), &ds)}, nil
+			case m.isFor("GET", "/namespaces/default/jobs/job"):
+				return &http.Response{StatusCode: 200, Body: objBody(testapi.Extensions.Codec(), &job)}, nil
case m.isFor("GET", "/pods"):
values, err := url.ParseQuery(req.URL.RawQuery)
if err != nil {
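
The stubbed responses in this hunk come from a fake HTTP transport that matches requests by method and path (the test's `m.isFor` dispatch); the new `case` simply teaches it to serve the Job object. A rough, self-contained sketch of that fake-transport pattern; the names and types here are illustrative, not the Kubernetes test helpers:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

// cannedTransport serves fixed bodies keyed by "METHOD /path", mimicking
// the method-and-path dispatch used in the drain test.
type cannedTransport struct {
	responses map[string]string
}

func (c *cannedTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	body, ok := c.responses[req.Method+" "+req.URL.Path]
	status := http.StatusOK
	if !ok {
		status = http.StatusNotFound
	}
	return &http.Response{
		StatusCode: status,
		Header:     make(http.Header),
		Body:       io.NopCloser(strings.NewReader(body)),
	}, nil
}

func main() {
	client := &http.Client{Transport: &cannedTransport{responses: map[string]string{
		// Placeholder body; the real test serializes the Job fixture here.
		"GET /namespaces/default/jobs/job": `{"kind":"Job","metadata":{"name":"job"}}`,
	}}}
	resp, err := client.Get("http://apiserver.invalid/namespaces/default/jobs/job")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	b, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(b)) // 200 {"kind":"Job","metadata":{"name":"job"}}
}
```

This keeps the drain test hermetic: RunDrain exercises its real REST client code paths while every API-server interaction, including the new Job lookup, resolves to a canned object.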