Merge pull request #23689 from maclof/drain-replicaset-pods

Automatic merge from submit-queue

Drain pods created from ReplicaSets

Refer to my issue here: https://github.com/kubernetes/kubernetes/issues/23531
k8s-merge-robot 2016-04-16 00:37:41 -07:00
commit fe6a7a2c7d
5 changed files with 75 additions and 20 deletions
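
For context on the mechanism being extended: `kubectl drain` decides whether a pod may be deleted without `--force` by decoding the creator recorded in the pod's `kubernetes.io/created-by` annotation (`controller.CreatedByAnnotation` in the diff). The self-contained Go sketch below is illustrative only — `serializedReference`, `creatorStillExists`, and `isReplicated` are simplified stand-ins, not the real kubectl types or functions — and shows the kind check that this PR extends to cover ReplicaSet.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// The annotation kubectl drain inspects.
const createdByAnnotation = "kubernetes.io/created-by"

// serializedReference is a pared-down stand-in for the annotation's value:
// a serialized reference to the object that created the pod.
type serializedReference struct {
	Reference struct {
		Kind      string `json:"kind"`
		Namespace string `json:"namespace"`
		Name      string `json:"name"`
	} `json:"reference"`
}

// creatorStillExists stands in for the API lookups drain.go performs
// (e.g. ReplicaSets(namespace).Get(name)); here it always succeeds.
func creatorStillExists(kind, namespace, name string) bool { return true }

// isReplicated mirrors the per-pod decision: only pods whose creator is a
// ReplicationController, Job, or (with this PR) ReplicaSet that still exists
// may be deleted without --force. DaemonSet-managed pods are handled
// separately via --ignore-daemonsets.
func isReplicated(annotations map[string]string) bool {
	raw, ok := annotations[createdByAnnotation]
	if !ok {
		return false // no recorded creator: an unmanaged ("naked") pod
	}
	var sr serializedReference
	if err := json.Unmarshal([]byte(raw), &sr); err != nil {
		return false
	}
	switch sr.Reference.Kind {
	case "ReplicationController", "Job", "ReplicaSet": // ReplicaSet is the new case
		return creatorStillExists(sr.Reference.Kind, sr.Reference.Namespace, sr.Reference.Name)
	}
	return false
}

func main() {
	// Shape of the annotation the test's refJson(t, &rs) produces (illustrative values).
	anno := map[string]string{
		createdByAnnotation: `{"kind":"SerializedReference","apiVersion":"v1",` +
			`"reference":{"kind":"ReplicaSet","namespace":"default","name":"rs"}}`,
	}
	fmt.Println(isReplicated(anno)) // true
}
```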

View File

@@ -23,7 +23,8 @@ without \-\-ignore\-daemonsets, and regardless it will not delete any
DaemonSet\-managed pods, because those pods would be immediately replaced by the
DaemonSet controller, which ignores unschedulable markings. If there are any
pods that are neither mirror pods nor managed\-\-by ReplicationController,
DaemonSet or Job\-\-, then drain will not delete any pods unless you use \-\-force.
ReplicaSet, DaemonSet or Job\-\-, then drain will not delete any pods unless you
use \-\-force.
.PP
When you are ready to put the node back into service, use kubectl uncordon, which
@@ -33,7 +34,7 @@ will make the node schedulable again.
.SH OPTIONS
.PP
\fB\-\-force\fP=false
Continue even if there are pods not managed by a ReplicationController, Job, or DaemonSet.
Continue even if there are pods not managed by a ReplicationController, ReplicaSet, Job, or DaemonSet.
.PP
\fB\-\-grace\-period\fP=\-1
@@ -147,10 +148,10 @@ will make the node schedulable again.
.RS
.nf
# Drain node "foo", even if there are pods not managed by a ReplicationController, Job, or DaemonSet on it.
# Drain node "foo", even if there are pods not managed by a ReplicationController, ReplicaSet, Job, or DaemonSet on it.
$ kubectl drain foo \-\-force
# As above, but abort if there are pods not managed by a ReplicationController, Job, or DaemonSet, and use a grace period of 15 minutes.
# As above, but abort if there are pods not managed by a ReplicationController, ReplicaSet, Job, or DaemonSet, and use a grace period of 15 minutes.
$ kubectl drain foo \-\-grace\-period=900

View File

@@ -48,7 +48,8 @@ without --ignore-daemonsets, and regardless it will not delete any
DaemonSet-managed pods, because those pods would be immediately replaced by the
DaemonSet controller, which ignores unschedulable markings. If there are any
pods that are neither mirror pods nor managed--by ReplicationController,
DaemonSet or Job--, then drain will not delete any pods unless you use --force.
ReplicaSet, DaemonSet or Job--, then drain will not delete any pods unless you
use --force.
When you are ready to put the node back into service, use kubectl uncordon, which
will make the node schedulable again.
@@ -61,10 +62,10 @@ kubectl drain NODE
### Examples
```
# Drain node "foo", even if there are pods not managed by a ReplicationController, Job, or DaemonSet on it.
# Drain node "foo", even if there are pods not managed by a ReplicationController, ReplicaSet, Job, or DaemonSet on it.
$ kubectl drain foo --force
# As above, but abort if there are pods not managed by a ReplicationController, Job, or DaemonSet, and use a grace period of 15 minutes.
# As above, but abort if there are pods not managed by a ReplicationController, ReplicaSet, Job, or DaemonSet, and use a grace period of 15 minutes.
$ kubectl drain foo --grace-period=900
```
@@ -72,7 +73,7 @@ $ kubectl drain foo --grace-period=900
### Options
```
--force[=false]: Continue even if there are pods not managed by a ReplicationController, Job, or DaemonSet.
--force[=false]: Continue even if there are pods not managed by a ReplicationController, ReplicaSet, Job, or DaemonSet.
--grace-period=-1: Period of time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used.
--ignore-daemonsets[=false]: Ignore DaemonSet-managed pods.
```
@@ -109,7 +110,7 @@ $ kubectl drain foo --grace-period=900
* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager
###### Auto generated by spf13/cobra on 5-Apr-2016
###### Auto generated by spf13/cobra on 15-Apr-2016
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_drain.md?pixel)]()

View File

@@ -10,7 +10,8 @@ description: |
DaemonSet-managed pods, because those pods would be immediately replaced by the
DaemonSet controller, which ignores unschedulable markings. If there are any
pods that are neither mirror pods nor managed--by ReplicationController,
DaemonSet or Job--, then drain will not delete any pods unless you use --force.
ReplicaSet, DaemonSet or Job--, then drain will not delete any pods unless you
use --force.
When you are ready to put the node back into service, use kubectl uncordon, which
will make the node schedulable again.
@@ -18,7 +19,7 @@ options:
- name: force
default_value: "false"
usage: |
Continue even if there are pods not managed by a ReplicationController, Job, or DaemonSet.
Continue even if there are pods not managed by a ReplicationController, ReplicaSet, Job, or DaemonSet.
- name: grace-period
default_value: "-1"
usage: |
@@ -88,10 +89,10 @@ inherited_options:
usage: |
comma-separated list of pattern=N settings for file-filtered logging
example: |
# Drain node "foo", even if there are pods not managed by a ReplicationController, Job, or DaemonSet on it.
# Drain node "foo", even if there are pods not managed by a ReplicationController, ReplicaSet, Job, or DaemonSet on it.
$ kubectl drain foo --force
# As above, but abort if there are pods not managed by a ReplicationController, Job, or DaemonSet, and use a grace period of 15 minutes.
# As above, but abort if there are pods not managed by a ReplicationController, ReplicaSet, Job, or DaemonSet, and use a grace period of 15 minutes.
$ kubectl drain foo --grace-period=900
see_also:
- kubectl

View File

@@ -107,15 +107,16 @@ without --ignore-daemonsets, and regardless it will not delete any
DaemonSet-managed pods, because those pods would be immediately replaced by the
DaemonSet controller, which ignores unschedulable markings. If there are any
pods that are neither mirror pods nor managed--by ReplicationController,
DaemonSet or Job--, then drain will not delete any pods unless you use --force.
ReplicaSet, DaemonSet or Job--, then drain will not delete any pods unless you
use --force.
When you are ready to put the node back into service, use kubectl uncordon, which
will make the node schedulable again.
`
drain_example = `# Drain node "foo", even if there are pods not managed by a ReplicationController, Job, or DaemonSet on it.
drain_example = `# Drain node "foo", even if there are pods not managed by a ReplicationController, ReplicaSet, Job, or DaemonSet on it.
$ kubectl drain foo --force
# As above, but abort if there are pods not managed by a ReplicationController, Job, or DaemonSet, and use a grace period of 15 minutes.
# As above, but abort if there are pods not managed by a ReplicationController, ReplicaSet, Job, or DaemonSet, and use a grace period of 15 minutes.
$ kubectl drain foo --grace-period=900
`
)
@@ -133,7 +134,7 @@ func NewCmdDrain(f *cmdutil.Factory, out io.Writer) *cobra.Command {
cmdutil.CheckErr(options.RunDrain())
},
}
cmd.Flags().BoolVar(&options.Force, "force", false, "Continue even if there are pods not managed by a ReplicationController, Job, or DaemonSet.")
cmd.Flags().BoolVar(&options.Force, "force", false, "Continue even if there are pods not managed by a ReplicationController, ReplicaSet, Job, or DaemonSet.")
cmd.Flags().BoolVar(&options.IgnoreDaemonsets, "ignore-daemonsets", false, "Ignore DaemonSet-managed pods.")
cmd.Flags().IntVar(&options.GracePeriodSeconds, "grace-period", -1, "Period of time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used.")
return cmd
@@ -252,6 +253,15 @@ func (o *DrainOptions) getPodsForDeletion() ([]api.Pod, error) {
if err == nil && job != nil {
replicated = true
}
} else if sr.Reference.Kind == "ReplicaSet" {
rs, err := o.client.ExtensionsClient.ReplicaSets(sr.Reference.Namespace).Get(sr.Reference.Name)
// Assume the only reason for an error is because the RS is
// gone/missing, not for any other cause. TODO(mml): something more
// sophisticated than this
if err == nil && rs != nil {
replicated = true
}
}
}
@@ -294,7 +304,7 @@ func (o *DrainOptions) getPodsForDeletion() ([]api.Pod, error) {
func unmanagedMsg(unreplicatedNames []string, daemonSetNames []string, include_guidance bool) string {
msgs := []string{}
if len(unreplicatedNames) > 0 {
msg := fmt.Sprintf("pods not managed by ReplicationController, Job, or DaemonSet: %s", strings.Join(unreplicatedNames, ","))
msg := fmt.Sprintf("pods not managed by ReplicationController, ReplicaSet, Job, or DaemonSet: %s", strings.Join(unreplicatedNames, ","))
if include_guidance {
msg += " (use --force to override)"
}
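
The help-text rules changed in the drain.go hunks above boil down to a small per-pod decision. Below is a minimal sketch of that model under stated assumptions: `podOutcome` and its boolean inputs are hypothetical names, not the real control flow, which also cordons the node, honors `--grace-period`, and distinguishes a missing creator from a lookup error.

```go
package main

import "fmt"

// podOutcome models the rules in the updated help text: mirror pods are left
// alone, DaemonSet-managed pods require --ignore-daemonsets and are never
// deleted, pods whose creator (ReplicationController, ReplicaSet, or Job)
// still exists are deleted, and anything else requires --force.
func podOutcome(isMirror, daemonSetManaged, creatorExists bool) string {
	switch {
	case isMirror:
		return "skipped"
	case daemonSetManaged:
		return "blocks drain unless --ignore-daemonsets (never deleted)"
	case creatorExists:
		return "deleted (waits for graceful termination)"
	default:
		return "blocks drain unless --force"
	}
}

func main() {
	// A ReplicaSet-created pod now falls into the "deleted" bucket.
	fmt.Println(podOutcome(false, false, true))
	// A bare pod with no recorded creator still needs --force.
	fmt.Println(podOutcome(false, false, false))
}
```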

View File

@@ -221,7 +221,7 @@ func TestDrain(t *testing.T) {
rc_anno := make(map[string]string)
rc_anno[controller.CreatedByAnnotation] = refJson(t, &rc)
replicated_pod := api.Pod{
rc_pod := api.Pod{
ObjectMeta: api.ObjectMeta{
Name: "bar",
Namespace: "default",
@@ -284,6 +284,35 @@ func TestDrain(t *testing.T) {
},
}
rs := extensions.ReplicaSet{
ObjectMeta: api.ObjectMeta{
Name: "rs",
Namespace: "default",
CreationTimestamp: unversioned.Time{Time: time.Now()},
Labels: labels,
SelfLink: testapi.Default.SelfLink("replicasets", "rs"),
},
Spec: extensions.ReplicaSetSpec{
Selector: &unversioned.LabelSelector{MatchLabels: labels},
},
}
rs_anno := make(map[string]string)
rs_anno[controller.CreatedByAnnotation] = refJson(t, &rs)
rs_pod := api.Pod{
ObjectMeta: api.ObjectMeta{
Name: "bar",
Namespace: "default",
CreationTimestamp: unversioned.Time{Time: time.Now()},
Labels: labels,
Annotations: rs_anno,
},
Spec: api.PodSpec{
NodeName: "node",
},
}
naked_pod := api.Pod{
ObjectMeta: api.ObjectMeta{
Name: "bar",
@@ -302,6 +331,7 @@ func TestDrain(t *testing.T) {
expected *api.Node
pods []api.Pod
rcs []api.ReplicationController
replicaSets []extensions.ReplicaSet
args []string
expectFatal bool
expectDelete bool
@@ -310,7 +340,7 @@ func TestDrain(t *testing.T) {
description: "RC-managed pod",
node: node,
expected: cordoned_node,
pods: []api.Pod{replicated_pod},
pods: []api.Pod{rc_pod},
rcs: []api.ReplicationController{rc},
args: []string{"node"},
expectFatal: false,
@@ -346,6 +376,16 @@ func TestDrain(t *testing.T) {
expectFatal: false,
expectDelete: true,
},
{
description: "RS-managed pod",
node: node,
expected: cordoned_node,
pods: []api.Pod{rs_pod},
replicaSets: []extensions.ReplicaSet{rs},
args: []string{"node"},
expectFatal: false,
expectDelete: true,
},
{
description: "naked pod",
node: node,
@@ -396,6 +436,8 @@ func TestDrain(t *testing.T) {
return &http.Response{StatusCode: 200, Body: objBody(testapi.Extensions.Codec(), &ds)}, nil
case m.isFor("GET", "/namespaces/default/jobs/job"):
return &http.Response{StatusCode: 200, Body: objBody(testapi.Extensions.Codec(), &job)}, nil
case m.isFor("GET", "/namespaces/default/replicasets/rs"):
return &http.Response{StatusCode: 200, Body: objBody(testapi.Extensions.Codec(), &test.replicaSets[0])}, nil
case m.isFor("GET", "/pods"):
values, err := url.ParseQuery(req.URL.RawQuery)
if err != nil {