Drain pods created from ReplicaSets
commit fdf409861a (parent f4473af950)
@@ -23,7 +23,8 @@ without \-\-ignore\-daemonsets, and regardless it will not delete any
 DaemonSet\-managed pods, because those pods would be immediately replaced by the
 DaemonSet controller, which ignores unschedulable markings. If there are any
 pods that are neither mirror pods nor managed\-\-by ReplicationController,
-DaemonSet or Job\-\-, then drain will not delete any pods unless you use \-\-force.
+ReplicaSet, DaemonSet or Job\-\-, then drain will not delete any pods unless you
+use \-\-force.
 
 .PP
 When you are ready to put the node back into service, use kubectl uncordon, which
@@ -33,7 +34,7 @@ will make the node schedulable again.
 .SH OPTIONS
 .PP
 \fB\-\-force\fP=false
-Continue even if there are pods not managed by a ReplicationController, Job, or DaemonSet.
+Continue even if there are pods not managed by a ReplicationController, ReplicaSet, Job, or DaemonSet.
 
 .PP
 \fB\-\-grace\-period\fP=\-1
@@ -147,10 +148,10 @@ will make the node schedulable again.
 .RS
 
 .nf
-# Drain node "foo", even if there are pods not managed by a ReplicationController, Job, or DaemonSet on it.
+# Drain node "foo", even if there are pods not managed by a ReplicationController, ReplicaSet, Job, or DaemonSet on it.
 $ kubectl drain foo \-\-force
 
-# As above, but abort if there are pods not managed by a ReplicationController, Job, or DaemonSet, and use a grace period of 15 minutes.
+# As above, but abort if there are pods not managed by a ReplicationController, ReplicaSet, Job, or DaemonSet, and use a grace period of 15 minutes.
 $ kubectl drain foo \-\-grace\-period=900
 
 
@@ -48,7 +48,8 @@ without --ignore-daemonsets, and regardless it will not delete any
 DaemonSet-managed pods, because those pods would be immediately replaced by the
 DaemonSet controller, which ignores unschedulable markings. If there are any
 pods that are neither mirror pods nor managed--by ReplicationController,
-DaemonSet or Job--, then drain will not delete any pods unless you use --force.
+ReplicaSet, DaemonSet or Job--, then drain will not delete any pods unless you
+use --force.
 
 When you are ready to put the node back into service, use kubectl uncordon, which
 will make the node schedulable again.
@@ -61,10 +62,10 @@ kubectl drain NODE
 ### Examples
 
 ```
-# Drain node "foo", even if there are pods not managed by a ReplicationController, Job, or DaemonSet on it.
+# Drain node "foo", even if there are pods not managed by a ReplicationController, ReplicaSet, Job, or DaemonSet on it.
 $ kubectl drain foo --force
 
-# As above, but abort if there are pods not managed by a ReplicationController, Job, or DaemonSet, and use a grace period of 15 minutes.
+# As above, but abort if there are pods not managed by a ReplicationController, ReplicaSet, Job, or DaemonSet, and use a grace period of 15 minutes.
 $ kubectl drain foo --grace-period=900
 
 ```
@@ -72,7 +73,7 @@ $ kubectl drain foo --grace-period=900
 ### Options
 
 ```
-  --force[=false]: Continue even if there are pods not managed by a ReplicationController, Job, or DaemonSet.
+  --force[=false]: Continue even if there are pods not managed by a ReplicationController, ReplicaSet, Job, or DaemonSet.
   --grace-period=-1: Period of time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used.
   --ignore-daemonsets[=false]: Ignore DaemonSet-managed pods.
 ```
@@ -109,7 +110,7 @@ $ kubectl drain foo --grace-period=900
 
 * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager
 
-###### Auto generated by spf13/cobra on 5-Apr-2016
+###### Auto generated by spf13/cobra on 15-Apr-2016
 
 <!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
 []()
@@ -10,7 +10,8 @@ description: |
   DaemonSet-managed pods, because those pods would be immediately replaced by the
   DaemonSet controller, which ignores unschedulable markings. If there are any
   pods that are neither mirror pods nor managed--by ReplicationController,
-  DaemonSet or Job--, then drain will not delete any pods unless you use --force.
+  ReplicaSet, DaemonSet or Job--, then drain will not delete any pods unless you
+  use --force.
 
   When you are ready to put the node back into service, use kubectl uncordon, which
   will make the node schedulable again.
@@ -18,7 +19,7 @@ options:
 - name: force
   default_value: "false"
   usage: |
-    Continue even if there are pods not managed by a ReplicationController, Job, or DaemonSet.
+    Continue even if there are pods not managed by a ReplicationController, ReplicaSet, Job, or DaemonSet.
 - name: grace-period
   default_value: "-1"
   usage: |
@@ -88,10 +89,10 @@ inherited_options:
   usage: |
     comma-separated list of pattern=N settings for file-filtered logging
 example: |
-  # Drain node "foo", even if there are pods not managed by a ReplicationController, Job, or DaemonSet on it.
+  # Drain node "foo", even if there are pods not managed by a ReplicationController, ReplicaSet, Job, or DaemonSet on it.
   $ kubectl drain foo --force
 
-  # As above, but abort if there are pods not managed by a ReplicationController, Job, or DaemonSet, and use a grace period of 15 minutes.
+  # As above, but abort if there are pods not managed by a ReplicationController, ReplicaSet, Job, or DaemonSet, and use a grace period of 15 minutes.
   $ kubectl drain foo --grace-period=900
 see_also:
 - kubectl
@@ -107,15 +107,16 @@ without --ignore-daemonsets, and regardless it will not delete any
 DaemonSet-managed pods, because those pods would be immediately replaced by the
 DaemonSet controller, which ignores unschedulable markings. If there are any
 pods that are neither mirror pods nor managed--by ReplicationController,
-DaemonSet or Job--, then drain will not delete any pods unless you use --force.
+ReplicaSet, DaemonSet or Job--, then drain will not delete any pods unless you
+use --force.
 
 When you are ready to put the node back into service, use kubectl uncordon, which
 will make the node schedulable again.
 `
-	drain_example = `# Drain node "foo", even if there are pods not managed by a ReplicationController, Job, or DaemonSet on it.
+	drain_example = `# Drain node "foo", even if there are pods not managed by a ReplicationController, ReplicaSet, Job, or DaemonSet on it.
 $ kubectl drain foo --force
 
-# As above, but abort if there are pods not managed by a ReplicationController, Job, or DaemonSet, and use a grace period of 15 minutes.
+# As above, but abort if there are pods not managed by a ReplicationController, ReplicaSet, Job, or DaemonSet, and use a grace period of 15 minutes.
 $ kubectl drain foo --grace-period=900
 `
 )
@@ -133,7 +134,7 @@ func NewCmdDrain(f *cmdutil.Factory, out io.Writer) *cobra.Command {
 			cmdutil.CheckErr(options.RunDrain())
 		},
 	}
-	cmd.Flags().BoolVar(&options.Force, "force", false, "Continue even if there are pods not managed by a ReplicationController, Job, or DaemonSet.")
+	cmd.Flags().BoolVar(&options.Force, "force", false, "Continue even if there are pods not managed by a ReplicationController, ReplicaSet, Job, or DaemonSet.")
 	cmd.Flags().BoolVar(&options.IgnoreDaemonsets, "ignore-daemonsets", false, "Ignore DaemonSet-managed pods.")
 	cmd.Flags().IntVar(&options.GracePeriodSeconds, "grace-period", -1, "Period of time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used.")
 	return cmd
@@ -252,6 +253,15 @@ func (o *DrainOptions) getPodsForDeletion() ([]api.Pod, error) {
 			if err == nil && job != nil {
 				replicated = true
 			}
+		} else if sr.Reference.Kind == "ReplicaSet" {
+			rs, err := o.client.ExtensionsClient.ReplicaSets(sr.Reference.Namespace).Get(sr.Reference.Name)
+
+			// Assume the only reason for an error is because the RS is
+			// gone/missing, not for any other cause. TODO(mml): something more
+			// sophisticated than this
+			if err == nil && rs != nil {
+				replicated = true
+			}
 		}
 	}
 
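For context, the new branch above follows the same pattern as the existing ReplicationController and Job branches: drain reads the pod's `kubernetes.io/created-by` annotation (the `controller.CreatedByAnnotation` key), decodes the serialized reference stored there, and dispatches on the referenced controller kind. A minimal, self-contained sketch of that dispatch, with the struct shapes simplified from the real `api.SerializedReference`:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Simplified stand-ins for api.ObjectReference / api.SerializedReference;
// the real types carry more fields (API version, UID, and so on).
type objectReference struct {
	Kind      string `json:"kind"`
	Namespace string `json:"namespace"`
	Name      string `json:"name"`
}

type serializedReference struct {
	Reference objectReference `json:"reference"`
}

// createdByAnnotation mirrors controller.CreatedByAnnotation.
const createdByAnnotation = "kubernetes.io/created-by"

// isReplicated reports whether a pod's annotations point at one of the
// controller kinds that drain treats as "replicated".
func isReplicated(annotations map[string]string) bool {
	raw, ok := annotations[createdByAnnotation]
	if !ok {
		return false // no creator reference: a "naked" pod
	}
	var sr serializedReference
	if err := json.Unmarshal([]byte(raw), &sr); err != nil {
		return false
	}
	switch sr.Reference.Kind {
	case "ReplicationController", "ReplicaSet", "DaemonSet", "Job":
		// The real code also confirms the referenced controller still
		// exists before setting replicated = true.
		return true
	}
	return false
}

func main() {
	anno := map[string]string{
		createdByAnnotation: `{"reference":{"kind":"ReplicaSet","namespace":"default","name":"rs"}}`,
	}
	fmt.Println(isReplicated(anno)) // true
}
```

As in the real code, a pod whose referenced controller no longer exists is treated as unreplicated, which is why the branch re-fetches the ReplicaSet before setting `replicated = true`.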
@@ -294,7 +304,7 @@ func (o *DrainOptions) getPodsForDeletion() ([]api.Pod, error) {
 func unmanagedMsg(unreplicatedNames []string, daemonSetNames []string, include_guidance bool) string {
 	msgs := []string{}
 	if len(unreplicatedNames) > 0 {
-		msg := fmt.Sprintf("pods not managed by ReplicationController, Job, or DaemonSet: %s", strings.Join(unreplicatedNames, ","))
+		msg := fmt.Sprintf("pods not managed by ReplicationController, ReplicaSet, Job, or DaemonSet: %s", strings.Join(unreplicatedNames, ","))
 		if include_guidance {
 			msg += " (use --force to override)"
 		}
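The user-facing effect of the message change, as a tiny runnable sketch (the pod names are hypothetical, and the real function also appends a DaemonSet clause when applicable):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical unreplicated pod names; mirrors the updated Sprintf above.
	unreplicatedNames := []string{"web-0", "scratch"}
	msg := fmt.Sprintf("pods not managed by ReplicationController, ReplicaSet, Job, or DaemonSet: %s",
		strings.Join(unreplicatedNames, ","))
	msg += " (use --force to override)"
	fmt.Println(msg)
	// pods not managed by ReplicationController, ReplicaSet, Job, or DaemonSet: web-0,scratch (use --force to override)
}
```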
@@ -221,7 +221,7 @@ func TestDrain(t *testing.T) {
 	rc_anno := make(map[string]string)
 	rc_anno[controller.CreatedByAnnotation] = refJson(t, &rc)
 
-	replicated_pod := api.Pod{
+	rc_pod := api.Pod{
 		ObjectMeta: api.ObjectMeta{
 			Name:      "bar",
 			Namespace: "default",
@@ -284,6 +284,35 @@ func TestDrain(t *testing.T) {
 		},
 	}
 
+	rs := extensions.ReplicaSet{
+		ObjectMeta: api.ObjectMeta{
+			Name:              "rs",
+			Namespace:         "default",
+			CreationTimestamp: unversioned.Time{Time: time.Now()},
+			Labels:            labels,
+			SelfLink:          testapi.Default.SelfLink("replicasets", "rs"),
+		},
+		Spec: extensions.ReplicaSetSpec{
+			Selector: &unversioned.LabelSelector{MatchLabels: labels},
+		},
+	}
+
+	rs_anno := make(map[string]string)
+	rs_anno[controller.CreatedByAnnotation] = refJson(t, &rs)
+
+	rs_pod := api.Pod{
+		ObjectMeta: api.ObjectMeta{
+			Name:              "bar",
+			Namespace:         "default",
+			CreationTimestamp: unversioned.Time{Time: time.Now()},
+			Labels:            labels,
+			Annotations:       rs_anno,
+		},
+		Spec: api.PodSpec{
+			NodeName: "node",
+		},
+	}
+
 	naked_pod := api.Pod{
 		ObjectMeta: api.ObjectMeta{
 			Name:      "bar",
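The fixture above wires `rs_pod` to `rs` through `refJson`, which stores a serialized reference to the ReplicaSet under the pod's created-by annotation; that is exactly the value the new branch in `getPodsForDeletion` decodes. A rough sketch of the annotation value (simplified; the real `api.SerializedReference` also carries fields such as apiVersion and uid):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Approximation of what refJson(t, &rs) stores in
	// rs_anno[controller.CreatedByAnnotation]; the field set is simplified.
	ref := map[string]interface{}{
		"kind": "SerializedReference",
		"reference": map[string]string{
			"kind":      "ReplicaSet",
			"namespace": "default",
			"name":      "rs",
		},
	}
	b, _ := json.Marshal(ref)
	fmt.Println(string(b))
	// {"kind":"SerializedReference","reference":{"kind":"ReplicaSet","name":"rs","namespace":"default"}}
}
```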
@@ -302,6 +331,7 @@ func TestDrain(t *testing.T) {
 		expected     *api.Node
 		pods         []api.Pod
 		rcs          []api.ReplicationController
+		replicaSets  []extensions.ReplicaSet
 		args         []string
 		expectFatal  bool
 		expectDelete bool
@@ -310,7 +340,7 @@ func TestDrain(t *testing.T) {
 			description: "RC-managed pod",
 			node:        node,
 			expected:    cordoned_node,
-			pods:        []api.Pod{replicated_pod},
+			pods:        []api.Pod{rc_pod},
 			rcs:         []api.ReplicationController{rc},
 			args:        []string{"node"},
 			expectFatal: false,
@@ -346,6 +376,16 @@ func TestDrain(t *testing.T) {
 			expectFatal:  false,
 			expectDelete: true,
 		},
+		{
+			description:  "RS-managed pod",
+			node:         node,
+			expected:     cordoned_node,
+			pods:         []api.Pod{rs_pod},
+			replicaSets:  []extensions.ReplicaSet{rs},
+			args:         []string{"node"},
+			expectFatal:  false,
+			expectDelete: true,
+		},
 		{
 			description: "naked pod",
 			node:        node,
@@ -396,6 +436,8 @@ func TestDrain(t *testing.T) {
 				return &http.Response{StatusCode: 200, Body: objBody(testapi.Extensions.Codec(), &ds)}, nil
 			case m.isFor("GET", "/namespaces/default/jobs/job"):
 				return &http.Response{StatusCode: 200, Body: objBody(testapi.Extensions.Codec(), &job)}, nil
+			case m.isFor("GET", "/namespaces/default/replicasets/rs"):
+				return &http.Response{StatusCode: 200, Body: objBody(testapi.Extensions.Codec(), &test.replicaSets[0])}, nil
 			case m.isFor("GET", "/pods"):
 				values, err := url.ParseQuery(req.URL.RawQuery)
 				if err != nil {