Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-21 02:41:25 +00:00)

Commit 6c5540baae: Merge pull request #20232 from mml/daemonset-warn

Auto commit by PR queue bot
kubectl bash completion:

````diff
@@ -1204,6 +1204,7 @@ _kubectl_drain()
 
     flags+=("--force")
     flags+=("--grace-period=")
+    flags+=("--ignore-daemonsets")
     flags+=("--alsologtostderr")
    flags+=("--api-version=")
     flags+=("--certificate-authority=")
````
kubectl-drain.1 man page:

````diff
@@ -18,9 +18,12 @@ Drain node in preparation for maintenance.
 .PP
 The given node will be marked unschedulable to prevent new pods from arriving.
 Then drain deletes all pods except mirror pods (which cannot be deleted through
-the API server). If there are any pods that are neither mirror pods nor
-managed by a ReplicationController, Job, or DaemonSet, then drain will not
-delete any pods unless you use \-\-force.
+the API server). If there are DaemonSet\-managed pods, drain will not proceed
+without \-\-ignore\-daemonsets, and regardless it will not delete any
+DaemonSet\-managed pods, because those pods would be immediately replaced by the
+DaemonSet controller, which ignores unschedulable markings. If there are any
+pods that are neither mirror pods nor managed by a ReplicationController,
+DaemonSet, or Job, then drain will not delete any pods unless you use \-\-force.
 
 .PP
 When you are ready to put the node back into service, use kubectl uncordon, which
````
````diff
@@ -36,6 +39,10 @@ will make the node schedulable again.
 \fB\-\-grace\-period\fP=\-1
 	Period of time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used.
 
+.PP
+\fB\-\-ignore\-daemonsets\fP=false
+	Ignore DaemonSet\-managed pods.
+
 
 .SH OPTIONS INHERITED FROM PARENT COMMANDS
 .PP
````
kubectl_drain.md user guide:

````diff
@@ -38,9 +38,12 @@ Drain node in preparation for maintenance.
 
 The given node will be marked unschedulable to prevent new pods from arriving.
 Then drain deletes all pods except mirror pods (which cannot be deleted through
-the API server). If there are any pods that are neither mirror pods nor
-managed by a ReplicationController, Job, or DaemonSet, then drain will not
-delete any pods unless you use --force.
+the API server). If there are DaemonSet-managed pods, drain will not proceed
+without --ignore-daemonsets, and regardless it will not delete any
+DaemonSet-managed pods, because those pods would be immediately replaced by the
+DaemonSet controller, which ignores unschedulable markings. If there are any
+pods that are neither mirror pods nor managed by a ReplicationController,
+DaemonSet, or Job, then drain will not delete any pods unless you use --force.
 
 When you are ready to put the node back into service, use kubectl uncordon, which
 will make the node schedulable again.
````
````diff
@@ -66,6 +69,7 @@ $ kubectl drain foo --grace-period=900
 ```
       --force[=false]: Continue even if there are pods not managed by a ReplicationController, Job, or DaemonSet.
       --grace-period=-1: Period of time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used.
+      --ignore-daemonsets[=false]: Ignore DaemonSet-managed pods.
 ```
 
 ### Options inherited from parent commands
````
````diff
@@ -100,7 +104,7 @@ $ kubectl drain foo --grace-period=900
 
 * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager
 
-###### Auto generated by spf13/cobra on 28-Jan-2016
+###### Auto generated by spf13/cobra on 2-Feb-2016
 
 <!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
 []()
````
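The updated description amounts to a small decision table: mirror pods are always skipped, DaemonSet-managed pods block the drain unless --ignore-daemonsets is given (and are never deleted either way), and unmanaged pods block it unless --force is given. A minimal standalone Go sketch of those rules, assuming illustrative names (PodInfo, Options, and decide are not kubectl's actual types):

```go
package main

import "fmt"

// PodInfo captures the attributes drain looks at for each pod.
type PodInfo struct {
	Name       string
	Mirror     bool // mirror pod: cannot be deleted through the API server
	DaemonSet  bool // managed by a DaemonSet
	Replicated bool // managed by a ReplicationController or Job
}

// Options mirrors the two drain flags this PR cares about.
type Options struct {
	Force            bool
	IgnoreDaemonsets bool
}

// decide returns what drain does with one pod: "delete", "skip", or "fatal".
func decide(p PodInfo, o Options) string {
	switch {
	case p.Mirror:
		return "skip" // mirror pods are never deleted
	case p.DaemonSet:
		if o.IgnoreDaemonsets {
			// Never deleted even when ignored: the DaemonSet controller
			// would immediately recreate the pod, since it ignores the
			// unschedulable marking.
			return "skip"
		}
		return "fatal" // refuse to proceed without --ignore-daemonsets
	case !p.Replicated:
		if o.Force {
			return "delete" // unmanaged pod, deleted only under --force
		}
		return "fatal" // refuse to proceed without --force
	default:
		return "delete" // replicated pod: safe to delete, it will be rescheduled
	}
}

func main() {
	pods := []PodInfo{
		{Name: "mirror-pod", Mirror: true},
		{Name: "ds-pod", DaemonSet: true},
		{Name: "bare-pod"},
		{Name: "rc-pod", Replicated: true},
	}
	for _, o := range []Options{{}, {Force: true, IgnoreDaemonsets: true}} {
		fmt.Printf("force=%v ignore-daemonsets=%v\n", o.Force, o.IgnoreDaemonsets)
		for _, p := range pods {
			fmt.Printf("  %-10s -> %s\n", p.Name, decide(p, o))
		}
	}
}
```

With no flags, the ds-pod and bare-pod cases come out "fatal", which matches the new expectFatal test cases in drain_test.go further down.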
known-flags list:

````diff
@@ -140,6 +140,7 @@ host-pid-sources
 hostname-override
 http-check-frequency
 http-port
+ignore-daemonsets
 ignore-not-found
 image-gc-high-threshold
 image-gc-low-threshold
````
pkg/kubectl/cmd/drain.go:

````diff
@@ -17,6 +17,7 @@ limitations under the License.
 package cmd
 
 import (
+	"errors"
 	"fmt"
 	"io"
 	"reflect"
````
````diff
@@ -41,6 +42,7 @@ type DrainOptions struct {
 	factory            *cmdutil.Factory
 	Force              bool
 	GracePeriodSeconds int
+	IgnoreDaemonsets   bool
 	mapper             meta.RESTMapper
 	nodeInfo           *resource.Info
 	out                io.Writer
````
````diff
@@ -98,9 +100,12 @@ const (
 
 The given node will be marked unschedulable to prevent new pods from arriving.
 Then drain deletes all pods except mirror pods (which cannot be deleted through
-the API server). If there are any pods that are neither mirror pods nor
-managed by a ReplicationController, Job, or DaemonSet, then drain will not
-delete any pods unless you use --force.
+the API server). If there are DaemonSet-managed pods, drain will not proceed
+without --ignore-daemonsets, and regardless it will not delete any
+DaemonSet-managed pods, because those pods would be immediately replaced by the
+DaemonSet controller, which ignores unschedulable markings. If there are any
+pods that are neither mirror pods nor managed by a ReplicationController,
+DaemonSet, or Job, then drain will not delete any pods unless you use --force.
 
 When you are ready to put the node back into service, use kubectl uncordon, which
 will make the node schedulable again.
````
````diff
@@ -127,6 +132,7 @@ func NewCmdDrain(f *cmdutil.Factory, out io.Writer) *cobra.Command {
 		},
 	}
 	cmd.Flags().BoolVar(&options.Force, "force", false, "Continue even if there are pods not managed by a ReplicationController, Job, or DaemonSet.")
+	cmd.Flags().BoolVar(&options.IgnoreDaemonsets, "ignore-daemonsets", false, "Ignore DaemonSet-managed pods.")
 	cmd.Flags().IntVar(&options.GracePeriodSeconds, "grace-period", -1, "Period of time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used.")
 	return cmd
 }
````
````diff
@@ -196,6 +202,7 @@ func (o *DrainOptions) getPodsForDeletion() ([]api.Pod, error) {
 		return pods, err
 	}
 	unreplicatedPodNames := []string{}
+	daemonSetPodNames := []string{}
 
 	for _, pod := range podList.Items {
 		_, found := pod.ObjectMeta.Annotations[types.ConfigMirrorAnnotationKey]
````
````diff
@@ -204,6 +211,7 @@ func (o *DrainOptions) getPodsForDeletion() ([]api.Pod, error) {
 			continue
 		}
 		replicated := false
+		daemonset_pod := false
 
 		creatorRef, found := pod.ObjectMeta.Annotations[controller.CreatedByAnnotation]
 		if found {
````
````diff
@@ -227,7 +235,11 @@ func (o *DrainOptions) getPodsForDeletion() ([]api.Pod, error) {
 				// gone/missing, not for any other cause. TODO(mml): something more
 				// sophisticated than this
 				if err == nil && ds != nil {
-					replicated = true
+					// Otherwise, treat daemonset-managed pods as unmanaged since
+					// DaemonSet Controller currently ignores the unschedulable bit.
+					// FIXME(mml): Add link to the issue concerning a proper way to drain
+					// daemonset pods, probably using taints.
+					daemonset_pod = true
 				}
 			} else if sr.Reference.Kind == "Job" {
 				job, err := o.client.Jobs(sr.Reference.Namespace).Get(sr.Reference.Name)
````
````diff
@@ -240,24 +252,63 @@ func (o *DrainOptions) getPodsForDeletion() ([]api.Pod, error) {
 				}
 			}
 		}
-		if replicated || o.Force {
-			pods = append(pods, pod)
-		}
-		if !replicated {
+
+		switch {
+		case daemonset_pod:
+			daemonSetPodNames = append(daemonSetPodNames, pod.Name)
+		case !replicated:
 			unreplicatedPodNames = append(unreplicatedPodNames, pod.Name)
+			if o.Force {
+				pods = append(pods, pod)
+			}
+		default:
+			pods = append(pods, pod)
 		}
 	}
 
-	if len(unreplicatedPodNames) > 0 {
-		joined := strings.Join(unreplicatedPodNames, ", ")
-		if !o.Force {
-			return pods, fmt.Errorf("refusing to continue due to pods managed by neither a ReplicationController, nor a Job, nor a DaemonSet: %s (use --force to override)", joined)
-		}
-		fmt.Fprintf(o.out, "WARNING: About to delete these pods managed by neither a ReplicationController, nor a Job, nor a DaemonSet: %s\n", joined)
+	daemonSetErrors := !o.IgnoreDaemonsets && len(daemonSetPodNames) > 0
+	unreplicatedErrors := !o.Force && len(unreplicatedPodNames) > 0
+
+	switch {
+	case daemonSetErrors && unreplicatedErrors:
+		return []api.Pod{}, errors.New(unmanagedMsg(unreplicatedPodNames, daemonSetPodNames, true))
+	case daemonSetErrors && !unreplicatedErrors:
+		return []api.Pod{}, errors.New(unmanagedMsg([]string{}, daemonSetPodNames, true))
+	case unreplicatedErrors && !daemonSetErrors:
+		return []api.Pod{}, errors.New(unmanagedMsg(unreplicatedPodNames, []string{}, true))
 	}
 
+	if len(unreplicatedPodNames) > 0 {
+		fmt.Fprintf(o.out, "WARNING: About to delete these %s\n", unmanagedMsg(unreplicatedPodNames, []string{}, false))
+	}
+	if len(daemonSetPodNames) > 0 {
+		fmt.Fprintf(o.out, "WARNING: Skipping %s\n", unmanagedMsg([]string{}, daemonSetPodNames, false))
+	}
+
 	return pods, nil
 }
 
+// Helper for generating errors or warnings about unmanaged pods.
+func unmanagedMsg(unreplicatedNames []string, daemonSetNames []string, include_guidance bool) string {
+	msgs := []string{}
+	if len(unreplicatedNames) > 0 {
+		msg := fmt.Sprintf("pods not managed by ReplicationController, Job, or DaemonSet: %s", strings.Join(unreplicatedNames, ","))
+		if include_guidance {
+			msg += " (use --force to override)"
+		}
+		msgs = append(msgs, msg)
+	}
+	if len(daemonSetNames) > 0 {
+		msg := fmt.Sprintf("DaemonSet-managed pods: %s", strings.Join(daemonSetNames, ","))
+		if include_guidance {
+			msg += " (use --ignore-daemonsets to ignore)"
+		}
+		msgs = append(msgs, msg)
+	}
+
+	return strings.Join(msgs, " and ")
+}
+
 // deletePods deletes the pods on the api server
 func (o *DrainOptions) deletePods(pods []api.Pod) error {
 	deleteOptions := api.DeleteOptions{}
````
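Because unmanagedMsg is shared between the fatal and warning paths, the same phrasing appears in returned errors and in WARNING output. A small standalone harness, with the helper body copied from the patch and only the main function illustrative, shows the two forms:

```go
package main

import (
	"fmt"
	"strings"
)

// unmanagedMsg is copied verbatim from the patch above.
func unmanagedMsg(unreplicatedNames []string, daemonSetNames []string, include_guidance bool) string {
	msgs := []string{}
	if len(unreplicatedNames) > 0 {
		msg := fmt.Sprintf("pods not managed by ReplicationController, Job, or DaemonSet: %s", strings.Join(unreplicatedNames, ","))
		if include_guidance {
			msg += " (use --force to override)"
		}
		msgs = append(msgs, msg)
	}
	if len(daemonSetNames) > 0 {
		msg := fmt.Sprintf("DaemonSet-managed pods: %s", strings.Join(daemonSetNames, ","))
		if include_guidance {
			msg += " (use --ignore-daemonsets to ignore)"
		}
		msgs = append(msgs, msg)
	}

	return strings.Join(msgs, " and ")
}

func main() {
	// Error form: both pod classes present, guidance included.
	fmt.Println(unmanagedMsg([]string{"bare-pod"}, []string{"ds-pod"}, true))

	// Warning forms printed once drain proceeds under --force / --ignore-daemonsets.
	fmt.Println("WARNING: About to delete these " + unmanagedMsg([]string{"bare-pod"}, nil, false))
	fmt.Println("WARNING: Skipping " + unmanagedMsg(nil, []string{"ds-pod"}, false))
}
```

Joining both messages with " and " keeps the --force and --ignore-daemonsets guidance in a single error when both pod classes are present, which is what the daemonSetErrors && unreplicatedErrors case above relies on.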
pkg/kubectl/cmd/drain_test.go:

````diff
@@ -323,8 +323,18 @@ func TestDrain(t *testing.T) {
 			pods:         []api.Pod{ds_pod},
 			rcs:          []api.ReplicationController{rc},
 			args:         []string{"node"},
+			expectFatal:  true,
+			expectDelete: false,
+		},
+		{
+			description:  "DS-managed pod with --ignore-daemonsets",
+			node:         node,
+			expected:     cordoned_node,
+			pods:         []api.Pod{ds_pod},
+			rcs:          []api.ReplicationController{rc},
+			args:         []string{"node", "--ignore-daemonsets"},
 			expectFatal:  false,
-			expectDelete: true,
+			expectDelete: false,
 		},
 		{
 			description: "Job-managed pod",
````