Merge pull request #56713 from juanvallejo/jvallejo/handle-ds-pod-drain-local-storage

Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Allow kubectl drain to continue when it encounters DaemonSet-managed pods with local storage

**Release note**:
```release-note
NONE
```

Prevents `kubectl drain` from failing when it encounters DaemonSet-managed pods
that have local storage and the option to ignore DaemonSet-managed pods
(`--ignore-daemonsets`) has been specified.
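
The reordered filter chain runs the DaemonSet check before the local-storage check and stops filtering a pod as soon as any filter rejects it. Below is a minimal, self-contained sketch of that pattern; the `pod`, `podFilter`, `daemonSetFilter`, and `localStorageFilter` names are illustrative stand-ins, not the actual kubectl types:

```go
package main

import "fmt"

// pod is a stand-in for corev1.Pod in this sketch.
type pod struct {
	name            string
	managedByDS     bool
	hasLocalStorage bool
}

// podFilter mirrors the shape used by drain: it reports whether the pod can be
// deleted, plus an optional warning or fatal message.
type podFilter func(pod) (ok bool, warning, fatal string)

// daemonSetFilter rejects DaemonSet-managed pods with only a warning,
// modeling the --ignore-daemonsets behavior.
func daemonSetFilter(p pod) (bool, string, string) {
	if p.managedByDS {
		return false, "ignoring DaemonSet-managed pod", ""
	}
	return true, "", ""
}

// localStorageFilter rejects pods using local storage with a fatal error.
func localStorageFilter(p pod) (bool, string, string) {
	if p.hasLocalStorage {
		return false, "", "pod has local storage (emptyDir)"
	}
	return true, "", ""
}

func main() {
	p := pod{name: "fluentd-abc12", managedByDS: true, hasLocalStorage: true}

	podOk := true
	// The DaemonSet filter runs first; once the pod is marked not-ok we break,
	// so the local-storage filter never gets a chance to report a fatal error.
	for _, filt := range []podFilter{daemonSetFilter, localStorageFilter} {
		ok, warning, fatal := filt(p)
		podOk = podOk && ok
		if warning != "" {
			fmt.Printf("WARNING: %s: %s\n", p.name, warning)
		}
		if fatal != "" {
			fmt.Printf("error: %s: %s\n", p.name, fatal)
		}
		if !podOk {
			break
		}
	}
	fmt.Println("pod scheduled for deletion:", podOk)
}
```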

A test will be added.

cc @kubernetes/sig-cli-misc @deads2k @fabianofranz @dustymabe
Kubernetes Submit Queue, 2018-01-12 23:05:46 -08:00 (committed by GitHub)
2 changed files with 74 additions and 11 deletions

```diff
@@ -446,7 +446,7 @@ func (o *DrainOptions) getPodsForDeletion(nodeInfo *resource.Info) (pods []corev
 	for _, pod := range podList.Items {
 		podOk := true
-		for _, filt := range []podFilter{mirrorPodFilter, o.localStorageFilter, o.unreplicatedFilter, o.daemonsetFilter} {
+		for _, filt := range []podFilter{o.daemonsetFilter, mirrorPodFilter, o.localStorageFilter, o.unreplicatedFilter} {
 			filterOk, w, f := filt(pod)
 			podOk = podOk && filterOk
@@ -456,6 +456,13 @@ func (o *DrainOptions) getPodsForDeletion(nodeInfo *resource.Info) (pods []corev
 			if f != nil {
 				fs[f.string] = append(fs[f.string], pod.Name)
 			}
+			// short-circuit as soon as pod not ok
+			// at that point, there is no reason to run pod
+			// through any additional filters
+			if !podOk {
+				break
+			}
 		}
 		if podOk {
 			pods = append(pods, pod)
```
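
Running `o.daemonsetFilter` ahead of `o.localStorageFilter` and breaking out of the loop as soon as a pod fails a filter means a DaemonSet-managed pod that also uses local storage is ignored with a warning instead of tripping the local-storage fatal error later in the chain.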