Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions <a href="https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md">here</a>.

ensure ScheduleDSPods respects tainted nodes

- add PodToleratesNodeTaints to ~~nodeSelectionPredicates()~~ checkNodeFitness()
- add integration testcase

Fixes #66348, and keeps the behavior consistent with what it was when ScheduleDSPods is disabled.

```release-note
NONE
```
This commit is contained in:
Kubernetes Submit Queue 2018-08-09 21:35:55 -07:00 committed by GitHub
commit ecc64f2e6e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 78 additions and 3 deletions

View File

@ -1423,11 +1423,12 @@ func NewPod(ds *apps.DaemonSet, nodeName string) *v1.Pod {
return newPod return newPod
} }
// nodeSelectionPredicates runs a set of predicates that select candidate nodes for the DaemonSet; // checkNodeFitness runs a set of predicates that select candidate nodes for the DaemonSet;
// the predicates include: // the predicates include:
// - PodFitsHost: checks pod's NodeName against node // - PodFitsHost: checks pod's NodeName against node
// - PodMatchNodeSelector: checks pod's NodeSelector and NodeAffinity against node // - PodMatchNodeSelector: checks pod's NodeSelector and NodeAffinity against node
func nodeSelectionPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { // - PodToleratesNodeTaints: exclude tainted node unless pod has specific toleration
func checkNodeFitness(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
var predicateFails []algorithm.PredicateFailureReason var predicateFails []algorithm.PredicateFailureReason
fit, reasons, err := predicates.PodFitsHost(pod, meta, nodeInfo) fit, reasons, err := predicates.PodFitsHost(pod, meta, nodeInfo)
if err != nil { if err != nil {
@ -1444,6 +1445,14 @@ func nodeSelectionPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, node
if !fit { if !fit {
predicateFails = append(predicateFails, reasons...) predicateFails = append(predicateFails, reasons...)
} }
fit, reasons, err = predicates.PodToleratesNodeTaints(pod, nil, nodeInfo)
if err != nil {
return false, predicateFails, err
}
if !fit {
predicateFails = append(predicateFails, reasons...)
}
return len(predicateFails) == 0, predicateFails, nil return len(predicateFails) == 0, predicateFails, nil
} }
@ -1454,7 +1463,7 @@ func Predicates(pod *v1.Pod, nodeInfo *schedulercache.NodeInfo) (bool, []algorit
// If ScheduleDaemonSetPods is enabled, only check nodeSelector and nodeAffinity. // If ScheduleDaemonSetPods is enabled, only check nodeSelector and nodeAffinity.
if utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods) { if utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods) {
fit, reasons, err := nodeSelectionPredicates(pod, nil, nodeInfo) fit, reasons, err := checkNodeFitness(pod, nil, nodeInfo)
if err != nil { if err != nil {
return false, predicateFails, err return false, predicateFails, err
} }

View File

@ -897,3 +897,69 @@ func TestLaunchWithHashCollision(t *testing.T) {
validateDaemonSetCollisionCount(dsClient, ds.Name, orgCollisionCount+1, t) validateDaemonSetCollisionCount(dsClient, ds.Name, orgCollisionCount+1, t)
} }
// TestTaintedNode verifies that, regardless of whether the "ScheduleDaemonSetPods"
// feature gate is enabled, a DaemonSet pod is not scheduled onto a tainted node
// (the pod carries no matching toleration), and that removing the taint allows
// the pod to be scheduled there.
func TestTaintedNode(t *testing.T) {
	// Exercise both feature-gate settings and every DaemonSet update strategy.
	forEachFeatureGate(t, func(t *testing.T) {
		forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
			server, closeFn, dc, informers, clientset := setup(t)
			defer closeFn()
			ns := framework.CreateTestingNamespace("tainted-node", server, t)
			defer framework.DeleteTestingNamespace(ns, server, t)
			dsClient := clientset.AppsV1().DaemonSets(ns.Name)
			podClient := clientset.CoreV1().Pods(ns.Name)
			podInformer := informers.Core().V1().Pods().Informer()
			nodeClient := clientset.CoreV1().Nodes()
			stopCh := make(chan struct{})
			defer close(stopCh)
			// Informers are started before the controller and scheduler so their
			// caches are being populated when those components begin processing.
			informers.Start(stopCh)
			go dc.Run(5, stopCh)
			// Start Scheduler
			setupScheduler(t, clientset, informers, stopCh)
			ds := newDaemonSet("foo", ns.Name)
			ds.Spec.UpdateStrategy = *strategy
			ds, err := dsClient.Create(ds)
			if err != nil {
				t.Fatalf("Failed to create DaemonSet: %v", err)
			}
			defer cleanupDaemonSets(t, clientset, ds)
			// A node with a NoSchedule taint: the DaemonSet pod has no matching
			// toleration, so no pod is expected to land here yet.
			nodeWithTaint := newNode("node-with-taint", nil)
			nodeWithTaint.Spec.Taints = []v1.Taint{{Key: "key1", Value: "val1", Effect: "NoSchedule"}}
			_, err = nodeClient.Create(nodeWithTaint)
			if err != nil {
				t.Fatalf("Failed to create nodeWithTaint: %v", err)
			}
			nodeWithoutTaint := newNode("node-without-taint", nil)
			_, err = nodeClient.Create(nodeWithoutTaint)
			if err != nil {
				t.Fatalf("Failed to create nodeWithoutTaint: %v", err)
			}
			// Only the untainted node should receive a pod (count == 1).
			validateDaemonSetPodsAndMarkReady(podClient, podInformer, 1, t)
			validateDaemonSetStatus(dsClient, ds.Name, 1, t)
			// remove taint from nodeWithTaint
			nodeWithTaint, err = nodeClient.Get("node-with-taint", metav1.GetOptions{})
			if err != nil {
				t.Fatalf("Failed to retrieve nodeWithTaint: %v", err)
			}
			// Update via a deep copy so the informer-cached object is not mutated.
			nodeWithTaintCopy := nodeWithTaint.DeepCopy()
			nodeWithTaintCopy.Spec.Taints = []v1.Taint{}
			_, err = nodeClient.Update(nodeWithTaintCopy)
			if err != nil {
				t.Fatalf("Failed to update nodeWithTaint: %v", err)
			}
			// With the taint gone, both nodes should now run a pod (count == 2).
			validateDaemonSetPodsAndMarkReady(podClient, podInformer, 2, t)
			validateDaemonSetStatus(dsClient, ds.Name, 2, t)
		})
	})
}