diff --git a/test/conformance/testdata/conformance.txt b/test/conformance/testdata/conformance.txt
index 31f2c290513..a91cea67962 100644
--- a/test/conformance/testdata/conformance.txt
+++ b/test/conformance/testdata/conformance.txt
@@ -267,7 +267,6 @@ test/e2e/node/pre_stop.go: "should call prestop when killing a pod"
 test/e2e/scheduling/predicates.go: "validates resource limits of pods that are allowed to run"
 test/e2e/scheduling/predicates.go: "validates that NodeSelector is respected if not matching"
 test/e2e/scheduling/predicates.go: "validates that NodeSelector is respected if matching"
-test/e2e/scheduling/predicates.go: "validates that taints-tolerations is respected if matching"
 test/e2e/scheduling/predicates.go: "validates that there is no conflict between pods with same hostPort but different hostIP and protocol"
 test/e2e/scheduling/predicates.go: "validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP"
 test/e2e/scheduling/taints.go: "removing taint cancels eviction"
diff --git a/test/e2e/scheduling/predicates.go b/test/e2e/scheduling/predicates.go
index 34e5f9500b4..f1991147eaf 100644
--- a/test/e2e/scheduling/predicates.go
+++ b/test/e2e/scheduling/predicates.go
@@ -454,14 +454,11 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 		framework.ExpectEqual(labelPod.Spec.NodeName, nodeName)
 	})
 
-	/*
-		Release : v1.18
-		Testname: Scheduler, taints-tolerations matching
-		Description: Find an available node and taint it with a key with effect NoSchedule. Schedule a pod with a
-		corresponding nodeLabel and toleration spec such that it should only be able to run on the selected node.
-		Ensure that the pod is scheduled and running on the node.
-	*/
-	framework.ConformanceIt("validates that taints-tolerations is respected if matching [Disruptive]", func() {
+	// 1. Run a pod to get an available node, then delete the pod
+	// 2. Taint the node with a random taint
+	// 3. Try to relaunch the pod with tolerations that tolerate the taints on the node,
+	//    and the pod's nodeName specified to the name of the node found in step 1
+	ginkgo.It("validates that taints-tolerations is respected if matching", func() {
 		nodeName := getNodeThatCanRunPodWithoutToleration(f)
 
 		ginkgo.By("Trying to apply a random taint on the found node.")
@@ -489,7 +486,12 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 			NodeSelector: map[string]string{labelKey: labelValue},
 		})
 
-		framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(cs, ns, tolerationPodName))
+		// Check that the pod got scheduled. We intentionally DO NOT check that the
+		// pod is running because this would create a race condition between the
+		// kubelet and the scheduler: the scheduler might have scheduled a pod
+		// already when the kubelet does not know about its new taint yet. The
+		// kubelet will then refuse to launch the pod.
+		framework.ExpectNoError(e2epod.WaitForPodNotPending(cs, ns, tolerationPodName))
 		deployedPod, err := cs.CoreV1().Pods(ns).Get(tolerationPodName, metav1.GetOptions{})
 		framework.ExpectNoError(err)
 		framework.ExpectEqual(deployedPod.Spec.NodeName, nodeName)