Merge pull request #80654 from spiffxp/promote-taint-e2e-tests

Promote taint-based eviction e2e tests to Conformance
commit 64a67184ce
Author: Kubernetes Prow Robot
Date: 2019-08-06 21:05:43 -07:00
Committed by: GitHub
2 changed files with 26 additions and 12 deletions
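
Promoting a test to Conformance is mostly mechanical: the registration switches from ginkgo.It to framework.ConformanceIt and gains a structured doc comment (Release, Testname, Description), and the test's name is added to the conformance inventory in the first file below. A minimal sketch of the pattern, with the body elided (the real change is in the taints.go diff):

/*
	Release : v1.16
	Testname: Human-readable name of the test
	Description: The behavior being certified, stated with MUST wording.
*/
framework.ConformanceIt("removing taint cancels eviction", func() {
	// body unchanged from the previous ginkgo.It registration
})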

test/conformance/testdata/conformance.txt

@@ -218,6 +218,8 @@
 test/e2e/scheduling/predicates.go: "validates that NodeSelector is respected if matching"
 test/e2e/scheduling/predicates.go: "validates that there is no conflict between pods with same hostPort but different hostIP and protocol"
 test/e2e/scheduling/predicates.go: "validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP"
+test/e2e/scheduling/taints.go: "removing taint cancels eviction"
+test/e2e/scheduling/taints.go: "evicts pods with minTolerationSeconds"
 test/e2e/storage/empty_dir_wrapper.go: "should not conflict"
 test/e2e/storage/empty_dir_wrapper.go: "should not cause race condition when used for configmaps"
 test/e2e/storage/subpath.go: "should support subpaths with secret pod"
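
Both promoted tests exercise the NoExecute taint/toleration contract: a pod whose toleration carries tolerationSeconds may stay on a tainted node only until that deadline, and removing the taint before the deadline cancels the pending eviction. A minimal, self-contained sketch of that pairing using the k8s.io/api/core/v1 types; the key and value below are hypothetical, not necessarily what the tests' getTestTaint() helper returns:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// A NoExecute taint: pods that do not tolerate it are evicted from the node.
	taint := v1.Taint{
		Key:    "example.com/e2e-evict", // hypothetical key for illustration
		Value:  "evict",
		Effect: v1.TaintEffectNoExecute,
	}

	// A matching toleration with TolerationSeconds: the pod tolerates the
	// taint for at most this many seconds before the taint manager evicts it.
	seconds := int64(10)
	toleration := v1.Toleration{
		Key:               taint.Key,
		Operator:          v1.TolerationOpEqual,
		Value:             taint.Value,
		Effect:            v1.TaintEffectNoExecute,
		TolerationSeconds: &seconds,
	}

	fmt.Printf("taint=%+v\ntoleration=%+v\n", taint, toleration)
}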

test/e2e/scheduling/taints.go

@@ -278,23 +278,26 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
 		}
 	})
 
-	// 1. Run a pod with short toleration
-	// 2. Taint the node running this pod with a no-execute taint
-	// 3. Wait some time
-	// 4. Remove the taint
-	// 5. See if Pod won't be evicted.
-	ginkgo.It("removing taint cancels eviction", func() {
+	/*
+		Release : v1.16
+		Testname: Taint, Pod Eviction on taint removal
+		Description: The Pod with toleration timeout scheduled on a tainted Node MUST not be
+		evicted if the taint is removed before toleration time ends.
+	*/
+	framework.ConformanceIt("removing taint cancels eviction", func() {
 		podName := "taint-eviction-4"
 		pod := createPodForTaintsTest(true, 2*additionalWaitPerDeleteSeconds, podName, podName, ns)
 		observedDeletions := make(chan string, 100)
 		stopCh := make(chan struct{})
 		createTestController(cs, observedDeletions, stopCh, podName, ns)
 
+		// 1. Run a pod with short toleration
 		ginkgo.By("Starting pod...")
 		nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute)
 		framework.ExpectNoError(err)
 		e2elog.Logf("Pod is running on %v. Tainting Node", nodeName)
 
+		// 2. Taint the node running this pod with a no-execute taint
 		ginkgo.By("Trying to apply a taint on the Node")
 		testTaint := getTestTaint()
 		framework.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
@@ -306,7 +309,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
 			}
 		}()
 
-		// Wait a bit
+		// 3. Wait some time
 		ginkgo.By("Waiting short time to make sure Pod is queued for deletion")
 		timeoutChannel := time.NewTimer(time.Duration(additionalWaitPerDeleteSeconds) * time.Second).C
 		select {
@@ -316,9 +319,13 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
 			e2elog.Failf("Pod was evicted despite toleration")
 			return
 		}
+
+		// 4. Remove the taint
 		e2elog.Logf("Removing taint from Node")
 		framework.RemoveTaintOffNode(cs, nodeName, testTaint)
 		taintRemoved = true
+
+		// 5. See if Pod won't be evicted.
 		ginkgo.By("Waiting some time to make sure that toleration time passed.")
 		timeoutChannel = time.NewTimer(time.Duration(kubeletPodDeletionDelaySeconds+3*additionalWaitPerDeleteSeconds) * time.Second).C
 		select {
@@ -401,15 +408,19 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
 		}
 	})
 
-	// 1. Run two pods both with toleration; one with tolerationSeconds=5, the other with 25
-	// 2. Taint the nodes running those pods with a no-execute taint
-	// 3. See if both pods get evicted in between [5, 25] seconds
-	ginkgo.It("evicts pods with minTolerationSeconds", func() {
+	/*
+		Release : v1.16
+		Testname: Pod Eviction, Toleration limits
+		Description: In a multi-pods scenario with tolerationSeconds, the pods MUST be evicted as per
+		the toleration time limit.
+	*/
+	framework.ConformanceIt("evicts pods with minTolerationSeconds", func() {
 		podGroup := "taint-eviction-b"
 		observedDeletions := make(chan string, 100)
 		stopCh := make(chan struct{})
 		createTestController(cs, observedDeletions, stopCh, podGroup, ns)
 
+		// 1. Run two pods both with toleration; one with tolerationSeconds=5, the other with 25
 		pod1 := createPodForTaintsTest(true, additionalWaitPerDeleteSeconds, podGroup+"1", podGroup, ns)
 		pod2 := createPodForTaintsTest(true, 5*additionalWaitPerDeleteSeconds, podGroup+"2", podGroup, ns)
 
@@ -432,13 +443,14 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
 		framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(cs, pod2))
 		e2elog.Logf("Pod2 is running on %v. Tainting Node", nodeName)
 
+		// 2. Taint the nodes running those pods with a no-execute taint
 		ginkgo.By("Trying to apply a taint on the Node")
 		testTaint := getTestTaint()
 		framework.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
 		framework.ExpectNodeHasTaint(cs, nodeName, &testTaint)
 		defer framework.RemoveTaintOffNode(cs, nodeName, testTaint)
 
-		// Wait a bit
+		// 3. Wait to see if both pods get evicted in between [5, 25] seconds
 		ginkgo.By("Waiting for Pod1 and Pod2 to be deleted")
 		timeoutChannel := time.NewTimer(time.Duration(kubeletPodDeletionDelaySeconds+3*additionalWaitPerDeleteSeconds) * time.Second).C
 		var evicted int
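
For reference, the eviction-counting pattern both tests build on is a plain select loop racing a timer against the deletions channel fed by the test controller. A stripped-down, runnable sketch; the goroutine below stands in for that informer-backed controller, and the timings are arbitrary:

package main

import (
	"fmt"
	"time"
)

func main() {
	observedDeletions := make(chan string, 100)

	// Stand-in for the test controller: report two pod deletions at
	// different times, mimicking two different tolerationSeconds values.
	go func() {
		time.Sleep(1 * time.Second)
		observedDeletions <- "taint-eviction-b1"
		time.Sleep(2 * time.Second)
		observedDeletions <- "taint-eviction-b2"
	}()

	// Race a timeout against the deletion events, as the tests do.
	timeoutChannel := time.NewTimer(5 * time.Second).C
	var evicted int
	for evicted != 2 {
		select {
		case <-timeoutChannel:
			fmt.Printf("timed out with %d of 2 pods evicted\n", evicted)
			return
		case podName := <-observedDeletions:
			evicted++
			fmt.Printf("observed deletion of %s\n", podName)
		}
	}
	fmt.Println("both pods evicted within the window")
}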