From ec605eb3000e132c3213687332af148280727d10 Mon Sep 17 00:00:00 2001
From: Odin Ugedal
Date: Sat, 7 Sep 2019 14:59:46 +0200
Subject: [PATCH] Fix pre-pull of images in DiskPressure tests

This waits for DiskPressure to clear before pre-pulling images after a
DiskPressure test. Otherwise we risk pulling images while the kubelet
is still evicting/removing them.
---
 test/e2e_node/eviction_test.go             | 21 +++++++++++++++++++--
 test/e2e_node/system_node_critical_test.go |  7 +++++++
 2 files changed, 26 insertions(+), 2 deletions(-)

diff --git a/test/e2e_node/eviction_test.go b/test/e2e_node/eviction_test.go
index ac1de25779e..5f08d667cd0 100644
--- a/test/e2e_node/eviction_test.go
+++ b/test/e2e_node/eviction_test.go
@@ -520,19 +520,36 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
 	})
 
 	ginkgo.AfterEach(func() {
-		defer func() {
+		prePullImagesIfNecessary := func() {
 			if expectedNodeCondition == v1.NodeDiskPressure && framework.TestContext.PrepullImages {
 				// The disk eviction test may cause the prepulled images to be evicted,
 				// prepull those images again to ensure this test not affect following tests.
 				PrePullAllImages()
 			}
-		}()
+		}
+		// Run prePull using a defer to make sure it is executed even when the assertions below fail.
+		defer prePullImagesIfNecessary()
+
 		ginkgo.By("deleting pods")
 		for _, spec := range testSpecs {
 			ginkgo.By(fmt.Sprintf("deleting pod: %s", spec.pod.Name))
 			f.PodClient().DeleteSync(spec.pod.Name, &metav1.DeleteOptions{}, 10*time.Minute)
 		}
+
+		// In case a test fails before verifying that the NodeCondition no longer exists on the node,
+		// wait here for the NodeCondition to disappear.
+		ginkgo.By(fmt.Sprintf("making sure NodeCondition %s no longer exists on the node", expectedNodeCondition))
+		gomega.Eventually(func() error {
+			if expectedNodeCondition != noPressure && hasNodeCondition(f, expectedNodeCondition) {
+				return fmt.Errorf("conditions haven't returned to normal, node still has %s", expectedNodeCondition)
+			}
+			return nil
+		}, pressureDissapearTimeout, evictionPollInterval).Should(gomega.BeNil())
+
 		reduceAllocatableMemoryUsage()
+		ginkgo.By("making sure we have all the required images for testing")
+		prePullImagesIfNecessary()
+
 		ginkgo.By("making sure we can start a new pod after the test")
 		podName := "test-admit-pod"
 		f.PodClient().CreateSync(&v1.Pod{
diff --git a/test/e2e_node/system_node_critical_test.go b/test/e2e_node/system_node_critical_test.go
index 0b2a5ef8a3b..5660be7f1c7 100644
--- a/test/e2e_node/system_node_critical_test.go
+++ b/test/e2e_node/system_node_critical_test.go
@@ -110,6 +110,13 @@ var _ = framework.KubeDescribe("SystemNodeCriticalPod [Slow] [Serial] [Disruptiv
 				return checkMirrorPodDisappear(f.ClientSet, mirrorPodName, ns)
 			}, time.Minute, time.Second*2).Should(gomega.BeNil())
 
+			ginkgo.By("making sure that the node no longer has DiskPressure")
+			gomega.Eventually(func() error {
+				if hasNodeCondition(f, v1.NodeDiskPressure) {
+					return fmt.Errorf("conditions haven't returned to normal, node still has DiskPressure")
+				}
+				return nil
+			}, pressureDissapearTimeout, evictionPollInterval).Should(gomega.BeNil())
 		})
 	})
 })
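
Note on the pattern: both waits added above use the standard gomega.Eventually polling idiom. The following is a minimal, self-contained sketch of the same idiom as a plain Go test, outside the Kubernetes e2e framework; newFakePressureProbe, hasDiskPressure, disappearTimeout and pollInterval are hypothetical stand-ins for the framework's hasNodeCondition(f, v1.NodeDiskPressure), pressureDissapearTimeout and evictionPollInterval, and are not part of this patch.

package example

import (
	"fmt"
	"sync/atomic"
	"testing"
	"time"

	"github.com/onsi/gomega"
)

// newFakePressureProbe returns a probe that reports DiskPressure until
// clearAfter has elapsed. It is a hypothetical stand-in for the e2e
// framework's hasNodeCondition(f, v1.NodeDiskPressure) helper.
func newFakePressureProbe(clearAfter time.Duration) func() bool {
	var cleared atomic.Bool
	time.AfterFunc(clearAfter, func() { cleared.Store(true) })
	return func() bool { return !cleared.Load() }
}

func TestWaitForDiskPressureToClear(t *testing.T) {
	g := gomega.NewWithT(t)
	hasDiskPressure := newFakePressureProbe(2 * time.Second)

	// Stand-ins for the framework's pressureDissapearTimeout and evictionPollInterval.
	disappearTimeout := 10 * time.Second
	pollInterval := 500 * time.Millisecond

	// Poll until the pressure condition is gone; while it persists, return a
	// descriptive error so the failure output says what is still wrong.
	g.Eventually(func() error {
		if hasDiskPressure() {
			return fmt.Errorf("conditions haven't returned to normal, node still has DiskPressure")
		}
		return nil
	}, disappearTimeout, pollInterval).Should(gomega.BeNil())
}

Polling a func() error and asserting Should(gomega.BeNil()), as the patch does, keeps the last returned error in the failure output, so a timeout reports which condition was still present on the node.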