Merge pull request #82654 from odinuge/test-timeout

Fix pre pull of images in DiskPressure tests
Kubernetes Prow Robot authored on 2019-09-23 12:27:25 -07:00; committed by GitHub
commit c9c01fb902
2 changed files with 26 additions and 2 deletions


@@ -520,19 +520,36 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
 	})
 	ginkgo.AfterEach(func() {
-		defer func() {
+		prePullImagesIfNeccecary := func() {
 			if expectedNodeCondition == v1.NodeDiskPressure && framework.TestContext.PrepullImages {
 				// The disk eviction test may cause the prepulled images to be evicted,
 				// prepull those images again to ensure this test not affect following tests.
 				PrePullAllImages()
 			}
-		}()
+		}
+		// Run prePull using a defer to make sure it is executed even when the assertions below fails
+		defer prePullImagesIfNeccecary()
 		ginkgo.By("deleting pods")
 		for _, spec := range testSpecs {
 			ginkgo.By(fmt.Sprintf("deleting pod: %s", spec.pod.Name))
 			f.PodClient().DeleteSync(spec.pod.Name, &metav1.DeleteOptions{}, 10*time.Minute)
 		}
+		// In case a test fails before verifying that NodeCondition no longer exist on the node,
+		// we should wait for the NodeCondition to disappear
+		ginkgo.By(fmt.Sprintf("making sure NodeCondition %s no longer exist on the node", expectedNodeCondition))
+		gomega.Eventually(func() error {
+			if expectedNodeCondition != noPressure && hasNodeCondition(f, expectedNodeCondition) {
+				return fmt.Errorf("Conditions havent returned to normal, node still has %s", expectedNodeCondition)
+			}
+			return nil
+		}, pressureDissapearTimeout, evictionPollInterval).Should(gomega.BeNil())
 		reduceAllocatableMemoryUsage()
+		ginkgo.By("making sure we have all the required images for testing")
+		prePullImagesIfNeccecary()
 		ginkgo.By("making sure we can start a new pod after the test")
 		podName := "test-admit-pod"
 		f.PodClient().CreateSync(&v1.Pod{
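
The essence of this first hunk is that the image pre-pull cleanup is now a named closure that is both deferred (so it runs even if the new pressure assertion fails partway through the AfterEach) and called explicitly once the pressure has cleared. Below is a minimal standalone sketch of that pattern; prePullAllImages, assertNoPressure and cleanupAfterEviction are hypothetical stand-ins for illustration only, not the real test helpers.

package main

import "fmt"

// prePullAllImages is a stand-in for the test suite's PrePullAllImages helper.
func prePullAllImages() {
	fmt.Println("pre-pulling images")
}

// assertNoPressure stands in for the Eventually(...) pressure check; like a failed
// Ginkgo assertion, it can abort the surrounding function before later lines run.
func assertNoPressure() {
	panic("node still has DiskPressure")
}

// cleanupAfterEviction mirrors the shape of the patched AfterEach: name the cleanup
// closure, defer it once as a safety net, and call it explicitly where it is wanted.
func cleanupAfterEviction() {
	prePullIfNecessary := func() {
		prePullAllImages()
	}
	// The defer guarantees the pre-pull still happens if the assertion below aborts.
	defer prePullIfNecessary()

	assertNoPressure()   // aborts here in this sketch
	prePullIfNecessary() // never reached, but the deferred call still fires
}

func main() {
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("recovered:", r)
		}
	}()
	cleanupAfterEviction()
}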


@@ -110,6 +110,13 @@ var _ = framework.KubeDescribe("SystemNodeCriticalPod [Slow] [Serial] [Disruptiv
 			return checkMirrorPodDisappear(f.ClientSet, mirrorPodName, ns)
 		}, time.Minute, time.Second*2).Should(gomega.BeNil())
+		ginkgo.By("making sure that node no longer has DiskPressure")
+		gomega.Eventually(func() error {
+			if hasNodeCondition(f, v1.NodeDiskPressure) {
+				return fmt.Errorf("Conditions havent returned to normal, node still has DiskPressure")
+			}
+			return nil
+		}, pressureDissapearTimeout, evictionPollInterval).Should(gomega.BeNil())
 		})
 	})
 })
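
Both hunks lean on the same wait-for-condition idiom: poll until the node stops reporting the pressure condition, bounded by pressureDissapearTimeout. Outside a Ginkgo suite, the behaviour of gomega.Eventually(check, timeout, interval).Should(gomega.BeNil()) reduces to a plain poll loop. The standalone sketch below illustrates that; pollUntil and hasDiskPressure are illustrative stand-ins, not the real framework helpers.

package main

import (
	"fmt"
	"time"
)

// pollUntil calls check every interval until it returns nil or the timeout expires,
// roughly what gomega.Eventually(check, timeout, interval).Should(gomega.BeNil()) does.
func pollUntil(check func() error, timeout, interval time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		err := check()
		if err == nil {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("timed out after %v: %w", timeout, err)
		}
		time.Sleep(interval)
	}
}

func main() {
	// hasDiskPressure stands in for hasNodeCondition(f, v1.NodeDiskPressure);
	// in this sketch the "pressure" simply clears after one second.
	start := time.Now()
	hasDiskPressure := func() bool { return time.Since(start) < time.Second }

	err := pollUntil(func() error {
		if hasDiskPressure() {
			return fmt.Errorf("node still has DiskPressure")
		}
		return nil
	}, 5*time.Second, 250*time.Millisecond)
	fmt.Println("wait result:", err)
}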