From f7cf747e488a3e25db137f03a81a09c138eb7165 Mon Sep 17 00:00:00 2001 From: Claudiu Belu Date: Wed, 15 Sep 2021 17:25:32 -0700 Subject: [PATCH] tests: Wait for pod collection to enter a Running state While running tests in parallel, especially those with higher loads than others, it might take some time for Pods to be Running, even more so if the image has to be pulled as well. The test [sig-node] Pods should delete a collection of pods [Conformance] only waits for the pods to be scheduled before deleting them, and expects them to be gone in 1 minute, which can flake because of the above reasons. Note that the operations are in order, and kubelet runs them in order, which means that the pod first has to enter the Running state before attempting to delete it. This commit waits for the Pods to enter the Running state first before deleting the entire collection. Co-Authored-By: Antonio Ojea --- test/e2e/common/node/pods.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/e2e/common/node/pods.go b/test/e2e/common/node/pods.go index 8709415ad01..4edde25234d 100644 --- a/test/e2e/common/node/pods.go +++ b/test/e2e/common/node/pods.go @@ -866,6 +866,8 @@ var _ = SIGDescribe("Pods", func() { }}, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod") framework.Logf("created %v", podTestName) + framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, podTestName, f.Namespace.Name)) + framework.Logf("running and ready %v", podTestName) } // wait as required for all 3 pods to be found