From 0cdd5365a1d0c8ccad93bf5d12fbf54ec69deb0e Mon Sep 17 00:00:00 2001
From: Patrick Ohly
Date: Thu, 16 Apr 2020 17:44:37 +0200
Subject: [PATCH] storage tests: really wait for pod to disappear

As seen in one case (https://github.com/intel/pmem-csi/issues/587), a
pod can reach the "not running" state while its ephemeral volumes are
still being torn down by kubelet and the CSI driver. The test then
returns too early, and even deleting the namespace (and thus the pod)
succeeds before NodeUnpublishVolume has really finished.

To avoid this, StopPod now waits for the pod to really disappear.
---
 test/e2e/storage/testsuites/provisioning.go | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/test/e2e/storage/testsuites/provisioning.go b/test/e2e/storage/testsuites/provisioning.go
index 7a2e3384634..c8acf822f77 100644
--- a/test/e2e/storage/testsuites/provisioning.go
+++ b/test/e2e/storage/testsuites/provisioning.go
@@ -693,8 +693,7 @@ func StopPod(c clientset.Interface, pod *v1.Pod) {
 	} else {
 		framework.Logf("Pod %s has the following logs: %s", pod.Name, body)
 	}
-	e2epod.DeletePodOrFail(c, pod.Namespace, pod.Name)
-	e2epod.WaitForPodNoLongerRunningInNamespace(c, pod.Name, pod.Namespace)
+	e2epod.DeletePodWithWait(c, pod)
 }
 
 func verifyPVCsPending(client clientset.Interface, pvcs []*v1.PersistentVolumeClaim) {
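
Note on the helper: e2epod.DeletePodWithWait deletes the pod and then
blocks until the pod object is gone from the API server, whereas the
old DeletePodOrFail + WaitForPodNoLongerRunningInNamespace pair only
waited for the pod to leave the Running phase, which left the volume
teardown race open. The following is a minimal sketch of that
delete-and-wait pattern, not the actual e2epod implementation; the
function name deletePodAndWait, the 2-second poll interval, and the
5-minute timeout are illustrative assumptions, and the sketch uses the
current context-based client-go API rather than the one from 2020.

package podsketch

import (
	"context"
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// deletePodAndWait is a sketch, not the real e2epod.DeletePodWithWait:
// it deletes the pod and then polls until the API server reports
// NotFound. Waiting only for the pod to stop running is not enough,
// because kubelet may still be unmounting volumes (NodeUnpublishVolume)
// while the pod object continues to exist.
func deletePodAndWait(c clientset.Interface, pod *v1.Pod) error {
	err := c.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{})
	if apierrors.IsNotFound(err) {
		return nil // already gone, nothing to wait for
	}
	if err != nil {
		return fmt.Errorf("deleting pod %q: %v", pod.Name, err)
	}
	// Poll every 2s for up to 5 minutes (both values are assumptions).
	return wait.PollImmediate(2*time.Second, 5*time.Minute, func() (bool, error) {
		_, err := c.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return true, nil // pod object fully removed
		}
		return false, err // keep polling while the pod still exists
	})
}

Waiting for NotFound works because kubelet normally removes the pod
object only after its containers have stopped and its volumes have been
unmounted, so the disappearance of the pod implies that
NodeUnpublishVolume has completed (assuming no extra finalizers keep
the object around).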