From cead39cc97badc714fd51482a6936c3221e880c7 Mon Sep 17 00:00:00 2001
From: Patrick Ohly
Date: Tue, 30 Jul 2019 20:11:48 +0200
Subject: [PATCH] e2e storage: wait for pod deletion

After deleting a pod, we need to be sure that it really is gone,
otherwise there is a race condition: if we remove the CSI driver that
is responsible for the volume used by the pod before the pod is
actually deleted, deleting the pod will fail.
---
 test/e2e/storage/testsuites/provisioning.go | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/test/e2e/storage/testsuites/provisioning.go b/test/e2e/storage/testsuites/provisioning.go
index 507e5677bc5..a93c416a5d5 100644
--- a/test/e2e/storage/testsuites/provisioning.go
+++ b/test/e2e/storage/testsuites/provisioning.go
@@ -588,7 +588,8 @@ func StartInPodWithVolume(c clientset.Interface, ns, claimName, podName, command
 	return pod
 }
 
-// StopPod first tries to log the output of the pod's container, then deletes the pod.
+// StopPod first tries to log the output of the pod's container, then deletes the pod and
+// waits for that to succeed.
 func StopPod(c clientset.Interface, pod *v1.Pod) {
 	if pod == nil {
 		return
@@ -600,6 +601,7 @@ func StopPod(c clientset.Interface, pod *v1.Pod) {
 		e2elog.Logf("Pod %s has the following logs: %s", pod.Name, body)
 	}
 	e2epod.DeletePodOrFail(c, pod.Namespace, pod.Name)
+	e2epod.WaitForPodNoLongerRunningInNamespace(c, pod.Name, pod.Namespace)
 }
 
 func verifyPVCsPending(client clientset.Interface, pvcs []*v1.PersistentVolumeClaim) {
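
For illustration, here is a minimal, standalone sketch of the "delete the pod and wait until it is really gone" pattern that the commit message describes, written against plain client-go. The helper name deleteAndWaitGone and the polling interval/timeout are made up for this example; the actual test code uses the e2epod helpers shown in the diff above, and recent client-go method signatures (context-aware Delete/Get) are assumed.

package podcleanup

import (
	"context"
	"fmt"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// deleteAndWaitGone deletes a pod and then polls the API server until the pod
// object no longer exists. Without the wait, a caller could proceed to tear
// down the CSI driver while the pod (and the volume it uses) is still being
// cleaned up, and that cleanup would then fail.
func deleteAndWaitGone(ctx context.Context, c kubernetes.Interface, ns, name string) error {
	if err := c.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) {
		return fmt.Errorf("delete pod %s/%s: %w", ns, name, err)
	}
	// Poll until Get returns NotFound, i.e. the pod has really been removed.
	return wait.PollImmediate(2*time.Second, 5*time.Minute, func() (bool, error) {
		_, err := c.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return true, nil
		}
		// nil error means the pod still exists: keep polling. Any other
		// error aborts the wait.
		return false, err
	})
}

The patch itself takes a slightly weaker but sufficient approach: it calls the framework's e2epod.WaitForPodNoLongerRunningInNamespace after e2epod.DeletePodOrFail, i.e. it waits until the pod is no longer running before the test is allowed to remove the CSI driver that its volume depends on.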