From efe3747c22c132c7e20eb0090d688e8853053915 Mon Sep 17 00:00:00 2001
From: hasheddan
Date: Wed, 22 Jul 2020 07:49:20 -0500
Subject: [PATCH] Wait for resources owned by pod to be cleaned up in
 ephemeral volume tests

sig-storage tests that delete pods need to wait for owned resources to
also be cleaned up before returning when resources such as ephemeral
inline volumes are in use. This was previously implemented by modifying
the pod delete call of the e2e framework, which negatively impacted
other tests. That change was reverted, and the logic now lives in
StopPodAndDependents, which is local to the sig-storage tests.

Signed-off-by: hasheddan
---
 test/e2e/storage/testsuites/ephemeral.go    |  8 +++---
 test/e2e/storage/testsuites/provisioning.go | 32 +++++++++++++++++++++
 2 files changed, 36 insertions(+), 4 deletions(-)

diff --git a/test/e2e/storage/testsuites/ephemeral.go b/test/e2e/storage/testsuites/ephemeral.go
index 47314ab8d95..45b7abe8125 100644
--- a/test/e2e/storage/testsuites/ephemeral.go
+++ b/test/e2e/storage/testsuites/ephemeral.go
@@ -207,7 +207,7 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns
 				storageutils.VerifyExecInPodSucceed(f, pod2, "[ ! -f /mnt/test-0/hello-world ]")
 			}
 
-			defer StopPod(f.ClientSet, pod2)
+			defer StopPodAndDependents(f.ClientSet, pod2)
 
 			return nil
 		}
@@ -302,7 +302,7 @@ func (t EphemeralTest) TestEphemeral() {
 	pod := StartInPodWithInlineVolume(client, t.Namespace, "inline-volume-tester", command, volumes, t.ReadOnly, t.Node)
 	defer func() {
 		// pod might be nil now.
-		StopPod(client, pod)
+		StopPodAndDependents(client, pod)
 	}()
 	framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(client, pod.Name, pod.Namespace), "waiting for pod with inline volume")
 	runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
@@ -315,7 +315,7 @@ func (t EphemeralTest) TestEphemeral() {
 		runningPodData = t.RunningPodCheck(pod)
 	}
 
-	StopPod(client, pod)
+	StopPodAndDependents(client, pod)
 	pod = nil // Don't stop twice.
 
 	// There should be no dangling PVCs in the namespace now. There might be for
@@ -446,7 +446,7 @@ func VolumeSourceEnabled(c clientset.Interface, ns string, volume v1.VolumeSourc
 	switch {
 	case err == nil:
 		// Pod was created, feature supported.
-		StopPod(c, pod)
+		StopPodAndDependents(c, pod)
 		return true, nil
 	case apierrors.IsInvalid(err):
 		// "Invalid" because it uses a feature that isn't supported.
diff --git a/test/e2e/storage/testsuites/provisioning.go b/test/e2e/storage/testsuites/provisioning.go
index de2aaf9ec10..5554a1ce4c8 100644
--- a/test/e2e/storage/testsuites/provisioning.go
+++ b/test/e2e/storage/testsuites/provisioning.go
@@ -712,6 +712,38 @@ func StopPod(c clientset.Interface, pod *v1.Pod) {
 	e2epod.DeletePodWithWait(c, pod)
 }
 
+// StopPodAndDependents first tries to log the output of the pod's container,
+// then deletes the pod and waits for that to succeed. Also waits for all owned
+// resources to be deleted.
+func StopPodAndDependents(c clientset.Interface, pod *v1.Pod) {
+	if pod == nil {
+		return
+	}
+	body, err := c.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{}).Do(context.TODO()).Raw()
+	if err != nil {
+		framework.Logf("Error getting logs for pod %s: %v", pod.Name, err)
+	} else {
+		framework.Logf("Pod %s has the following logs: %s", pod.Name, body)
+	}
+	framework.Logf("Deleting pod %q in namespace %q", pod.Name, pod.Namespace)
+	deletionPolicy := metav1.DeletePropagationForeground
+	err = c.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name,
+		metav1.DeleteOptions{
+			// If the pod is the owner of some resources (like ephemeral inline volumes),
+			// then we want to be sure that those are also gone before we return.
+			// Blocking pod deletion via metav1.DeletePropagationForeground achieves that.
+			PropagationPolicy: &deletionPolicy,
+		})
+	if err != nil {
+		if apierrors.IsNotFound(err) {
+			return // assume pod was already deleted
+		}
+		framework.Logf("pod Delete API error: %v", err)
+	}
+	framework.Logf("Wait up to %v for pod %q to be fully deleted", e2epod.PodDeleteTimeout, pod.Name)
+	e2epod.WaitForPodNotFoundInNamespace(c, pod.Name, pod.Namespace, e2epod.PodDeleteTimeout)
+}
+
 func verifyPVCsPending(client clientset.Interface, pvcs []*v1.PersistentVolumeClaim) {
 	for _, claim := range pvcs {
 		// Get new copy of the claim
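
For reference, a minimal standalone sketch of the foreground-propagation delete
that StopPodAndDependents relies on, outside the e2e framework. The kubeconfig
loading, namespace, and pod name here are illustrative only:

// foreground_delete.go: delete a pod with foreground propagation so that the
// pod object is only removed once objects owned by it (via ownerReferences,
// e.g. PVCs backing ephemeral inline volumes) have been garbage-collected.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Illustrative setup: load the default kubeconfig from the home directory.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// With DeletePropagationForeground the API server keeps the pod around
	// (deletionTimestamp set, foregroundDeletion finalizer added) until its
	// dependents are deleted. The Delete call itself returns once deletion is
	// initiated, which is why StopPodAndDependents still waits for the pod to
	// disappear via WaitForPodNotFoundInNamespace.
	policy := metav1.DeletePropagationForeground
	err = client.CoreV1().Pods("default").Delete(context.TODO(), "demo-pod",
		metav1.DeleteOptions{PropagationPolicy: &policy})
	if err != nil {
		fmt.Println("delete failed:", err)
	}
}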