Wait for resources owned by pod to be cleaned up in ephemeral volume tests

sig-storage tests that delete pods need to wait for resources owned by
the pod, such as ephemeral inline volumes, to be cleaned up before
returning. This was previously implemented by modifying the pod delete
call in the e2e framework, which negatively impacted other tests. That
change was reverted, and the logic now lives in StopPodAndDependents,
which is local to the sig-storage tests.

Signed-off-by: hasheddan <georgedanielmangum@gmail.com>
commit efe3747c22
parent c2b7aa0353
Author: hasheddan
Date: 2020-07-22 07:49:20 -05:00
GPG Key ID: BD68BC686A14C271
2 changed files with 36 additions and 4 deletions
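
For context, the following is a minimal sketch (not part of this commit) of the deletion pattern the change relies on: delete a pod with foreground propagation so that the garbage collector removes the resources the pod owns before the delete completes, then poll until the pod object is gone. The package name and the helper deletePodAndWait are illustrative only; the helper actually added by this commit is StopPodAndDependents, shown in the diff below, which uses the framework's e2epod.WaitForPodNotFoundInNamespace for the final wait.

// Sketch only: foreground deletion plus a wait for the pod to disappear.
package podcleanup

import (
	"context"
	"fmt"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// deletePodAndWait deletes the named pod with foreground propagation, so that
// dependents owned by the pod are garbage collected before the pod object is
// removed, and then polls until the API server no longer knows about the pod.
func deletePodAndWait(ctx context.Context, c kubernetes.Interface, ns, name string, timeout time.Duration) error {
	policy := metav1.DeletePropagationForeground
	err := c.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{PropagationPolicy: &policy})
	if err != nil {
		if apierrors.IsNotFound(err) {
			return nil // already gone, nothing to wait for
		}
		return fmt.Errorf("deleting pod %s/%s: %w", ns, name, err)
	}
	return wait.PollImmediate(2*time.Second, timeout, func() (bool, error) {
		_, err := c.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
		return apierrors.IsNotFound(err), nil
	})
}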

File 1 of 2:

@@ -207,7 +207,7 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns
 				storageutils.VerifyExecInPodSucceed(f, pod2, "[ ! -f /mnt/test-0/hello-world ]")
 			}
-			defer StopPod(f.ClientSet, pod2)
+			defer StopPodAndDependents(f.ClientSet, pod2)
 			return nil
 		}
@@ -302,7 +302,7 @@ func (t EphemeralTest) TestEphemeral() {
 	pod := StartInPodWithInlineVolume(client, t.Namespace, "inline-volume-tester", command, volumes, t.ReadOnly, t.Node)
 	defer func() {
 		// pod might be nil now.
-		StopPod(client, pod)
+		StopPodAndDependents(client, pod)
 	}()
 	framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(client, pod.Name, pod.Namespace), "waiting for pod with inline volume")
 	runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
@@ -315,7 +315,7 @@ func (t EphemeralTest) TestEphemeral() {
 		runningPodData = t.RunningPodCheck(pod)
 	}
-	StopPod(client, pod)
+	StopPodAndDependents(client, pod)
 	pod = nil // Don't stop twice.
 	// There should be no dangling PVCs in the namespace now. There might be for
@@ -446,7 +446,7 @@ func VolumeSourceEnabled(c clientset.Interface, ns string, volume v1.VolumeSourc
 	switch {
 	case err == nil:
 		// Pod was created, feature supported.
-		StopPod(c, pod)
+		StopPodAndDependents(c, pod)
 		return true, nil
 	case apierrors.IsInvalid(err):
 		// "Invalid" because it uses a feature that isn't supported.
File 2 of 2:

@@ -712,6 +712,38 @@ func StopPod(c clientset.Interface, pod *v1.Pod) {
 	e2epod.DeletePodWithWait(c, pod)
 }
 
+// StopPodAndDependents first tries to log the output of the pod's container,
+// then deletes the pod and waits for that to succeed. Also waits for all owned
+// resources to be deleted.
+func StopPodAndDependents(c clientset.Interface, pod *v1.Pod) {
+	if pod == nil {
+		return
+	}
+	body, err := c.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{}).Do(context.TODO()).Raw()
+	if err != nil {
+		framework.Logf("Error getting logs for pod %s: %v", pod.Name, err)
+	} else {
+		framework.Logf("Pod %s has the following logs: %s", pod.Name, body)
+	}
+	framework.Logf("Deleting pod %q in namespace %q", pod.Name, pod.Namespace)
+	deletionPolicy := metav1.DeletePropagationForeground
+	err = c.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name,
+		metav1.DeleteOptions{
+			// If the pod is the owner of some resources (like ephemeral inline volumes),
+			// then we want to be sure that those are also gone before we return.
+			// Blocking pod deletion via metav1.DeletePropagationForeground achieves that.
+			PropagationPolicy: &deletionPolicy,
+		})
+	if err != nil {
+		if apierrors.IsNotFound(err) {
+			return // assume pod was already deleted
+		}
+		framework.Logf("pod Delete API error: %v", err)
+	}
+	framework.Logf("Wait up to %v for pod %q to be fully deleted", e2epod.PodDeleteTimeout, pod.Name)
+	e2epod.WaitForPodNotFoundInNamespace(c, pod.Name, pod.Namespace, e2epod.PodDeleteTimeout)
+}
+
 func verifyPVCsPending(client clientset.Interface, pvcs []*v1.PersistentVolumeClaim) {
 	for _, claim := range pvcs {
 		// Get new copy of the claim
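
As a side note on the "no dangling PVCs" comment in the TestEphemeral hunk above: below is a hedged sketch of one way a test could list PVCs that still carry an OwnerReference to a pod, which is the condition the foreground deletion is meant to rule out. The helper name pvcsOwnedByPod is an assumption for illustration and is not part of this commit.

// Sketch only: find PVCs in the pod's namespace still owned by the pod.
package podcleanup

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// pvcsOwnedByPod returns the names of PVCs whose OwnerReferences include the
// given pod's UID, i.e. PVCs that should have been garbage collected once the
// pod's foreground deletion finished.
func pvcsOwnedByPod(ctx context.Context, c kubernetes.Interface, pod *v1.Pod) ([]string, error) {
	pvcs, err := c.CoreV1().PersistentVolumeClaims(pod.Namespace).List(ctx, metav1.ListOptions{})
	if err != nil {
		return nil, err
	}
	var owned []string
	for _, pvc := range pvcs.Items {
		for _, ref := range pvc.OwnerReferences {
			if ref.UID == pod.UID {
				owned = append(owned, pvc.Name)
			}
		}
	}
	return owned, nil
}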