mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-07-22 03:11:40 +00:00
Merge pull request #93332 from hasheddan/wait-for-owned
Wait for resources owned by pod to be cleaned up in sig-storage tests
This commit is contained in:
commit
04ecdb9eb6
@@ -207,7 +207,7 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns
|
|||||||
storageutils.VerifyExecInPodSucceed(f, pod2, "[ ! -f /mnt/test-0/hello-world ]")
|
storageutils.VerifyExecInPodSucceed(f, pod2, "[ ! -f /mnt/test-0/hello-world ]")
|
||||||
}
|
}
|
||||||
|
|
||||||
defer StopPod(f.ClientSet, pod2)
|
defer StopPodAndDependents(f.ClientSet, pod2)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -302,7 +302,7 @@ func (t EphemeralTest) TestEphemeral() {
|
|||||||
pod := StartInPodWithInlineVolume(client, t.Namespace, "inline-volume-tester", command, volumes, t.ReadOnly, t.Node)
|
pod := StartInPodWithInlineVolume(client, t.Namespace, "inline-volume-tester", command, volumes, t.ReadOnly, t.Node)
|
||||||
defer func() {
|
defer func() {
|
||||||
// pod might be nil now.
|
// pod might be nil now.
|
||||||
StopPod(client, pod)
|
StopPodAndDependents(client, pod)
|
||||||
}()
|
}()
|
||||||
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(client, pod.Name, pod.Namespace), "waiting for pod with inline volume")
|
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(client, pod.Name, pod.Namespace), "waiting for pod with inline volume")
|
||||||
runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
|
runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
|
||||||
@@ -315,7 +315,7 @@ func (t EphemeralTest) TestEphemeral() {
|
|||||||
runningPodData = t.RunningPodCheck(pod)
|
runningPodData = t.RunningPodCheck(pod)
|
||||||
}
|
}
|
||||||
|
|
||||||
StopPod(client, pod)
|
StopPodAndDependents(client, pod)
|
||||||
pod = nil // Don't stop twice.
|
pod = nil // Don't stop twice.
|
||||||
|
|
||||||
// There should be no dangling PVCs in the namespace now. There might be for
|
// There should be no dangling PVCs in the namespace now. There might be for
|
||||||
@@ -446,7 +446,7 @@ func VolumeSourceEnabled(c clientset.Interface, ns string, volume v1.VolumeSourc
|
|||||||
switch {
|
switch {
|
||||||
case err == nil:
|
case err == nil:
|
||||||
// Pod was created, feature supported.
|
// Pod was created, feature supported.
|
||||||
StopPod(c, pod)
|
StopPodAndDependents(c, pod)
|
||||||
return true, nil
|
return true, nil
|
||||||
case apierrors.IsInvalid(err):
|
case apierrors.IsInvalid(err):
|
||||||
// "Invalid" because it uses a feature that isn't supported.
|
// "Invalid" because it uses a feature that isn't supported.
|
||||||
|
@@ -712,6 +712,38 @@ func StopPod(c clientset.Interface, pod *v1.Pod) {
|
|||||||
e2epod.DeletePodWithWait(c, pod)
|
e2epod.DeletePodWithWait(c, pod)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// StopPodAndDependents first tries to log the output of the pod's container,
|
||||||
|
// then deletes the pod and waits for that to succeed. Also waits for all owned
|
||||||
|
// resources to be deleted.
|
||||||
|
func StopPodAndDependents(c clientset.Interface, pod *v1.Pod) {
|
||||||
|
if pod == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
body, err := c.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{}).Do(context.TODO()).Raw()
|
||||||
|
if err != nil {
|
||||||
|
framework.Logf("Error getting logs for pod %s: %v", pod.Name, err)
|
||||||
|
} else {
|
||||||
|
framework.Logf("Pod %s has the following logs: %s", pod.Name, body)
|
||||||
|
}
|
||||||
|
framework.Logf("Deleting pod %q in namespace %q", pod.Name, pod.Namespace)
|
||||||
|
deletionPolicy := metav1.DeletePropagationForeground
|
||||||
|
err = c.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name,
|
||||||
|
metav1.DeleteOptions{
|
||||||
|
// If the pod is the owner of some resources (like ephemeral inline volumes),
|
||||||
|
// then we want to be sure that those are also gone before we return.
|
||||||
|
// Blocking pod deletion via metav1.DeletePropagationForeground achieves that.
|
||||||
|
PropagationPolicy: &deletionPolicy,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
if apierrors.IsNotFound(err) {
|
||||||
|
return // assume pod was already deleted
|
||||||
|
}
|
||||||
|
framework.Logf("pod Delete API error: %v", err)
|
||||||
|
}
|
||||||
|
framework.Logf("Wait up to %v for pod %q to be fully deleted", e2epod.PodDeleteTimeout, pod.Name)
|
||||||
|
e2epod.WaitForPodNotFoundInNamespace(c, pod.Name, pod.Namespace, e2epod.PodDeleteTimeout)
|
||||||
|
}
|
||||||
|
|
||||||
func verifyPVCsPending(client clientset.Interface, pvcs []*v1.PersistentVolumeClaim) {
|
func verifyPVCsPending(client clientset.Interface, pvcs []*v1.PersistentVolumeClaim) {
|
||||||
for _, claim := range pvcs {
|
for _, claim := range pvcs {
|
||||||
// Get new copy of the claim
|
// Get new copy of the claim
|
||||||
|
Loading…
Reference in New Issue
Block a user