From 9df3e2a47a7d631b9934080c0f49d15fbeb23294 Mon Sep 17 00:00:00 2001
From: Patrick Ohly
Date: Fri, 20 Jan 2023 12:45:01 +0100
Subject: [PATCH] e2e: replace WaitForPodToDisappear with WaitForPodNotFoundInNamespace

WaitForPodToDisappear was always called such that it listed all pods, which
made it less efficient than trying to get just the one pod it was checking
for. Being able to customize the poll interval wasn't useful in practice, so
it can be replaced with WaitForPodNotFoundInNamespace.
---
 test/e2e/framework/network/utils.go          |  3 +-
 test/e2e/framework/pod/pod_client.go         |  4 +--
 test/e2e/framework/pod/wait.go               | 36 --------------------
 test/e2e/framework/volume/fixtures.go        |  7 ++--
 test/e2e/kubectl/kubectl.go                  |  2 +-
 test/e2e/storage/pd.go                       |  3 +-
 test/e2e/storage/testsuites/provisioning.go  |  3 +-
 test/e2e/storage/ubernetes_lite_volumes.go   |  3 +-
 test/e2e/windows/density.go                  |  3 +-
 test/e2e_node/node_problem_detector_linux.go |  3 +-
 test/e2e_node/resource_collector.go          |  4 +--
 11 files changed, 12 insertions(+), 59 deletions(-)

diff --git a/test/e2e/framework/network/utils.go b/test/e2e/framework/network/utils.go
index 227b6b4f0c3..296f0c24cfe 100644
--- a/test/e2e/framework/network/utils.go
+++ b/test/e2e/framework/network/utils.go
@@ -32,7 +32,6 @@ import (
 	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	utilnet "k8s.io/apimachinery/pkg/util/net"
 	"k8s.io/apimachinery/pkg/util/sets"
@@ -889,7 +888,7 @@ func (config *NetworkingTestConfig) DeleteNetProxyPod(ctx context.Context) {
 	framework.ExpectNoError(config.getPodClient().Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0)))
 	config.EndpointPods = config.EndpointPods[1:]
 	// wait for pod being deleted.
-	err := e2epod.WaitForPodToDisappear(ctx, config.f.ClientSet, config.Namespace, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout)
+	err := e2epod.WaitForPodNotFoundInNamespace(ctx, config.f.ClientSet, pod.Name, config.Namespace, wait.ForeverTestTimeout)
 	if err != nil {
 		framework.Failf("Failed to delete %s pod: %v", pod.Name, err)
 	}
diff --git a/test/e2e/framework/pod/pod_client.go b/test/e2e/framework/pod/pod_client.go
index 54937df8313..ef3594679f2 100644
--- a/test/e2e/framework/pod/pod_client.go
+++ b/test/e2e/framework/pod/pod_client.go
@@ -27,7 +27,6 @@ import (
 	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/strategicpatch"
@@ -182,8 +181,7 @@ func (c *PodClient) DeleteSync(ctx context.Context, name string, options metav1.
 	if err != nil && !apierrors.IsNotFound(err) {
 		framework.Failf("Failed to delete pod %q: %v", name, err)
 	}
-	gomega.Expect(WaitForPodToDisappear(ctx, c.f.ClientSet, namespace, name, labels.Everything(),
-		2*time.Second, timeout)).To(gomega.Succeed(), "wait for pod %q to disappear", name)
+	framework.ExpectNoError(WaitForPodNotFoundInNamespace(ctx, c.f.ClientSet, name, namespace, timeout), "wait for pod %q to disappear", name)
 }
 
 // mungeSpec apply test-suite specific transformations to the pod spec.
diff --git a/test/e2e/framework/pod/wait.go b/test/e2e/framework/pod/wait.go
index c772920ace6..9f795be47d0 100644
--- a/test/e2e/framework/pod/wait.go
+++ b/test/e2e/framework/pod/wait.go
@@ -591,42 +591,6 @@ func WaitForPodNotFoundInNamespace(ctx context.Context, c clientset.Interface, p
 	return maybeTimeoutError(err, "waiting for pod %s not found", podIdentifier(ns, podName))
 }
 
-// WaitForPodToDisappear waits the given timeout duration for the specified pod to disappear.
-func WaitForPodToDisappear(ctx context.Context, c clientset.Interface, ns, podName string, label labels.Selector, interval, timeout time.Duration) error {
-	var lastPod *v1.Pod
-	err := wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) {
-		framework.Logf("Waiting for pod %s to disappear", podName)
-		options := metav1.ListOptions{LabelSelector: label.String()}
-		pods, err := c.CoreV1().Pods(ns).List(ctx, options)
-		if err != nil {
-			return handleWaitingAPIError(err, true, "listing pods")
-		}
-		found := false
-		for i, pod := range pods.Items {
-			if pod.Name == podName {
-				framework.Logf("Pod %s still exists", podName)
-				found = true
-				lastPod = &(pods.Items[i])
-				break
-			}
-		}
-		if !found {
-			framework.Logf("Pod %s no longer exists", podName)
-			return true, nil
-		}
-		return false, nil
-	})
-	if err == nil {
-		return nil
-	}
-	if IsTimeout(err) {
-		return TimeoutError(fmt.Sprintf("timed out while waiting for pod %s to disappear", podIdentifier(ns, podName)),
-			lastPod,
-		)
-	}
-	return maybeTimeoutError(err, "waiting for pod %s to disappear", podIdentifier(ns, podName))
-}
-
 // PodsResponding waits for the pods to response.
 func PodsResponding(ctx context.Context, c clientset.Interface, ns, name string, wantName bool, pods *v1.PodList) error {
 	ginkgo.By("trying to dial each unique pod")
diff --git a/test/e2e/framework/volume/fixtures.go b/test/e2e/framework/volume/fixtures.go
index a880465b8c2..d8b62b5b0dc 100644
--- a/test/e2e/framework/volume/fixtures.go
+++ b/test/e2e/framework/volume/fixtures.go
@@ -51,7 +51,6 @@ import (
 	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	clientexec "k8s.io/client-go/util/exec"
@@ -464,7 +463,7 @@ func runVolumeTesterPod(ctx context.Context, client clientset.Interface, timeout
 	}
 	if err != nil {
 		e2epod.DeletePodOrFail(ctx, client, clientPod.Namespace, clientPod.Name)
-		_ = e2epod.WaitForPodToDisappear(ctx, client, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)
+		_ = e2epod.WaitForPodNotFoundInNamespace(ctx, client, clientPod.Name, clientPod.Namespace, timeouts.PodDelete)
 		return nil, err
 	}
 	return clientPod, nil
@@ -542,7 +541,7 @@ func testVolumeClient(ctx context.Context, f *framework.Framework, config TestCo
 		// testVolumeClient might get used more than once per test, therefore
 		// we have to clean up before returning.
 		e2epod.DeletePodOrFail(ctx, f.ClientSet, clientPod.Namespace, clientPod.Name)
-		framework.ExpectNoError(e2epod.WaitForPodToDisappear(ctx, f.ClientSet, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete))
+		framework.ExpectNoError(e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, clientPod.Name, clientPod.Namespace, timeouts.PodDelete))
 	}()
 
 	testVolumeContent(f, clientPod, "", fsGroup, fsType, tests)
@@ -577,7 +576,7 @@ func InjectContent(ctx context.Context, f *framework.Framework, config TestConfi
 		// This pod must get deleted before the function returns becaue the test relies on
 		// the volume not being in use.
 		e2epod.DeletePodOrFail(ctx, f.ClientSet, injectorPod.Namespace, injectorPod.Name)
-		framework.ExpectNoError(e2epod.WaitForPodToDisappear(ctx, f.ClientSet, injectorPod.Namespace, injectorPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete))
+		framework.ExpectNoError(e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, injectorPod.Name, injectorPod.Namespace, timeouts.PodDelete))
 	}()
 
 	ginkgo.By("Writing text file contents in the container.")
diff --git a/test/e2e/kubectl/kubectl.go b/test/e2e/kubectl/kubectl.go
index 661f0f8234e..32a7fb7775a 100644
--- a/test/e2e/kubectl/kubectl.go
+++ b/test/e2e/kubectl/kubectl.go
@@ -734,7 +734,7 @@ metadata:
 			if !strings.Contains(ee.String(), "timed out") {
 				framework.Failf("Missing expected 'timed out' error, got: %#v", ee)
 			}
-			framework.ExpectNoError(e2epod.WaitForPodToDisappear(ctx, f.ClientSet, ns, "failure-3", labels.Everything(), 2*time.Second, 2*v1.DefaultTerminationGracePeriodSeconds*time.Second))
+			framework.ExpectNoError(e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, "failure-3", ns, 2*v1.DefaultTerminationGracePeriodSeconds*time.Second))
 		})
 
 		ginkgo.It("[Slow] running a failing command with --leave-stdin-open", func(ctx context.Context) {
diff --git a/test/e2e/storage/pd.go b/test/e2e/storage/pd.go
index da2ab5fc6a6..00fb5350a52 100644
--- a/test/e2e/storage/pd.go
+++ b/test/e2e/storage/pd.go
@@ -33,7 +33,6 @@ import (
 	policyv1 "k8s.io/api/policy/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -196,7 +195,7 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() {
 				ginkgo.By("deleting host0Pod") // delete this pod before creating next pod
 				framework.ExpectNoError(podClient.Delete(ctx, host0Pod.Name, podDelOpt), "Failed to delete host0Pod")
 				framework.Logf("deleted host0Pod %q", host0Pod.Name)
-				e2epod.WaitForPodToDisappear(ctx, cs, host0Pod.Namespace, host0Pod.Name, labels.Everything(), framework.Poll, f.Timeouts.PodDelete)
+				e2epod.WaitForPodNotFoundInNamespace(ctx, cs, host0Pod.Name, host0Pod.Namespace, f.Timeouts.PodDelete)
 				framework.Logf("deleted host0Pod %q disappeared", host0Pod.Name)
 			}
 
diff --git a/test/e2e/storage/testsuites/provisioning.go b/test/e2e/storage/testsuites/provisioning.go
index 42aa0b5cc74..87395546363 100644
--- a/test/e2e/storage/testsuites/provisioning.go
+++ b/test/e2e/storage/testsuites/provisioning.go
@@ -33,7 +33,6 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/client-go/dynamic"
 	clientset "k8s.io/client-go/kubernetes"
@@ -846,7 +845,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(ctx context.Co
 	framework.ExpectNoError(err)
 	ginkgo.DeferCleanup(func(ctx context.Context) error {
 		e2epod.DeletePodOrFail(ctx, t.Client, pod.Namespace, pod.Name)
-		return e2epod.WaitForPodToDisappear(ctx, t.Client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, t.Timeouts.PodDelete)
+		return e2epod.WaitForPodNotFoundInNamespace(ctx, t.Client, pod.Name, pod.Namespace, t.Timeouts.PodDelete)
 	})
 	if expectUnschedulable {
 		// Verify that no claims are provisioned.
diff --git a/test/e2e/storage/ubernetes_lite_volumes.go b/test/e2e/storage/ubernetes_lite_volumes.go
index ca8c8dffbbd..252c1015502 100644
--- a/test/e2e/storage/ubernetes_lite_volumes.go
+++ b/test/e2e/storage/ubernetes_lite_volumes.go
@@ -24,7 +24,6 @@ import (
 	"github.com/onsi/ginkgo/v2"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
@@ -101,7 +100,7 @@ func PodsUseStaticPVsOrFail(ctx context.Context, f *framework.Framework, podCoun
 		go func(config *staticPVTestConfig) {
 			defer ginkgo.GinkgoRecover()
 			defer wg.Done()
-			err := e2epod.WaitForPodToDisappear(ctx, c, ns, config.pod.Name, labels.Everything(), framework.Poll, f.Timeouts.PodDelete)
+			err := e2epod.WaitForPodNotFoundInNamespace(ctx, c, config.pod.Name, ns, f.Timeouts.PodDelete)
 			framework.ExpectNoError(err, "while waiting for pod to disappear")
 			errs := e2epv.PVPVCCleanup(ctx, c, ns, config.pv, config.pvc)
 			framework.ExpectNoError(utilerrors.NewAggregate(errs), "while cleaning up PVs and PVCs")
diff --git a/test/e2e/windows/density.go b/test/e2e/windows/density.go
index a5edea46b64..e0ab84ae10f 100644
--- a/test/e2e/windows/density.go
+++ b/test/e2e/windows/density.go
@@ -276,8 +276,7 @@ func deletePodsSync(ctx context.Context, f *framework.Framework, pods []*v1.Pod)
 			err := e2epod.NewPodClient(f).Delete(ctx, pod.ObjectMeta.Name, *metav1.NewDeleteOptions(30))
 			framework.ExpectNoError(err)
 
-			err = e2epod.WaitForPodToDisappear(ctx, f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),
-				30*time.Second, 10*time.Minute)
+			err = e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, pod.ObjectMeta.Name, f.Namespace.Name, 10*time.Minute)
 			framework.ExpectNoError(err)
 		}(pod)
 	}
diff --git a/test/e2e_node/node_problem_detector_linux.go b/test/e2e_node/node_problem_detector_linux.go
index 7bc9d9613bb..39d25e9765c 100644
--- a/test/e2e_node/node_problem_detector_linux.go
+++ b/test/e2e_node/node_problem_detector_linux.go
@@ -31,7 +31,6 @@ import (
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
-	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	clientset "k8s.io/client-go/kubernetes"
@@ -434,7 +433,7 @@ current-context: local-context
 		ginkgo.By("Delete the node problem detector")
 		framework.ExpectNoError(e2epod.NewPodClient(f).Delete(ctx, name, *metav1.NewDeleteOptions(0)))
 		ginkgo.By("Wait for the node problem detector to disappear")
-		gomega.Expect(e2epod.WaitForPodToDisappear(ctx, c, ns, name, labels.Everything(), pollInterval, pollTimeout)).To(gomega.Succeed())
+		gomega.Expect(e2epod.WaitForPodNotFoundInNamespace(ctx, c, name, ns, pollTimeout)).To(gomega.Succeed())
 		ginkgo.By("Delete the config map")
 		framework.ExpectNoError(c.CoreV1().ConfigMaps(ns).Delete(ctx, configName, metav1.DeleteOptions{}))
 		ginkgo.By("Clean up the events")
diff --git a/test/e2e_node/resource_collector.go b/test/e2e_node/resource_collector.go
index 97d37faafe9..b3123ab4ecc 100644
--- a/test/e2e_node/resource_collector.go
+++ b/test/e2e_node/resource_collector.go
@@ -39,7 +39,6 @@ import (
 	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -384,8 +383,7 @@ func deletePodsSync(ctx context.Context, f *framework.Framework, pods []*v1.Pod)
 				framework.Failf("Unexpected error trying to delete pod %s: %v", pod.Name, err)
 			}
 
-			gomega.Expect(e2epod.WaitForPodToDisappear(ctx, f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),
-				30*time.Second, 10*time.Minute)).NotTo(gomega.HaveOccurred())
+			framework.ExpectNoError(e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, pod.ObjectMeta.Name, f.Namespace.Name, 10*time.Minute))
 		}()
 	}
 	wg.Wait()
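
For reviewers unfamiliar with the two helpers, the sketch below illustrates the difference described in the commit message: each poll issues a single Get for the one pod and finishes once that returns NotFound, instead of listing every pod in the namespace and scanning for a matching name as the removed helper did. It is illustrative only; the helper name waitForPodGone and the fixed 2-second interval are placeholders, not the framework's actual WaitForPodNotFoundInNamespace implementation.

package example

import (
	"context"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForPodGone polls until a Get of the named pod returns NotFound.
// Each poll transfers at most one object instead of every pod in the
// namespace, which is what makes it cheaper than the removed
// list-and-scan approach.
func waitForPodGone(ctx context.Context, c clientset.Interface, podName, ns string, timeout time.Duration) error {
	return wait.PollImmediateWithContext(ctx, 2*time.Second, timeout, func(ctx context.Context) (bool, error) {
		_, err := c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return true, nil // pod is gone, stop waiting
		}
		if err != nil {
			return false, err // unexpected API error, give up
		}
		return false, nil // pod still exists, keep polling
	})
}

Callers in the patch simply pass the existing framework timeouts (for example f.Timeouts.PodDelete or wait.ForeverTestTimeout), which is why the per-call poll interval argument could be dropped.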