From c5fc4193bbe1699eba29321812ce814a7110eb88 Mon Sep 17 00:00:00 2001
From: Sotiris Salloumis
Date: Mon, 20 Jan 2025 17:26:04 +0100
Subject: [PATCH] Fix pod delete issues in podresize tests

---
 test/e2e/common/node/lifecycle_hook.go | 12 ++++++------
 test/e2e/common/node/pod_resize.go | 3 +--
 test/e2e/common/node/pods.go | 2 +-
 test/e2e/common/node/security_context.go | 4 ++--
 test/e2e/framework/pod/output/output.go | 2 +-
 test/e2e/kubectl/portforward.go | 2 +-
 test/e2e/network/conntrack.go | 8 ++++----
 test/e2e/network/loadbalancer.go | 4 ++--
 test/e2e_node/container_log_rotation_test.go | 4 ++--
 test/e2e_node/cpu_manager_test.go | 2 +-
 test/e2e_node/critical_pod_test.go | 8 ++++----
 test/e2e_node/device_manager_test.go | 6 +++---
 test/e2e_node/device_plugin_test.go | 14 +++++++-------
 test/e2e_node/garbage_collector_test.go | 2 +-
 test/e2e_node/hugepages_test.go | 2 +-
 test/e2e_node/image_gc_test.go | 4 ++--
 test/e2e_node/memory_manager_test.go | 6 +++---
 test/e2e_node/node_perf_test.go | 2 +-
 test/e2e_node/pod_hostnamefqdn_test.go | 2 +-
 test/e2e_node/podresources_test.go | 2 +-
 test/e2e_node/resource_collector.go | 8 +-------
 test/e2e_node/util.go | 2 +-
 22 files changed, 47 insertions(+), 54 deletions(-)

diff --git a/test/e2e/common/node/lifecycle_hook.go b/test/e2e/common/node/lifecycle_hook.go
index a6de625f36b..71443b18ca2 100644
--- a/test/e2e/common/node/lifecycle_hook.go
+++ b/test/e2e/common/node/lifecycle_hook.go
@@ -115,7 +115,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
 		}, postStartWaitTimeout, podCheckInterval).Should(gomega.BeNil())
 	}
 	ginkgo.By("delete the pod with lifecycle hook")
-	podClient.DeleteSync(ctx, podWithHook.Name, *metav1.NewDeleteOptions(15), e2epod.DefaultPodDeletionTimeout)
+	podClient.DeleteSync(ctx, podWithHook.Name, *metav1.NewDeleteOptions(15), f.Timeouts.PodDelete)
 	if podWithHook.Spec.Containers[0].Lifecycle.PreStop != nil {
 		ginkgo.By("check prestop hook")
 		if podWithHook.Spec.Containers[0].Lifecycle.PreStop.HTTPGet != nil {
@@ -333,7 +333,7 @@ var _ = SIGDescribe(nodefeature.SidecarContainers, feature.SidecarContainers, "R
 		}, postStartWaitTimeout, podCheckInterval).Should(gomega.BeNil())
 	}
 	ginkgo.By("delete the pod with lifecycle hook")
-	podClient.DeleteSync(ctx, podWithHook.Name, *metav1.NewDeleteOptions(15), e2epod.DefaultPodDeletionTimeout)
+	podClient.DeleteSync(ctx, podWithHook.Name, *metav1.NewDeleteOptions(15), f.Timeouts.PodDelete)
 	if podWithHook.Spec.InitContainers[0].Lifecycle.PreStop != nil {
 		ginkgo.By("check prestop hook")
 		if podWithHook.Spec.InitContainers[0].Lifecycle.PreStop.HTTPGet != nil {
@@ -571,7 +571,7 @@ var _ = SIGDescribe(feature.PodLifecycleSleepAction, func() {
 		podClient.CreateSync(ctx, podWithHook)
 		ginkgo.By("delete the pod with lifecycle hook using sleep action")
 		start := time.Now()
-		podClient.DeleteSync(ctx, podWithHook.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
+		podClient.DeleteSync(ctx, podWithHook.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 		cost := time.Since(start)
 		// cost should be
 		// longer than 5 seconds (pod should sleep for 5 seconds)
@@ -592,7 +592,7 @@ var _ = SIGDescribe(feature.PodLifecycleSleepAction, func() {
 		podClient.CreateSync(ctx, podWithHook)
 		ginkgo.By("delete the pod with lifecycle hook using sleep action")
 		start := time.Now()
-		podClient.DeleteSync(ctx, podWithHook.Name, *metav1.NewDeleteOptions(2), e2epod.DefaultPodDeletionTimeout)
+		podClient.DeleteSync(ctx, podWithHook.Name, *metav1.NewDeleteOptions(2), f.Timeouts.PodDelete)
 		cost := time.Since(start)
 		// cost should be
 		// longer than 2 seconds (we change gracePeriodSeconds to 2 seconds here, and it's less than sleep action)
@@ -618,7 +618,7 @@ var _ = SIGDescribe(feature.PodLifecycleSleepAction, func() {
 		framework.ExpectNoError(e2epod.WaitForContainerTerminated(ctx, f.ClientSet, f.Namespace.Name, p.Name, name, 3*time.Minute))
 		ginkgo.By("delete the pod with lifecycle hook using sleep action")
 		start := time.Now()
-		podClient.DeleteSync(ctx, podWithHook.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
+		podClient.DeleteSync(ctx, podWithHook.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 		cost := time.Since(start)
 		// cost should be
 		// shorter than sleep action (container is terminated and sleep action should be ignored)
@@ -650,7 +650,7 @@ var _ = SIGDescribe(feature.PodLifecycleSleepActionAllowZero, func() {
 		podClient.CreateSync(ctx, podWithHook)
 		ginkgo.By("delete the pod with lifecycle hook using sleep action with zero duration")
 		start := time.Now()
-		podClient.DeleteSync(ctx, podWithHook.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
+		podClient.DeleteSync(ctx, podWithHook.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 		cost := time.Since(start)
 		// cost should be
 		// longer than 0 seconds (pod shouldn't sleep and the handler should return immediately)
diff --git a/test/e2e/common/node/pod_resize.go b/test/e2e/common/node/pod_resize.go
index 0265626da00..6a6c8361535 100644
--- a/test/e2e/common/node/pod_resize.go
+++ b/test/e2e/common/node/pod_resize.go
@@ -1158,7 +1158,6 @@ func doPodResizeErrorTests() {
 	for idx := range tests {
 		tc := tests[idx]
 		f := framework.NewDefaultFramework("pod-resize-error-tests")
-		timeouts := f.Timeouts
 
 		ginkgo.It(tc.name, func(ctx context.Context) {
 			podClient := e2epod.NewPodClient(f)
@@ -1197,7 +1196,7 @@ func doPodResizeErrorTests() {
 			framework.ExpectNoError(e2epod.VerifyPodStatusResources(patchedPod, tc.expected))
 
 			ginkgo.By("deleting pod")
-			podClient.DeleteSync(ctx, newPod.Name, metav1.DeleteOptions{}, timeouts.PodDelete)
+			podClient.DeleteSync(ctx, newPod.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 		})
 	}
 }
diff --git a/test/e2e/common/node/pods.go b/test/e2e/common/node/pods.go
index 7df674b9b6d..3e59de4e28e 100644
--- a/test/e2e/common/node/pods.go
+++ b/test/e2e/common/node/pods.go
@@ -307,7 +307,7 @@ var _ = SIGDescribe("Pods", func() {
 		ginkgo.By("verifying pod deletion was observed")
 		deleted := false
 		var lastPod *v1.Pod
-		timer := time.After(e2epod.DefaultPodDeletionTimeout)
+		timer := time.After(f.Timeouts.PodDelete)
 		for !deleted {
 			select {
 			case event := <-w.ResultChan():
diff --git a/test/e2e/common/node/security_context.go b/test/e2e/common/node/security_context.go
index 9d0f021b808..57753d8e618 100644
--- a/test/e2e/common/node/security_context.go
+++ b/test/e2e/common/node/security_context.go
@@ -83,8 +83,8 @@ var _ = SIGDescribe("Security Context", func() {
 		createdPod2 := podClient.Create(ctx, makePod(false))
 		ginkgo.DeferCleanup(func(ctx context.Context) {
 			ginkgo.By("delete the pods")
-			podClient.DeleteSync(ctx, createdPod1.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
-			podClient.DeleteSync(ctx, createdPod2.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
+			podClient.DeleteSync(ctx, createdPod1.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
+			podClient.DeleteSync(ctx, createdPod2.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 		})
 		getLogs := func(pod *v1.Pod) (string, error) {
 			err := e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, f.ClientSet, createdPod1.Name, f.Namespace.Name, f.Timeouts.PodStart)
diff --git a/test/e2e/framework/pod/output/output.go b/test/e2e/framework/pod/output/output.go
index b649f99c1fc..fac00764444 100644
--- a/test/e2e/framework/pod/output/output.go
+++ b/test/e2e/framework/pod/output/output.go
@@ -176,7 +176,7 @@ func MatchMultipleContainerOutputs(
 	createdPod := podClient.Create(ctx, pod)
 	defer func() {
 		ginkgo.By("delete the pod")
-		podClient.DeleteSync(ctx, createdPod.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
+		podClient.DeleteSync(ctx, createdPod.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 	}()
 
 	// Wait for client pod to complete.
diff --git a/test/e2e/kubectl/portforward.go b/test/e2e/kubectl/portforward.go
index b3b9b8352fc..2c46c0c0f8a 100644
--- a/test/e2e/kubectl/portforward.go
+++ b/test/e2e/kubectl/portforward.go
@@ -606,7 +606,7 @@ var _ = SIGDescribe("Kubectl Port forwarding", func() {
 			ginkgo.By("Wait for client being interrupted")
 			select {
 			case err = <-errorChan:
-			case <-time.After(e2epod.DefaultPodDeletionTimeout):
+			case <-time.After(f.Timeouts.PodDelete):
 			}
 
 			ginkgo.By("Check the client error")
diff --git a/test/e2e/network/conntrack.go b/test/e2e/network/conntrack.go
index 9d2f036225f..1cfeaba9d54 100644
--- a/test/e2e/network/conntrack.go
+++ b/test/e2e/network/conntrack.go
@@ -197,7 +197,7 @@ var _ = common.SIGDescribe("Conntrack", func() {
 
 		// and delete the first pod
 		framework.Logf("Cleaning up %s pod", podBackend1)
-		e2epod.NewPodClient(fr).DeleteSync(ctx, podBackend1, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
+		e2epod.NewPodClient(fr).DeleteSync(ctx, podBackend1, metav1.DeleteOptions{}, fr.Timeouts.PodDelete)
 
 		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podBackend2: {80}})
 
@@ -273,7 +273,7 @@ var _ = common.SIGDescribe("Conntrack", func() {
 
 		// and delete the first pod
 		framework.Logf("Cleaning up %s pod", podBackend1)
-		e2epod.NewPodClient(fr).DeleteSync(ctx, podBackend1, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
+		e2epod.NewPodClient(fr).DeleteSync(ctx, podBackend1, metav1.DeleteOptions{}, fr.Timeouts.PodDelete)
 
 		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podBackend2: {80}})
 
@@ -358,7 +358,7 @@ var _ = common.SIGDescribe("Conntrack", func() {
 
 		// Now recreate the first backend pod
 		framework.Logf("Cleaning up %s pod", podBackend1)
-		e2epod.NewPodClient(fr).DeleteSync(ctx, podBackend1, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
+		e2epod.NewPodClient(fr).DeleteSync(ctx, podBackend1, metav1.DeleteOptions{}, fr.Timeouts.PodDelete)
 
 		ginkgo.By("Waiting for DaemonSet pods to become ready")
 		err = wait.PollUntilContextTimeout(ctx, framework.Poll, framework.PodStartTimeout, false, func(ctx context.Context) (bool, error) {
@@ -445,7 +445,7 @@ var _ = common.SIGDescribe("Conntrack", func() {
 
 		// and delete the first pod
 		framework.Logf("Cleaning up %s pod", podBackend1)
-		e2epod.NewPodClient(fr).DeleteSync(ctx, podBackend1, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
+		e2epod.NewPodClient(fr).DeleteSync(ctx, podBackend1, metav1.DeleteOptions{}, fr.Timeouts.PodDelete)
 
 		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podBackend2: {80}})
 
diff --git a/test/e2e/network/loadbalancer.go b/test/e2e/network/loadbalancer.go
index 5612a4b89d4..3a9af712cab 100644
--- a/test/e2e/network/loadbalancer.go
+++ b/test/e2e/network/loadbalancer.go
@@ -820,7 +820,7 @@ var _ = common.SIGDescribe("LoadBalancers", feature.LoadBalancer, func() {
 
 		// and delete the first pod
 		framework.Logf("Cleaning up %s pod", podBackend1)
-		e2epod.NewPodClient(f).DeleteSync(ctx, podBackend1, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
+		e2epod.NewPodClient(f).DeleteSync(ctx, podBackend1, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 
 		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podBackend2: {80}})
 
@@ -952,7 +952,7 @@ var _ = common.SIGDescribe("LoadBalancers", feature.LoadBalancer, func() {
 
 		// and delete the first pod
 		framework.Logf("Cleaning up %s pod", podBackend1)
-		e2epod.NewPodClient(f).DeleteSync(ctx, podBackend1, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
+		e2epod.NewPodClient(f).DeleteSync(ctx, podBackend1, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 
 		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podBackend2: {80}})
 
diff --git a/test/e2e_node/container_log_rotation_test.go b/test/e2e_node/container_log_rotation_test.go
index d80cc26fae0..75aece36043 100644
--- a/test/e2e_node/container_log_rotation_test.go
+++ b/test/e2e_node/container_log_rotation_test.go
@@ -76,7 +76,7 @@ var _ = SIGDescribe("ContainerLogRotation", framework.WithSlow(), framework.With
 			},
 		}
 		logRotationPod = e2epod.NewPodClient(f).CreateSync(ctx, pod)
-		ginkgo.DeferCleanup(e2epod.NewPodClient(f).DeleteSync, logRotationPod.Name, metav1.DeleteOptions{}, time.Minute)
+		ginkgo.DeferCleanup(e2epod.NewPodClient(f).DeleteSync, logRotationPod.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 	})
 
 	ginkgo.It("should be rotated and limited to a fixed amount of files", func(ctx context.Context) {
@@ -146,7 +146,7 @@ var _ = SIGDescribe("ContainerLogRotationWithMultipleWorkers", framework.WithSlo
 			}
 			logRotationPod := e2epod.NewPodClient(f).CreateSync(ctx, pod)
 			logRotationPods = append(logRotationPods, logRotationPod)
-			ginkgo.DeferCleanup(e2epod.NewPodClient(f).DeleteSync, logRotationPod.Name, metav1.DeleteOptions{}, time.Minute)
+			ginkgo.DeferCleanup(e2epod.NewPodClient(f).DeleteSync, logRotationPod.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 		}
 	})
 
diff --git a/test/e2e_node/cpu_manager_test.go b/test/e2e_node/cpu_manager_test.go
index 7152782552f..9b9d3e07642 100644
--- a/test/e2e_node/cpu_manager_test.go
+++ b/test/e2e_node/cpu_manager_test.go
@@ -148,7 +148,7 @@ func deletePodSyncByName(ctx context.Context, f *framework.Framework, podName st
 	delOpts := metav1.DeleteOptions{
 		GracePeriodSeconds: &gp,
 	}
-	e2epod.NewPodClient(f).DeleteSync(ctx, podName, delOpts, e2epod.DefaultPodDeletionTimeout)
+	e2epod.NewPodClient(f).DeleteSync(ctx, podName, delOpts, f.Timeouts.PodDelete)
 }
 
 func deletePods(ctx context.Context, f *framework.Framework, podNames []string) {
diff --git a/test/e2e_node/critical_pod_test.go b/test/e2e_node/critical_pod_test.go
index ef53228713b..5daee4a4720 100644
--- a/test/e2e_node/critical_pod_test.go
+++ b/test/e2e_node/critical_pod_test.go
@@ -136,10 +136,10 @@ var _ = SIGDescribe("CriticalPod", framework.WithSerial(), framework.WithDisrupt
 	})
 	ginkgo.AfterEach(func(ctx context.Context) {
 		// Delete Pods
-		e2epod.NewPodClient(f).DeleteSync(ctx, guaranteedPodName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
-		e2epod.NewPodClient(f).DeleteSync(ctx, burstablePodName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
-		e2epod.NewPodClient(f).DeleteSync(ctx, bestEffortPodName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
-		e2epod.PodClientNS(f, kubeapi.NamespaceSystem).DeleteSync(ctx, criticalPodName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
+		e2epod.NewPodClient(f).DeleteSync(ctx, guaranteedPodName, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
+		e2epod.NewPodClient(f).DeleteSync(ctx, burstablePodName, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
+		e2epod.NewPodClient(f).DeleteSync(ctx, bestEffortPodName, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
+		e2epod.PodClientNS(f, kubeapi.NamespaceSystem).DeleteSync(ctx, criticalPodName, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 		// Log Events
 		logPodEvents(ctx, f)
 		logNodeEvents(ctx, f)
diff --git a/test/e2e_node/device_manager_test.go b/test/e2e_node/device_manager_test.go
index e96bc04bf0a..558870fe305 100644
--- a/test/e2e_node/device_manager_test.go
+++ b/test/e2e_node/device_manager_test.go
@@ -256,12 +256,12 @@ var _ = SIGDescribe("Device Manager", framework.WithSerial(), nodefeature.Device
 			"the pod succeeded to start, when it should fail with the admission error")
 
 		ginkgo.By("removing application pods")
-		e2epod.NewPodClient(f).DeleteSync(ctx, testPod.Name, metav1.DeleteOptions{}, 2*time.Minute)
+		e2epod.NewPodClient(f).DeleteSync(ctx, testPod.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 	})
 
 	ginkgo.AfterEach(func(ctx context.Context) {
 		ginkgo.By("Deleting the device plugin pod")
-		e2epod.NewPodClient(f).DeleteSync(ctx, devicePluginPod.Name, metav1.DeleteOptions{}, time.Minute)
+		e2epod.NewPodClient(f).DeleteSync(ctx, devicePluginPod.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 
 		ginkgo.By("Deleting the directory and file setup for controlling registration")
 		err := os.RemoveAll(triggerPathDir)
@@ -276,7 +276,7 @@ var _ = SIGDescribe("Device Manager", framework.WithSerial(), nodefeature.Device
 			}
 
 			framework.Logf("Deleting pod: %s", p.Name)
-			e2epod.NewPodClient(f).DeleteSync(ctx, p.Name, metav1.DeleteOptions{}, 2*time.Minute)
+			e2epod.NewPodClient(f).DeleteSync(ctx, p.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 		}
 
 		ginkgo.By("Waiting for devices to become unavailable on the local node")
diff --git a/test/e2e_node/device_plugin_test.go b/test/e2e_node/device_plugin_test.go
index 985a686d151..b19af3a8e65 100644
--- a/test/e2e_node/device_plugin_test.go
+++ b/test/e2e_node/device_plugin_test.go
@@ -197,7 +197,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 
 		ginkgo.AfterEach(func(ctx context.Context) {
 			ginkgo.By("Deleting the device plugin pod")
-			e2epod.NewPodClient(f).DeleteSync(ctx, devicePluginPod.Name, metav1.DeleteOptions{}, time.Minute)
+			e2epod.NewPodClient(f).DeleteSync(ctx, devicePluginPod.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 
 			ginkgo.By("Deleting any Pods created by the test")
 			l, err := e2epod.NewPodClient(f).List(ctx, metav1.ListOptions{})
@@ -208,7 +208,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 				}
 
 				framework.Logf("Deleting pod: %s", p.Name)
-				e2epod.NewPodClient(f).DeleteSync(ctx, p.Name, metav1.DeleteOptions{}, 2*time.Minute)
+				e2epod.NewPodClient(f).DeleteSync(ctx, p.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 			}
 
 			restartKubelet(ctx, true)
@@ -492,7 +492,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 			gomega.Expect(e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace)).To(gomega.Succeed())
 
 			ginkgo.By("Deleting the device plugin")
-			e2epod.NewPodClient(f).DeleteSync(ctx, devicePluginPod.Name, metav1.DeleteOptions{}, time.Minute)
+			e2epod.NewPodClient(f).DeleteSync(ctx, devicePluginPod.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 			waitForContainerRemoval(ctx, devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace)
 
 			gomega.Eventually(getNodeResourceValues, devicePluginGracefulTimeout, f.Timeouts.Poll).WithContext(ctx).WithArguments(SampleDeviceResourceName).Should(gomega.Equal(ResourceValue{Allocatable: 0, Capacity: int(expectedSampleDevsAmount)}))
@@ -539,7 +539,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 			deleteOptions := metav1.DeleteOptions{
 				GracePeriodSeconds: &gp,
 			}
-			e2epod.NewPodClient(f).DeleteSync(ctx, devicePluginPod.Name, deleteOptions, time.Minute)
+			e2epod.NewPodClient(f).DeleteSync(ctx, devicePluginPod.Name, deleteOptions, f.Timeouts.PodDelete)
 			waitForContainerRemoval(ctx, devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace)
 
 			ginkgo.By("Recreating the plugin pod")
@@ -616,7 +616,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 			deleteOptions := metav1.DeleteOptions{
 				GracePeriodSeconds: &gp,
 			}
-			e2epod.NewPodClient(f).DeleteSync(ctx, devicePluginPod.Name, deleteOptions, time.Minute)
+			e2epod.NewPodClient(f).DeleteSync(ctx, devicePluginPod.Name, deleteOptions, f.Timeouts.PodDelete)
 			waitForContainerRemoval(ctx, devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace)
 
 			ginkgo.By("Recreating the plugin pod")
@@ -901,7 +901,7 @@ func testDevicePluginNodeReboot(f *framework.Framework, pluginSockDir string) {
 
 		ginkgo.AfterEach(func(ctx context.Context) {
 			ginkgo.By("Deleting the device plugin pod")
-			e2epod.NewPodClient(f).DeleteSync(ctx, devicePluginPod.Name, metav1.DeleteOptions{}, time.Minute)
+			e2epod.NewPodClient(f).DeleteSync(ctx, devicePluginPod.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 
 			ginkgo.By("Deleting any Pods created by the test")
 			l, err := e2epod.NewPodClient(f).List(ctx, metav1.ListOptions{})
@@ -915,7 +915,7 @@ func testDevicePluginNodeReboot(f *framework.Framework, pluginSockDir string) {
 				e2epod.NewPodClient(f).RemoveFinalizer(context.TODO(), p.Name, testFinalizer)
 
 				framework.Logf("Deleting pod: %s", p.Name)
-				e2epod.NewPodClient(f).DeleteSync(ctx, p.Name, metav1.DeleteOptions{}, 2*time.Minute)
+				e2epod.NewPodClient(f).DeleteSync(ctx, p.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 			}
 
 			err = os.Remove(triggerPathDir)
diff --git a/test/e2e_node/garbage_collector_test.go b/test/e2e_node/garbage_collector_test.go
index 4c37332af62..f3b47c784a8 100644
--- a/test/e2e_node/garbage_collector_test.go
+++ b/test/e2e_node/garbage_collector_test.go
@@ -251,7 +251,7 @@ func containerGCTest(f *framework.Framework, test testRun) {
 	ginkgo.AfterEach(func(ctx context.Context) {
 		for _, pod := range test.testPods {
 			ginkgo.By(fmt.Sprintf("Deleting Pod %v", pod.podName))
-			e2epod.NewPodClient(f).DeleteSync(ctx, pod.podName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
+			e2epod.NewPodClient(f).DeleteSync(ctx, pod.podName, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 		}
 
 		ginkgo.By("Making sure all containers get cleaned up")
diff --git a/test/e2e_node/hugepages_test.go b/test/e2e_node/hugepages_test.go
index c45cef94436..4330f026cd1 100644
--- a/test/e2e_node/hugepages_test.go
+++ b/test/e2e_node/hugepages_test.go
@@ -365,7 +365,7 @@ var _ = SIGDescribe("HugePages", framework.WithSerial(), feature.HugePages, func
 	// we should use JustAfterEach because framework will teardown the client under the AfterEach method
 	ginkgo.JustAfterEach(func(ctx context.Context) {
 		ginkgo.By(fmt.Sprintf("deleting test pod %s", testpod.Name))
-		e2epod.NewPodClient(f).DeleteSync(ctx, testpod.Name, metav1.DeleteOptions{}, 2*time.Minute)
+		e2epod.NewPodClient(f).DeleteSync(ctx, testpod.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 
 		releaseHugepages(ctx)
 
diff --git a/test/e2e_node/image_gc_test.go b/test/e2e_node/image_gc_test.go
index a6e23388b16..a4d7145a0ac 100644
--- a/test/e2e_node/image_gc_test.go
+++ b/test/e2e_node/image_gc_test.go
@@ -74,7 +74,7 @@ var _ = SIGDescribe("ImageGarbageCollect", framework.WithSerial(), nodefeature.G
 		allImages, err := is.ListImages(context.Background(), &runtimeapi.ImageFilter{})
 		framework.ExpectNoError(err)
 
-		e2epod.NewPodClient(f).DeleteSync(ctx, pod.ObjectMeta.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
+		e2epod.NewPodClient(f).DeleteSync(ctx, pod.ObjectMeta.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 
 		// Even though the image gc max timing is less, we are bound by the kubelet's
 		// ImageGCPeriod, which is hardcoded to 5 minutes.
@@ -94,7 +94,7 @@ var _ = SIGDescribe("ImageGarbageCollect", framework.WithSerial(), nodefeature.G
 		allImages, err := is.ListImages(context.Background(), &runtimeapi.ImageFilter{})
 		framework.ExpectNoError(err)
 
-		e2epod.NewPodClient(f).DeleteSync(ctx, pod.ObjectMeta.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
+		e2epod.NewPodClient(f).DeleteSync(ctx, pod.ObjectMeta.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 
 		restartKubelet(ctx, true)
 
diff --git a/test/e2e_node/memory_manager_test.go b/test/e2e_node/memory_manager_test.go
index b69b08d2446..1ff4688d59c 100644
--- a/test/e2e_node/memory_manager_test.go
+++ b/test/e2e_node/memory_manager_test.go
@@ -350,7 +350,7 @@ var _ = SIGDescribe("Memory Manager", framework.WithDisruptive(), framework.With
 	ginkgo.JustAfterEach(func(ctx context.Context) {
 		// delete the test pod
 		if testPod != nil && testPod.Name != "" {
-			e2epod.NewPodClient(f).DeleteSync(ctx, testPod.Name, metav1.DeleteOptions{}, 2*time.Minute)
+			e2epod.NewPodClient(f).DeleteSync(ctx, testPod.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 		}
 
 		// release hugepages
@@ -555,7 +555,7 @@ var _ = SIGDescribe("Memory Manager", framework.WithDisruptive(), framework.With
 			ginkgo.JustAfterEach(func(ctx context.Context) {
 				// delete the test pod 2
 				if testPod2.Name != "" {
-					e2epod.NewPodClient(f).DeleteSync(ctx, testPod2.Name, metav1.DeleteOptions{}, 2*time.Minute)
+					e2epod.NewPodClient(f).DeleteSync(ctx, testPod2.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 				}
 			})
 		})
@@ -634,7 +634,7 @@ var _ = SIGDescribe("Memory Manager", framework.WithDisruptive(), framework.With
 			ginkgo.JustAfterEach(func(ctx context.Context) {
 				for _, workloadPod := range workloadPods {
 					if workloadPod.Name != "" {
-						e2epod.NewPodClient(f).DeleteSync(ctx, workloadPod.Name, metav1.DeleteOptions{}, 2*time.Minute)
+						e2epod.NewPodClient(f).DeleteSync(ctx, workloadPod.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 					}
 				}
 			})
diff --git a/test/e2e_node/node_perf_test.go b/test/e2e_node/node_perf_test.go
index 08a02b4ff3d..d2a5cab49c5 100644
--- a/test/e2e_node/node_perf_test.go
+++ b/test/e2e_node/node_perf_test.go
@@ -94,7 +94,7 @@ var _ = SIGDescribe("Node Performance Testing", framework.WithSerial(), framewor
 		delOpts := metav1.DeleteOptions{
 			GracePeriodSeconds: &gp,
 		}
-		e2epod.NewPodClient(f).DeleteSync(ctx, pod.Name, delOpts, e2epod.DefaultPodDeletionTimeout)
+		e2epod.NewPodClient(f).DeleteSync(ctx, pod.Name, delOpts, f.Timeouts.PodDelete)
 
 		// We are going to give some more time for the CPU manager to do any clean
 		// up it needs to do now that the pod has been deleted. Otherwise we may
diff --git a/test/e2e_node/pod_hostnamefqdn_test.go b/test/e2e_node/pod_hostnamefqdn_test.go
index 4d6c1dbd247..51b9bf51bfc 100644
--- a/test/e2e_node/pod_hostnamefqdn_test.go
+++ b/test/e2e_node/pod_hostnamefqdn_test.go
@@ -175,7 +175,7 @@ var _ = SIGDescribe("Hostname of Pod", framework.WithNodeConformance(), func() {
 		// Create Pod
 		launchedPod := e2epod.NewPodClient(f).Create(ctx, pod)
 		// Ensure we delete pod
-		ginkgo.DeferCleanup(e2epod.NewPodClient(f).DeleteSync, launchedPod.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
+		ginkgo.DeferCleanup(e2epod.NewPodClient(f).DeleteSync, launchedPod.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 
 		// Pod should remain in the pending state generating events with reason FailedCreatePodSandBox
 		// Expected Message Error Event
diff --git a/test/e2e_node/podresources_test.go b/test/e2e_node/podresources_test.go
index f61ae71b4b9..63a37b1b1be 100644
--- a/test/e2e_node/podresources_test.go
+++ b/test/e2e_node/podresources_test.go
@@ -1034,7 +1034,7 @@ var _ = SIGDescribe("POD Resources", framework.WithSerial(), feature.PodResource
 			pod := makePodResourcesTestPod(pd)
 			pod.Spec.Containers[0].Command = []string{"sh", "-c", "/bin/true"}
 			pod = e2epod.NewPodClient(f).Create(ctx, pod)
-			defer e2epod.NewPodClient(f).DeleteSync(ctx, pod.Name, metav1.DeleteOptions{}, time.Minute)
+			defer e2epod.NewPodClient(f).DeleteSync(ctx, pod.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 			err := e2epod.WaitForPodCondition(ctx, f.ClientSet, pod.Namespace, pod.Name, "Pod Succeeded", time.Minute*2, testutils.PodSucceeded)
 			framework.ExpectNoError(err)
 			endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
diff --git a/test/e2e_node/resource_collector.go b/test/e2e_node/resource_collector.go
index f7576277a75..bf38c7ec137 100644
--- a/test/e2e_node/resource_collector.go
+++ b/test/e2e_node/resource_collector.go
@@ -34,7 +34,6 @@ import (
 	cadvisorapiv2 "github.com/google/cadvisor/info/v2"
 	"github.com/opencontainers/runc/libcontainer/cgroups"
 	v1 "k8s.io/api/core/v1"
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -373,12 +372,7 @@ func deletePodsSync(ctx context.Context, f *framework.Framework, pods []*v1.Pod)
 			defer ginkgo.GinkgoRecover()
 			defer wg.Done()
 
-			err := e2epod.NewPodClient(f).Delete(ctx, pod.ObjectMeta.Name, *metav1.NewDeleteOptions(30))
-			if apierrors.IsNotFound(err) {
-				framework.Failf("Unexpected error trying to delete pod %s: %v", pod.Name, err)
-			}
-
-			framework.ExpectNoError(e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, pod.ObjectMeta.Name, f.Namespace.Name, 10*time.Minute))
+			e2epod.NewPodClient(f).DeleteSync(ctx, pod.ObjectMeta.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 		}()
 	}
 	wg.Wait()
diff --git a/test/e2e_node/util.go b/test/e2e_node/util.go
index ff91b76249b..d6d37fc650d 100644
--- a/test/e2e_node/util.go
+++ b/test/e2e_node/util.go
@@ -181,7 +181,7 @@ func addAfterEachForCleaningUpPods(f *framework.Framework) {
 				continue
 			}
 			framework.Logf("Deleting pod: %s", p.Name)
-			e2epod.NewPodClient(f).DeleteSync(ctx, p.Name, metav1.DeleteOptions{}, 2*time.Minute)
+			e2epod.NewPodClient(f).DeleteSync(ctx, p.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 		}
 	})
 }