From d6c78f853a77f77cf74fe93a8407b425a27d35f0 Mon Sep 17 00:00:00 2001
From: Ed Bartosh
Date: Sat, 25 May 2024 00:16:27 +0300
Subject: [PATCH] e2e_node: add deferPodDeletion parameter

---
 test/e2e_node/dra_test.go | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/test/e2e_node/dra_test.go b/test/e2e_node/dra_test.go
index 4bf4d368c48..a41ae79b4a5 100644
--- a/test/e2e_node/dra_test.go
+++ b/test/e2e_node/dra_test.go
@@ -97,7 +97,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
 	ginkgo.It("must process pod created when kubelet is not running", func(ctx context.Context) {
 		// Stop Kubelet
 		startKubelet := stopKubelet()
-		pod := createTestObjects(ctx, f.ClientSet, getNodeName(ctx, f), f.Namespace.Name, "draclass", "external-claim", "drapod")
+		pod := createTestObjects(ctx, f.ClientSet, getNodeName(ctx, f), f.Namespace.Name, "draclass", "external-claim", "drapod", true)
 		// Pod must be in pending state
 		err := e2epod.WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, pod.Name, "Pending", framework.PodStartShortTimeout, func(pod *v1.Pod) (bool, error) {
 			return pod.Status.Phase == v1.PodPending, nil
@@ -113,7 +113,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
 	ginkgo.It("must keep pod in pending state if NodePrepareResources times out", func(ctx context.Context) {
 		ginkgo.By("set delay for the NodePrepareResources call")
 		kubeletPlugin.Block()
-		pod := createTestObjects(ctx, f.ClientSet, getNodeName(ctx, f), f.Namespace.Name, "draclass", "external-claim", "drapod")
+		pod := createTestObjects(ctx, f.ClientSet, getNodeName(ctx, f), f.Namespace.Name, "draclass", "external-claim", "drapod", true)
 
 		ginkgo.By("wait for pod to be in Pending state")
 		err := e2epod.WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, pod.Name, "Pending", framework.PodStartShortTimeout, func(pod *v1.Pod) (bool, error) {
@@ -170,7 +170,7 @@ func newKubeletPlugin(ctx context.Context, nodeName string) *testdriver.ExampleP
 // NOTE: as scheduler and controller manager are not running by the Node e2e,
 // the objects must contain all required data to be processed correctly by the API server
 // and placed on the node without involving the scheduler and the DRA controller
-func createTestObjects(ctx context.Context, clientSet kubernetes.Interface, nodename, namespace, className, claimName, podName string) *v1.Pod {
+func createTestObjects(ctx context.Context, clientSet kubernetes.Interface, nodename, namespace, className, claimName, podName string, deferPodDeletion bool) *v1.Pod {
 	// ResourceClass
 	class := &resourcev1alpha2.ResourceClass{
 		ObjectMeta: metav1.ObjectMeta{
@@ -231,7 +231,9 @@ func createTestObjects(ctx context.Context, clientSet kubernetes.Interface, node
 	createdPod, err := clientSet.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
 	framework.ExpectNoError(err)
 
-	ginkgo.DeferCleanup(clientSet.CoreV1().Pods(namespace).Delete, podName, metav1.DeleteOptions{})
+	if deferPodDeletion {
+		ginkgo.DeferCleanup(clientSet.CoreV1().Pods(namespace).Delete, podName, metav1.DeleteOptions{})
+	}
 
 	// Update claim status: set ReservedFor and AllocationResult
 	// NOTE: This is usually done by the DRA controller
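
Usage note (illustrative, not part of the patch): existing call sites pass true to keep the automatic cleanup that createTestObjects registers, while a test that wants to own the pod's lifecycle can pass false and delete the pod itself. The sketch below shows such a hypothetical caller; the test name, the wait condition, and the explicit Delete call are assumptions for illustration, and only createTestObjects with its new deferPodDeletion parameter comes from the patch.

// Hypothetical caller, sketched against the fixtures and helpers visible in
// dra_test.go (f, ctx, createTestObjects, e2epod, framework). Passing false
// as the new deferPodDeletion argument skips the deferred ginkgo cleanup,
// so the test deletes the pod explicitly.
ginkgo.It("hypothetical: must let the test delete the pod explicitly", func(ctx context.Context) {
	pod := createTestObjects(ctx, f.ClientSet, getNodeName(ctx, f), f.Namespace.Name, "draclass", "external-claim", "drapod", false)

	// Wait until the kubelet has at least admitted the pod; Pending or Running
	// is enough for this sketch.
	err := e2epod.WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, pod.Name, "Pending", framework.PodStartShortTimeout, func(pod *v1.Pod) (bool, error) {
		return pod.Status.Phase == v1.PodPending || pod.Status.Phase == v1.PodRunning, nil
	})
	framework.ExpectNoError(err)

	// The test owns the pod lifecycle: delete it here instead of relying on
	// the deferred cleanup that deferPodDeletion=true would have registered.
	err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, pod.Name, metav1.DeleteOptions{})
	framework.ExpectNoError(err)
})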