From 38c56883f1c2e9830b259a361a70ae16d327c5a2 Mon Sep 17 00:00:00 2001
From: Ed Bartosh
Date: Tue, 25 May 2021 16:28:31 +0300
Subject: [PATCH] e2e: hugepages: delete test pod after the test

Current test assumes that test pod is deleted when the test namespace
is deleted. However, namespace deletion is an asynchronous operation.
The pod may still be running and allocating hugepages resources when
next test case creates another pod that requests the same hugepages
resources.

This can cause kubelet to fail the test pod with this kind of error:
  OutOfhugepages-2Mi: Node didn't have enough resource: hugepages-2Mi,
  requested: 6291456, used: 6291456, capacity: 10485760

Explicitly deleting test pod should fix this issue.
---
 test/e2e_node/hugepages_test.go | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/test/e2e_node/hugepages_test.go b/test/e2e_node/hugepages_test.go
index d3c54c457dd..e62376d8c38 100644
--- a/test/e2e_node/hugepages_test.go
+++ b/test/e2e_node/hugepages_test.go
@@ -352,12 +352,15 @@ var _ = SIGDescribe("HugePages [Serial] [Feature:HugePages][NodeSpecialFeature:H
 		pod := getHugepagesTestPod(f, limits, mounts, volumes)
 
-		ginkgo.By("by running a guarantee pod that requests hugepages")
+		ginkgo.By("by running a test pod that requests hugepages")
 		testpod = f.PodClient().CreateSync(pod)
 	})
 
 	// we should use JustAfterEach because framework will teardown the client under the AfterEach method
 	ginkgo.JustAfterEach(func() {
+		ginkgo.By(fmt.Sprintf("deleting test pod %s", testpod.Name))
+		f.PodClient().DeleteSync(testpod.Name, metav1.DeleteOptions{}, 2*time.Minute)
+
 		releaseHugepages()
 
 		ginkgo.By("restarting kubelet to pick up pre-allocated hugepages")