From 00b41334bf3f0c716c3a5080ffec0352d282db8a Mon Sep 17 00:00:00 2001
From: Francesco Romani
Date: Wed, 22 Feb 2023 13:50:31 +0100
Subject: [PATCH] e2e: node: podresources: fix restart wait

Fix the waiting logic in the e2e test loop to wait for resources to be
reported again, instead of relying on the restart timestamp.

The idea is that waiting for resource availability is the canonical way
clients should observe the desired state, and it should also be more
robust than comparing timestamps, especially in CI environments.

Signed-off-by: Francesco Romani
---
 test/e2e_node/podresources_test.go | 12 +-----------
 1 file changed, 1 insertion(+), 11 deletions(-)

diff --git a/test/e2e_node/podresources_test.go b/test/e2e_node/podresources_test.go
index 11fd14dc88e..cba77b8234a 100644
--- a/test/e2e_node/podresources_test.go
+++ b/test/e2e_node/podresources_test.go
@@ -806,23 +806,13 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
 
 			expectPodResources(ctx, 1, cli, []podDesc{desc})
 
-			restartTime := time.Now()
 			ginkgo.By("Restarting Kubelet")
 			restartKubelet(true)
 
 			// we need to wait for the node to be reported ready before we can safely query
 			// the podresources endpoint again. Otherwise we will have false negatives.
 			ginkgo.By("Wait for node to be ready")
-			gomega.Eventually(ctx, func() bool {
-				node, err := f.ClientSet.CoreV1().Nodes().Get(ctx, framework.TestContext.NodeName, metav1.GetOptions{})
-				framework.ExpectNoError(err)
-				for _, cond := range node.Status.Conditions {
-					if cond.Type == v1.NodeReady && cond.Status == v1.ConditionTrue && cond.LastHeartbeatTime.After(restartTime) {
-						return true
-					}
-				}
-				return false
-			}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())
+			waitForTopologyUnawareResources(ctx, f)
 
 			expectPodResources(ctx, 1, cli, []podDesc{desc})
 			tpd.deletePodsForTest(ctx, f)
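
Note on the replacement call: below is a minimal sketch of what a resource-availability wait could look like, following the pattern of the removed gomega.Eventually block. The function name waitForNodeResource, the resourceName parameter, and the 5-minute timeout are illustrative assumptions; the actual waitForTopologyUnawareResources helper in test/e2e_node may be implemented differently. It shows the approach the commit message describes: poll until the desired state (the resource being reported by the node) is observed, rather than comparing heartbeat timestamps against the restart time.

// Hypothetical sketch only; not the actual helper from test/e2e_node.
package e2enode

import (
	"context"
	"fmt"
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// waitForNodeResource polls the local node object until the given extended
// resource is reported as allocatable, or the timeout expires. Waiting for
// the resource to reappear observes the desired state directly instead of
// reasoning about kubelet restart timestamps.
func waitForNodeResource(ctx context.Context, f *framework.Framework, resourceName v1.ResourceName) {
	ginkgo.By(fmt.Sprintf("Waiting for resource %q to be reported by the node", resourceName))
	gomega.Eventually(ctx, func() bool {
		node, err := f.ClientSet.CoreV1().Nodes().Get(ctx, framework.TestContext.NodeName, metav1.GetOptions{})
		framework.ExpectNoError(err)
		qty, found := node.Status.Allocatable[resourceName]
		return found && qty.Value() > 0
	}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())
}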