From bf9bab5bc6521967812b6322b06f84b5e12f1497 Mon Sep 17 00:00:00 2001
From: Francesco Romani
Date: Tue, 9 Nov 2021 16:14:35 +0100
Subject: [PATCH] e2e: podresources: wait for local node ready again

Let's wait for the local node (aka the kubelet) to be ready before
querying podresources again, to avoid false negatives.

Co-authored-by: Artyom Lukianov
Signed-off-by: Francesco Romani
---
 test/e2e_node/podresources_test.go | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/test/e2e_node/podresources_test.go b/test/e2e_node/podresources_test.go
index d75acb18f7a..9a7188905ef 100644
--- a/test/e2e_node/podresources_test.go
+++ b/test/e2e_node/podresources_test.go
@@ -796,9 +796,24 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
 
 			expectPodResources(1, cli, []podDesc{desc})
 
+			restartTime := time.Now()
 			ginkgo.By("Restarting Kubelet")
 			restartKubelet(true)
-			framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout)
+
+			// we need to wait for the node to be reported ready before we can safely query
+			// the podresources endpoint again. Otherwise we will have false negatives.
+			ginkgo.By("Wait for node to be ready")
+			gomega.Eventually(func() bool {
+				node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{})
+				framework.ExpectNoError(err)
+				for _, cond := range node.Status.Conditions {
+					if cond.Type == v1.NodeReady && cond.Status == v1.ConditionTrue && cond.LastHeartbeatTime.After(restartTime) {
+						return true
+					}
+				}
+				return false
+			}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())
+
 			expectPodResources(1, cli, []podDesc{desc})
 			tpd.deletePodsForTest(f)
 		})