e2e: podresources: wait for local node ready again

Let's wait for the local node (aka the kubelet)
to be ready before querying podresources again,
to avoid false negatives.

Co-authored-by: Artyom Lukianov <alukiano@redhat.com>
Signed-off-by: Francesco Romani <fromani@redhat.com>
Francesco Romani 2021-11-09 16:14:35 +01:00
parent 14105c09fb
commit bf9bab5bc6

@@ -796,9 +796,24 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:PodResources]", func() {
 	expectPodResources(1, cli, []podDesc{desc})
 	restartTime := time.Now()
 	ginkgo.By("Restarting Kubelet")
 	restartKubelet(true)
-	framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout)
+	// we need to wait for the node to be reported ready before we can safely query
+	// the podresources endpoint again. Otherwise we will have false negatives.
+	ginkgo.By("Wait for node to be ready")
+	gomega.Eventually(func() bool {
+		node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{})
+		framework.ExpectNoError(err)
+		for _, cond := range node.Status.Conditions {
+			if cond.Type == v1.NodeReady && cond.Status == v1.ConditionTrue && cond.LastHeartbeatTime.After(restartTime) {
+				return true
+			}
+		}
+		return false
+	}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())
 	expectPodResources(1, cli, []podDesc{desc})
 	tpd.deletePodsForTest(f)
 })
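
For context, the heartbeat comparison above (cond.LastHeartbeatTime.After(restartTime)) guards against reading a Ready condition cached from before the kubelet restart. Once the node is ready, the test queries the kubelet's podresources gRPC endpoint again via cli. Below is a minimal standalone sketch of that kind of query; it is not part of this commit, and the unix socket path and plain-text credentials are assumptions based on conventional kubelet defaults.

// Minimal sketch: list pod resources from the kubelet's podresources endpoint.
package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Assumed default socket path; a kubelet may be configured differently.
	conn, err := grpc.DialContext(ctx,
		"unix:///var/lib/kubelet/pod-resources/kubelet.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	cli := podresourcesapi.NewPodResourcesListerClient(conn)
	resp, err := cli.List(ctx, &podresourcesapi.ListPodResourcesRequest{})
	if err != nil {
		panic(err)
	}
	for _, pr := range resp.GetPodResources() {
		fmt.Printf("%s/%s: %d container(s)\n", pr.GetNamespace(), pr.GetName(), len(pr.GetContainers()))
	}
}

If the query races with a kubelet that is still coming back up, List fails or returns incomplete data, which is exactly the false negative the readiness wait in this commit avoids.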