e2e: node: podresources: fix restart wait

Fix the waiting logic in the e2e test loop to wait
for resources to be reported again, instead of relying
on timestamp comparisons. Waiting for resource availability
is the canonical way clients should observe the desired state,
and it is also more robust than comparing timestamps,
especially in CI environments.

Signed-off-by: Francesco Romani <fromani@redhat.com>
Francesco Romani 2023-02-22 13:50:31 +01:00
parent 92e00203e0
commit 00b41334bf
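
As a sketch of the pattern the commit message describes (illustrative only, not the literal change; `cli` is the test's podresources gRPC client, and the poll interval and timeout are assumptions):

    // Illustrative sketch: poll the podresources List endpoint until it
    // responds successfully again after the kubelet restart, instead of
    // drawing conclusions from node heartbeat timestamps.
    gomega.Eventually(ctx, func() bool {
    	resp, err := cli.List(ctx, &kubeletpodresourcesv1.ListPodResourcesRequest{})
    	return err == nil && len(resp.GetPodResources()) > 0
    }, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())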


@@ -806,23 +806,13 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:PodResources]
 			expectPodResources(ctx, 1, cli, []podDesc{desc})
-			restartTime := time.Now()
 			ginkgo.By("Restarting Kubelet")
 			restartKubelet(true)
-			// we need to wait for the node to be reported ready before we can safely query
-			// the podresources endpoint again. Otherwise we will have false negatives.
-			ginkgo.By("Wait for node to be ready")
-			gomega.Eventually(ctx, func() bool {
-				node, err := f.ClientSet.CoreV1().Nodes().Get(ctx, framework.TestContext.NodeName, metav1.GetOptions{})
-				framework.ExpectNoError(err)
-				for _, cond := range node.Status.Conditions {
-					if cond.Type == v1.NodeReady && cond.Status == v1.ConditionTrue && cond.LastHeartbeatTime.After(restartTime) {
-						return true
-					}
-				}
-				return false
-			}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())
+			waitForTopologyUnawareResources(ctx, f)
 			expectPodResources(ctx, 1, cli, []podDesc{desc})
 			tpd.deletePodsForTest(ctx, f)
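
The `waitForTopologyUnawareResources` helper is not shown in this hunk. A minimal sketch of what such a helper could look like, assuming the sample device plugin's resource name is available as `defaultTopologyUnawareResourceName` and is advertised in the node's allocatable list (the real helper may differ in details):

    // Minimal sketch, assuming the device plugin resource appears in the
    // node's allocatable list once the kubelet has fully recovered.
    func waitForTopologyUnawareResources(ctx context.Context, f *framework.Framework) {
    	ginkgo.By(fmt.Sprintf("Waiting for %q resources to be available on the local node", defaultTopologyUnawareResourceName))
    	gomega.Eventually(ctx, func() bool {
    		node, err := f.ClientSet.CoreV1().Nodes().Get(ctx, framework.TestContext.NodeName, metav1.GetOptions{})
    		if err != nil {
    			return false
    		}
    		qty, ok := node.Status.Allocatable[v1.ResourceName(defaultTopologyUnawareResourceName)]
    		return ok && !qty.IsZero()
    	}, 2*time.Minute, framework.Poll).Should(gomega.BeTrue())
    }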