Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-20 01:01:22 +00:00)
e2e: podresources: wait for local node ready again
Let's wait for the local node (i.e. the kubelet) to be ready before querying podresources again, to avoid false negatives.

Co-authored-by: Artyom Lukianov <alukiano@redhat.com>
Signed-off-by: Francesco Romani <fromani@redhat.com>
parent 14105c09fb
commit bf9bab5bc6
@@ -796,9 +796,24 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
 	expectPodResources(1, cli, []podDesc{desc})
 
+	restartTime := time.Now()
 	ginkgo.By("Restarting Kubelet")
 	restartKubelet(true)
-	framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout)
+
+	// we need to wait for the node to be reported ready before we can safely query
+	// the podresources endpoint again. Otherwise we will have false negatives.
+	ginkgo.By("Wait for node to be ready")
+	gomega.Eventually(func() bool {
+		node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{})
+		framework.ExpectNoError(err)
+		for _, cond := range node.Status.Conditions {
+			if cond.Type == v1.NodeReady && cond.Status == v1.ConditionTrue && cond.LastHeartbeatTime.After(restartTime) {
+				return true
+			}
+		}
+		return false
+	}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())
+
 	expectPodResources(1, cli, []podDesc{desc})
 	tpd.deletePodsForTest(f)
 })
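For reference, here is a minimal standalone sketch of the same readiness check as a reusable client-go helper. This is not part of the commit; the package name and the nodeReadySince helper are hypothetical. The LastHeartbeatTime guard is the key detail: right after a kubelet restart, the node object may still carry a Ready=True condition recorded before the restart, so only a heartbeat newer than the restart time should be trusted.

	// Sketch only: a reusable form of the readiness check above.
	// "nodeReadySince" is a hypothetical helper name, not from the commit.
	package readycheck

	import (
		"context"
		"time"

		v1 "k8s.io/api/core/v1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/client-go/kubernetes"
	)

	// nodeReadySince reports whether nodeName has a Ready=True condition whose
	// last heartbeat is newer than "since" (e.g. the kubelet restart time), so
	// a stale Ready condition from before the restart is not mistaken for
	// recovery.
	func nodeReadySince(client kubernetes.Interface, nodeName string, since time.Time) (bool, error) {
		node, err := client.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		for _, cond := range node.Status.Conditions {
			// metav1.Time embeds time.Time, so After() is available directly.
			if cond.Type == v1.NodeReady && cond.Status == v1.ConditionTrue && cond.LastHeartbeatTime.After(since) {
				return true, nil
			}
		}
		return false, nil
	}

In the test above, this predicate is what the gomega.Eventually closure evaluates every framework.Poll interval, for up to five minutes, before the podresources endpoint is queried again.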