mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-18 08:09:58 +00:00)
e2e/framework/node: use gomega.Eventually to poll
commit c916f5e755 (parent f3f44f70bf)
@@ -22,6 +22,7 @@ import (
 	"regexp"
 	"time"
 
+	"github.com/onsi/gomega"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
@@ -165,20 +166,16 @@ func WaitForNodeSchedulable(ctx context.Context, c clientset.Interface, name str
 //
 // To ensure the node status is posted by a restarted kubelet process,
 // after should be retrieved by [GetNodeHeartbeatTime] while the kubelet is down.
-func WaitForNodeHeartbeatAfter(ctx context.Context, c clientset.Interface, name string, after metav1.Time, timeout time.Duration) bool {
+func WaitForNodeHeartbeatAfter(ctx context.Context, c clientset.Interface, name string, after metav1.Time, timeout time.Duration) {
 	framework.Logf("Waiting up to %v for node %s to send a heartbeat after %v", timeout, name, after)
-	for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
+	gomega.Eventually(ctx, func() (time.Time, error) {
 		node, err := c.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{})
 		if err != nil {
 			framework.Logf("Couldn't get node %s", name)
-			continue
+			return time.Time{}, err
 		}
-		if GetNodeHeartbeatTime(node).After(after.Time) {
-			return true
-		}
-	}
-	framework.Logf("Node %s didn't send a heartbeat after %v within %v", name, after, timeout)
-	return false
+		return GetNodeHeartbeatTime(node).Time, nil
+	}, timeout, poll).Should(gomega.BeTemporally(">", after.Time), "Node %s didn't send a heartbeat", name)
 }
 
 // CheckReady waits up to timeout for cluster to has desired size and
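The polling idiom adopted above, as a minimal self-contained sketch (a plain Go test, not part of this commit): gomega.Eventually re-invokes the polled function every interval until the matcher passes, a non-nil error marks that attempt as failed and is retried, and hitting the timeout fails the running test itself. That is why the helper could drop its bool return, and why the call site in the next hunk no longer checks one.

package example

import (
	"context"
	"testing"
	"time"

	"github.com/onsi/gomega"
)

// TestHeartbeatEventually demonstrates the idiom: Eventually re-invokes
// the function every interval (100ms here) until the matcher passes or
// the timeout (2s here) fails the test; the context can cancel the
// whole assertion early.
func TestHeartbeatEventually(t *testing.T) {
	g := gomega.NewWithT(t)
	after := time.Now()

	g.Eventually(context.Background(), func() (time.Time, error) {
		// Stand-in for fetching the node and returning
		// GetNodeHeartbeatTime(node).Time; a non-nil error would
		// mark this attempt as failed and Eventually would retry.
		return time.Now(), nil
	}, 2*time.Second, 100*time.Millisecond).
		Should(gomega.BeTemporally(">", after), "no heartbeat after %v", after)
}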
@@ -138,9 +138,7 @@ func KubeletCommand(ctx context.Context, kOp KubeletOpt, c clientset.Interface,
 
 	runCmd("start")
 	// Wait for next heartbeat, which must be sent by the new kubelet process.
-	if ok := e2enode.WaitForNodeHeartbeatAfter(ctx, c, pod.Spec.NodeName, heartbeatTime, NodeStateTimeout); !ok {
-		framework.Failf("Node %s failed to send a heartbeat after %v", pod.Spec.NodeName, heartbeatTime)
-	}
+	e2enode.WaitForNodeHeartbeatAfter(ctx, c, pod.Spec.NodeName, heartbeatTime, NodeStateTimeout)
 	// Then wait until Node with new process becomes Ready.
 	if ok := e2enode.WaitForNodeToBeReady(ctx, c, pod.Spec.NodeName, NodeStateTimeout); !ok {
 		framework.Failf("Node %s failed to enter Ready state", pod.Spec.NodeName)
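Taken together, the doc comment and this call site imply the caller-side contract sketched below, under stated assumptions: restartKubeletAndWait, stopKubelet, and startKubelet are hypothetical stand-ins for the test's own kubelet plumbing (the runCmd mechanism in KubeletCommand), and the 5-minute timeout is illustrative.

package example

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// Hypothetical helpers standing in for the test's kubelet control
// mechanism; not part of this commit.
func stopKubelet(nodeName string)  {}
func startKubelet(nodeName string) {}

func restartKubeletAndWait(ctx context.Context, c clientset.Interface, nodeName string) {
	stopKubelet(nodeName)

	// While the kubelet is down, record the last heartbeat the old
	// process posted; any later heartbeat must come from the new one.
	node, err := c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
	framework.ExpectNoError(err)
	heartbeat := e2enode.GetNodeHeartbeatTime(node)

	startKubelet(nodeName)

	// Fails the running spec via gomega if no newer heartbeat arrives.
	e2enode.WaitForNodeHeartbeatAfter(ctx, c, nodeName, heartbeat, 5*time.Minute)
}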