Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-28 05:57:25 +00:00
Merge pull request #120406 from wlq1212/cheanup/framework/timeout
e2e_framework: stop using deprecated wait.ErrWaitTimeout
This commit is contained in:
commit cc0a24d2e8
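The replacement is mechanical: every comparison against the deprecated wait.ErrWaitTimeout sentinel becomes a call to wait.Interrupted, which also recognizes the context cancellation and deadline errors returned by the newer context-based pollers. A minimal, self-contained sketch of the pattern (the condition function and durations below are placeholders, not code from this change):

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx := context.Background()

	// A condition that never succeeds, so the poll below always times out.
	// It stands in for the e2e framework's real condition functions.
	condition := func(ctx context.Context) (bool, error) {
		return false, nil
	}

	err := wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, time.Second, true, condition)

	// Old check: matches only the deprecated sentinel returned by the legacy
	// wait.Poll* helpers, not the context errors returned by the newer ones.
	if err == wait.ErrWaitTimeout { // deprecated comparison, shown for contrast
		fmt.Println("timed out (legacy sentinel)")
	}

	// New check, applied throughout this change: true for the legacy sentinel,
	// context.DeadlineExceeded, and context.Canceled.
	if wait.Interrupted(err) {
		fmt.Println("timed out (wait.Interrupted)")
	}
}

With the context-based poller above, only the wait.Interrupted branch fires, which is why the sentinel comparisons in the hunks below are being replaced.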
@@ -27,10 +27,11 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
-	admissionapi "k8s.io/pod-security-admission/api"
 
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	testutils "k8s.io/kubernetes/test/utils"
+	admissionapi "k8s.io/pod-security-admission/api"
+
 	"github.com/google/go-cmp/cmp"
 	"github.com/onsi/ginkgo/v2"
@@ -170,7 +171,7 @@ var _ = SIGDescribe("NodeLease", func() {
 				return false, fmt.Errorf("node status heartbeat changed in %s (with no other status changes), was waiting for %s", currentHeartbeatTime.Sub(lastHeartbeatTime), leaseDuration)
 			})
 			// a timeout is acceptable, since it means we waited 5 minutes and didn't see any unwarranted node status updates
-			if err != nil && err != wait.ErrWaitTimeout {
+			if !wait.Interrupted(err) {
 				framework.ExpectNoError(err, "error waiting for infrequent nodestatus update")
 			}
@@ -524,7 +524,7 @@ func (rc *ResourceConsumer) EnsureDesiredReplicasInRange(ctx context.Context, mi
 		}
 	})
 	// The call above always returns an error, but if it is timeout, it's OK (condition satisfied all the time).
-	if err == wait.ErrWaitTimeout {
+	if wait.Interrupted(err) {
 		framework.Logf("Number of replicas was stable over %v", duration)
 		return
 	}
@@ -141,7 +141,7 @@ func allNodesReady(ctx context.Context, c clientset.Interface, timeout time.Dura
 		return len(notReady) <= framework.TestContext.AllowedNotReadyNodes, nil
 	})
 
-	if err != nil && err != wait.ErrWaitTimeout {
+	if err != nil && !wait.Interrupted(err) {
 		return err
 	}
@@ -96,7 +96,7 @@ func WaitForTotalHealthy(ctx context.Context, c clientset.Interface, timeout tim
 		return len(notReady) == 0 && len(missingPodsPerNode) == 0, nil
 	})
 
-	if err != nil && err != wait.ErrWaitTimeout {
+	if err != nil && !wait.Interrupted(err) {
 		return err
 	}
@@ -37,7 +37,7 @@ func WaitForReadyReplicaSet(ctx context.Context, c clientset.Interface, ns, name
 		}
 		return *(rs.Spec.Replicas) == rs.Status.Replicas && *(rs.Spec.Replicas) == rs.Status.ReadyReplicas, nil
 	})
-	if err == wait.ErrWaitTimeout {
+	if wait.Interrupted(err) {
 		err = fmt.Errorf("replicaset %q never became ready", name)
 	}
 	return err
@@ -59,7 +59,7 @@ func WaitForReplicaSetTargetAvailableReplicasWithTimeout(ctx context.Context, c
 		}
 		return rs.Status.ObservedGeneration >= desiredGeneration && rs.Status.AvailableReplicas == targetReplicaNum, nil
 	})
-	if err == wait.ErrWaitTimeout {
+	if wait.Interrupted(err) {
 		err = fmt.Errorf("replicaset %q never had desired number of .status.availableReplicas", replicaSet.Name)
 	}
 	return err
@@ -182,7 +182,7 @@ func waitForPodsGone(ctx context.Context, ps *testutils.PodStore, interval, time
 		return false, nil
 	})
 
-	if err == wait.ErrWaitTimeout {
+	if wait.Interrupted(err) {
 		for _, pod := range pods {
 			framework.Logf("ERROR: Pod %q still exists. Node: %q", pod.Name, pod.Spec.NodeName)
 		}
@@ -206,7 +206,7 @@ func waitForPodsInactive(ctx context.Context, ps *testutils.PodStore, interval,
 		return true, nil
 	})
 
-	if err == wait.ErrWaitTimeout {
+	if wait.Interrupted(err) {
 		for _, pod := range activePods {
 			framework.Logf("ERROR: Pod %q running on %q is still active", pod.Name, pod.Spec.NodeName)
 		}
@@ -45,7 +45,7 @@ func TestReachableHTTPWithRetriableErrorCodes(ctx context.Context, host string,
 	}
 
 	if err := wait.PollImmediateWithContext(ctx, framework.Poll, timeout, pollfn); err != nil {
-		if err == wait.ErrWaitTimeout {
+		if wait.Interrupted(err) {
 			framework.Failf("Could not reach HTTP service through %v:%v after %v", host, port, timeout)
 		} else {
 			framework.Failf("Failed to reach HTTP service through %v:%v: %v", host, port, err)
@@ -159,7 +159,7 @@ func WaitForBootstrapTokenSecretNotDisappear(c clientset.Interface, tokenID stri
 		}
 		return true, err
 	})
-	if err == wait.ErrWaitTimeout {
+	if wait.Interrupted(err) {
 		return nil
 	}
 	return err
@@ -210,7 +210,7 @@ func checkAffinity(ctx context.Context, cs clientset.Interface, execPod *v1.Pod,
 		return false, nil
 	}); pollErr != nil {
 		trackerFulfilled, _ := tracker.checkHostTrace(AffinityConfirmCount)
-		if pollErr != wait.ErrWaitTimeout {
+		if !wait.Interrupted(pollErr) {
 			checkAffinityFailed(tracker, pollErr.Error())
 			return false
 		}