Merge pull request #120406 from wlq1212/cheanup/framework/timeout

e2e_framework: stop using deprecated wait.ErrWaitTimeout
commit cc0a24d2e8
Kubernetes Prow Robot 2023-09-10 21:10:10 -07:00, committed by GitHub
9 changed files with 13 additions and 12 deletions
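
Background for the diff below: wait.ErrWaitTimeout is deprecated because the context-aware pollers in k8s.io/apimachinery/pkg/util/wait surface an interruption as context.DeadlineExceeded (or context.Canceled) rather than the old sentinel, so a direct equality check can silently stop matching. wait.Interrupted(err) matches all three. A minimal standalone sketch of the before/after checks, not code from this commit; the always-false condition is only there to force a timeout:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// Poll a condition that never succeeds so the 50ms deadline always fires.
	err := wait.PollUntilContextTimeout(context.Background(), 10*time.Millisecond, 50*time.Millisecond, true,
		func(ctx context.Context) (bool, error) { return false, nil })

	// Deprecated check: this poller returns context.DeadlineExceeded,
	// so the sentinel comparison no longer matches.
	fmt.Println(err == wait.ErrWaitTimeout) // false

	// Replacement: true for ErrWaitTimeout, context.Canceled,
	// and context.DeadlineExceeded alike.
	fmt.Println(wait.Interrupted(err)) // true
}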

View File

@@ -27,10 +27,11 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
-	admissionapi "k8s.io/pod-security-admission/api"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	testutils "k8s.io/kubernetes/test/utils"
+	admissionapi "k8s.io/pod-security-admission/api"
 	"github.com/google/go-cmp/cmp"
 	"github.com/onsi/ginkgo/v2"
@@ -170,7 +171,7 @@ var _ = SIGDescribe("NodeLease", func() {
 				return false, fmt.Errorf("node status heartbeat changed in %s (with no other status changes), was waiting for %s", currentHeartbeatTime.Sub(lastHeartbeatTime), leaseDuration)
 			})
 			// a timeout is acceptable, since it means we waited 5 minutes and didn't see any unwarranted node status updates
-			if err != nil && err != wait.ErrWaitTimeout {
+			if !wait.Interrupted(err) {
 				framework.ExpectNoError(err, "error waiting for infrequent nodestatus update")
 			}
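
A subtlety in the hunk above: the old guard err != nil && err != wait.ErrWaitTimeout never fired for a nil error, whereas !wait.Interrupted(err) is true when err is nil. Behavior is preserved because framework.ExpectNoError is a no-op for a nil error.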

View File

@@ -524,7 +524,7 @@ func (rc *ResourceConsumer) EnsureDesiredReplicasInRange(ctx context.Context, mi
 		}
 	})
 	// The call above always returns an error, but if it is timeout, it's OK (condition satisfied all the time).
-	if err == wait.ErrWaitTimeout {
+	if wait.Interrupted(err) {
 		framework.Logf("Number of replicas was stable over %v", duration)
 		return
 	}
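
The hunk above is the "timeout as success" idiom: the poll condition never returns true, so the loop can only end with a condition error (replicas out of range) or an interruption, and an interruption means the count stayed in range for the whole window. A sketch of the idiom under the new API; the helper name and the getReplicas callback are illustrative, not framework code:

package e2esketch

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// ensureReplicasInRange polls for the full window and fails fast if the
// observed count ever leaves [min, max]. Because the condition never
// returns true, an interrupted poll is the success case.
func ensureReplicasInRange(ctx context.Context, getReplicas func(context.Context) (int, error), min, max int, window time.Duration) error {
	err := wait.PollUntilContextTimeout(ctx, 10*time.Second, window, true,
		func(ctx context.Context) (bool, error) {
			n, err := getReplicas(ctx)
			if err != nil {
				return false, err
			}
			if n < min || n > max {
				return false, fmt.Errorf("%d replicas, want [%d, %d]", n, min, max)
			}
			return false, nil // keep polling until the window elapses
		})
	if wait.Interrupted(err) {
		return nil // the whole window passed without a violation
	}
	return err // a real failure reported by the condition
}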

View File

@@ -141,7 +141,7 @@ func allNodesReady(ctx context.Context, c clientset.Interface, timeout time.Dura
 		return len(notReady) <= framework.TestContext.AllowedNotReadyNodes, nil
 	})
-	if err != nil && err != wait.ErrWaitTimeout {
+	if err != nil && !wait.Interrupted(err) {
 		return err
 	}

View File

@@ -96,7 +96,7 @@ func WaitForTotalHealthy(ctx context.Context, c clientset.Interface, timeout tim
 		return len(notReady) == 0 && len(missingPodsPerNode) == 0, nil
 	})
-	if err != nil && err != wait.ErrWaitTimeout {
+	if err != nil && !wait.Interrupted(err) {
 		return err
 	}

View File

@@ -37,7 +37,7 @@ func WaitForReadyReplicaSet(ctx context.Context, c clientset.Interface, ns, name
 		}
 		return *(rs.Spec.Replicas) == rs.Status.Replicas && *(rs.Spec.Replicas) == rs.Status.ReadyReplicas, nil
 	})
-	if err == wait.ErrWaitTimeout {
+	if wait.Interrupted(err) {
 		err = fmt.Errorf("replicaset %q never became ready", name)
 	}
 	return err
@@ -59,7 +59,7 @@ func WaitForReplicaSetTargetAvailableReplicasWithTimeout(ctx context.Context, c
 		}
 		return rs.Status.ObservedGeneration >= desiredGeneration && rs.Status.AvailableReplicas == targetReplicaNum, nil
 	})
-	if err == wait.ErrWaitTimeout {
+	if wait.Interrupted(err) {
 		err = fmt.Errorf("replicaset %q never had desired number of .status.availableReplicas", replicaSet.Name)
 	}
 	return err
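
Both hunks in this file share one shape: on interruption, the generic timeout error is replaced with a message naming the object that never converged, while any other error passes through unchanged. A reduced sketch of that shape; the waitFor helper and its arguments are illustrative:

package e2esketch

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitFor polls until ready reports true and turns an interruption into an
// error that says what was being waited on, instead of a bare timeout.
func waitFor(ctx context.Context, what string, timeout time.Duration, ready wait.ConditionWithContextFunc) error {
	err := wait.PollUntilContextTimeout(ctx, time.Second, timeout, true, ready)
	if wait.Interrupted(err) {
		err = fmt.Errorf("%s never became ready", what)
	}
	return err
}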

View File

@@ -182,7 +182,7 @@ func waitForPodsGone(ctx context.Context, ps *testutils.PodStore, interval, time
 		return false, nil
 	})
-	if err == wait.ErrWaitTimeout {
+	if wait.Interrupted(err) {
 		for _, pod := range pods {
 			framework.Logf("ERROR: Pod %q still exists. Node: %q", pod.Name, pod.Spec.NodeName)
 		}
@@ -206,7 +206,7 @@ func waitForPodsInactive(ctx context.Context, ps *testutils.PodStore, interval,
 		return true, nil
 	})
-	if err == wait.ErrWaitTimeout {
+	if wait.Interrupted(err) {
 		for _, pod := range activePods {
 			framework.Logf("ERROR: Pod %q running on %q is still active", pod.Name, pod.Spec.NodeName)
 		}
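
Both hunks here keep the diagnostics on the interruption path: when the wait runs out, the surviving pods are enumerated before the caller reports failure. A reduced sketch of the pattern; the leftovers callback and the plain log destination are illustrative:

package e2esketch

import (
	"context"
	"log"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForGone polls until no names remain. On interruption it logs every
// survivor so the eventual test failure pinpoints which objects were stuck.
func waitForGone(ctx context.Context, leftovers func() []string, interval, timeout time.Duration) error {
	err := wait.PollUntilContextTimeout(ctx, interval, timeout, true,
		func(ctx context.Context) (bool, error) {
			return len(leftovers()) == 0, nil
		})
	if wait.Interrupted(err) {
		for _, name := range leftovers() {
			log.Printf("ERROR: %q still exists", name)
		}
	}
	return err
}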

View File

@@ -45,7 +45,7 @@ func TestReachableHTTPWithRetriableErrorCodes(ctx context.Context, host string,
 	}
 	if err := wait.PollImmediateWithContext(ctx, framework.Poll, timeout, pollfn); err != nil {
-		if err == wait.ErrWaitTimeout {
+		if wait.Interrupted(err) {
 			framework.Failf("Could not reach HTTP service through %v:%v after %v", host, port, timeout)
 		} else {
 			framework.Failf("Failed to reach HTTP service through %v:%v: %v", host, port, err)

View File

@@ -159,7 +159,7 @@ func WaitForBootstrapTokenSecretNotDisappear(c clientset.Interface, tokenID stri
 		}
 		return true, err
 	})
-	if err == wait.ErrWaitTimeout {
+	if wait.Interrupted(err) {
 		return nil
 	}
 	return err
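
Here the timeout is again the desired outcome, as in the ResourceConsumer hunk earlier: the secret is expected to survive the whole poll window, so an interrupted wait means the token never disappeared and nil is returned, while any other error still propagates.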

View File

@@ -210,7 +210,7 @@ func checkAffinity(ctx context.Context, cs clientset.Interface, execPod *v1.Pod,
 		return false, nil
 	}); pollErr != nil {
 		trackerFulfilled, _ := tracker.checkHostTrace(AffinityConfirmCount)
-		if pollErr != wait.ErrWaitTimeout {
+		if !wait.Interrupted(pollErr) {
 			checkAffinityFailed(tracker, pollErr.Error())
 			return false
 		}