Mirror of https://github.com/k3s-io/kubernetes.git
e2e_framework: stop using deprecated wait.ErrWaitTimeout
parent 6586296afc
commit ef235c4eac
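Every hunk below applies the same substitution: direct comparisons against the deprecated wait.ErrWaitTimeout sentinel are replaced with the wait.Interrupted(err) helper from k8s.io/apimachinery/pkg/util/wait, which also recognizes context cancellation and deadline errors. The standalone sketch that follows illustrates the before/after check; the poll helper, interval, timeout, and never-true condition are illustrative assumptions rather than code from this commit, and it presumes an apimachinery release that ships both PollUntilContextTimeout and Interrupted.

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx := context.Background()

	// A condition that never succeeds, so the poll below ends in a timeout.
	never := func(ctx context.Context) (bool, error) { return false, nil }

	err := wait.PollUntilContextTimeout(ctx, 50*time.Millisecond, 250*time.Millisecond, true, never)

	// Old pattern removed by this commit: compare against the deprecated
	// sentinel. Depending on which poll helper produced err, this equality
	// test may no longer match once errors are wrapped or context-derived.
	fmt.Println("sentinel comparison:", err == wait.ErrWaitTimeout)

	// New pattern: let the wait package classify the error. Interrupted
	// reports true for ErrWaitTimeout as well as for context cancellation
	// and deadline errors.
	fmt.Println("wait.Interrupted:   ", wait.Interrupted(err))
}

Checking with a predicate instead of an equality test keeps these call sites working even when the underlying poll implementation returns wrapped or context-based errors instead of the exact sentinel value.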
@@ -27,10 +27,11 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
+	admissionapi "k8s.io/pod-security-admission/api"
+
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	testutils "k8s.io/kubernetes/test/utils"
-	admissionapi "k8s.io/pod-security-admission/api"
 
 	"github.com/google/go-cmp/cmp"
 	"github.com/onsi/ginkgo/v2"
@@ -170,7 +171,7 @@ var _ = SIGDescribe("NodeLease", func() {
 				return false, fmt.Errorf("node status heartbeat changed in %s (with no other status changes), was waiting for %s", currentHeartbeatTime.Sub(lastHeartbeatTime), leaseDuration)
 			})
 			// a timeout is acceptable, since it means we waited 5 minutes and didn't see any unwarranted node status updates
-			if err != nil && err != wait.ErrWaitTimeout {
+			if !wait.Interrupted(err) {
 				framework.ExpectNoError(err, "error waiting for infrequent nodestatus update")
 			}
 
@@ -524,7 +524,7 @@ func (rc *ResourceConsumer) EnsureDesiredReplicasInRange(ctx context.Context, mi
 		}
 	})
 	// The call above always returns an error, but if it is timeout, it's OK (condition satisfied all the time).
-	if err == wait.ErrWaitTimeout {
+	if wait.Interrupted(err) {
 		framework.Logf("Number of replicas was stable over %v", duration)
 		return
 	}
@@ -141,7 +141,7 @@ func allNodesReady(ctx context.Context, c clientset.Interface, timeout time.Dura
 		return len(notReady) <= framework.TestContext.AllowedNotReadyNodes, nil
 	})
 
-	if err != nil && err != wait.ErrWaitTimeout {
+	if err != nil && !wait.Interrupted(err) {
 		return err
 	}
 
@@ -96,7 +96,7 @@ func WaitForTotalHealthy(ctx context.Context, c clientset.Interface, timeout tim
 		return len(notReady) == 0 && len(missingPodsPerNode) == 0, nil
 	})
 
-	if err != nil && err != wait.ErrWaitTimeout {
+	if err != nil && !wait.Interrupted(err) {
 		return err
 	}
 
@@ -37,7 +37,7 @@ func WaitForReadyReplicaSet(ctx context.Context, c clientset.Interface, ns, name
 		}
 		return *(rs.Spec.Replicas) == rs.Status.Replicas && *(rs.Spec.Replicas) == rs.Status.ReadyReplicas, nil
 	})
-	if err == wait.ErrWaitTimeout {
+	if wait.Interrupted(err) {
 		err = fmt.Errorf("replicaset %q never became ready", name)
 	}
 	return err
@@ -59,7 +59,7 @@ func WaitForReplicaSetTargetAvailableReplicasWithTimeout(ctx context.Context, c
 		}
 		return rs.Status.ObservedGeneration >= desiredGeneration && rs.Status.AvailableReplicas == targetReplicaNum, nil
 	})
-	if err == wait.ErrWaitTimeout {
+	if wait.Interrupted(err) {
 		err = fmt.Errorf("replicaset %q never had desired number of .status.availableReplicas", replicaSet.Name)
 	}
 	return err
@@ -182,7 +182,7 @@ func waitForPodsGone(ctx context.Context, ps *testutils.PodStore, interval, time
 		return false, nil
 	})
 
-	if err == wait.ErrWaitTimeout {
+	if wait.Interrupted(err) {
 		for _, pod := range pods {
 			framework.Logf("ERROR: Pod %q still exists. Node: %q", pod.Name, pod.Spec.NodeName)
 		}
@@ -206,7 +206,7 @@ func waitForPodsInactive(ctx context.Context, ps *testutils.PodStore, interval,
 		return true, nil
 	})
 
-	if err == wait.ErrWaitTimeout {
+	if wait.Interrupted(err) {
 		for _, pod := range activePods {
 			framework.Logf("ERROR: Pod %q running on %q is still active", pod.Name, pod.Spec.NodeName)
 		}
@@ -45,7 +45,7 @@ func TestReachableHTTPWithRetriableErrorCodes(ctx context.Context, host string,
 	}
 
 	if err := wait.PollImmediateWithContext(ctx, framework.Poll, timeout, pollfn); err != nil {
-		if err == wait.ErrWaitTimeout {
+		if wait.Interrupted(err) {
 			framework.Failf("Could not reach HTTP service through %v:%v after %v", host, port, timeout)
 		} else {
 			framework.Failf("Failed to reach HTTP service through %v:%v: %v", host, port, err)
@@ -159,7 +159,7 @@ func WaitForBootstrapTokenSecretNotDisappear(c clientset.Interface, tokenID stri
 		}
 		return true, err
 	})
-	if err == wait.ErrWaitTimeout {
+	if wait.Interrupted(err) {
 		return nil
 	}
 	return err
@@ -210,7 +210,7 @@ func checkAffinity(ctx context.Context, cs clientset.Interface, execPod *v1.Pod,
 		return false, nil
 	}); pollErr != nil {
 		trackerFulfilled, _ := tracker.checkHostTrace(AffinityConfirmCount)
-		if pollErr != wait.ErrWaitTimeout {
+		if !wait.Interrupted(pollErr) {
 			checkAffinityFailed(tracker, pollErr.Error())
 			return false
 		}