drop deprecated PollWithContext and adopt PollUntilContextTimeout instead

Signed-off-by: yintong.huang <yintong.huang@daocloud.io>
Author: yintong.huang  Date: 2024-06-21 19:23:31 +08:00
parent 5ec31e84d6
commit 2db1b321e0
27 changed files with 62 additions and 62 deletions
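
The API change is mechanical and repeated in every hunk below: wait.PollUntilContextTimeout takes the same ctx, interval, timeout, and condition as the deprecated wait.PollWithContext, plus an extra immediate bool before the condition. This commit passes false everywhere, which preserves PollWithContext's behavior of sleeping one interval before the first condition check (true would evaluate the condition immediately). A minimal, self-contained sketch of the migrated pattern follows; waitForPodGone is a hypothetical helper used only for illustration and is not part of this commit, while the wait and client-go calls are the real APIs.

package example

import (
	"context"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForPodGone polls every 2s, for at most 60s, until the named pod has been deleted.
// Before: wait.PollWithContext(ctx, 2*time.Second, 60*time.Second, cond)
// After:  wait.PollUntilContextTimeout(ctx, 2*time.Second, 60*time.Second, false, cond)
func waitForPodGone(ctx context.Context, c clientset.Interface, ns, name string) error {
	return wait.PollUntilContextTimeout(ctx, 2*time.Second, 60*time.Second, false,
		func(ctx context.Context) (bool, error) {
			_, err := c.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
			if err == nil {
				return false, nil // pod still exists, keep polling
			}
			if apierrors.IsNotFound(err) {
				return true, nil // pod is gone, stop polling successfully
			}
			return false, err // unexpected error, abort the poll
		})
}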

@ -132,7 +132,7 @@ func StartTestServer(ctx context.Context, customFlags []string) (result TestServ
if err != nil {
return result, fmt.Errorf("failed to create a client: %v", err)
}
err = wait.PollWithContext(ctx, 100*time.Millisecond, 30*time.Second, func(ctx context.Context) (bool, error) {
err = wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (bool, error) {
select {
case <-ctx.Done():
return false, ctx.Err()

@ -496,7 +496,7 @@ func TestInformerList(t *testing.T) {
require.NoError(t, tracker.Add(object1))
require.NoError(t, tracker.Add(object2))
require.NoError(t, wait.PollWithContext(testContext, 100*time.Millisecond, 500*time.Millisecond, func(ctx context.Context) (done bool, err error) {
require.NoError(t, wait.PollUntilContextTimeout(testContext, 100*time.Millisecond, 500*time.Millisecond, false, func(ctx context.Context) (done bool, err error) {
return myController.Informer().LastSyncResourceVersion() == object2.GetResourceVersion(), nil
}))
@ -508,7 +508,7 @@ func TestInformerList(t *testing.T) {
require.NoError(t, tracker.Delete(fakeGVR, object2.GetNamespace(), object2.GetName()))
require.NoError(t, tracker.Add(object3))
require.NoError(t, wait.PollWithContext(testContext, 100*time.Millisecond, 500*time.Millisecond, func(ctx context.Context) (done bool, err error) {
require.NoError(t, wait.PollUntilContextTimeout(testContext, 100*time.Millisecond, 500*time.Millisecond, false, func(ctx context.Context) (done bool, err error) {
return myController.Informer().LastSyncResourceVersion() == object3.GetResourceVersion(), nil
}))
@ -519,7 +519,7 @@ func TestInformerList(t *testing.T) {
require.NoError(t, tracker.Add(namespacedObject1))
require.NoError(t, tracker.Add(namespacedObject2))
require.NoError(t, wait.PollWithContext(testContext, 100*time.Millisecond, 500*time.Millisecond, func(ctx context.Context) (done bool, err error) {
require.NoError(t, wait.PollUntilContextTimeout(testContext, 100*time.Millisecond, 500*time.Millisecond, false, func(ctx context.Context) (done bool, err error) {
return myController.Informer().LastSyncResourceVersion() == namespacedObject2.GetResourceVersion(), nil
}))
values, err = myController.Informer().Namespaced(namespacedObject1.GetNamespace()).List(labels.Everything())

@ -748,7 +748,7 @@ func pollTimed(ctx context.Context, interval, timeout time.Duration, condition w
elapsed := time.Since(start)
framework.Logf(msg, elapsed)
}(time.Now(), msg)
return wait.PollWithContext(ctx, interval, timeout, condition)
return wait.PollUntilContextTimeout(ctx, interval, timeout, false, condition)
}
func validateErrorWithDebugInfo(ctx context.Context, f *framework.Framework, err error, pods *v1.PodList, msg string, fields ...interface{}) {

@ -118,7 +118,7 @@ func checkExistingRCRecovers(ctx context.Context, f *framework.Framework) {
rcSelector := labels.Set{"name": "baz"}.AsSelector()
ginkgo.By("deleting pods from existing replication controller")
framework.ExpectNoError(wait.PollWithContext(ctx, time.Millisecond*500, time.Second*60, func(ctx context.Context) (bool, error) {
framework.ExpectNoError(wait.PollUntilContextTimeout(ctx, time.Millisecond*500, time.Second*60, false, func(ctx context.Context) (bool, error) {
options := metav1.ListOptions{LabelSelector: rcSelector.String()}
pods, err := podClient.List(ctx, options)
if err != nil {
@ -137,7 +137,7 @@ func checkExistingRCRecovers(ctx context.Context, f *framework.Framework) {
}))
ginkgo.By("waiting for replication controller to recover")
framework.ExpectNoError(wait.PollWithContext(ctx, time.Millisecond*500, time.Second*60, func(ctx context.Context) (bool, error) {
framework.ExpectNoError(wait.PollUntilContextTimeout(ctx, time.Millisecond*500, time.Second*60, false, func(ctx context.Context) (bool, error) {
options := metav1.ListOptions{LabelSelector: rcSelector.String()}
pods, err := podClient.List(ctx, options)
framework.ExpectNoError(err, "failed to list pods in namespace: %s, that match label selector: %s", f.Namespace.Name, rcSelector.String())

@ -832,7 +832,7 @@ func createFlowSchema(ctx context.Context, f *framework.Framework, flowSchemaNam
// by checking: (1) the dangling priority level reference condition in the flow
// schema status, and (2) metrics. The function times out after 30 seconds.
func waitForSteadyState(ctx context.Context, f *framework.Framework, flowSchemaName string, priorityLevelName string) {
framework.ExpectNoError(wait.PollWithContext(ctx, time.Second, 30*time.Second, func(ctx context.Context) (bool, error) {
framework.ExpectNoError(wait.PollUntilContextTimeout(ctx, time.Second, 30*time.Second, false, func(ctx context.Context) (bool, error) {
fs, err := f.ClientSet.FlowcontrolV1().FlowSchemas().Get(ctx, flowSchemaName, metav1.GetOptions{})
if err != nil {
return false, err

@ -357,7 +357,7 @@ var _ = SIGDescribe("Garbage collector", func() {
}
ginkgo.By("wait for all pods to be garbage collected")
// wait for the RCs and Pods to reach the expected numbers.
if err := wait.PollWithContext(ctx, 5*time.Second, (60*time.Second)+gcInformerResyncRetryTimeout, func(ctx context.Context) (bool, error) {
if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, (60*time.Second)+gcInformerResyncRetryTimeout, false, func(ctx context.Context) (bool, error) {
objects := map[string]int{"ReplicationControllers": 0, "Pods": 0}
return verifyRemainingObjects(ctx, f, objects)
}); err != nil {
@ -406,7 +406,7 @@ var _ = SIGDescribe("Garbage collector", func() {
// actual qps is less than 5. Also, the e2e tests are running in
// parallel, the GC controller might get distracted by other tests.
// According to the test logs, 120s is enough time.
if err := wait.PollWithContext(ctx, 5*time.Second, 120*time.Second+gcInformerResyncRetryTimeout, func(ctx context.Context) (bool, error) {
if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 120*time.Second+gcInformerResyncRetryTimeout, false, func(ctx context.Context) (bool, error) {
rcs, err := rcClient.List(ctx, metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list rcs: %w", err)
@ -663,7 +663,7 @@ var _ = SIGDescribe("Garbage collector", func() {
// owner deletion, but in practice there can be a long delay between owner
// deletion and dependent deletion processing. For now, increase the timeout
// and investigate the processing delay.
if err := wait.PollWithContext(ctx, 1*time.Second, 30*time.Second+gcInformerResyncRetryTimeout, func(ctx context.Context) (bool, error) {
if err := wait.PollUntilContextTimeout(ctx, 1*time.Second, 30*time.Second+gcInformerResyncRetryTimeout, false, func(ctx context.Context) (bool, error) {
_, err := rcClient.Get(ctx, rc.Name, metav1.GetOptions{})
if err == nil {
pods, _ := podClient.List(ctx, metav1.ListOptions{})
@ -755,7 +755,7 @@ var _ = SIGDescribe("Garbage collector", func() {
ginkgo.By("wait for the rc to be deleted")
// TODO: shorten the timeout when we make GC's periodic API rediscovery more efficient.
// Tracked at https://github.com/kubernetes/kubernetes/issues/50046.
if err := wait.PollWithContext(ctx, 5*time.Second, 90*time.Second, func(ctx context.Context) (bool, error) {
if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 90*time.Second, false, func(ctx context.Context) (bool, error) {
_, err := rcClient.Get(ctx, rc1.Name, metav1.GetOptions{})
if err == nil {
pods, _ := podClient.List(ctx, metav1.ListOptions{})
@ -855,7 +855,7 @@ var _ = SIGDescribe("Garbage collector", func() {
var err2 error
// TODO: shorten the timeout when we make GC's periodic API rediscovery more efficient.
// Tracked at https://github.com/kubernetes/kubernetes/issues/50046.
if err := wait.PollWithContext(ctx, 5*time.Second, 90*time.Second+gcInformerResyncRetryTimeout, func(ctx context.Context) (bool, error) {
if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 90*time.Second+gcInformerResyncRetryTimeout, false, func(ctx context.Context) (bool, error) {
pods, err2 = podClient.List(ctx, metav1.ListOptions{})
if err2 != nil {
return false, fmt.Errorf("failed to list pods: %w", err)
@ -985,7 +985,7 @@ var _ = SIGDescribe("Garbage collector", func() {
// Ensure the dependent is deleted.
var lastDependent *unstructured.Unstructured
var err2 error
if err := wait.PollWithContext(ctx, 5*time.Second, 60*time.Second, func(ctx context.Context) (bool, error) {
if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 60*time.Second, false, func(ctx context.Context) (bool, error) {
lastDependent, err2 = resourceClient.Get(ctx, dependentName, metav1.GetOptions{})
return apierrors.IsNotFound(err2), nil
}); err != nil {
@ -1088,7 +1088,7 @@ var _ = SIGDescribe("Garbage collector", func() {
}
ginkgo.By("wait for the owner to be deleted")
if err := wait.PollWithContext(ctx, 5*time.Second, 120*time.Second, func(ctx context.Context) (bool, error) {
if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 120*time.Second, false, func(ctx context.Context) (bool, error) {
_, err = resourceClient.Get(ctx, ownerName, metav1.GetOptions{})
if err == nil {
return false, nil
@ -1150,7 +1150,7 @@ func waitForReplicas(ctx context.Context, rc *v1.ReplicationController, rcClient
lastObservedRC *v1.ReplicationController
err error
)
if err := wait.PollWithContext(ctx, framework.Poll, replicaSyncTimeout, func(ctx context.Context) (bool, error) {
if err := wait.PollUntilContextTimeout(ctx, framework.Poll, replicaSyncTimeout, false, func(ctx context.Context) (bool, error) {
lastObservedRC, err = rcClient.Get(ctx, rc.Name, metav1.GetOptions{})
if err != nil {
return false, err

@ -38,7 +38,7 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
utilrand "k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/wait"
watch "k8s.io/apimachinery/pkg/watch"
"k8s.io/apimachinery/pkg/watch"
quota "k8s.io/apiserver/pkg/quota/v1"
clientset "k8s.io/client-go/kubernetes"
clientscheme "k8s.io/client-go/kubernetes/scheme"
@ -165,7 +165,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
found, unchanged := 0, 0
// On contended servers the service account controller can slow down, leading to the count changing during a run.
// Wait up to 5s for the count to stabilize, assuming that updates come at a consistent rate, and are not held indefinitely.
err := wait.PollWithContext(ctx, 1*time.Second, 30*time.Second, func(ctx context.Context) (bool, error) {
err := wait.PollUntilContextTimeout(ctx, 1*time.Second, 30*time.Second, false, func(ctx context.Context) (bool, error) {
secrets, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err)
if len(secrets.Items) == found {
@ -331,7 +331,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
found, unchanged := 0, 0
// On contended servers the service account controller can slow down, leading to the count changing during a run.
// Wait up to 15s for the count to stabilize, assuming that updates come at a consistent rate, and are not held indefinitely.
err := wait.PollWithContext(ctx, 1*time.Second, time.Minute, func(ctx context.Context) (bool, error) {
err := wait.PollUntilContextTimeout(ctx, 1*time.Second, time.Minute, false, func(ctx context.Context) (bool, error) {
configmaps, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err)
if len(configmaps.Items) == found {
@ -2121,7 +2121,7 @@ func deleteResourceQuota(ctx context.Context, c clientset.Interface, namespace,
// Wait up to 5s for the count to stabilize, assuming that updates come at a consistent rate, and are not held indefinitely.
func countResourceQuota(ctx context.Context, c clientset.Interface, namespace string) (int, error) {
found, unchanged := 0, 0
return found, wait.PollWithContext(ctx, 1*time.Second, 30*time.Second, func(ctx context.Context) (bool, error) {
return found, wait.PollUntilContextTimeout(ctx, 1*time.Second, 30*time.Second, false, func(ctx context.Context) (bool, error) {
resourceQuotas, err := c.CoreV1().ResourceQuotas(namespace).List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err)
if len(resourceQuotas.Items) == found {
@ -2137,7 +2137,7 @@ func countResourceQuota(ctx context.Context, c clientset.Interface, namespace st
// wait for resource quota status to show the expected used resources value
func waitForResourceQuota(ctx context.Context, c clientset.Interface, ns, quotaName string, used v1.ResourceList) error {
return wait.PollWithContext(ctx, framework.Poll, resourceQuotaTimeout, func(ctx context.Context) (bool, error) {
return wait.PollUntilContextTimeout(ctx, framework.Poll, resourceQuotaTimeout, false, func(ctx context.Context) (bool, error) {
resourceQuota, err := c.CoreV1().ResourceQuotas(ns).Get(ctx, quotaName, metav1.GetOptions{})
if err != nil {
return false, err
@ -2160,7 +2160,7 @@ func waitForResourceQuota(ctx context.Context, c clientset.Interface, ns, quotaN
// updateResourceQuotaUntilUsageAppears updates the resource quota object until the usage is populated
// for the specific resource name.
func updateResourceQuotaUntilUsageAppears(ctx context.Context, c clientset.Interface, ns, quotaName string, resourceName v1.ResourceName) error {
return wait.PollWithContext(ctx, framework.Poll, resourceQuotaTimeout, func(ctx context.Context) (bool, error) {
return wait.PollUntilContextTimeout(ctx, framework.Poll, resourceQuotaTimeout, false, func(ctx context.Context) (bool, error) {
resourceQuota, err := c.CoreV1().ResourceQuotas(ns).Get(ctx, quotaName, metav1.GetOptions{})
if err != nil {
return false, err

@ -622,7 +622,7 @@ func deleteCronJob(ctx context.Context, c clientset.Interface, ns, name string)
// Wait for at least the given number of active jobs.
func waitForActiveJobs(ctx context.Context, c clientset.Interface, ns, cronJobName string, active int) error {
return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) {
return wait.PollUntilContextTimeout(ctx, framework.Poll, cronJobTimeout, false, func(ctx context.Context) (bool, error) {
curr, err := getCronJob(ctx, c, ns, cronJobName)
if err != nil {
return false, err
@ -633,7 +633,7 @@ func waitForActiveJobs(ctx context.Context, c clientset.Interface, ns, cronJobNa
// Wait till a given job actually goes away from the Active list for a given cronjob
func waitForJobNotActive(ctx context.Context, c clientset.Interface, ns, cronJobName, jobName string) error {
return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) {
return wait.PollUntilContextTimeout(ctx, framework.Poll, cronJobTimeout, false, func(ctx context.Context) (bool, error) {
curr, err := getCronJob(ctx, c, ns, cronJobName)
if err != nil {
return false, err
@ -650,7 +650,7 @@ func waitForJobNotActive(ctx context.Context, c clientset.Interface, ns, cronJob
// Wait for a job to disappear by listing them explicitly.
func waitForJobToDisappear(ctx context.Context, c clientset.Interface, ns string, targetJob *batchv1.Job) error {
return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) {
return wait.PollUntilContextTimeout(ctx, framework.Poll, cronJobTimeout, false, func(ctx context.Context) (bool, error) {
jobs, err := c.BatchV1().Jobs(ns).List(ctx, metav1.ListOptions{})
if err != nil {
return false, err
@ -667,7 +667,7 @@ func waitForJobToDisappear(ctx context.Context, c clientset.Interface, ns string
// Wait for a pod to disappear by listing them explicitly.
func waitForJobsPodToDisappear(ctx context.Context, c clientset.Interface, ns string, targetJob *batchv1.Job) error {
return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) {
return wait.PollUntilContextTimeout(ctx, framework.Poll, cronJobTimeout, false, func(ctx context.Context) (bool, error) {
options := metav1.ListOptions{LabelSelector: fmt.Sprintf("controller-uid=%s", targetJob.UID)}
pods, err := c.CoreV1().Pods(ns).List(ctx, options)
if err != nil {
@ -679,7 +679,7 @@ func waitForJobsPodToDisappear(ctx context.Context, c clientset.Interface, ns st
// Wait for a job to be replaced with a new one.
func waitForJobReplaced(ctx context.Context, c clientset.Interface, ns, previousJobName string) error {
return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) {
return wait.PollUntilContextTimeout(ctx, framework.Poll, cronJobTimeout, false, func(ctx context.Context) (bool, error) {
jobs, err := c.BatchV1().Jobs(ns).List(ctx, metav1.ListOptions{})
if err != nil {
return false, err
@ -698,7 +698,7 @@ func waitForJobReplaced(ctx context.Context, c clientset.Interface, ns, previous
// waitForJobsAtLeast waits for at least a number of jobs to appear.
func waitForJobsAtLeast(ctx context.Context, c clientset.Interface, ns string, atLeast int) error {
return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) {
return wait.PollUntilContextTimeout(ctx, framework.Poll, cronJobTimeout, false, func(ctx context.Context) (bool, error) {
jobs, err := c.BatchV1().Jobs(ns).List(ctx, metav1.ListOptions{})
if err != nil {
return false, err
@ -709,7 +709,7 @@ func waitForJobsAtLeast(ctx context.Context, c clientset.Interface, ns string, a
// waitForAnyFinishedJob waits for any completed job to appear.
func waitForAnyFinishedJob(ctx context.Context, c clientset.Interface, ns string) error {
return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) {
return wait.PollUntilContextTimeout(ctx, framework.Poll, cronJobTimeout, false, func(ctx context.Context) (bool, error) {
jobs, err := c.BatchV1().Jobs(ns).List(ctx, metav1.ListOptions{})
if err != nil {
return false, err
@ -725,7 +725,7 @@ func waitForAnyFinishedJob(ctx context.Context, c clientset.Interface, ns string
// waitForEventWithReason waits until an event whose reason is in the given list has occurred
func waitForEventWithReason(ctx context.Context, c clientset.Interface, ns, cronJobName string, reasons []string) error {
return wait.PollWithContext(ctx, framework.Poll, 30*time.Second, func(ctx context.Context) (bool, error) {
return wait.PollUntilContextTimeout(ctx, framework.Poll, 30*time.Second, false, func(ctx context.Context) (bool, error) {
sj, err := getCronJob(ctx, c, ns, cronJobName)
if err != nil {
return false, err

@ -112,7 +112,7 @@ func (r *RestartDaemonConfig) waitUp(ctx context.Context) {
"curl -s -o %v -I -w \"%%{http_code}\" http://localhost:%v/healthz", nullDev, r.healthzPort)
}
err := wait.PollWithContext(ctx, r.pollInterval, r.pollTimeout, func(ctx context.Context) (bool, error) {
err := wait.PollUntilContextTimeout(ctx, r.pollInterval, r.pollTimeout, false, func(ctx context.Context) (bool, error) {
result, err := e2essh.NodeExec(ctx, r.nodeName, healthzCheck, framework.TestContext.Provider)
if err != nil {
return false, err

@ -1153,7 +1153,7 @@ func testDeploymentsControllerRef(ctx context.Context, f *framework.Framework) {
framework.ExpectNoError(err)
ginkgo.By("Wait for the ReplicaSet to be orphaned")
err = wait.PollWithContext(ctx, dRetryPeriod, dRetryTimeout, waitDeploymentReplicaSetsOrphaned(c, ns, podLabels))
err = wait.PollUntilContextTimeout(ctx, dRetryPeriod, dRetryTimeout, false, waitDeploymentReplicaSetsOrphaned(c, ns, podLabels))
framework.ExpectNoError(err, "error waiting for Deployment ReplicaSet to be orphaned")
deploymentName = "test-adopt-deployment"

@ -65,7 +65,7 @@ func WaitForNamedAuthorizationUpdate(ctx context.Context, c v1authorization.Subj
},
}
err := wait.PollWithContext(ctx, policyCachePollInterval, policyCachePollTimeout, func(ctx context.Context) (bool, error) {
err := wait.PollUntilContextTimeout(ctx, policyCachePollInterval, policyCachePollTimeout, false, func(ctx context.Context) (bool, error) {
response, err := c.SubjectAccessReviews().Create(ctx, review, metav1.CreateOptions{})
if err != nil {
return false, err

@ -720,7 +720,7 @@ func (cl *ClusterVerification) WaitFor(ctx context.Context, atLeast int, timeout
pods := []v1.Pod{}
var returnedErr error
err := wait.PollWithContext(ctx, 1*time.Second, timeout, func(ctx context.Context) (bool, error) {
err := wait.PollUntilContextTimeout(ctx, 1*time.Second, timeout, false, func(ctx context.Context) (bool, error) {
pods, returnedErr = cl.podState.filter(ctx, cl.client, cl.namespace)
// Failure

@ -775,7 +775,7 @@ func (j *TestJig) WaitForIngress(ctx context.Context, waitForNodePort bool) {
// WaitForIngressToStable waits for the LB to return 100 consecutive 200 responses.
func (j *TestJig) WaitForIngressToStable(ctx context.Context) {
if err := wait.PollWithContext(ctx, 10*time.Second, e2eservice.GetServiceLoadBalancerPropagationTimeout(ctx, j.Client), func(ctx context.Context) (bool, error) {
if err := wait.PollUntilContextTimeout(ctx, 10*time.Second, e2eservice.GetServiceLoadBalancerPropagationTimeout(ctx, j.Client), false, func(ctx context.Context) (bool, error) {
_, err := j.GetDistinctResponseFromIngress(ctx)
if err != nil {
return false, nil

@ -52,7 +52,7 @@ func WaitForJobPodsSucceeded(ctx context.Context, c clientset.Interface, ns, job
// waitForJobPodsInPhase waits for all pods for the Job named jobName in namespace ns to be in a given phase.
func waitForJobPodsInPhase(ctx context.Context, c clientset.Interface, ns, jobName string, expectedCount int32, phase v1.PodPhase) error {
return wait.PollWithContext(ctx, framework.Poll, JobTimeout, func(ctx context.Context) (bool, error) {
return wait.PollUntilContextTimeout(ctx, framework.Poll, JobTimeout, false, func(ctx context.Context) (bool, error) {
pods, err := GetJobPods(ctx, c, ns, jobName)
if err != nil {
return false, err
@ -69,7 +69,7 @@ func waitForJobPodsInPhase(ctx context.Context, c clientset.Interface, ns, jobNa
// WaitForJobComplete uses c to wait for completions to complete for the Job jobName in namespace ns.
func WaitForJobComplete(ctx context.Context, c clientset.Interface, ns, jobName string, completions int32) error {
return wait.PollWithContext(ctx, framework.Poll, JobTimeout, func(ctx context.Context) (bool, error) {
return wait.PollUntilContextTimeout(ctx, framework.Poll, JobTimeout, false, func(ctx context.Context) (bool, error) {
curr, err := c.BatchV1().Jobs(ns).Get(ctx, jobName, metav1.GetOptions{})
if err != nil {
return false, err
@ -145,7 +145,7 @@ func isJobFinished(j *batchv1.Job) bool {
// WaitForJobGone uses c to wait for up to timeout for the Job named jobName in namespace ns to be removed.
func WaitForJobGone(ctx context.Context, c clientset.Interface, ns, jobName string, timeout time.Duration) error {
return wait.PollWithContext(ctx, framework.Poll, timeout, func(ctx context.Context) (bool, error) {
return wait.PollUntilContextTimeout(ctx, framework.Poll, timeout, false, func(ctx context.Context) (bool, error) {
_, err := c.BatchV1().Jobs(ns).Get(ctx, jobName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
return true, nil

@ -1191,7 +1191,7 @@ func UnblockNetwork(ctx context.Context, from string, to string) {
// not coming back. Subsequent tests will run on fewer nodes (some of the tests
// may fail). Manual intervention is required in such case (recreating the
// cluster solves the problem too).
err := wait.PollWithContext(ctx, time.Millisecond*100, time.Second*30, func(ctx context.Context) (bool, error) {
err := wait.PollUntilContextTimeout(ctx, time.Millisecond*100, time.Second*30, false, func(ctx context.Context) (bool, error) {
result, err := e2essh.SSH(ctx, undropCmd, from, framework.TestContext.Provider)
if result.Code == 0 && err == nil {
return true, nil

@ -162,7 +162,7 @@ func (c *PodClient) CreateBatch(ctx context.Context, pods []*v1.Pod) []*v1.Pod {
// there is any other apierrors. name is the pod name, updateFn is the function updating the
// pod object.
func (c *PodClient) Update(ctx context.Context, name string, updateFn func(pod *v1.Pod)) {
framework.ExpectNoError(wait.PollWithContext(ctx, time.Millisecond*500, time.Second*30, func(ctx context.Context) (bool, error) {
framework.ExpectNoError(wait.PollUntilContextTimeout(ctx, time.Millisecond*500, time.Second*30, false, func(ctx context.Context) (bool, error) {
pod, err := c.PodInterface.Get(ctx, name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("failed to get pod %q: %w", name, err)
@ -309,7 +309,7 @@ func (c *PodClient) WaitForFinish(ctx context.Context, name string, timeout time
// WaitForErrorEventOrSuccess waits for pod to succeed or an error event for that pod.
func (c *PodClient) WaitForErrorEventOrSuccess(ctx context.Context, pod *v1.Pod) (*v1.Event, error) {
var ev *v1.Event
err := wait.PollWithContext(ctx, framework.Poll, framework.PodStartTimeout, func(ctx context.Context) (bool, error) {
err := wait.PollUntilContextTimeout(ctx, framework.Poll, framework.PodStartTimeout, false, func(ctx context.Context) (bool, error) {
evnts, err := c.f.ClientSet.CoreV1().Events(pod.Namespace).Search(scheme.Scheme, pod)
if err != nil {
return false, fmt.Errorf("error in listing events: %w", err)

@ -30,7 +30,7 @@ import (
// WaitForReadyReplicaSet waits until the replicaset has all of its replicas ready.
func WaitForReadyReplicaSet(ctx context.Context, c clientset.Interface, ns, name string) error {
err := wait.PollWithContext(ctx, framework.Poll, framework.PodStartTimeout, func(ctx context.Context) (bool, error) {
err := wait.PollUntilContextTimeout(ctx, framework.Poll, framework.PodStartTimeout, false, func(ctx context.Context) (bool, error) {
rs, err := c.AppsV1().ReplicaSets(ns).Get(ctx, name, metav1.GetOptions{})
if err != nil {
return false, err

@ -440,7 +440,7 @@ func (j *TestJig) waitForAvailableEndpoint(ctx context.Context, timeout time.Dur
go esController.Run(stopCh)
err := wait.PollWithContext(ctx, 1*time.Second, timeout, func(ctx context.Context) (bool, error) {
err := wait.PollUntilContextTimeout(ctx, 1*time.Second, timeout, false, func(ctx context.Context) (bool, error) {
return endpointAvailable && endpointSliceAvailable, nil
})
if err != nil {
@ -475,7 +475,7 @@ func (j *TestJig) sanityCheckService(svc *v1.Service, svcType v1.ServiceType) (*
expectNodePorts := needsNodePorts(svc)
for i, port := range svc.Spec.Ports {
hasNodePort := (port.NodePort != 0)
hasNodePort := port.NodePort != 0
if hasNodePort != expectNodePorts {
return nil, fmt.Errorf("unexpected Spec.Ports[%d].NodePort (%d) for service", i, port.NodePort)
}

@ -246,7 +246,7 @@ func runSSHCommand(ctx context.Context, cmd, user, host string, signer ssh.Signe
}
client, err := ssh.Dial("tcp", host, config)
if err != nil {
err = wait.PollWithContext(ctx, 5*time.Second, 20*time.Second, func(ctx context.Context) (bool, error) {
err = wait.PollUntilContextTimeout(ctx, 5*time.Second, 20*time.Second, false, func(ctx context.Context) (bool, error) {
fmt.Printf("error dialing %s@%s: '%v', retrying\n", user, host, err)
if client, err = ssh.Dial("tcp", host, config); err != nil {
return false, nil // retrying, error will be logged above
@ -300,7 +300,7 @@ func runSSHCommandViaBastion(ctx context.Context, cmd, user, bastion, host strin
}
bastionClient, err := ssh.Dial("tcp", bastion, config)
if err != nil {
err = wait.PollWithContext(ctx, 5*time.Second, 20*time.Second, func(ctx context.Context) (bool, error) {
err = wait.PollUntilContextTimeout(ctx, 5*time.Second, 20*time.Second, false, func(ctx context.Context) (bool, error) {
fmt.Printf("error dialing %s@%s: '%v', retrying\n", user, bastion, err)
if bastionClient, err = ssh.Dial("tcp", bastion, config); err != nil {
return false, err

@ -238,7 +238,7 @@ func WaitForNamespacesDeleted(ctx context.Context, c clientset.Interface, namesp
nsMap[ns] = true
}
//Now POLL until all namespaces have been eradicated.
return wait.PollWithContext(ctx, 2*time.Second, timeout,
return wait.PollUntilContextTimeout(ctx, 2*time.Second, timeout, false,
func(ctx context.Context) (bool, error) {
nsList, err := c.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
if err != nil {
@ -416,7 +416,7 @@ func CheckTestingNSDeletedExcept(ctx context.Context, c clientset.Interface, ski
// WaitForServiceEndpointsNum waits until the number of endpoints that implement the service reaches expectNum.
// Some components use EndpointSlices, others use Endpoints; we must verify that both objects meet the requirements.
func WaitForServiceEndpointsNum(ctx context.Context, c clientset.Interface, namespace, serviceName string, expectNum int, interval, timeout time.Duration) error {
return wait.PollWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) {
return wait.PollUntilContextTimeout(ctx, interval, timeout, false, func(ctx context.Context) (bool, error) {
Logf("Waiting for amount of service:%s endpoints to be %d", serviceName, expectNum)
endpoint, err := c.CoreV1().Endpoints(namespace).Get(ctx, serviceName, metav1.GetOptions{})
if err != nil {

@ -24,7 +24,7 @@ import (
networkingv1 "k8s.io/api/networking/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
@ -57,7 +57,7 @@ var _ = common.SIGDescribe("IngressClass", feature.Ingress, func() {
lastFailure := ""
// the admission controller may take a few seconds to observe the ingress classes
if err := wait.PollWithContext(ctx, time.Second, time.Minute, func(ctx context.Context) (bool, error) {
if err := wait.PollUntilContextTimeout(ctx, time.Second, time.Minute, false, func(ctx context.Context) (bool, error) {
lastFailure = ""
ingress, err := createBasicIngress(ctx, cs, f.Namespace.Name)
@ -94,7 +94,7 @@ var _ = common.SIGDescribe("IngressClass", feature.Ingress, func() {
lastFailure := ""
// the admission controller may take a few seconds to observe the ingress classes
if err := wait.PollWithContext(ctx, time.Second, time.Minute, func(ctx context.Context) (bool, error) {
if err := wait.PollUntilContextTimeout(ctx, time.Second, time.Minute, false, func(ctx context.Context) (bool, error) {
lastFailure = ""
ingress, err := createBasicIngress(ctx, cs, f.Namespace.Name)

@ -81,7 +81,7 @@ var _ = common.SIGDescribe(feature.TopologyHints, func() {
},
})
err = wait.PollWithContext(ctx, 5*time.Second, framework.PodStartTimeout, func(ctx context.Context) (bool, error) {
err = wait.PollUntilContextTimeout(ctx, 5*time.Second, framework.PodStartTimeout, false, func(ctx context.Context) (bool, error) {
return e2edaemonset.CheckRunningOnAllNodes(ctx, f, ds)
})
framework.ExpectNoError(err, "timed out waiting for DaemonSets to be ready")
@ -119,7 +119,7 @@ var _ = common.SIGDescribe(feature.TopologyHints, func() {
framework.Logf("Waiting for %d endpoints to be tracked in EndpointSlices", len(schedulableNodes))
var finalSlices []discoveryv1.EndpointSlice
err = wait.PollWithContext(ctx, 5*time.Second, 3*time.Minute, func(ctx context.Context) (bool, error) {
err = wait.PollUntilContextTimeout(ctx, 5*time.Second, 3*time.Minute, false, func(ctx context.Context) (bool, error) {
slices, listErr := c.DiscoveryV1().EndpointSlices(f.Namespace.Name).List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", discoveryv1.LabelServiceName, svc.Name)})
if listErr != nil {
return false, listErr
@ -190,7 +190,7 @@ var _ = common.SIGDescribe(feature.TopologyHints, func() {
framework.Logf("Ensuring that requests from %s pod on %s node stay in %s zone", clientPod.Name, nodeName, fromZone)
var logs string
if pollErr := wait.PollWithContext(ctx, 5*time.Second, e2eservice.KubeProxyLagTimeout, func(ctx context.Context) (bool, error) {
if pollErr := wait.PollUntilContextTimeout(ctx, 5*time.Second, e2eservice.KubeProxyLagTimeout, false, func(ctx context.Context) (bool, error) {
var err error
logs, err = e2epod.GetPodLogs(ctx, c, f.Namespace.Name, clientPod.Name, clientPod.Name)
framework.ExpectNoError(err)

@ -87,7 +87,7 @@ func getPodMatches(ctx context.Context, c clientset.Interface, nodeName string,
// the scope of this test, we do not expect pod naming conflicts so
// podNamePrefix should be sufficient to identify the pods.
func waitTillNPodsRunningOnNodes(ctx context.Context, c clientset.Interface, nodeNames sets.String, podNamePrefix string, namespace string, targetNumPods int, timeout time.Duration) error {
return wait.PollWithContext(ctx, pollInterval, timeout, func(ctx context.Context) (bool, error) {
return wait.PollUntilContextTimeout(ctx, pollInterval, timeout, false, func(ctx context.Context) (bool, error) {
matchCh := make(chan sets.String, len(nodeNames))
for _, item := range nodeNames.List() {
// Launch a goroutine per node to check the pods running on the nodes.
@ -249,11 +249,11 @@ func checkPodCleanup(ctx context.Context, c clientset.Interface, pod *v1.Pod, ex
for _, test := range tests {
framework.Logf("Wait up to %v for host's (%v) %q to be %v", timeout, nodeIP, test.feature, condMsg)
err = wait.PollWithContext(ctx, poll, timeout, func(ctx context.Context) (bool, error) {
err = wait.PollUntilContextTimeout(ctx, poll, timeout, false, func(ctx context.Context) (bool, error) {
result, err := e2essh.NodeExec(ctx, nodeIP, test.cmd, framework.TestContext.Provider)
framework.ExpectNoError(err)
e2essh.LogResult(result)
ok := (result.Code == 0 && len(result.Stdout) > 0 && len(result.Stderr) == 0)
ok := result.Code == 0 && len(result.Stdout) > 0 && len(result.Stderr) == 0
if expectClean && ok { // keep trying
return false, nil
}
@ -679,7 +679,7 @@ func isNode(node *v1.Node, os string) bool {
return false
}
if foundOS, found := node.Labels[v1.LabelOSStable]; found {
return (os == foundOS)
return os == foundOS
}
return false
}

@ -164,7 +164,7 @@ func temporarilyUnsetDefaultClasses(ctx context.Context, client clientset.Interf
func waitForPVCStorageClass(ctx context.Context, c clientset.Interface, namespace, pvcName, scName string, timeout time.Duration) (*v1.PersistentVolumeClaim, error) {
var watchedPVC *v1.PersistentVolumeClaim
err := wait.PollWithContext(ctx, 1*time.Second, timeout, func(ctx context.Context) (bool, error) {
err := wait.PollUntilContextTimeout(ctx, 1*time.Second, timeout, false, func(ctx context.Context) (bool, error) {
var err error
watchedPVC, err = c.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, metav1.GetOptions{})
if err != nil {

@ -245,7 +245,7 @@ var _ = utils.SIGDescribe(framework.WithSerial(), "Volume metrics", func() {
// Poll kubelet metrics waiting for the volume to be picked up
// by the volume stats collector
var kubeMetrics e2emetrics.KubeletMetrics
waitErr := wait.PollWithContext(ctx, 30*time.Second, 5*time.Minute, func(ctx context.Context) (bool, error) {
waitErr := wait.PollUntilContextTimeout(ctx, 30*time.Second, 5*time.Minute, false, func(ctx context.Context) (bool, error) {
framework.Logf("Grabbing Kubelet metrics")
// Grab kubelet metrics from the node the pod was scheduled on
var err error

@ -60,7 +60,7 @@ func (t *DaemonSetUpgradeTest) Setup(ctx context.Context, f *framework.Framework
}
ginkgo.By("Waiting for DaemonSet pods to become ready")
err = wait.PollWithContext(ctx, framework.Poll, framework.PodStartTimeout, func(ctx context.Context) (bool, error) {
err = wait.PollUntilContextTimeout(ctx, framework.Poll, framework.PodStartTimeout, false, func(ctx context.Context) (bool, error) {
return e2edaemonset.CheckRunningOnAllNodes(ctx, f, t.daemonSet)
})
framework.ExpectNoError(err)

@ -85,7 +85,7 @@ var _ = SIGDescribe("OSArchLabelReconciliation", framework.WithSerial(), framewo
func waitForNodeLabels(ctx context.Context, c v1core.CoreV1Interface, nodeName string, timeout time.Duration) error {
ginkgo.By(fmt.Sprintf("Waiting for node %v to have appropriate labels", nodeName))
// Poll until the node has desired labels
return wait.PollWithContext(ctx, framework.Poll, timeout,
return wait.PollUntilContextTimeout(ctx, framework.Poll, timeout, false,
func(ctx context.Context) (bool, error) {
node, err := c.Nodes().Get(ctx, nodeName, metav1.GetOptions{})
if err != nil {