use PollUntilContextTimeout to replace PollImmediateWithContext in tests

Signed-off-by: bzsuni <bingzhe.sun@daocloud.io>
bzsuni 2023-10-19 22:50:21 +08:00
parent de054fbf94
commit 8775d805fa
55 changed files with 140 additions and 140 deletions
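Every hunk in this diff is the same mechanical substitution: the deprecated wait.PollImmediateWithContext(ctx, interval, timeout, condition) becomes wait.PollUntilContextTimeout(ctx, interval, timeout, true, condition). The extra boolean is the new immediate parameter; passing true preserves the old behavior of evaluating the condition once right away instead of sleeping through the first interval. A minimal standalone sketch of the pattern, assuming k8s.io/apimachinery v0.27 or later (the condition body is a placeholder for illustration, not code from this commit):

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx := context.Background()

	// Polled until it returns true, returns an error, or the timeout expires.
	condition := func(ctx context.Context) (bool, error) {
		fmt.Println("checking...")
		return true, nil
	}

	// Before (deprecated):
	// err := wait.PollImmediateWithContext(ctx, 500*time.Millisecond, time.Minute, condition)

	// After: immediate=true evaluates the condition once before the first
	// interval elapses, matching the old PollImmediateWithContext semantics.
	if err := wait.PollUntilContextTimeout(ctx, 500*time.Millisecond, time.Minute, true, condition); err != nil {
		fmt.Println("poll failed:", err)
	}
}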

View File

@@ -483,7 +483,7 @@ func testCRListConversion(ctx context.Context, f *framework.Framework, testCrd *
 // waitWebhookConversionReady sends stub custom resource creation requests requiring conversion until one succeeds.
 func waitWebhookConversionReady(ctx context.Context, f *framework.Framework, crd *apiextensionsv1.CustomResourceDefinition, customResourceClients map[string]dynamic.ResourceInterface, version string) {
-	framework.ExpectNoError(wait.PollImmediateWithContext(ctx, 100*time.Millisecond, 30*time.Second, func(ctx context.Context) (bool, error) {
+	framework.ExpectNoError(wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 30*time.Second, true, func(ctx context.Context) (bool, error) {
 		crInstance := &unstructured.Unstructured{
 			Object: map[string]interface{}{
 				"kind": crd.Spec.Names.Kind,

View File

@@ -491,7 +491,7 @@ var _ = SIGDescribe("Garbage collector", func() {
 		}
 		// wait for deployment to create some rs
 		ginkgo.By("Wait for the Deployment to create new ReplicaSet")
-		err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 1*time.Minute, func(ctx context.Context) (bool, error) {
+		err = wait.PollUntilContextTimeout(ctx, 500*time.Millisecond, 1*time.Minute, true, func(ctx context.Context) (bool, error) {
 			rsList, err := rsClient.List(ctx, metav1.ListOptions{})
 			if err != nil {
 				return false, fmt.Errorf("failed to list rs: %w", err)
@@ -510,7 +510,7 @@ var _ = SIGDescribe("Garbage collector", func() {
 			framework.Failf("failed to delete the deployment: %v", err)
 		}
 		ginkgo.By("wait for all rs to be garbage collected")
-		err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 1*time.Minute+gcInformerResyncRetryTimeout, func(ctx context.Context) (bool, error) {
+		err = wait.PollUntilContextTimeout(ctx, 500*time.Millisecond, 1*time.Minute+gcInformerResyncRetryTimeout, true, func(ctx context.Context) (bool, error) {
 			objects := map[string]int{"Deployments": 0, "ReplicaSets": 0, "Pods": 0}
 			return verifyRemainingObjects(ctx, f, objects)
 		})
@@ -551,7 +551,7 @@ var _ = SIGDescribe("Garbage collector", func() {
 		// wait for deployment to create some rs
 		ginkgo.By("Wait for the Deployment to create new ReplicaSet")
 		var replicaset appsv1.ReplicaSet
-		err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 1*time.Minute, func(ctx context.Context) (bool, error) {
+		err = wait.PollUntilContextTimeout(ctx, 500*time.Millisecond, 1*time.Minute, true, func(ctx context.Context) (bool, error) {
 			rsList, err := rsClient.List(ctx, metav1.ListOptions{})
 			if err != nil {
 				return false, fmt.Errorf("failed to list rs: %w", err)
@@ -568,7 +568,7 @@ var _ = SIGDescribe("Garbage collector", func() {
 		}
 		desiredGeneration := replicaset.Generation
-		if err := wait.PollImmediateWithContext(ctx, 100*time.Millisecond, 60*time.Second, func(ctx context.Context) (bool, error) {
+		if err := wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 60*time.Second, true, func(ctx context.Context) (bool, error) {
 			newRS, err := clientSet.AppsV1().ReplicaSets(replicaset.Namespace).Get(ctx, replicaset.Name, metav1.GetOptions{})
 			if err != nil {
 				return false, err
@@ -585,7 +585,7 @@ var _ = SIGDescribe("Garbage collector", func() {
 			framework.Failf("failed to delete the deployment: %v", err)
 		}
 		ginkgo.By("wait for deployment deletion to see if the garbage collector mistakenly deletes the rs")
-		err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 1*time.Minute+gcInformerResyncRetryTimeout, func(ctx context.Context) (bool, error) {
+		err = wait.PollUntilContextTimeout(ctx, 500*time.Millisecond, 1*time.Minute+gcInformerResyncRetryTimeout, true, func(ctx context.Context) (bool, error) {
 			dList, err := deployClient.List(ctx, metav1.ListOptions{})
 			if err != nil {
 				return false, fmt.Errorf("failed to list deployments: %w", err)
@@ -974,7 +974,7 @@ var _ = SIGDescribe("Garbage collector", func() {
 		}
 		// Wait for the canary foreground finalization to complete, which means GC is aware of our new custom resource type
 		var lastCanary *unstructured.Unstructured
-		if err := wait.PollImmediateWithContext(ctx, 5*time.Second, 3*time.Minute, func(ctx context.Context) (bool, error) {
+		if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 3*time.Minute, true, func(ctx context.Context) (bool, error) {
 			lastCanary, err = resourceClient.Get(ctx, dependentName, metav1.GetOptions{})
 			return apierrors.IsNotFound(err), nil
 		}); err != nil {
@@ -1119,7 +1119,7 @@ var _ = SIGDescribe("Garbage collector", func() {
 		framework.ExpectNoError(err, "failed to create cronjob: %+v, in namespace: %s", cronJob, f.Namespace.Name)
 		ginkgo.By("Wait for the CronJob to create new Job")
-		err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 2*time.Minute, func(ctx context.Context) (bool, error) {
+		err = wait.PollUntilContextTimeout(ctx, 500*time.Millisecond, 2*time.Minute, true, func(ctx context.Context) (bool, error) {
 			jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(ctx, metav1.ListOptions{})
 			if err != nil {
 				return false, fmt.Errorf("failed to list jobs: %w", err)
@@ -1135,7 +1135,7 @@ var _ = SIGDescribe("Garbage collector", func() {
 			framework.Failf("Failed to delete the CronJob: %v", err)
 		}
 		ginkgo.By("Verify if cronjob does not leave jobs nor pods behind")
-		err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 1*time.Minute, func(ctx context.Context) (bool, error) {
+		err = wait.PollUntilContextTimeout(ctx, 500*time.Millisecond, 1*time.Minute, true, func(ctx context.Context) (bool, error) {
 			objects := map[string]int{"CronJobs": 0, "Jobs": 0, "Pods": 0}
 			return verifyRemainingObjects(ctx, f, objects)
 		})

View File

@@ -1185,7 +1185,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
 		})
 		framework.ExpectNoError(err, "failed to locate ResourceQuota %q in namespace %q", patchedResourceQuota.Name, ns)
-		err = wait.PollImmediateWithContext(ctx, 5*time.Second, 5*time.Minute, func(ctx context.Context) (bool, error) {
+		err = wait.PollUntilContextTimeout(ctx, 5*time.Second, 5*time.Minute, true, func(ctx context.Context) (bool, error) {
 			resourceQuotaResult, err := rqClient.Get(ctx, rqName, metav1.GetOptions{})
 			framework.ExpectNoError(err)

View File

@@ -65,7 +65,7 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() {
 		for _, ds := range daemonsets.Items {
 			ginkgo.By(fmt.Sprintf("Deleting DaemonSet %q", ds.Name))
 			framework.ExpectNoError(e2eresource.DeleteResourceAndWaitForGC(ctx, f.ClientSet, extensionsinternal.Kind("DaemonSet"), f.Namespace.Name, ds.Name))
-			err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, &ds))
+			err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnNoNodes(f, &ds))
 			framework.ExpectNoError(err, "error waiting for daemon pod to be reaped")
 		}
 	}
@@ -134,7 +134,7 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() {
 		framework.ExpectNoError(err)
 		ginkgo.By("Check that daemon pods launch on every node of the cluster.")
-		err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, testDaemonset))
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, testDaemonset))
 		framework.ExpectNoError(err, "error waiting for daemon pod to start")
 		err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
 		framework.ExpectNoError(err)
@@ -191,7 +191,7 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() {
 		framework.Logf("Created ControllerRevision: %s", newControllerRevision.Name)
 		ginkgo.By("Confirm that there are two ControllerRevisions")
-		err = wait.PollImmediateWithContext(ctx, controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, checkControllerRevisionListQuantity(f, dsLabelSelector, 2))
+		err = wait.PollUntilContextTimeout(ctx, controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, true, checkControllerRevisionListQuantity(f, dsLabelSelector, 2))
 		framework.ExpectNoError(err, "failed to count required ControllerRevisions")
 		ginkgo.By(fmt.Sprintf("Deleting ControllerRevision %q", initialRevision.Name))
@@ -199,7 +199,7 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() {
 		framework.ExpectNoError(err, "Failed to delete ControllerRevision: %v", err)
 		ginkgo.By("Confirm that there is only one ControllerRevision")
-		err = wait.PollImmediateWithContext(ctx, controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, checkControllerRevisionListQuantity(f, dsLabelSelector, 1))
+		err = wait.PollUntilContextTimeout(ctx, controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, true, checkControllerRevisionListQuantity(f, dsLabelSelector, 1))
 		framework.ExpectNoError(err, "failed to count required ControllerRevisions")
 		listControllerRevisions, err := csAppsV1.ControllerRevisions(ns).List(ctx, metav1.ListOptions{})
@@ -226,7 +226,7 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() {
 		framework.ExpectNoError(err, "error patching daemon set")
 		ginkgo.By("Confirm that there are two ControllerRevisions")
-		err = wait.PollImmediateWithContext(ctx, controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, checkControllerRevisionListQuantity(f, dsLabelSelector, 2))
+		err = wait.PollUntilContextTimeout(ctx, controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, true, checkControllerRevisionListQuantity(f, dsLabelSelector, 2))
 		framework.ExpectNoError(err, "failed to count required ControllerRevisions")
 		updatedLabel := map[string]string{updatedControllerRevision.Name: "updated"}
@@ -237,7 +237,7 @@ var _ = SIGDescribe("ControllerRevision [Serial]", func() {
 		framework.ExpectNoError(err, "Failed to delete ControllerRevision: %v", err)
 		ginkgo.By("Confirm that there is only one ControllerRevision")
-		err = wait.PollImmediateWithContext(ctx, controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, checkControllerRevisionListQuantity(f, dsLabelSelector, 1))
+		err = wait.PollUntilContextTimeout(ctx, controllerRevisionRetryPeriod, controllerRevisionRetryTimeout, true, checkControllerRevisionListQuantity(f, dsLabelSelector, 1))
 		framework.ExpectNoError(err, "failed to count required ControllerRevisions")
 		list, err := csAppsV1.ControllerRevisions(ns).List(ctx, metav1.ListOptions{})

View File

@@ -92,7 +92,7 @@ type updateDSFunc func(*appsv1.DaemonSet)
 func updateDaemonSetWithRetries(ctx context.Context, c clientset.Interface, namespace, name string, applyUpdate updateDSFunc) (ds *appsv1.DaemonSet, err error) {
 	daemonsets := c.AppsV1().DaemonSets(namespace)
 	var updateErr error
-	pollErr := wait.PollImmediateWithContext(ctx, 10*time.Millisecond, 1*time.Minute, func(ctx context.Context) (bool, error) {
+	pollErr := wait.PollUntilContextTimeout(ctx, 10*time.Millisecond, 1*time.Minute, true, func(ctx context.Context) (bool, error) {
 		if ds, err = daemonsets.Get(ctx, name, metav1.GetOptions{}); err != nil {
 			return false, err
 		}
@@ -127,7 +127,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		for _, ds := range daemonsets.Items {
 			ginkgo.By(fmt.Sprintf("Deleting DaemonSet %q", ds.Name))
 			framework.ExpectNoError(e2eresource.DeleteResourceAndWaitForGC(ctx, f.ClientSet, extensionsinternal.Kind("DaemonSet"), f.Namespace.Name, ds.Name))
-			err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, &ds))
+			err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnNoNodes(f, &ds))
 			framework.ExpectNoError(err, "error waiting for daemon pod to be reaped")
 		}
 	}
@@ -182,7 +182,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		framework.ExpectNoError(err)
 		ginkgo.By("Check that daemon pods launch on every node of the cluster.")
-		err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, ds))
 		framework.ExpectNoError(err, "error waiting for daemon pod to start")
 		err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
 		framework.ExpectNoError(err)
@@ -192,7 +192,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		pod := podList.Items[0]
 		err = c.CoreV1().Pods(ns).Delete(ctx, pod.Name, metav1.DeleteOptions{})
 		framework.ExpectNoError(err)
-		err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, ds))
 		framework.ExpectNoError(err, "error waiting for daemon pod to revive")
 	})
@@ -212,7 +212,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		framework.ExpectNoError(err)
 		ginkgo.By("Initially, daemon pods should not be running on any nodes.")
-		err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnNoNodes(f, ds))
 		framework.ExpectNoError(err, "error waiting for daemon pods to be running on no nodes")
 		ginkgo.By("Change node label to blue, check that daemon pod is launched.")
@@ -222,7 +222,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		framework.ExpectNoError(err, "error setting labels on node")
 		daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
 		gomega.Expect(daemonSetLabels).To(gomega.HaveLen(1))
-		err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{newNode.Name}))
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{newNode.Name}))
 		framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
 		err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
 		framework.ExpectNoError(err)
@@ -231,7 +231,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		nodeSelector[daemonsetColorLabel] = "green"
 		greenNode, err := setDaemonSetNodeLabels(ctx, c, node.Name, nodeSelector)
 		framework.ExpectNoError(err, "error removing labels on node")
-		err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnNoNodes(f, ds))
 		framework.ExpectNoError(err, "error waiting for daemon pod to not be running on nodes")
 		ginkgo.By("Update DaemonSet node selector to green, and change its update strategy to RollingUpdate")
@@ -241,7 +241,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		framework.ExpectNoError(err, "error patching daemon set")
 		daemonSetLabels, _ = separateDaemonSetNodeLabels(greenNode.Labels)
 		gomega.Expect(daemonSetLabels).To(gomega.HaveLen(1))
-		err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{greenNode.Name}))
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{greenNode.Name}))
 		framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
 		err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
 		framework.ExpectNoError(err)
@@ -275,7 +275,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		framework.ExpectNoError(err)
 		ginkgo.By("Initially, daemon pods should not be running on any nodes.")
-		err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnNoNodes(f, ds))
 		framework.ExpectNoError(err, "error waiting for daemon pods to be running on no nodes")
 		ginkgo.By("Change node label to blue, check that daemon pod is launched.")
@@ -285,7 +285,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		framework.ExpectNoError(err, "error setting labels on node")
 		daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
 		gomega.Expect(daemonSetLabels).To(gomega.HaveLen(1))
-		err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{newNode.Name}))
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{newNode.Name}))
 		framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
 		err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
 		framework.ExpectNoError(err)
@@ -293,7 +293,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		ginkgo.By("Remove the node label and wait for daemons to be unscheduled")
 		_, err = setDaemonSetNodeLabels(ctx, c, node.Name, map[string]string{})
 		framework.ExpectNoError(err, "error removing labels on node")
-		err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnNoNodes(f, ds))
 		framework.ExpectNoError(err, "error waiting for daemon pod to not be running on nodes")
 	})
@@ -310,7 +310,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		framework.ExpectNoError(err)
 		ginkgo.By("Check that daemon pods launch on every node of the cluster.")
-		err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, ds))
 		framework.ExpectNoError(err, "error waiting for daemon pod to start")
 		err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
 		framework.ExpectNoError(err)
@@ -322,11 +322,11 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		pod.Status.Phase = v1.PodFailed
 		_, err = c.CoreV1().Pods(ns).UpdateStatus(ctx, &pod, metav1.UpdateOptions{})
 		framework.ExpectNoError(err, "error failing a daemon pod")
-		err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, ds))
 		framework.ExpectNoError(err, "error waiting for daemon pod to revive")
 		ginkgo.By("Wait for the failed daemon pod to be completely deleted.")
-		err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, waitFailedDaemonPodDeleted(c, &pod))
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, waitFailedDaemonPodDeleted(c, &pod))
 		framework.ExpectNoError(err, "error waiting for the failed daemon pod to be completely deleted")
 	})
@@ -342,7 +342,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		framework.ExpectNoError(err)
 		ginkgo.By("Check that daemon pods launch on every node of the cluster.")
-		err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, ds))
 		framework.ExpectNoError(err, "error waiting for daemon pod to start")
 		// Check history and labels
@@ -360,11 +360,11 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		framework.ExpectNoError(err)
 		ginkgo.By("Check that daemon pods images aren't updated.")
-		err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, image, 0))
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkDaemonPodsImageAndAvailability(c, ds, image, 0))
 		framework.ExpectNoError(err)
 		ginkgo.By("Check that daemon pods are still running on every node of the cluster.")
-		err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, ds))
 		framework.ExpectNoError(err, "error waiting for daemon pod to start")
 		// Check history and labels
@@ -392,7 +392,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		framework.ExpectNoError(err)
 		ginkgo.By("Check that daemon pods launch on every node of the cluster.")
-		err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, ds))
 		framework.ExpectNoError(err, "error waiting for daemon pod to start")
 		// Check history and labels
@@ -417,11 +417,11 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		retryTimeout := dsRetryTimeout + time.Duration(nodeCount*30)*time.Second
 		ginkgo.By("Check that daemon pods images are updated.")
-		err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, retryTimeout, checkDaemonPodsImageAndAvailability(c, ds, AgnhostImage, 1))
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, retryTimeout, true, checkDaemonPodsImageAndAvailability(c, ds, AgnhostImage, 1))
 		framework.ExpectNoError(err)
 		ginkgo.By("Check that daemon pods are still running on every node of the cluster.")
-		err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, ds))
 		framework.ExpectNoError(err, "error waiting for daemon pod to start")
 		// Check history and labels
@@ -452,7 +452,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		framework.ExpectNoError(err)
 		framework.Logf("Check that daemon pods launch on every node of the cluster")
-		err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, ds))
 		framework.ExpectNoError(err, "error waiting for daemon pod to start")
 		framework.Logf("Update the DaemonSet to trigger a rollout")
@@ -464,7 +464,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		framework.ExpectNoError(err)
 		// Make sure we're in the middle of a rollout
-		err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkAtLeastOneNewPod(c, ns, label, newImage))
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkAtLeastOneNewPod(c, ns, label, newImage))
 		framework.ExpectNoError(err)
 		pods := listDaemonPods(ctx, c, ns, label)
@@ -497,7 +497,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		framework.ExpectNoError(err)
 		framework.Logf("Make sure DaemonSet rollback is complete")
-		err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, rollbackDS, image, 1))
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkDaemonPodsImageAndAvailability(c, rollbackDS, image, 1))
 		framework.ExpectNoError(err)
 		// After rollback is done, compare current pods with previous old pods during rollout, to make sure they're not restarted
@@ -562,7 +562,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		framework.ExpectNoError(err)
 		ginkgo.By("Check that daemon pods launch on every node of the cluster.")
-		err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, ds))
 		framework.ExpectNoError(err, "error waiting for daemon pod to start")
 		// Check history and labels
@@ -590,7 +590,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		ginkgo.By("Check that daemon pods surge and invariants are preserved during that rollout")
 		ageOfOldPod := make(map[string]time.Time)
 		deliberatelyDeletedPods := sets.NewString()
-		err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, retryTimeout, func(ctx context.Context) (bool, error) {
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, retryTimeout, true, func(ctx context.Context) (bool, error) {
 			podList, err := c.CoreV1().Pods(ds.Namespace).List(ctx, metav1.ListOptions{})
 			if err != nil {
 				return false, err
@@ -813,7 +813,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		framework.ExpectNoError(err)
 		ginkgo.By("Check that daemon pods are still running on every node of the cluster.")
-		err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, ds))
 		framework.ExpectNoError(err, "error waiting for daemon pod to start")
 		// Check history and labels
@@ -846,7 +846,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		framework.ExpectNoError(err)
 		ginkgo.By("Check that daemon pods launch on every node of the cluster.")
-		err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, testDaemonset))
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, testDaemonset))
 		framework.ExpectNoError(err, "error waiting for daemon pod to start")
 		err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
 		framework.ExpectNoError(err)
@@ -894,7 +894,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		framework.ExpectNoError(err)
 		ginkgo.By("Check that daemon pods launch on every node of the cluster.")
-		err = wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, testDaemonset))
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, testDaemonset))
 		framework.ExpectNoError(err, "error waiting for daemon pod to start")
 		err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
 		framework.ExpectNoError(err)
@@ -1097,7 +1097,7 @@ func setDaemonSetNodeLabels(ctx context.Context, c clientset.Interface, nodeName
 	nodeClient := c.CoreV1().Nodes()
 	var newNode *v1.Node
 	var newLabels map[string]string
-	err := wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, func(ctx context.Context) (bool, error) {
+	err := wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, func(ctx context.Context) (bool, error) {
 		node, err := nodeClient.Get(ctx, nodeName, metav1.GetOptions{})
 		if err != nil {
 			return false, err
@@ -1226,7 +1226,7 @@ func waitForHistoryCreated(ctx context.Context, c clientset.Interface, ns string
 		framework.Logf("%d/%d controllerrevisions created.", len(historyList.Items), numHistory)
 		return false, nil
 	}
-	err := wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, listHistoryFn)
+	err := wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, listHistoryFn)
 	framework.ExpectNoError(err, "error waiting for controllerrevisions to be created")
 }

View File

@@ -1584,7 +1584,7 @@ func waitForDeploymentOldRSsNum(ctx context.Context, c clientset.Interface, ns,
 // waitForReplicaSetDesiredReplicas waits until the replicaset has desired number of replicas.
 func waitForReplicaSetDesiredReplicas(ctx context.Context, rsClient appsclient.ReplicaSetsGetter, replicaSet *appsv1.ReplicaSet) error {
 	desiredGeneration := replicaSet.Generation
-	err := wait.PollImmediateWithContext(ctx, framework.Poll, framework.PollShortTimeout, func(ctx context.Context) (bool, error) {
+	err := wait.PollUntilContextTimeout(ctx, framework.Poll, framework.PollShortTimeout, true, func(ctx context.Context) (bool, error) {
 		rs, err := rsClient.ReplicaSets(replicaSet.Namespace).Get(ctx, replicaSet.Name, metav1.GetOptions{})
 		if err != nil {
 			return false, err
@@ -1600,7 +1600,7 @@ func waitForReplicaSetDesiredReplicas(ctx context.Context, rsClient appsclient.R
 // waitForReplicaSetTargetSpecReplicas waits for .spec.replicas of a RS to equal targetReplicaNum
 func waitForReplicaSetTargetSpecReplicas(ctx context.Context, c clientset.Interface, replicaSet *appsv1.ReplicaSet, targetReplicaNum int32) error {
 	desiredGeneration := replicaSet.Generation
-	err := wait.PollImmediateWithContext(ctx, framework.Poll, framework.PollShortTimeout, func(ctx context.Context) (bool, error) {
+	err := wait.PollUntilContextTimeout(ctx, framework.Poll, framework.PollShortTimeout, true, func(ctx context.Context) (bool, error) {
 		rs, err := c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(ctx, replicaSet.Name, metav1.GetOptions{})
 		if err != nil {
 			return false, err

View File

@@ -146,7 +146,7 @@ var _ = SIGDescribe("DisruptionController", func() {
 		// Since disruptionAllowed starts out 0, if we see it ever become positive,
 		// that means the controller is working.
-		err := wait.PollImmediateWithContext(ctx, framework.Poll, timeout, func(ctx context.Context) (bool, error) {
+		err := wait.PollUntilContextTimeout(ctx, framework.Poll, timeout, true, func(ctx context.Context) (bool, error) {
 			pdb, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(ctx, defaultName, metav1.GetOptions{})
 			if err != nil {
 				return false, err
@@ -329,7 +329,7 @@ var _ = SIGDescribe("DisruptionController", func() {
 		// Since disruptionAllowed starts out false, if an eviction is ever allowed,
 		// that means the controller is working.
-		err = wait.PollImmediateWithContext(ctx, framework.Poll, timeout, func(ctx context.Context) (bool, error) {
+		err = wait.PollUntilContextTimeout(ctx, framework.Poll, timeout, true, func(ctx context.Context) (bool, error) {
 			err = cs.CoreV1().Pods(ns).EvictV1(ctx, e)
 			if err != nil {
 				return false, nil
@@ -519,7 +519,7 @@ func deletePDBCollection(ctx context.Context, cs kubernetes.Interface, ns string
 func waitForPDBCollectionToBeDeleted(ctx context.Context, cs kubernetes.Interface, ns string) {
 	ginkgo.By("Waiting for the PDB collection to be deleted")
-	err := wait.PollImmediateWithContext(ctx, framework.Poll, schedulingTimeout, func(ctx context.Context) (bool, error) {
+	err := wait.PollUntilContextTimeout(ctx, framework.Poll, schedulingTimeout, true, func(ctx context.Context) (bool, error) {
 		pdbList, err := cs.PolicyV1().PodDisruptionBudgets(ns).List(ctx, metav1.ListOptions{})
 		if err != nil {
 			return false, err
@@ -558,7 +558,7 @@ func createPodsOrDie(ctx context.Context, cs kubernetes.Interface, ns string, n
 func waitForPodsOrDie(ctx context.Context, cs kubernetes.Interface, ns string, n int) {
 	ginkgo.By("Waiting for all pods to be running")
-	err := wait.PollImmediateWithContext(ctx, framework.Poll, schedulingTimeout, func(ctx context.Context) (bool, error) {
+	err := wait.PollUntilContextTimeout(ctx, framework.Poll, schedulingTimeout, true, func(ctx context.Context) (bool, error) {
 		pods, err := cs.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: "foo=bar"})
 		if err != nil {
 			return false, err
@@ -624,7 +624,7 @@ func createReplicaSetOrDie(ctx context.Context, cs kubernetes.Interface, ns stri
 func locateRunningPod(ctx context.Context, cs kubernetes.Interface, ns string) (pod *v1.Pod, err error) {
 	ginkgo.By("locating a running pod")
-	err = wait.PollImmediateWithContext(ctx, framework.Poll, schedulingTimeout, func(ctx context.Context) (bool, error) {
+	err = wait.PollUntilContextTimeout(ctx, framework.Poll, schedulingTimeout, true, func(ctx context.Context) (bool, error) {
 		podList, err := cs.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
 		if err != nil {
 			return false, err
@@ -645,7 +645,7 @@ func locateRunningPod(ctx context.Context, cs kubernetes.Interface, ns string) (
 func waitForPdbToBeProcessed(ctx context.Context, cs kubernetes.Interface, ns string, name string) {
 	ginkgo.By("Waiting for the pdb to be processed")
-	err := wait.PollImmediateWithContext(ctx, framework.Poll, schedulingTimeout, func(ctx context.Context) (bool, error) {
+	err := wait.PollUntilContextTimeout(ctx, framework.Poll, schedulingTimeout, true, func(ctx context.Context) (bool, error) {
 		pdb, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(ctx, name, metav1.GetOptions{})
 		if err != nil {
 			return false, err
@@ -660,7 +660,7 @@ func waitForPdbToBeProcessed(ctx context.Context, cs kubernetes.Interface, ns st
 func waitForPdbToBeDeleted(ctx context.Context, cs kubernetes.Interface, ns string, name string) {
 	ginkgo.By("Waiting for the pdb to be deleted")
-	err := wait.PollImmediateWithContext(ctx, framework.Poll, schedulingTimeout, func(ctx context.Context) (bool, error) {
+	err := wait.PollUntilContextTimeout(ctx, framework.Poll, schedulingTimeout, true, func(ctx context.Context) (bool, error) {
 		_, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(ctx, name, metav1.GetOptions{})
 		if apierrors.IsNotFound(err) {
 			return true, nil // done
@@ -675,7 +675,7 @@ func waitForPdbToBeDeleted(ctx context.Context, cs kubernetes.Interface, ns stri
 func waitForPdbToObserveHealthyPods(ctx context.Context, cs kubernetes.Interface, ns string, healthyCount int32) {
 	ginkgo.By("Waiting for the pdb to observed all healthy pods")
-	err := wait.PollImmediateWithContext(ctx, framework.Poll, wait.ForeverTestTimeout, func(ctx context.Context) (bool, error) {
+	err := wait.PollUntilContextTimeout(ctx, framework.Poll, wait.ForeverTestTimeout, true, func(ctx context.Context) (bool, error) {
 		pdb, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(ctx, "foo", metav1.GetOptions{})
 		if err != nil {
 			return false, err

View File

@@ -433,7 +433,7 @@ var _ = SIGDescribe("ReplicationController", func() {
 		_, err := rcClient.Create(ctx, rc, metav1.CreateOptions{})
 		framework.ExpectNoError(err, "Failed to create ReplicationController: %v", err)
-		err = wait.PollImmediateWithContext(ctx, 1*time.Second, 1*time.Minute, checkReplicationControllerStatusReplicaCount(f, rcName, initialRCReplicaCount))
+		err = wait.PollUntilContextTimeout(ctx, 1*time.Second, 1*time.Minute, true, checkReplicationControllerStatusReplicaCount(f, rcName, initialRCReplicaCount))
 		framework.ExpectNoError(err, "failed to confirm the quantity of ReplicationController replicas")
 		ginkgo.By(fmt.Sprintf("Getting scale subresource for ReplicationController %q", rcName))
@@ -448,7 +448,7 @@ var _ = SIGDescribe("ReplicationController", func() {
 		framework.ExpectNoError(err, "Failed to update scale subresource: %v", err)
 		ginkgo.By(fmt.Sprintf("Verifying replicas where modified for replication controller %q", rcName))
-		err = wait.PollImmediateWithContext(ctx, 1*time.Second, 1*time.Minute, checkReplicationControllerStatusReplicaCount(f, rcName, expectedRCReplicaCount))
+		err = wait.PollUntilContextTimeout(ctx, 1*time.Second, 1*time.Minute, true, checkReplicationControllerStatusReplicaCount(f, rcName, expectedRCReplicaCount))
 		framework.ExpectNoError(err, "failed to confirm the quantity of ReplicationController replicas")
 	})
 })

View File

@@ -1826,7 +1826,7 @@ func lastLine(out string) string {
 }
 func pollReadWithTimeout(ctx context.Context, statefulPod statefulPodTester, statefulPodNumber int, key, expectedVal string) error {
-	err := wait.PollImmediateWithContext(ctx, time.Second, readTimeout, func(ctx context.Context) (bool, error) {
+	err := wait.PollUntilContextTimeout(ctx, time.Second, readTimeout, true, func(ctx context.Context) (bool, error) {
 		val := statefulPod.read(statefulPodNumber, key)
 		if val == "" {
 			return false, nil

View File

@@ -126,7 +126,7 @@ func finishTime(finishedJob *batchv1.Job) metav1.Time {
 func updateJobWithRetries(ctx context.Context, c clientset.Interface, namespace, name string, applyUpdate func(*batchv1.Job)) (job *batchv1.Job, err error) {
 	jobs := c.BatchV1().Jobs(namespace)
 	var updateErr error
-	pollErr := wait.PollImmediateWithContext(ctx, framework.Poll, JobTimeout, func(ctx context.Context) (bool, error) {
+	pollErr := wait.PollUntilContextTimeout(ctx, framework.Poll, JobTimeout, true, func(ctx context.Context) (bool, error) {
 		if job, err = jobs.Get(ctx, name, metav1.GetOptions{}); err != nil {
 			return false, err
 		}
@@ -148,7 +148,7 @@ func updateJobWithRetries(ctx context.Context, c clientset.Interface, namespace,
 // waitForJobDeleting uses c to wait for the Job jobName in namespace ns to have
 // a non-nil deletionTimestamp (i.e. being deleted).
 func waitForJobDeleting(ctx context.Context, c clientset.Interface, ns, jobName string) error {
-	return wait.PollImmediateWithContext(ctx, framework.Poll, JobTimeout, func(ctx context.Context) (bool, error) {
+	return wait.PollUntilContextTimeout(ctx, framework.Poll, JobTimeout, true, func(ctx context.Context) (bool, error) {
 		curr, err := c.BatchV1().Jobs(ns).Get(ctx, jobName, metav1.GetOptions{})
 		if err != nil {
 			return false, err

View File

@@ -1859,7 +1859,7 @@ func getScaleUpStatus(ctx context.Context, c clientset.Interface) (*scaleUpStatu
 func waitForScaleUpStatus(ctx context.Context, c clientset.Interface, cond func(s *scaleUpStatus) bool, timeout time.Duration) (*scaleUpStatus, error) {
 	var finalErr error
 	var status *scaleUpStatus
-	err := wait.PollImmediateWithContext(ctx, 5*time.Second, timeout, func(ctx context.Context) (bool, error) {
+	err := wait.PollUntilContextTimeout(ctx, 5*time.Second, timeout, true, func(ctx context.Context) (bool, error) {
 		status, finalErr = getScaleUpStatus(ctx, c)
 		if finalErr != nil {
 			return false, nil

View File

@@ -594,7 +594,7 @@ func hpa(name, namespace, deploymentName string, minReplicas, maxReplicas int32,
 func waitForReplicas(ctx context.Context, deploymentName, namespace string, cs clientset.Interface, timeout time.Duration, desiredReplicas int) {
 	interval := 20 * time.Second
-	err := wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) {
+	err := wait.PollUntilContextTimeout(ctx, interval, timeout, true, func(ctx context.Context) (bool, error) {
 		deployment, err := cs.AppsV1().Deployments(namespace).Get(ctx, deploymentName, metav1.GetOptions{})
 		if err != nil {
 			framework.Failf("Failed to get replication controller %s: %v", deployment, err)
@@ -610,7 +610,7 @@ func waitForReplicas(ctx context.Context, deploymentName, namespace string, cs c
 func ensureDesiredReplicasInRange(ctx context.Context, deploymentName, namespace string, cs clientset.Interface, minDesiredReplicas, maxDesiredReplicas int, timeout time.Duration) {
 	interval := 60 * time.Second
-	err := wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) {
+	err := wait.PollUntilContextTimeout(ctx, interval, timeout, true, func(ctx context.Context) (bool, error) {
 		deployment, err := cs.AppsV1().Deployments(namespace).Get(ctx, deploymentName, metav1.GetOptions{})
 		if err != nil {
 			framework.Failf("Failed to get replication controller %s: %v", deployment, err)

View File

@@ -122,7 +122,7 @@ func checkControlPlaneVersion(ctx context.Context, c clientset.Interface, want s
 	framework.Logf("Checking control plane version")
 	var err error
 	var v *version.Info
-	waitErr := wait.PollImmediateWithContext(ctx, 5*time.Second, 2*time.Minute, func(ctx context.Context) (bool, error) {
+	waitErr := wait.PollUntilContextTimeout(ctx, 5*time.Second, 2*time.Minute, true, func(ctx context.Context) (bool, error) {
 		v, err = c.Discovery().ServerVersion()
 		if err != nil {
 			traceRouteToControlPlane()

View File

@@ -883,7 +883,7 @@ var _ = SIGDescribe("Pods", func() {
 		// wait for all pods to be deleted
 		ginkgo.By("waiting for all pods to be deleted")
-		err = wait.PollImmediateWithContext(ctx, podRetryPeriod, f.Timeouts.PodDelete, checkPodListQuantity(f, "type=Testing", 0))
+		err = wait.PollUntilContextTimeout(ctx, podRetryPeriod, f.Timeouts.PodDelete, true, checkPodListQuantity(f, "type=Testing", 0))
 		framework.ExpectNoError(err, "found a pod(s)")
 	})

View File

@@ -150,7 +150,7 @@ func waitForDaemonSets(ctx context.Context, c clientset.Interface, ns string, al
 	framework.Logf("Waiting up to %v for all daemonsets in namespace '%s' to start",
 		timeout, ns)
-	return wait.PollImmediateWithContext(ctx, framework.Poll, timeout, func(ctx context.Context) (bool, error) {
+	return wait.PollUntilContextTimeout(ctx, framework.Poll, timeout, true, func(ctx context.Context) (bool, error) {
 		dsList, err := c.AppsV1().DaemonSets(ns).List(ctx, metav1.ListOptions{})
 		if err != nil {
 			framework.Logf("Error getting daemonsets in namespace: '%s': %v", ns, err)
@@ -429,7 +429,7 @@ func prepullImages(ctx context.Context, c clientset.Interface) {
 			return daemonset.CheckPresentOnNodes(ctx, c, imgPuller, ns, framework.TestContext.CloudConfig.NumNodes)
 		}
 		framework.Logf("Waiting for %s", imgPuller.Name)
-		err := wait.PollImmediateWithContext(ctx, dsRetryPeriod, dsRetryTimeout, checkDaemonset)
+		err := wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkDaemonset)
 		framework.ExpectNoError(err, "error waiting for image to be pulled")
 	}
 }


@@ -495,7 +495,7 @@ func (rc *ResourceConsumer) GetHpa(ctx context.Context, name string) (*autoscali
 // WaitForReplicas wait for the desired replicas
 func (rc *ResourceConsumer) WaitForReplicas(ctx context.Context, desiredReplicas int, duration time.Duration) {
 	interval := 20 * time.Second
-	err := wait.PollImmediateWithContext(ctx, interval, duration, func(ctx context.Context) (bool, error) {
+	err := wait.PollUntilContextTimeout(ctx, interval, duration, true, func(ctx context.Context) (bool, error) {
 		replicas := rc.GetReplicas(ctx)
 		framework.Logf("waiting for %d replicas (current: %d)", desiredReplicas, replicas)
 		return replicas == desiredReplicas, nil // Expected number of replicas found. Exit.
@@ -506,7 +506,7 @@ func (rc *ResourceConsumer) WaitForReplicas(ctx context.Context, desiredReplicas
 // EnsureDesiredReplicasInRange ensure the replicas is in a desired range
 func (rc *ResourceConsumer) EnsureDesiredReplicasInRange(ctx context.Context, minDesiredReplicas, maxDesiredReplicas int, duration time.Duration, hpaName string) {
 	interval := 10 * time.Second
-	err := wait.PollImmediateWithContext(ctx, interval, duration, func(ctx context.Context) (bool, error) {
+	err := wait.PollUntilContextTimeout(ctx, interval, duration, true, func(ctx context.Context) (bool, error) {
 		replicas := rc.GetReplicas(ctx)
 		framework.Logf("expecting there to be in [%d, %d] replicas (are: %d)", minDesiredReplicas, maxDesiredReplicas, replicas)
 		as, err := rc.GetHpa(ctx, hpaName)
@@ -964,7 +964,7 @@ func CreateCustomResourceDefinition(ctx context.Context, c crdclientset.Interfac
 	crd, err = c.ApiextensionsV1().CustomResourceDefinitions().Create(ctx, crdSchema, metav1.CreateOptions{})
 	framework.ExpectNoError(err)
 	// Wait until just created CRD appears in discovery.
-	err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 30*time.Second, func(ctx context.Context) (bool, error) {
+	err = wait.PollUntilContextTimeout(ctx, 500*time.Millisecond, 30*time.Second, true, func(ctx context.Context) (bool, error) {
 		return ExistsInDiscovery(crd, c, "v1")
 	})
 	framework.ExpectNoError(err)


@@ -34,7 +34,7 @@ type Action func() error
 // Please note delivery of events is not guaranteed. Asserting on events can lead to flaky tests.
 func WaitTimeoutForEvent(ctx context.Context, c clientset.Interface, namespace, eventSelector, msg string, timeout time.Duration) error {
 	interval := 2 * time.Second
-	return wait.PollImmediateWithContext(ctx, interval, timeout, eventOccurred(c, namespace, eventSelector, msg))
+	return wait.PollUntilContextTimeout(ctx, interval, timeout, true, eventOccurred(c, namespace, eventSelector, msg))
 }
 func eventOccurred(c clientset.Interface, namespace, eventSelector, msg string) wait.ConditionWithContextFunc {
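
Several of the rewritten call sites (checkPodListQuantity above, eventOccurred here, checkDaemonset and logContainsFn further down) pass a prebuilt condition rather than an inline closure. A hedged sketch of that factory shape; the names and the ConfigMap lookup are illustrative assumptions, not code from this commit:

import (
	"context"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// configMapExists closes over its arguments and returns a
// wait.ConditionWithContextFunc that PollUntilContextTimeout can drive.
func configMapExists(c kubernetes.Interface, ns, name string) wait.ConditionWithContextFunc {
	return func(ctx context.Context) (bool, error) {
		_, err := c.CoreV1().ConfigMaps(ns).Get(ctx, name, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return false, nil // not there yet, keep polling
		}
		return err == nil, err
	}
}

The factory style keeps the poll call itself on one readable line, which is why these call sites needed nothing beyond the added true argument.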


@@ -196,7 +196,7 @@ func SimpleGET(ctx context.Context, c *http.Client, url, host string) (string, e
 // expectUnreachable is true, it breaks on first non-healthy http code instead.
 func PollURL(ctx context.Context, route, host string, timeout time.Duration, interval time.Duration, httpClient *http.Client, expectUnreachable bool) error {
 	var lastBody string
-	pollErr := wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) {
+	pollErr := wait.PollUntilContextTimeout(ctx, interval, timeout, true, func(ctx context.Context) (bool, error) {
 		var err error
 		lastBody, err = SimpleGET(ctx, httpClient, route, host)
 		if err != nil {
@@ -733,7 +733,7 @@ func getIngressAddress(ctx context.Context, client clientset.Interface, ns, name
 // WaitForIngressAddress waits for the Ingress to acquire an address.
 func (j *TestJig) WaitForIngressAddress(ctx context.Context, c clientset.Interface, ns, ingName string, timeout time.Duration) (string, error) {
 	var address string
-	err := wait.PollImmediateWithContext(ctx, 10*time.Second, timeout, func(ctx context.Context) (bool, error) {
+	err := wait.PollUntilContextTimeout(ctx, 10*time.Second, timeout, true, func(ctx context.Context) (bool, error) {
 		ipOrNameList, err := getIngressAddress(ctx, c, ns, ingName, j.Class)
 		if err != nil || len(ipOrNameList) == 0 {
 			j.Logger.Errorf("Waiting for Ingress %s/%s to acquire IP, error: %v, ipOrNameList: %v", ns, ingName, err, ipOrNameList)
@@ -889,7 +889,7 @@ func getPortURL(ctx context.Context, client clientset.Interface, ns, name string
 	// unschedulable, since control plane nodes don't run kube-proxy. Without
 	// kube-proxy NodePorts won't work.
 	var nodes *v1.NodeList
-	if wait.PollImmediateWithContext(ctx, poll, framework.SingleCallTimeout, func(ctx context.Context) (bool, error) {
+	if wait.PollUntilContextTimeout(ctx, poll, framework.SingleCallTimeout, true, func(ctx context.Context) (bool, error) {
 		nodes, err = client.CoreV1().Nodes().List(ctx, metav1.ListOptions{FieldSelector: fields.Set{
 			"spec.unschedulable": "false",
 		}.AsSelector().String()})


@@ -91,7 +91,7 @@ func isJobFailed(j *batchv1.Job) bool {
 // WaitForJobFinish uses c to wait for the Job jobName in namespace ns to finish (either Failed or Complete).
 func WaitForJobFinish(ctx context.Context, c clientset.Interface, ns, jobName string) error {
-	return wait.PollImmediateWithContext(ctx, framework.Poll, JobTimeout, func(ctx context.Context) (bool, error) {
+	return wait.PollUntilContextTimeout(ctx, framework.Poll, JobTimeout, true, func(ctx context.Context) (bool, error) {
 		curr, err := c.BatchV1().Jobs(ns).Get(ctx, jobName, metav1.GetOptions{})
 		if err != nil {
 			return false, err
@@ -125,7 +125,7 @@ func WaitForJobGone(ctx context.Context, c clientset.Interface, ns, jobName stri
 // WaitForAllJobPodsGone waits for all pods for the Job named jobName in namespace ns
 // to be deleted.
 func WaitForAllJobPodsGone(ctx context.Context, c clientset.Interface, ns, jobName string) error {
-	return wait.PollImmediateWithContext(ctx, framework.Poll, JobTimeout, func(ctx context.Context) (bool, error) {
+	return wait.PollUntilContextTimeout(ctx, framework.Poll, JobTimeout, true, func(ctx context.Context) (bool, error) {
 		pods, err := GetJobPods(ctx, c, ns, jobName)
 		if err != nil {
 			return false, err


@@ -252,7 +252,7 @@ func (g *Grabber) GrabFromScheduler(ctx context.Context) (SchedulerMetrics, erro
 	var lastMetricsFetchErr error
 	var output string
-	if metricsWaitErr := wait.PollImmediateWithContext(ctx, time.Second, time.Minute, func(ctx context.Context) (bool, error) {
+	if metricsWaitErr := wait.PollUntilContextTimeout(ctx, time.Second, time.Minute, true, func(ctx context.Context) (bool, error) {
 		output, lastMetricsFetchErr = g.getSecureMetricsFromPod(ctx, g.kubeScheduler, metav1.NamespaceSystem, kubeSchedulerPort)
 		return lastMetricsFetchErr == nil, nil
 	}); metricsWaitErr != nil {
@@ -303,7 +303,7 @@ func (g *Grabber) GrabFromControllerManager(ctx context.Context) (ControllerMana
 	var output string
 	var lastMetricsFetchErr error
-	if metricsWaitErr := wait.PollImmediateWithContext(ctx, time.Second, time.Minute, func(ctx context.Context) (bool, error) {
+	if metricsWaitErr := wait.PollUntilContextTimeout(ctx, time.Second, time.Minute, true, func(ctx context.Context) (bool, error) {
 		output, lastMetricsFetchErr = g.getSecureMetricsFromPod(ctx, g.kubeControllerManager, metav1.NamespaceSystem, kubeControllerManagerPort)
 		return lastMetricsFetchErr == nil, nil
 	}); metricsWaitErr != nil {
@@ -342,7 +342,7 @@ func (g *Grabber) GrabFromSnapshotController(ctx context.Context, podName string
 	var output string
 	var lastMetricsFetchErr error
-	if metricsWaitErr := wait.PollImmediateWithContext(ctx, time.Second, time.Minute, func(ctx context.Context) (bool, error) {
+	if metricsWaitErr := wait.PollUntilContextTimeout(ctx, time.Second, time.Minute, true, func(ctx context.Context) (bool, error) {
 		output, lastMetricsFetchErr = g.getMetricsFromPod(ctx, g.client, podName, metav1.NamespaceSystem, port)
 		return lastMetricsFetchErr == nil, nil
 	}); metricsWaitErr != nil {


@@ -526,7 +526,7 @@ func (config *NetworkingTestConfig) executeCurlCmd(ctx context.Context, cmd stri
 	const retryTimeout = 30 * time.Second
 	podName := config.HostTestContainerPod.Name
 	var msg string
-	if pollErr := wait.PollImmediateWithContext(ctx, retryInterval, retryTimeout, func(ctx context.Context) (bool, error) {
+	if pollErr := wait.PollUntilContextTimeout(ctx, retryInterval, retryTimeout, true, func(ctx context.Context) (bool, error) {
 		stdout, err := e2epodoutput.RunHostCmd(config.Namespace, podName, cmd)
 		if err != nil {
 			msg = fmt.Sprintf("failed executing cmd %v in %v/%v: %v", cmd, config.Namespace, podName, err)
@@ -1172,7 +1172,7 @@ func UnblockNetwork(ctx context.Context, from string, to string) {
 // WaitForService waits until the service appears (exist == true), or disappears (exist == false)
 func WaitForService(ctx context.Context, c clientset.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error {
-	err := wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) {
+	err := wait.PollUntilContextTimeout(ctx, interval, timeout, true, func(ctx context.Context) (bool, error) {
 		_, err := c.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{})
 		switch {
 		case err == nil:


@@ -121,7 +121,7 @@ func allNodesReady(ctx context.Context, c clientset.Interface, timeout time.Dura
 	framework.Logf("Waiting up to %v for all (but %d) nodes to be ready", timeout, framework.TestContext.AllowedNotReadyNodes)
 	var notReady []*v1.Node
-	err := wait.PollImmediateWithContext(ctx, framework.Poll, timeout, func(ctx context.Context) (bool, error) {
+	err := wait.PollUntilContextTimeout(ctx, framework.Poll, timeout, true, func(ctx context.Context) (bool, error) {
 		notReady = nil
 		// It should be OK to list unschedulable Nodes here.
 		nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})


@@ -36,7 +36,7 @@ func WaitForSSHTunnels(ctx context.Context, namespace string) {
 	defer e2ekubectl.RunKubectl(namespace, "delete", "pod", "ssh-tunnel-test")
 	// allow up to a minute for new ssh tunnels to establish
-	wait.PollImmediateWithContext(ctx, 5*time.Second, time.Minute, func(ctx context.Context) (bool, error) {
+	wait.PollUntilContextTimeout(ctx, 5*time.Second, time.Minute, true, func(ctx context.Context) (bool, error) {
 		_, err := e2ekubectl.RunKubectl(namespace, "logs", "ssh-tunnel-test")
 		return err == nil, nil
 	})
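
One wrinkle worth noting: this appears to be the only call site in the section whose return value is discarded, which reads as a deliberate best-effort wait. If the result ever needed surfacing, a hedged sketch of the usual form would be:

if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, time.Minute, true, cond); err != nil {
	framework.Logf("ssh tunnels not confirmed within a minute: %v", err) // log, or fail, as appropriate
}

where cond stands for the closure above.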


@@ -51,7 +51,7 @@ func WaitForTotalHealthy(ctx context.Context, c clientset.Interface, timeout tim
 	var notReady []v1.Node
 	var missingPodsPerNode map[string][]string
-	err := wait.PollImmediateWithContext(ctx, poll, timeout, func(ctx context.Context) (bool, error) {
+	err := wait.PollUntilContextTimeout(ctx, poll, timeout, true, func(ctx context.Context) (bool, error) {
 		notReady = nil
 		// It should be OK to list unschedulable Nodes here.
 		nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{ResourceVersion: "0"})
@@ -192,7 +192,7 @@ func CheckReady(ctx context.Context, c clientset.Interface, size int, timeout ti
 func waitListSchedulableNodes(ctx context.Context, c clientset.Interface) (*v1.NodeList, error) {
 	var nodes *v1.NodeList
 	var err error
-	if wait.PollImmediateWithContext(ctx, poll, singleCallTimeout, func(ctx context.Context) (bool, error) {
+	if wait.PollUntilContextTimeout(ctx, poll, singleCallTimeout, true, func(ctx context.Context) (bool, error) {
 		nodes, err = c.CoreV1().Nodes().List(ctx, metav1.ListOptions{FieldSelector: fields.Set{
 			"spec.unschedulable": "false",
 		}.AsSelector().String()})


@@ -251,7 +251,7 @@ func WaitForFirewallRule(ctx context.Context, gceCloud *gcecloud.Cloud, fwName s
 		return true, nil
 	}
-	if err := wait.PollImmediateWithContext(ctx, 5*time.Second, timeout, condition); err != nil {
+	if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, timeout, true, condition); err != nil {
 		return nil, fmt.Errorf("error waiting for firewall %v exist=%v", fwName, exist)
 	}
 	return fw, nil


@@ -297,7 +297,7 @@ func DeletePVCandValidatePVGroup(ctx context.Context, c clientset.Interface, tim
 func createPV(ctx context.Context, c clientset.Interface, timeouts *framework.TimeoutContext, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
 	var resultPV *v1.PersistentVolume
 	var lastCreateErr error
-	err := wait.PollImmediateWithContext(ctx, 29*time.Second, timeouts.PVCreate, func(ctx context.Context) (done bool, err error) {
+	err := wait.PollUntilContextTimeout(ctx, 29*time.Second, timeouts.PVCreate, true, func(ctx context.Context) (done bool, err error) {
 		resultPV, lastCreateErr = c.CoreV1().PersistentVolumes().Create(ctx, pv, metav1.CreateOptions{})
 		if lastCreateErr != nil {
 			// If we hit a quota problem, we are not done and should retry again. This happens to be the quota failure string for GCP.
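
createPV records lastCreateErr inside the closure so a timeout can be reported with its underlying cause rather than as a bare deadline error. A sketch of that capture pattern under assumed names (doCreate is a hypothetical helper):

var lastErr error
err := wait.PollUntilContextTimeout(ctx, 5*time.Second, time.Minute, true, func(ctx context.Context) (bool, error) {
	lastErr = doCreate(ctx) // hypothetical call; returning false, nil retries it
	return lastErr == nil, nil
})
if wait.Interrupted(err) {
	return fmt.Errorf("timed out creating object, last attempt failed with: %w", lastErr)
}

Returning false, nil from the closure keeps polling; returning the error instead would abort the whole wait on the first transient failure.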


@@ -52,7 +52,7 @@ func WaitForReplicaSetTargetAvailableReplicas(ctx context.Context, c clientset.I
 // with given timeout.
 func WaitForReplicaSetTargetAvailableReplicasWithTimeout(ctx context.Context, c clientset.Interface, replicaSet *appsv1.ReplicaSet, targetReplicaNum int32, timeout time.Duration) error {
 	desiredGeneration := replicaSet.Generation
-	err := wait.PollImmediateWithContext(ctx, framework.Poll, timeout, func(ctx context.Context) (bool, error) {
+	err := wait.PollUntilContextTimeout(ctx, framework.Poll, timeout, true, func(ctx context.Context) (bool, error) {
 		rs, err := c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(ctx, replicaSet.Name, metav1.GetOptions{})
 		if err != nil {
 			return false, err


@@ -175,7 +175,7 @@ func deleteObjectAndWaitForGC(ctx context.Context, c clientset.Interface, rtObje
 // waitForPodsGone waits until there are no pods left in the PodStore.
 func waitForPodsGone(ctx context.Context, ps *testutils.PodStore, interval, timeout time.Duration) error {
 	var pods []*v1.Pod
-	err := wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) {
+	err := wait.PollUntilContextTimeout(ctx, interval, timeout, true, func(ctx context.Context) (bool, error) {
 		if pods = ps.List(); len(pods) == 0 {
 			return true, nil
 		}
@@ -197,7 +197,7 @@ func waitForPodsGone(ctx context.Context, ps *testutils.PodStore, interval, time
 // when the pod is inactvie.
 func waitForPodsInactive(ctx context.Context, ps *testutils.PodStore, interval, timeout time.Duration) error {
 	var activePods []*v1.Pod
-	err := wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) {
+	err := wait.PollUntilContextTimeout(ctx, interval, timeout, true, func(ctx context.Context) (bool, error) {
 		pods := ps.List()
 		activePods = e2epod.FilterActivePods(pods)
 		if len(activePods) != 0 {


@@ -328,7 +328,7 @@ func (j *TestJig) GetEndpointNodeNames(ctx context.Context) (sets.String, error)
 // WaitForEndpointOnNode waits for a service endpoint on the given node.
 func (j *TestJig) WaitForEndpointOnNode(ctx context.Context, nodeName string) error {
-	return wait.PollImmediateWithContext(ctx, framework.Poll, KubeProxyLagTimeout, func(ctx context.Context) (bool, error) {
+	return wait.PollUntilContextTimeout(ctx, framework.Poll, KubeProxyLagTimeout, true, func(ctx context.Context) (bool, error) {
 		endpoints, err := j.Client.CoreV1().Endpoints(j.Namespace).Get(ctx, j.Name, metav1.GetOptions{})
 		if err != nil {
 			framework.Logf("Get endpoints for service %s/%s failed (%s)", j.Namespace, j.Name, err)
@@ -627,7 +627,7 @@ func (j *TestJig) waitForCondition(ctx context.Context, timeout time.Duration, m
 		}
 		return false, nil
 	}
-	if err := wait.PollImmediateWithContext(ctx, framework.Poll, timeout, pollFunc); err != nil {
+	if err := wait.PollUntilContextTimeout(ctx, framework.Poll, timeout, true, pollFunc); err != nil {
 		return nil, fmt.Errorf("timed out waiting for service %q to %s: %w", j.Name, message, err)
 	}
 	return service, nil
@@ -910,7 +910,7 @@ func testEndpointReachability(ctx context.Context, endpoint string, port int32,
 		return fmt.Errorf("service reachability check is not supported for %v", protocol)
 	}
-	err := wait.PollImmediateWithContext(ctx, 1*time.Second, ServiceReachabilityShortPollTimeout, func(ctx context.Context) (bool, error) {
+	err := wait.PollUntilContextTimeout(ctx, 1*time.Second, ServiceReachabilityShortPollTimeout, true, func(ctx context.Context) (bool, error) {
 		stdout, err := e2epodoutput.RunHostCmd(execPod.Namespace, execPod.Name, cmd)
 		if err != nil {
 			framework.Logf("Service reachability failing with error: %v\nRetrying...", err)
@@ -1006,7 +1006,7 @@ func (j *TestJig) checkExternalServiceReachability(ctx context.Context, svc *v1.
 	svcName := fmt.Sprintf("%s.%s.svc.%s", svc.Name, svc.Namespace, framework.TestContext.ClusterDNSDomain)
 	// Service must resolve to IP
 	cmd := fmt.Sprintf("nslookup %s", svcName)
-	return wait.PollImmediateWithContext(ctx, framework.Poll, ServiceReachabilityShortPollTimeout, func(ctx context.Context) (done bool, err error) {
+	return wait.PollUntilContextTimeout(ctx, framework.Poll, ServiceReachabilityShortPollTimeout, true, func(ctx context.Context) (done bool, err error) {
 		_, stderr, err := e2epodoutput.RunHostCmdWithFullOutput(pod.Namespace, pod.Name, cmd)
 		// NOTE(claudiub): nslookup may return 0 on Windows, even though the DNS name was not found. In this case,
 		// we can check stderr for the error.


@@ -44,7 +44,7 @@ func TestReachableHTTPWithRetriableErrorCodes(ctx context.Context, host string,
 		return false, nil // caller can retry
 	}
-	if err := wait.PollImmediateWithContext(ctx, framework.Poll, timeout, pollfn); err != nil {
+	if err := wait.PollUntilContextTimeout(ctx, framework.Poll, timeout, true, pollfn); err != nil {
 		if wait.Interrupted(err) {
 			framework.Failf("Could not reach HTTP service through %v:%v after %v", host, port, timeout)
 		} else {


@@ -38,7 +38,7 @@ func WaitForServiceDeletedWithFinalizer(ctx context.Context, cs clientset.Interf
 	}
 	ginkgo.By("Wait for service to disappear")
-	if pollErr := wait.PollImmediateWithContext(ctx, LoadBalancerPollInterval, GetServiceLoadBalancerCreationTimeout(ctx, cs), func(ctx context.Context) (bool, error) {
+	if pollErr := wait.PollUntilContextTimeout(ctx, LoadBalancerPollInterval, GetServiceLoadBalancerCreationTimeout(ctx, cs), true, func(ctx context.Context) (bool, error) {
 		svc, err := cs.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{})
 		if err != nil {
 			if apierrors.IsNotFound(err) {
@@ -58,7 +58,7 @@ func WaitForServiceDeletedWithFinalizer(ctx context.Context, cs clientset.Interf
 // don't have a finalizer.
 func WaitForServiceUpdatedWithFinalizer(ctx context.Context, cs clientset.Interface, namespace, name string, hasFinalizer bool) {
 	ginkgo.By(fmt.Sprintf("Wait for service to hasFinalizer=%t", hasFinalizer))
-	if pollErr := wait.PollImmediateWithContext(ctx, LoadBalancerPollInterval, GetServiceLoadBalancerCreationTimeout(ctx, cs), func(ctx context.Context) (bool, error) {
+	if pollErr := wait.PollUntilContextTimeout(ctx, LoadBalancerPollInterval, GetServiceLoadBalancerCreationTimeout(ctx, cs), true, func(ctx context.Context) (bool, error) {
 		svc, err := cs.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{})
 		if err != nil {
 			return false, err
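
The disappearance wait above inverts the usual polarity: NotFound is the success case. A minimal sketch of that idiom, assuming only client-go and apimachinery:

// Poll until the Service is gone; any error other than NotFound aborts.
err := wait.PollUntilContextTimeout(ctx, 2*time.Second, time.Minute, true, func(ctx context.Context) (bool, error) {
	_, err := cs.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		return true, nil // deleted
	}
	return false, err // err == nil means still present, so keep polling
})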


@@ -422,7 +422,7 @@ func nodeAddresses(nodelist *v1.NodeList, addrType v1.NodeAddressType) []string
 func waitListSchedulableNodes(ctx context.Context, c clientset.Interface) (*v1.NodeList, error) {
 	var nodes *v1.NodeList
 	var err error
-	if wait.PollImmediateWithContext(ctx, pollNodeInterval, singleCallTimeout, func(ctx context.Context) (bool, error) {
+	if wait.PollUntilContextTimeout(ctx, pollNodeInterval, singleCallTimeout, true, func(ctx context.Context) (bool, error) {
 		nodes, err = c.CoreV1().Nodes().List(ctx, metav1.ListOptions{FieldSelector: fields.Set{
 			"spec.unschedulable": "false",
 		}.AsSelector().String()})


@@ -96,7 +96,7 @@ func DeleteAllStatefulSets(ctx context.Context, c clientset.Interface, ns string
 	// pvs are global, so we need to wait for the exact ones bound to the statefulset pvcs.
 	pvNames := sets.NewString()
 	// TODO: Don't assume all pvcs in the ns belong to a statefulset
-	pvcPollErr := wait.PollImmediateWithContext(ctx, StatefulSetPoll, StatefulSetTimeout, func(ctx context.Context) (bool, error) {
+	pvcPollErr := wait.PollUntilContextTimeout(ctx, StatefulSetPoll, StatefulSetTimeout, true, func(ctx context.Context) (bool, error) {
 		pvcList, err := c.CoreV1().PersistentVolumeClaims(ns).List(ctx, metav1.ListOptions{LabelSelector: labels.Everything().String()})
 		if err != nil {
 			framework.Logf("WARNING: Failed to list pvcs, retrying %v", err)
@@ -116,7 +116,7 @@ func DeleteAllStatefulSets(ctx context.Context, c clientset.Interface, ns string
 		errList = append(errList, fmt.Sprintf("Timeout waiting for pvc deletion."))
 	}
-	pollErr := wait.PollImmediateWithContext(ctx, StatefulSetPoll, StatefulSetTimeout, func(ctx context.Context) (bool, error) {
+	pollErr := wait.PollUntilContextTimeout(ctx, StatefulSetPoll, StatefulSetTimeout, true, func(ctx context.Context) (bool, error) {
 		pvList, err := c.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{LabelSelector: labels.Everything().String()})
 		if err != nil {
 			framework.Logf("WARNING: Failed to list pvs, retrying %v", err)
@@ -151,7 +151,7 @@ func Scale(ctx context.Context, c clientset.Interface, ss *appsv1.StatefulSet, c
 	ss = update(ctx, c, ns, name, count)
 	var statefulPodList *v1.PodList
-	pollErr := wait.PollImmediateWithContext(ctx, StatefulSetPoll, StatefulSetTimeout, func(ctx context.Context) (bool, error) {
+	pollErr := wait.PollUntilContextTimeout(ctx, StatefulSetPoll, StatefulSetTimeout, true, func(ctx context.Context) (bool, error) {
 		statefulPodList = GetPodList(ctx, c, ss)
 		if int32(len(statefulPodList.Items)) == count {
 			return true, nil


@@ -32,7 +32,7 @@ import (
 // WaitForRunning waits for numPodsRunning in ss to be Running and for the first
 // numPodsReady ordinals to be Ready.
 func WaitForRunning(ctx context.Context, c clientset.Interface, numPodsRunning, numPodsReady int32, ss *appsv1.StatefulSet) {
-	pollErr := wait.PollImmediateWithContext(ctx, StatefulSetPoll, StatefulSetTimeout,
+	pollErr := wait.PollUntilContextTimeout(ctx, StatefulSetPoll, StatefulSetTimeout, true,
 		func(ctx context.Context) (bool, error) {
 			podList := GetPodList(ctx, c, ss)
 			SortStatefulPods(podList)
@@ -61,7 +61,7 @@ func WaitForRunning(ctx context.Context, c clientset.Interface, numPodsRunning,
 // WaitForState periodically polls for the ss and its pods until the until function returns either true or an error
 func WaitForState(ctx context.Context, c clientset.Interface, ss *appsv1.StatefulSet, until func(*appsv1.StatefulSet, *v1.PodList) (bool, error)) {
-	pollErr := wait.PollImmediateWithContext(ctx, StatefulSetPoll, StatefulSetTimeout,
+	pollErr := wait.PollUntilContextTimeout(ctx, StatefulSetPoll, StatefulSetTimeout, true,
 		func(ctx context.Context) (bool, error) {
 			ssGet, err := c.AppsV1().StatefulSets(ss.Namespace).Get(ctx, ss.Name, metav1.GetOptions{})
 			if err != nil {
@@ -101,7 +101,7 @@ func WaitForStatusReadyReplicas(ctx context.Context, c clientset.Interface, ss *
 	framework.Logf("Waiting for statefulset status.readyReplicas updated to %d", expectedReplicas)
 	ns, name := ss.Namespace, ss.Name
-	pollErr := wait.PollImmediateWithContext(ctx, StatefulSetPoll, StatefulSetTimeout,
+	pollErr := wait.PollUntilContextTimeout(ctx, StatefulSetPoll, StatefulSetTimeout, true,
 		func(ctx context.Context) (bool, error) {
 			ssGet, err := c.AppsV1().StatefulSets(ns).Get(ctx, name, metav1.GetOptions{})
 			if err != nil {
@@ -126,7 +126,7 @@ func WaitForStatusAvailableReplicas(ctx context.Context, c clientset.Interface,
 	framework.Logf("Waiting for statefulset status.AvailableReplicas updated to %d", expectedReplicas)
 	ns, name := ss.Namespace, ss.Name
-	pollErr := wait.PollImmediateWithContext(ctx, StatefulSetPoll, StatefulSetTimeout,
+	pollErr := wait.PollUntilContextTimeout(ctx, StatefulSetPoll, StatefulSetTimeout, true,
 		func(ctx context.Context) (bool, error) {
 			ssGet, err := c.AppsV1().StatefulSets(ns).Get(ctx, name, metav1.GetOptions{})
 			if err != nil {
@@ -151,7 +151,7 @@ func WaitForStatusReplicas(ctx context.Context, c clientset.Interface, ss *appsv
 	framework.Logf("Waiting for statefulset status.replicas updated to %d", expectedReplicas)
 	ns, name := ss.Namespace, ss.Name
-	pollErr := wait.PollImmediateWithContext(ctx, StatefulSetPoll, StatefulSetTimeout,
+	pollErr := wait.PollUntilContextTimeout(ctx, StatefulSetPoll, StatefulSetTimeout, true,
 		func(ctx context.Context) (bool, error) {
 			ssGet, err := c.AppsV1().StatefulSets(ns).Get(ctx, name, metav1.GetOptions{})
 			if err != nil {


@@ -352,7 +352,7 @@ func CreateTestingNS(ctx context.Context, baseName string, c clientset.Interface
 	}
 	// Be robust about making the namespace creation call.
 	var got *v1.Namespace
-	if err := wait.PollImmediateWithContext(ctx, Poll, 30*time.Second, func(ctx context.Context) (bool, error) {
+	if err := wait.PollUntilContextTimeout(ctx, Poll, 30*time.Second, true, func(ctx context.Context) (bool, error) {
 		var err error
 		got, err = c.CoreV1().Namespaces().Create(ctx, namespaceObj, metav1.CreateOptions{})
 		if err != nil {


@@ -238,7 +238,7 @@ func getVolumeHandle(ctx context.Context, cs clientset.Interface, claimName stri
 // WaitForVolumeAttachmentTerminated waits for the VolumeAttachment with the passed in attachmentName to be terminated.
 func WaitForVolumeAttachmentTerminated(ctx context.Context, attachmentName string, cs clientset.Interface, timeout time.Duration) error {
-	waitErr := wait.PollImmediateWithContext(ctx, 10*time.Second, timeout, func(ctx context.Context) (bool, error) {
+	waitErr := wait.PollUntilContextTimeout(ctx, 10*time.Second, timeout, true, func(ctx context.Context) (bool, error) {
 		_, err := cs.StorageV1().VolumeAttachments().Get(ctx, attachmentName, metav1.GetOptions{})
 		if err != nil {
 			// if the volumeattachment object is not found, it means it has been terminated.


@@ -217,7 +217,7 @@ var _ = common.SIGDescribe("Events", func() {
 		ginkgo.By("check that the list of events matches the requested quantity")
-		err = wait.PollImmediateWithContext(ctx, eventRetryPeriod, eventRetryTimeout, checkEventListQuantity(f, "testevent-set=true", 0))
+		err = wait.PollUntilContextTimeout(ctx, eventRetryPeriod, eventRetryTimeout, true, checkEventListQuantity(f, "testevent-set=true", 0))
 		framework.ExpectNoError(err, "failed to count required events")
 	})


@@ -175,7 +175,7 @@ var _ = common.SIGDescribe("Conntrack", func() {
 		// 30 seconds by default.
 		// Based on the above check if the pod receives the traffic.
 		ginkgo.By("checking client pod connected to the backend 1 on Node IP " + serverNodeInfo.nodeIP)
-		if err := wait.PollImmediateWithContext(ctx, 5*time.Second, time.Minute, logContainsFn(podBackend1, podClient)); err != nil {
+		if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, time.Minute, true, logContainsFn(podBackend1, podClient)); err != nil {
 			logs, err = e2epod.GetPodLogs(ctx, cs, ns, podClient, podClient)
 			framework.ExpectNoError(err)
 			framework.Logf("Pod client logs: %s", logs)
@@ -199,7 +199,7 @@ var _ = common.SIGDescribe("Conntrack", func() {
 		// Check that the second pod keeps receiving traffic
 		// UDP conntrack entries timeout is 30 sec by default
 		ginkgo.By("checking client pod connected to the backend 2 on Node IP " + serverNodeInfo.nodeIP)
-		if err := wait.PollImmediateWithContext(ctx, 5*time.Second, time.Minute, logContainsFn(podBackend2, podClient)); err != nil {
+		if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, time.Minute, true, logContainsFn(podBackend2, podClient)); err != nil {
 			logs, err = e2epod.GetPodLogs(ctx, cs, ns, podClient, podClient)
 			framework.ExpectNoError(err)
 			framework.Logf("Pod client logs: %s", logs)
@@ -251,7 +251,7 @@ var _ = common.SIGDescribe("Conntrack", func() {
 		// 30 seconds by default.
 		// Based on the above check if the pod receives the traffic.
 		ginkgo.By("checking client pod connected to the backend 1 on Node IP " + serverNodeInfo.nodeIP)
-		if err := wait.PollImmediateWithContext(ctx, 5*time.Second, time.Minute, logContainsFn(podBackend1, podClient)); err != nil {
+		if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, time.Minute, true, logContainsFn(podBackend1, podClient)); err != nil {
 			logs, err = e2epod.GetPodLogs(ctx, cs, ns, podClient, podClient)
 			framework.ExpectNoError(err)
 			framework.Logf("Pod client logs: %s", logs)
@@ -275,7 +275,7 @@ var _ = common.SIGDescribe("Conntrack", func() {
 		// Check that the second pod keeps receiving traffic
 		// UDP conntrack entries timeout is 30 sec by default
 		ginkgo.By("checking client pod connected to the backend 2 on Node IP " + serverNodeInfo.nodeIP)
-		if err := wait.PollImmediateWithContext(ctx, 5*time.Second, time.Minute, logContainsFn(podBackend2, podClient)); err != nil {
+		if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, time.Minute, true, logContainsFn(podBackend2, podClient)); err != nil {
 			logs, err = e2epod.GetPodLogs(ctx, cs, ns, podClient, podClient)
 			framework.ExpectNoError(err)
 			framework.Logf("Pod client logs: %s", logs)
@@ -328,7 +328,7 @@ var _ = common.SIGDescribe("Conntrack", func() {
 		// 30 seconds by default.
 		// Based on the above check if the pod receives the traffic.
 		ginkgo.By("checking client pod connected to the backend 1 on Node IP " + serverNodeInfo.nodeIP)
-		if err := wait.PollImmediateWithContext(ctx, 5*time.Second, time.Minute, logContainsFn(podBackend1, podClient)); err != nil {
+		if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, time.Minute, true, logContainsFn(podBackend1, podClient)); err != nil {
 			logs, err = e2epod.GetPodLogs(ctx, cs, ns, podClient, podClient)
 			framework.ExpectNoError(err)
 			framework.Logf("Pod client logs: %s", logs)
@@ -352,7 +352,7 @@ var _ = common.SIGDescribe("Conntrack", func() {
 		// Check that the second pod keeps receiving traffic
 		// UDP conntrack entries timeout is 30 sec by default
 		ginkgo.By("checking client pod connected to the backend 2 on Node IP " + serverNodeInfo.nodeIP)
-		if err := wait.PollImmediateWithContext(ctx, 5*time.Second, time.Minute, logContainsFn(podBackend2, podClient)); err != nil {
+		if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, time.Minute, true, logContainsFn(podBackend2, podClient)); err != nil {
 			logs, err = e2epod.GetPodLogs(ctx, cs, ns, podClient, podClient)
 			framework.ExpectNoError(err)
 			framework.Logf("Pod client logs: %s", logs)
@@ -424,7 +424,7 @@ var _ = common.SIGDescribe("Conntrack", func() {
 		// 30 seconds by default.
 		// Based on the above check if the pod receives the traffic.
 		ginkgo.By("checking client pod connected to the backend on Node IP " + serverNodeInfo.nodeIP)
-		if err := wait.PollImmediateWithContext(ctx, 5*time.Second, time.Minute, logContainsFn(podBackend1, podClient)); err != nil {
+		if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, time.Minute, true, logContainsFn(podBackend1, podClient)); err != nil {
 			logs, err = e2epod.GetPodLogs(ctx, cs, ns, podClient, podClient)
 			framework.ExpectNoError(err)
 			framework.Logf("Pod client logs: %s", logs)
@@ -541,7 +541,7 @@ var _ = common.SIGDescribe("Conntrack", func() {
 		// so the client will receive an unexpected TCP connection and RST the connection
 		// the server will log ERROR if that happens
 		ginkgo.By("checking client pod does not RST the TCP connection because it receives an INVALID packet")
-		if err := wait.PollImmediateWithContext(ctx, 5*time.Second, time.Minute, logContainsFn("ERROR", "boom-server")); err == nil {
+		if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, time.Minute, true, logContainsFn("ERROR", "boom-server")); err == nil {
 			logs, err := e2epod.GetPodLogs(ctx, cs, ns, "boom-server", "boom-server")
 			framework.ExpectNoError(err)
 			framework.Logf("boom-server pod logs: %s", logs)


@@ -456,7 +456,7 @@ func assertFilesExist(ctx context.Context, fileNames []string, fileDir string, p
 func assertFilesContain(ctx context.Context, fileNames []string, fileDir string, pod *v1.Pod, client clientset.Interface, check bool, expected string) {
 	var failed []string
-	framework.ExpectNoError(wait.PollImmediateWithContext(ctx, time.Second*5, time.Second*600, func(ctx context.Context) (bool, error) {
+	framework.ExpectNoError(wait.PollUntilContextTimeout(ctx, time.Second*5, time.Second*600, true, func(ctx context.Context) (bool, error) {
 		failed = []string{}
 		ctx, cancel := context.WithTimeout(ctx, framework.SingleCallTimeout)
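
assertFilesContain shadows ctx with a per-attempt deadline, so a single stuck lookup cannot consume the whole ten-minute budget. A sketch of that shape; checkOnce is a hypothetical helper returning (bool, error):

err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 10*time.Minute, true, func(ctx context.Context) (bool, error) {
	attemptCtx, cancel := context.WithTimeout(ctx, 30*time.Second) // bound each attempt separately
	defer cancel()
	return checkOnce(attemptCtx)
})

Because the inner context derives from the poll's ctx, cancelling the outer context still aborts an in-flight attempt immediately.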


@@ -740,7 +740,7 @@ var _ = common.SIGDescribe("EndpointSlice", func() {
 // the only caller of this function.
 func expectEndpointsAndSlices(ctx context.Context, cs clientset.Interface, ns string, svc *v1.Service, pods []*v1.Pod, numSubsets, numSlices int, namedPort bool) {
 	endpointSlices := []discoveryv1.EndpointSlice{}
-	if err := wait.PollImmediateWithContext(ctx, 5*time.Second, 2*time.Minute, func(ctx context.Context) (bool, error) {
+	if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 2*time.Minute, true, func(ctx context.Context) (bool, error) {
 		endpointSlicesFound, hasMatchingSlices := hasMatchingEndpointSlices(ctx, cs, ns, svc.Name, len(pods), numSlices)
 		if !hasMatchingSlices {
 			return false, nil


@@ -171,7 +171,7 @@ var _ = common.SIGDescribe("ClusterDns [Feature:Example]", func() {
 func waitForServiceResponding(ctx context.Context, c clientset.Interface, ns, name string) error {
 	ginkgo.By(fmt.Sprintf("trying to dial the service %s.%s via the proxy", ns, name))
-	return wait.PollImmediateWithContext(ctx, framework.Poll, RespondingTimeout, func(ctx context.Context) (done bool, err error) {
+	return wait.PollUntilContextTimeout(ctx, framework.Poll, RespondingTimeout, true, func(ctx context.Context) (done bool, err error) {
 		proxyRequest, errProxy := e2eservice.GetServicesProxyRequest(c, c.CoreV1().RESTClient().Get())
 		if errProxy != nil {
 			framework.Logf("Failed to get services proxy request: %v:", errProxy)


@@ -1674,7 +1674,7 @@ func testRollingUpdateLBConnectivityDisruption(ctx context.Context, f *framework
 	ginkgo.By("Checking that daemon pods launch on every schedulable node of the cluster")
 	creationTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(ctx, cs)
-	err = wait.PollImmediateWithContext(ctx, framework.Poll, creationTimeout, e2edaemonset.CheckDaemonPodOnNodes(f, ds, nodeNames))
+	err = wait.PollUntilContextTimeout(ctx, framework.Poll, creationTimeout, true, e2edaemonset.CheckDaemonPodOnNodes(f, ds, nodeNames))
 	framework.ExpectNoError(err, "error waiting for daemon pods to start")
 	err = e2edaemonset.CheckDaemonStatus(ctx, f, name)
 	framework.ExpectNoError(err)


@@ -1822,7 +1822,7 @@ var _ = common.SIGDescribe("Services", func() {
 		execPodName := execPod.Name
 		cmd := fmt.Sprintf("curl -q -s --connect-timeout 2 http://%s:%d/", svcName, port)
 		var stdout string
-		if pollErr := wait.PollImmediateWithContext(ctx, framework.Poll, e2eservice.KubeProxyLagTimeout, func(ctx context.Context) (bool, error) {
+		if pollErr := wait.PollUntilContextTimeout(ctx, framework.Poll, e2eservice.KubeProxyLagTimeout, true, func(ctx context.Context) (bool, error) {
 			var err error
 			stdout, err = e2eoutput.RunHostCmd(f.Namespace.Name, execPodName, cmd)
 			if err != nil {


@@ -233,7 +233,7 @@ func testEndpointReachability(ctx context.Context, endpoint string, port int32,
 		return fmt.Errorf("service reachability check is not supported for %v", protocol)
 	}
-	err := wait.PollImmediateWithContext(ctx, framework.Poll, timeout, func(ctx context.Context) (bool, error) {
+	err := wait.PollUntilContextTimeout(ctx, framework.Poll, timeout, true, func(ctx context.Context) (bool, error) {
 		stdout, err := e2eoutput.RunHostCmd(execPod.Namespace, execPod.Name, cmd)
 		if err != nil {
 			framework.Logf("Service reachability failing with error: %v\nRetrying...", err)


@@ -134,7 +134,7 @@ func getUniqueVolumeName(pod *v1.Pod, driverName string) string {
 }
 func waitForVolumesNotInUse(ctx context.Context, client clientset.Interface, nodeName, volumeName string) error {
-	waitErr := wait.PollImmediateWithContext(ctx, 10*time.Second, 60*time.Second, func(ctx context.Context) (bool, error) {
+	waitErr := wait.PollUntilContextTimeout(ctx, 10*time.Second, 60*time.Second, true, func(ctx context.Context) (bool, error) {
 		node, err := client.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
 		if err != nil {
 			return false, fmt.Errorf("error fetching node %s with %v", nodeName, err)
@@ -154,7 +154,7 @@ func waitForVolumesNotInUse(ctx context.Context, client clientset.Interface, nod
 }
 func waitForVolumesAttached(ctx context.Context, client clientset.Interface, nodeName, volumeName string) error {
-	waitErr := wait.PollImmediateWithContext(ctx, 2*time.Second, 2*time.Minute, func(ctx context.Context) (bool, error) {
+	waitErr := wait.PollUntilContextTimeout(ctx, 2*time.Second, 2*time.Minute, true, func(ctx context.Context) (bool, error) {
 		node, err := client.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
 		if err != nil {
 			return false, fmt.Errorf("error fetching node %s with %v", nodeName, err)
@@ -174,7 +174,7 @@ func waitForVolumesAttached(ctx context.Context, client clientset.Interface, nod
 }
 func waitForVolumesInUse(ctx context.Context, client clientset.Interface, nodeName, volumeName string) error {
-	waitErr := wait.PollImmediateWithContext(ctx, 10*time.Second, 60*time.Second, func(ctx context.Context) (bool, error) {
+	waitErr := wait.PollUntilContextTimeout(ctx, 10*time.Second, 60*time.Second, true, func(ctx context.Context) (bool, error) {
 		node, err := client.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
 		if err != nil {
 			return false, fmt.Errorf("error fetching node %s with %v", nodeName, err)


@@ -139,7 +139,7 @@ func UpdatePVSize(ctx context.Context, pv *v1.PersistentVolume, size resource.Qu
 	pvToUpdate := pv.DeepCopy()
 	var lastError error
-	waitErr := wait.PollImmediateWithContext(ctx, 5*time.Second, csiResizeWaitPeriod, func(ctx context.Context) (bool, error) {
+	waitErr := wait.PollUntilContextTimeout(ctx, 5*time.Second, csiResizeWaitPeriod, true, func(ctx context.Context) (bool, error) {
 		var err error
 		pvToUpdate, err = c.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
 		if err != nil {


@@ -615,7 +615,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local", func() {
 		ginkgo.By("Waiting for all pods to complete successfully")
 		const completeTimeout = 5 * time.Minute
-		waitErr := wait.PollImmediateWithContext(ctx, time.Second, completeTimeout, func(ctx context.Context) (done bool, err error) {
+		waitErr := wait.PollUntilContextTimeout(ctx, time.Second, completeTimeout, true, func(ctx context.Context) (done bool, err error) {
 			podsList, err := config.client.CoreV1().Pods(config.ns).List(ctx, metav1.ListOptions{})
 			if err != nil {
 				return false, err


@@ -260,7 +260,7 @@ func testZonalFailover(ctx context.Context, c clientset.Interface, ns string) {
 	} else {
 		otherZone = cloudZones[0]
 	}
-	waitErr := wait.PollImmediateWithContext(ctx, framework.Poll, statefulSetReadyTimeout, func(ctx context.Context) (bool, error) {
+	waitErr := wait.PollUntilContextTimeout(ctx, framework.Poll, statefulSetReadyTimeout, true, func(ctx context.Context) (bool, error) {
 		framework.Logf("Checking whether new pod is scheduled in zone %q", otherZone)
 		pod := getPod(ctx, c, ns, regionalPDLabels)
 		node, err := c.CoreV1().Nodes().Get(ctx, pod.Spec.NodeName, metav1.GetOptions{})


@@ -326,7 +326,7 @@ func ExpandPVCSize(ctx context.Context, origPVC *v1.PersistentVolumeClaim, size
 // WaitForResizingCondition waits for the pvc condition to be PersistentVolumeClaimResizing
 func WaitForResizingCondition(ctx context.Context, pvc *v1.PersistentVolumeClaim, c clientset.Interface, duration time.Duration) error {
-	waitErr := wait.PollImmediateWithContext(ctx, resizePollInterval, duration, func(ctx context.Context) (bool, error) {
+	waitErr := wait.PollUntilContextTimeout(ctx, resizePollInterval, duration, true, func(ctx context.Context) (bool, error) {
 		var err error
 		updatedPVC, err := c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{})
@@ -351,7 +351,7 @@ func WaitForResizingCondition(ctx context.Context, pvc *v1.PersistentVolumeClaim
 // WaitForControllerVolumeResize waits for the controller resize to be finished
 func WaitForControllerVolumeResize(ctx context.Context, pvc *v1.PersistentVolumeClaim, c clientset.Interface, timeout time.Duration) error {
 	pvName := pvc.Spec.VolumeName
-	waitErr := wait.PollImmediateWithContext(ctx, resizePollInterval, timeout, func(ctx context.Context) (bool, error) {
+	waitErr := wait.PollUntilContextTimeout(ctx, resizePollInterval, timeout, true, func(ctx context.Context) (bool, error) {
 		pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
 		pv, err := c.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
@@ -376,7 +376,7 @@ func WaitForControllerVolumeResize(ctx context.Context, pvc *v1.PersistentVolume
 // WaitForPendingFSResizeCondition waits for pvc to have resize condition
 func WaitForPendingFSResizeCondition(ctx context.Context, pvc *v1.PersistentVolumeClaim, c clientset.Interface) (*v1.PersistentVolumeClaim, error) {
 	var updatedPVC *v1.PersistentVolumeClaim
-	waitErr := wait.PollImmediateWithContext(ctx, resizePollInterval, pvcConditionSyncPeriod, func(ctx context.Context) (bool, error) {
+	waitErr := wait.PollUntilContextTimeout(ctx, resizePollInterval, pvcConditionSyncPeriod, true, func(ctx context.Context) (bool, error) {
 		var err error
 		updatedPVC, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{})
@@ -404,7 +404,7 @@ func WaitForPendingFSResizeCondition(ctx context.Context, pvc *v1.PersistentVolu
 // WaitForFSResize waits for the filesystem in the pv to be resized
 func WaitForFSResize(ctx context.Context, pvc *v1.PersistentVolumeClaim, c clientset.Interface) (*v1.PersistentVolumeClaim, error) {
 	var updatedPVC *v1.PersistentVolumeClaim
-	waitErr := wait.PollImmediateWithContext(ctx, resizePollInterval, totalResizeWaitPeriod, func(ctx context.Context) (bool, error) {
+	waitErr := wait.PollUntilContextTimeout(ctx, resizePollInterval, totalResizeWaitPeriod, true, func(ctx context.Context) (bool, error) {
 		var err error
 		updatedPVC, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{})


@@ -93,7 +93,7 @@ func (t *CassandraUpgradeTest) Setup(ctx context.Context, f *framework.Framework
 	cassandraKubectlCreate(ns, "tester.yaml")
 	ginkgo.By("Getting the ingress IPs from the services")
-	err := wait.PollImmediateWithContext(ctx, statefulsetPoll, statefulsetTimeout, func(ctx context.Context) (bool, error) {
+	err := wait.PollUntilContextTimeout(ctx, statefulsetPoll, statefulsetTimeout, true, func(ctx context.Context) (bool, error) {
 		if t.ip = t.getServiceIP(ctx, f, ns, "test-server"); t.ip == "" {
 			return false, nil
 		}


@@ -88,7 +88,7 @@ func (t *EtcdUpgradeTest) Setup(ctx context.Context, f *framework.Framework) {
 	kubectlCreate(ns, "tester.yaml")
 	ginkgo.By("Getting the ingress IPs from the services")
-	err := wait.PollImmediateWithContext(ctx, statefulsetPoll, statefulsetTimeout, func(ctx context.Context) (bool, error) {
+	err := wait.PollUntilContextTimeout(ctx, statefulsetPoll, statefulsetTimeout, true, func(ctx context.Context) (bool, error) {
 		if t.ip = t.getServiceIP(ctx, f, ns, "test-server"); t.ip == "" {
 			return false, nil
 		}


@@ -103,7 +103,7 @@ func (t *MySQLUpgradeTest) Setup(ctx context.Context, f *framework.Framework) {
 	mysqlKubectlCreate(ns, "tester.yaml")
 	ginkgo.By("Getting the ingress IPs from the test-service")
-	err := wait.PollImmediateWithContext(ctx, statefulsetPoll, statefulsetTimeout, func(ctx context.Context) (bool, error) {
+	err := wait.PollUntilContextTimeout(ctx, statefulsetPoll, statefulsetTimeout, true, func(ctx context.Context) (bool, error) {
 		if t.ip = t.getServiceIP(ctx, f, ns, "test-server"); t.ip == "" {
 			return false, nil
 		}


@@ -552,7 +552,7 @@ func createBatchPodSequential(ctx context.Context, f *framework.Framework, pods
 		create := metav1.Now()
 		createTimes[pod.Name] = create
 		p := e2epod.NewPodClient(f).Create(ctx, pod)
-		framework.ExpectNoError(wait.PollImmediateWithContext(ctx, 2*time.Second, framework.PodStartTimeout, podWatchedRunning(watchTimes, p.Name)))
+		framework.ExpectNoError(wait.PollUntilContextTimeout(ctx, 2*time.Second, framework.PodStartTimeout, true, podWatchedRunning(watchTimes, p.Name)))
 		e2eLags = append(e2eLags,
 			e2emetrics.PodLatencyData{Name: pod.Name, Latency: watchTimes[pod.Name].Time.Sub(create.Time)})
 	}


@@ -162,7 +162,7 @@ func pollConfigz(ctx context.Context, timeout time.Duration, pollInterval time.D
 	req.Header.Add("Accept", "application/json")
 	var respBody []byte
-	err = wait.PollImmediateWithContext(ctx, pollInterval, timeout, func(ctx context.Context) (bool, error) {
+	err = wait.PollUntilContextTimeout(ctx, pollInterval, timeout, true, func(ctx context.Context) (bool, error) {
 		resp, err := client.Do(req)
 		if err != nil {
 			framework.Logf("Failed to get /configz, retrying. Error: %v", err)


@@ -435,7 +435,7 @@ func checkMirrorPodRunning(ctx context.Context, cl clientset.Interface, name, na
 func checkMirrorPodRunningWithRestartCount(ctx context.Context, interval time.Duration, timeout time.Duration, cl clientset.Interface, name, namespace string, count int32) error {
 	var pod *v1.Pod
 	var err error
-	err = wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) {
+	err = wait.PollUntilContextTimeout(ctx, interval, timeout, true, func(ctx context.Context) (bool, error) {
 		pod, err = cl.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
 		if err != nil {
 			return false, fmt.Errorf("expected the mirror pod %q to appear: %w", name, err)