diff --git a/test/e2e/apimachinery/garbage_collector.go b/test/e2e/apimachinery/garbage_collector.go
index 0d88f3e0d1c..a657909d454 100644
--- a/test/e2e/apimachinery/garbage_collector.go
+++ b/test/e2e/apimachinery/garbage_collector.go
@@ -1105,7 +1105,7 @@ var _ = SIGDescribe("Garbage collector", func() {
 		if err := wait.PollWithContext(ctx, 5*time.Second, 30*time.Second+gcInformerResyncRetryTimeout, func(ctx context.Context) (bool, error) {
 			_, err := resourceClient.Get(ctx, dependentName, metav1.GetOptions{})
 			return false, err
-		}); err != nil && err != wait.ErrWaitTimeout {
+		}); err != nil && !wait.Interrupted(err) {
 			framework.Failf("failed to ensure the dependent is not deleted: %v", err)
 		}
 	})
diff --git a/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go b/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go
index 6d9d7866a09..d0b3fbfe1c1 100644
--- a/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go
+++ b/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go
@@ -626,7 +626,7 @@ func ensureDesiredReplicasInRange(ctx context.Context, deploymentName, namespace
 		}
 	})
 	// The call above always returns an error, but if it is timeout, it's OK (condition satisfied all the time).
-	if err == wait.ErrWaitTimeout {
+	if wait.Interrupted(err) {
 		framework.Logf("Number of replicas was stable over %v", timeout)
 		return
 	}
diff --git a/test/e2e/storage/csi_mock/csi_storage_capacity.go b/test/e2e/storage/csi_mock/csi_storage_capacity.go
index 0d104d5da98..48fda168fcd 100644
--- a/test/e2e/storage/csi_mock/csi_storage_capacity.go
+++ b/test/e2e/storage/csi_mock/csi_storage_capacity.go
@@ -367,7 +367,7 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() {
 			if test.expectFailure {
 				switch {
 				case errors.Is(err, context.DeadlineExceeded),
-					errors.Is(err, wait.ErrWaitTimeout),
+					errors.Is(err, wait.ErrorInterrupted(errors.New("timed out waiting for the condition"))),
 					errors.Is(err, errNotEnoughSpace):
 					// Okay, we expected that.
 				case err == nil:
diff --git a/test/e2e/storage/local_volume_resize.go b/test/e2e/storage/local_volume_resize.go
index 9bd9d09b3ef..5999dee55a2 100644
--- a/test/e2e/storage/local_volume_resize.go
+++ b/test/e2e/storage/local_volume_resize.go
@@ -154,7 +154,7 @@ func UpdatePVSize(ctx context.Context, pv *v1.PersistentVolume, size resource.Qu
 		}
 		return true, nil
 	})
-	if waitErr == wait.ErrWaitTimeout {
+	if wait.Interrupted(waitErr) {
 		return nil, fmt.Errorf("timed out attempting to update PV size. last update error: %v", lastError)
 	}
 	if waitErr != nil {
diff --git a/test/e2e/storage/persistent_volumes-local.go b/test/e2e/storage/persistent_volumes-local.go
index f2622cca2d7..13dfdde1af3 100644
--- a/test/e2e/storage/persistent_volumes-local.go
+++ b/test/e2e/storage/persistent_volumes-local.go
@@ -909,7 +909,7 @@ func createLocalPVCsPVs(ctx context.Context, config *localTestConfig, volumes []
 		}
 		return false, nil
 	})
-	if waitErr == wait.ErrWaitTimeout {
+	if wait.Interrupted(waitErr) {
 		framework.Logf("PVCs were not bound within %v (that's good)", bindTimeout)
 		waitErr = nil
 	}
diff --git a/test/e2e/storage/testsuites/volume_expand.go b/test/e2e/storage/testsuites/volume_expand.go
index a6b54e6d936..70c40d83e03 100644
--- a/test/e2e/storage/testsuites/volume_expand.go
+++ b/test/e2e/storage/testsuites/volume_expand.go
@@ -315,7 +315,7 @@ func ExpandPVCSize(ctx context.Context, origPVC *v1.PersistentVolumeClaim, size
 		}
 		return true, nil
 	})
-	if waitErr == wait.ErrWaitTimeout {
+	if wait.Interrupted(waitErr) {
 		return nil, fmt.Errorf("timed out attempting to update PVC size. last update error: %v", lastUpdateError)
 	}
 	if waitErr != nil {
diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go
index 5b5de8592e5..4d4ad6a61a0 100644
--- a/test/e2e/storage/volume_provisioning.go
+++ b/test/e2e/storage/volume_provisioning.go
@@ -680,7 +680,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 				return false, nil
 			})
 
-			if err == wait.ErrWaitTimeout {
+			if wait.Interrupted(err) {
 				framework.Logf("The test missed event about failed provisioning, but checked that no volume was provisioned for %v", framework.ClaimProvisionTimeout)
 				err = nil
 			}
diff --git a/test/e2e/storage/vsphere/vsphere_utils.go b/test/e2e/storage/vsphere/vsphere_utils.go
index 1e70e712d8b..22f2edc2047 100644
--- a/test/e2e/storage/vsphere/vsphere_utils.go
+++ b/test/e2e/storage/vsphere/vsphere_utils.go
@@ -91,7 +91,7 @@ func waitForVSphereDisksToDetach(ctx context.Context, nodeVolumes map[string][]s
 		return true, nil
 	})
 	if waitErr != nil {
-		if waitErr == wait.ErrWaitTimeout {
+		if wait.Interrupted(waitErr) {
 			return fmt.Errorf("volumes have not detached after %v: %v", detachTimeout, waitErr)
 		}
 		return fmt.Errorf("error waiting for volumes to detach: %v", waitErr)
@@ -132,7 +132,7 @@ func waitForVSphereDiskStatus(ctx context.Context, volumePath string, nodeName s
 		return false, nil
 	})
 	if waitErr != nil {
-		if waitErr == wait.ErrWaitTimeout {
+		if wait.Interrupted(waitErr) {
 			return fmt.Errorf("volume %q is not %s %q after %v: %v", volumePath, attachedStateMsg[expectedState], nodeName, timeout, waitErr)
 		}
 		return fmt.Errorf("error waiting for volume %q to be %s %q: %v", volumePath, attachedStateMsg[expectedState], nodeName, waitErr)
diff --git a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go
index 1b423af5d44..22cc7e2fae2 100644
--- a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go
+++ b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go
@@ -172,7 +172,7 @@ func waitForPodToFailover(ctx context.Context, client clientset.Interface, deplo
 	})
 
 	if waitErr != nil {
-		if waitErr == wait.ErrWaitTimeout {
+		if wait.Interrupted(waitErr) {
 			return "", fmt.Errorf("pod has not failed over after %v: %v", timeout, waitErr)
 		}
 		return "", fmt.Errorf("pod did not fail over from %q: %v", oldNode, waitErr)
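
A minimal, self-contained sketch (not part of the patch) of the pattern every hunk above applies: instead of comparing against the deprecated wait.ErrWaitTimeout sentinel, wait.Interrupted(err) reports whether a poll ended because of a timeout, a cancelled context, or an error created with wait.ErrorInterrupted. The condition function and durations below are made up for illustration and do not come from any file in the diff.

	package main

	import (
		"context"
		"fmt"
		"time"

		"k8s.io/apimachinery/pkg/util/wait"
	)

	func main() {
		ctx := context.Background()
		// Poll a condition that never succeeds, so the wait ends in a timeout.
		err := wait.PollWithContext(ctx, time.Second, 3*time.Second, func(ctx context.Context) (bool, error) {
			return false, nil
		})
		// Old style: if err == wait.ErrWaitTimeout { ... } (deprecated sentinel comparison).
		// New style: wait.Interrupted also covers context cancellation and deadline expiry.
		if wait.Interrupted(err) {
			fmt.Println("condition was not met before the timeout")
		}
	}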