using wait.PollUntilContextTimeout instead of deprecated wait.Poll for pkg/scheduler

using wait.PollUntilContextTimeout instead of deprecated wait.Poll for test/integration/scheduler
using wait.PollUntilContextTimeout instead of deprecated wait.Poll for test/e2e/scheduling
using wait.ConditionWithContextFunc for PodScheduled/PodIsGettingEvicted/PodScheduledIn/PodUnschedulable/PodSchedulingError
parent 4f874a224a
commit ef7d404702
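For reference, the mechanical pattern applied throughout the diff below, as a minimal self-contained sketch (the condition here is a made-up stand-in; the real call sites poll pod state via the testutils helpers): wait.Poll and wait.PollImmediate take no context, while wait.PollUntilContextTimeout threads the caller's context into the condition and adds an "immediate" flag, which former wait.PollImmediate call sites set to true.

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx := context.Background()
	start := time.Now()

	// Deprecated form (no context, condition takes no arguments):
	//   err := wait.Poll(time.Second, 30*time.Second, func() (bool, error) { ... })

	// Replacement: the context is passed first, and the fourth argument says
	// whether to run the condition once immediately before the first interval
	// elapses (true for former wait.PollImmediate call sites).
	err := wait.PollUntilContextTimeout(ctx, time.Second, 30*time.Second, false,
		func(ctx context.Context) (bool, error) {
			// Stand-in condition: reports done after three seconds.
			return time.Since(start) > 3*time.Second, nil
		})
	fmt.Println("poll finished:", err)
}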
@@ -523,7 +523,7 @@ func (b *volumeBinder) BindPodVolumes(ctx context.Context, assumedPod *v1.Pod, p
 		return err
 	}

-	err = wait.Poll(time.Second, b.bindTimeout, func() (bool, error) {
+	err = wait.PollUntilContextTimeout(ctx, time.Second, b.bindTimeout, false, func(ctx context.Context) (bool, error) {
 		b, err := b.checkBindings(assumedPod, bindings, claimsToProvision)
 		return b, err
 	})
@@ -349,7 +349,7 @@ func (env *testEnv) updateVolumes(ctx context.Context, pvs []*v1.PersistentVolum
 		}
 		pvs[i] = newPv
 	}
-	return wait.Poll(100*time.Millisecond, 3*time.Second, func() (bool, error) {
+	return wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 3*time.Second, false, func(ctx context.Context) (bool, error) {
 		for _, pv := range pvs {
 			obj, err := env.internalPVCache.GetAPIObj(pv.Name)
 			if obj == nil || err != nil {
@@ -375,7 +375,7 @@ func (env *testEnv) updateClaims(ctx context.Context, pvcs []*v1.PersistentVolum
 		}
 		pvcs[i] = newPvc
 	}
-	return wait.Poll(100*time.Millisecond, 3*time.Second, func() (bool, error) {
+	return wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 3*time.Second, false, func(ctx context.Context) (bool, error) {
 		for _, pvc := range pvcs {
 			obj, err := env.internalPVCCache.GetAPIObj(getPVCName(pvc))
 			if obj == nil || err != nil {
@@ -177,7 +177,7 @@ var _ = SIGDescribe("LimitRange", func() {
 		framework.ExpectNoError(err)

 		ginkgo.By("Verifying LimitRange updating is effective")
-		err = wait.Poll(time.Second*2, time.Second*20, func() (bool, error) {
+		err = wait.PollUntilContextTimeout(ctx, time.Second*2, time.Second*20, false, func(ctx context.Context) (bool, error) {
 			limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(ctx, limitRange.Name, metav1.GetOptions{})
 			framework.ExpectNoError(err)
 			return reflect.DeepEqual(limitRange.Spec.Limits[0].Min, newMin), nil
@@ -199,7 +199,7 @@ var _ = SIGDescribe("LimitRange", func() {
 		framework.ExpectNoError(err)

 		ginkgo.By("Verifying the LimitRange was deleted")
-		err = wait.Poll(time.Second*5, e2eservice.RespondingTimeout, func() (bool, error) {
+		err = wait.PollUntilContextTimeout(ctx, time.Second*5, e2eservice.RespondingTimeout, false, func(ctx context.Context) (bool, error) {
 			limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(ctx, metav1.ListOptions{})

 			if err != nil {
@@ -728,7 +728,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
 		// - if it's less than expected replicas, it denotes its pods are under-preempted
 		// "*2" means pods of ReplicaSet{1,2} are expected to be only preempted once.
 		expectedRSPods := []int32{1 * 2, 1 * 2, 1}
-		err := wait.Poll(framework.Poll, framework.PollShortTimeout, func() (bool, error) {
+		err := wait.PollUntilContextTimeout(ctx, framework.Poll, framework.PollShortTimeout, false, func(ctx context.Context) (bool, error) {
 			for i := 0; i < len(podNamesSeen); i++ {
 				got := atomic.LoadInt32(&podNamesSeen[i])
 				if got < expectedRSPods[i] {
@@ -905,7 +905,7 @@ func createPod(ctx context.Context, f *framework.Framework, conf pausePodConfig)
 // waitForPreemptingWithTimeout verifies if 'pod' is preempting within 'timeout', specifically it checks
 // if the 'spec.NodeName' field of preemptor 'pod' has been set.
 func waitForPreemptingWithTimeout(ctx context.Context, f *framework.Framework, pod *v1.Pod, timeout time.Duration) {
-	err := wait.Poll(2*time.Second, timeout, func() (bool, error) {
+	err := wait.PollUntilContextTimeout(ctx, 2*time.Second, timeout, false, func(ctx context.Context) (bool, error) {
 		pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
 		if err != nil {
 			return false, err
@@ -154,7 +154,7 @@ func TestEvictionForNoExecuteTaintAddedByUser(t *testing.T) {
 				t.Errorf("Failed to taint node in test %s <%s>, err: %v", name, nodes[nodeIndex].Name, err)
 			}

-			err = wait.PollImmediate(time.Second, time.Second*20, testutils.PodIsGettingEvicted(cs, testPod.Namespace, testPod.Name))
+			err = wait.PollUntilContextTimeout(testCtx.Ctx, time.Second, time.Second*20, true, testutils.PodIsGettingEvicted(cs, testPod.Namespace, testPod.Name))
 			if err != nil {
 				t.Fatalf("Error %q in test %q when waiting for terminating pod: %q", err, name, klog.KObj(testPod))
 			}
@@ -148,7 +148,7 @@ func TestPodGcOrphanedPodsWithFinalizer(t *testing.T) {
 	if err != nil {
 		t.Fatalf("Failed to delete node: %v, err: %v", pod.Spec.NodeName, err)
 	}
-	err = wait.PollImmediate(time.Second, time.Second*15, testutils.PodIsGettingEvicted(cs, pod.Namespace, pod.Name))
+	err = wait.PollUntilContextTimeout(testCtx.Ctx, time.Second, time.Second*15, true, testutils.PodIsGettingEvicted(cs, pod.Namespace, pod.Name))
 	if err != nil {
 		t.Fatalf("Error '%v' while waiting for the pod '%v' to be terminating", err, klog.KObj(pod))
 	}
@@ -261,7 +261,7 @@ func TestTerminatingOnOutOfServiceNode(t *testing.T) {
 		t.Fatalf("Error: '%v' while deleting pod: '%v'", err, klog.KObj(pod))
 	}
 	// wait until the pod is terminating
-	err = wait.PollImmediate(time.Second, time.Second*15, testutils.PodIsGettingEvicted(cs, pod.Namespace, pod.Name))
+	err = wait.PollUntilContextTimeout(testCtx.Ctx, time.Second, time.Second*15, true, testutils.PodIsGettingEvicted(cs, pod.Namespace, pod.Name))
 	if err != nil {
 		t.Fatalf("Error '%v' while waiting for the pod '%v' to be terminating", err, klog.KObj(pod))
 	}
@@ -410,7 +410,8 @@ func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface)
 		t.Fatalf("Failed to create pod: %v", err)
 	}

-	err = wait.Poll(time.Second, wait.ForeverTestTimeout, testutils.PodScheduled(cs, myPod.Namespace, myPod.Name))
+	err = wait.PollUntilContextTimeout(context.TODO(), time.Second, wait.ForeverTestTimeout, false,
+		testutils.PodScheduled(cs, myPod.Namespace, myPod.Name))
 	if err != nil {
 		t.Fatalf("Failed to schedule pod: %v", err)
 	}
@@ -832,7 +832,8 @@ func TestInterPodAffinity(t *testing.T) {
 				if err != nil {
 					t.Fatalf("Error while creating pod: %v", err)
 				}
-				err = wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodScheduled(cs, createdPod.Namespace, createdPod.Name))
+				err = wait.PollUntilContextTimeout(ctx, pollInterval, wait.ForeverTestTimeout, false,
+					testutils.PodScheduled(cs, createdPod.Namespace, createdPod.Name))
 				if err != nil {
 					t.Errorf("Error while creating pod: %v", err)
 				}
@@ -849,9 +850,11 @@ func TestInterPodAffinity(t *testing.T) {
 			}

 			if test.fits {
-				err = wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodScheduled(cs, testPod.Namespace, testPod.Name))
+				err = wait.PollUntilContextTimeout(ctx, pollInterval, wait.ForeverTestTimeout, false,
+					testutils.PodScheduled(cs, testPod.Namespace, testPod.Name))
 			} else {
-				err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podUnschedulable(cs, testPod.Namespace, testPod.Name))
+				err = wait.PollUntilContextTimeout(ctx, pollInterval, wait.ForeverTestTimeout, false,
+					podUnschedulable(cs, testPod.Namespace, testPod.Name))
 			}
 			if err != nil {
 				t.Errorf("Error while trying to fit a pod: %v", err)
@@ -1016,7 +1019,8 @@ func TestInterPodAffinityWithNamespaceSelector(t *testing.T) {
 			if err != nil {
 				t.Fatalf("Error while creating pod: %v", err)
 			}
-			err = wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodScheduled(cs, createdPod.Namespace, createdPod.Name))
+			err = wait.PollUntilContextTimeout(testCtx.Ctx, pollInterval, wait.ForeverTestTimeout, false,
+				testutils.PodScheduled(cs, createdPod.Namespace, createdPod.Name))
 			if err != nil {
 				t.Errorf("Error while creating pod: %v", err)
 			}
@@ -1033,9 +1037,11 @@ func TestInterPodAffinityWithNamespaceSelector(t *testing.T) {
 			}

 			if test.fits {
-				err = wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodScheduled(cs, testPod.Namespace, testPod.Name))
+				err = wait.PollUntilContextTimeout(testCtx.Ctx, pollInterval, wait.ForeverTestTimeout, false,
+					testutils.PodScheduled(cs, testPod.Namespace, testPod.Name))
 			} else {
-				err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podUnschedulable(cs, testPod.Namespace, testPod.Name))
+				err = wait.PollUntilContextTimeout(testCtx.Ctx, pollInterval, wait.ForeverTestTimeout, false,
+					podUnschedulable(cs, testPod.Namespace, testPod.Name))
 			}
 			if err != nil {
 				t.Errorf("Error while trying to fit a pod: %v", err)
@@ -1517,7 +1523,8 @@ func TestPodTopologySpreadFilter(t *testing.T) {
 			if err != nil {
 				t.Fatalf("Error while creating pod during test: %v", err)
 			}
-			err = wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodScheduled(cs, createdPod.Namespace, createdPod.Name))
+			err = wait.PollUntilContextTimeout(testCtx.Ctx, pollInterval, wait.ForeverTestTimeout, false,
+				testutils.PodScheduled(cs, createdPod.Namespace, createdPod.Name))
 			if err != nil {
 				t.Errorf("Error while waiting for pod during test: %v", err)
 			}
@@ -1528,9 +1535,11 @@ func TestPodTopologySpreadFilter(t *testing.T) {
 			}

 			if tt.fits {
-				err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podScheduledIn(cs, testPod.Namespace, testPod.Name, tt.candidateNodes))
+				err = wait.PollUntilContextTimeout(testCtx.Ctx, pollInterval, wait.ForeverTestTimeout, false,
+					podScheduledIn(cs, testPod.Namespace, testPod.Name, tt.candidateNodes))
 			} else {
-				err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podUnschedulable(cs, testPod.Namespace, testPod.Name))
+				err = wait.PollUntilContextTimeout(testCtx.Ctx, pollInterval, wait.ForeverTestTimeout, false,
+					podUnschedulable(cs, testPod.Namespace, testPod.Name))
 			}
 			if err != nil {
 				t.Errorf("Test Failed: %v", err)
@@ -672,7 +672,8 @@ func TestPreFilterPlugin(t *testing.T) {
 					t.Errorf("Didn't expect the pod to be scheduled. error: %v", err)
 				}
 			} else if test.fail {
-				if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+				if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
+					testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
 					t.Errorf("Expected a scheduling error, but got: %v", err)
 				}
 			} else {
@@ -844,7 +845,8 @@ func TestPostFilterPlugin(t *testing.T) {
 			}

 			if tt.rejectFilter {
-				if err = wait.Poll(10*time.Millisecond, 10*time.Second, testutils.PodUnschedulable(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+				if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 10*time.Second, false,
+					testutils.PodUnschedulable(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
 					t.Errorf("Didn't expect the pod to be scheduled.")
 				}

@@ -912,7 +914,8 @@ func TestScorePlugin(t *testing.T) {
 			}

 			if test.fail {
-				if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+				if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
+					testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
 					t.Errorf("Expected a scheduling error, but got: %v", err)
 				}
 			} else {
@@ -1003,7 +1006,7 @@ func TestReservePluginReserve(t *testing.T) {
 			}

 			if test.fail {
-				if err = wait.Poll(10*time.Millisecond, 30*time.Second,
+				if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
 					testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
 					t.Errorf("Didn't expect the pod to be scheduled. error: %v", err)
 				}
@@ -1131,7 +1134,8 @@ func TestPrebindPlugin(t *testing.T) {
 				if err = testutils.WaitForPodToScheduleWithTimeout(testCtx.ClientSet, pod, 10*time.Second); err != nil {
 					t.Errorf("Expected the pod to be schedulable on retry, but got an error: %v", err)
 				}
-			} else if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+			} else if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
+				testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
 				t.Errorf("Expected a scheduling error, but didn't get it. error: %v", err)
 			}
 		} else if test.reject {
@@ -1148,7 +1152,7 @@ func TestPrebindPlugin(t *testing.T) {
 		}

 		if test.unschedulablePod != nil {
-			if err := wait.Poll(10*time.Millisecond, 15*time.Second, func() (bool, error) {
+			if err := wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 15*time.Second, false, func(ctx context.Context) (bool, error) {
 				// 2 means the unschedulable pod is expected to be retried at least twice.
 				// (one initial attempt plus the one moved by the preBind pod)
 				return filterPlugin.deepCopy().numFilterCalled >= 2*nodesNum, nil
@@ -1273,7 +1277,8 @@ func TestUnReserveReservePlugins(t *testing.T) {
 			}

 			if test.fail {
-				if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+				if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
+					testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
 					t.Errorf("Expected a reasons other than Unschedulable, but got: %v", err)
 				}

@@ -1509,7 +1514,8 @@ func TestUnReserveBindPlugins(t *testing.T) {
 			}

 			if test.fail {
-				if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+				if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
+					testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
 					t.Errorf("Expected a reasons other than Unschedulable, but got: %v", err)
 				}

@@ -1681,7 +1687,7 @@ func TestBindPlugin(t *testing.T) {
 					t.Errorf("Expected %s not to be called, was called %d times.", p2.Name(), p2.numBindCalled)
 				}
 			}
-			if err = wait.Poll(10*time.Millisecond, 30*time.Second, func() (done bool, err error) {
+			if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (done bool, err error) {
 				p := postBindPlugin.deepCopy()
 				return p.numPostBindCalled == 1, nil
 			}); err != nil {
@@ -1692,7 +1698,8 @@ func TestBindPlugin(t *testing.T) {
 			}
 		} else {
 			// bind plugin fails to bind the pod
-			if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+			if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
+				testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
 				t.Errorf("Expected a scheduling error, but didn't get it. error: %v", err)
 			}
 			p := postBindPlugin.deepCopy()
@@ -1762,7 +1769,8 @@ func TestPostBindPlugin(t *testing.T) {
 			}

 			if test.preBindFail {
-				if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+				if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
+					testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
 					t.Errorf("Expected a scheduling error, but didn't get it. error: %v", err)
 				}
 				if postBindPlugin.numPostBindCalled > 0 {
@@ -1858,7 +1866,8 @@ func TestPermitPlugin(t *testing.T) {
 				t.Errorf("Error while creating a test pod: %v", err)
 			}
 			if test.fail {
-				if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+				if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
+					testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
 					t.Errorf("Expected a scheduling error, but didn't get it. error: %v", err)
 				}
 			} else {
@@ -1907,7 +1916,7 @@ func TestMultiplePermitPlugins(t *testing.T) {

 	var waitingPod framework.WaitingPod
 	// Wait until the test pod is actually waiting.
-	wait.Poll(10*time.Millisecond, 30*time.Second, func() (bool, error) {
+	wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (bool, error) {
 		waitingPod = perPlugin1.fh.GetWaitingPod(pod.UID)
 		return waitingPod != nil, nil
 	})
@@ -1959,14 +1968,14 @@ func TestPermitPluginsCancelled(t *testing.T) {

 	var waitingPod framework.WaitingPod
 	// Wait until the test pod is actually waiting.
-	wait.Poll(10*time.Millisecond, 30*time.Second, func() (bool, error) {
+	wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (bool, error) {
 		waitingPod = perPlugin1.fh.GetWaitingPod(pod.UID)
 		return waitingPod != nil, nil
 	})

 	perPlugin1.rejectAllPods()
 	// Wait some time for the permit plugins to be cancelled
-	err = wait.Poll(10*time.Millisecond, 30*time.Second, func() (bool, error) {
+	err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (bool, error) {
 		p1 := perPlugin1.deepCopy()
 		p2 := perPlugin2.deepCopy()
 		return p1.cancelled && p2.cancelled, nil
@@ -2100,7 +2109,8 @@ func TestFilterPlugin(t *testing.T) {
 			}

 			if test.fail {
-				if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+				if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
+					testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
 					t.Errorf("Expected a scheduling error, but got: %v", err)
 				}
 				if filterPlugin.numFilterCalled < 1 {
@@ -2156,7 +2166,8 @@ func TestPreScorePlugin(t *testing.T) {
 			}

 			if test.fail {
-				if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+				if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
+					testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
 					t.Errorf("Expected a scheduling error, but got: %v", err)
 				}
 			} else {
@@ -2361,7 +2372,7 @@ func TestPreemptWithPermitPlugin(t *testing.T) {
 		t.Fatalf("Error while creating the waiting pod: %v", err)
 	}
 	// Wait until the waiting-pod is actually waiting.
-	if err := wait.Poll(10*time.Millisecond, 30*time.Second, func() (bool, error) {
+	if err := wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (bool, error) {
 		w := false
 		permitPlugin.fh.IterateOverWaitingPods(func(wp framework.WaitingPod) { w = true })
 		return w, nil
@@ -2386,7 +2397,7 @@ func TestPreemptWithPermitPlugin(t *testing.T) {
 	}

 	if w := tt.waitingPod; w != nil {
-		if err := wait.Poll(200*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
+		if err := wait.PollUntilContextTimeout(testCtx.Ctx, 200*time.Millisecond, wait.ForeverTestTimeout, false, func(ctx context.Context) (bool, error) {
 			w := false
 			permitPlugin.fh.IterateOverWaitingPods(func(wp framework.WaitingPod) { w = true })
 			return !w, nil
@@ -78,8 +78,8 @@ const filterPluginName = "filter-plugin"
 var lowPriority, mediumPriority, highPriority = int32(100), int32(200), int32(300)

 func waitForNominatedNodeNameWithTimeout(cs clientset.Interface, pod *v1.Pod, timeout time.Duration) error {
-	if err := wait.Poll(100*time.Millisecond, timeout, func() (bool, error) {
-		pod, err := cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
+	if err := wait.PollUntilContextTimeout(context.TODO(), 100*time.Millisecond, timeout, false, func(ctx context.Context) (bool, error) {
+		pod, err := cs.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
 		if err != nil {
 			return false, err
 		}
@@ -504,7 +504,8 @@ func TestPreemption(t *testing.T) {
 			// Wait for preemption of pods and make sure the other ones are not preempted.
 			for i, p := range pods {
 				if _, found := test.preemptedPodIndexes[i]; found {
-					if err = wait.Poll(time.Second, wait.ForeverTestTimeout, podIsGettingEvicted(cs, p.Namespace, p.Name)); err != nil {
+					if err = wait.PollUntilContextTimeout(testCtx.Ctx, time.Second, wait.ForeverTestTimeout, false,
+						podIsGettingEvicted(cs, p.Namespace, p.Name)); err != nil {
 						t.Errorf("Pod %v/%v is not getting evicted.", p.Namespace, p.Name)
 					}
 					pod, err := cs.CoreV1().Pods(p.Namespace).Get(testCtx.Ctx, p.Name, metav1.GetOptions{})
@@ -883,7 +884,7 @@ func TestPreemptionStarvation(t *testing.T) {
 			}
 			// Make sure that all pending pods are being marked unschedulable.
 			for _, p := range pendingPods {
-				if err := wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout,
+				if err := wait.PollUntilContextTimeout(testCtx.Ctx, 100*time.Millisecond, wait.ForeverTestTimeout, false,
 					podUnschedulable(cs, p.Namespace, p.Name)); err != nil {
 					t.Errorf("Pod %v/%v didn't get marked unschedulable: %v", p.Namespace, p.Name, err)
 				}
@@ -1214,8 +1215,8 @@ func TestNominatedNodeCleanUp(t *testing.T) {
 	}

 	// Verify if .status.nominatedNodeName is cleared.
-	if err := wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
-		pod, err := cs.CoreV1().Pods(ns).Get(context.TODO(), "medium", metav1.GetOptions{})
+	if err := wait.PollUntilContextTimeout(testCtx.Ctx, 100*time.Millisecond, wait.ForeverTestTimeout, false, func(ctx context.Context) (bool, error) {
+		pod, err := cs.CoreV1().Pods(ns).Get(ctx, "medium", metav1.GetOptions{})
 		if err != nil {
 			t.Errorf("Error getting the medium pod: %v", err)
 		}
@@ -1485,7 +1486,8 @@ func TestPDBInPreemption(t *testing.T) {
 			// Wait for preemption of pods and make sure the other ones are not preempted.
 			for i, p := range pods {
 				if _, found := test.preemptedPodIndexes[i]; found {
-					if err = wait.Poll(time.Second, wait.ForeverTestTimeout, podIsGettingEvicted(cs, p.Namespace, p.Name)); err != nil {
+					if err = wait.PollUntilContextTimeout(testCtx.Ctx, time.Second, wait.ForeverTestTimeout, false,
+						podIsGettingEvicted(cs, p.Namespace, p.Name)); err != nil {
 						t.Errorf("Test [%v]: Pod %v/%v is not getting evicted.", test.name, p.Namespace, p.Name)
 					}
 				} else {
@@ -1622,8 +1624,8 @@ func TestPreferNominatedNode(t *testing.T) {
 			if err != nil {
 				t.Errorf("Error while creating high priority pod: %v", err)
 			}
-			err = wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
-				preemptor, err = cs.CoreV1().Pods(test.pod.Namespace).Get(context.TODO(), test.pod.Name, metav1.GetOptions{})
+			err = wait.PollUntilContextTimeout(testCtx.Ctx, 100*time.Millisecond, wait.ForeverTestTimeout, false, func(ctx context.Context) (bool, error) {
+				preemptor, err = cs.CoreV1().Pods(test.pod.Namespace).Get(ctx, test.pod.Name, metav1.GetOptions{})
 				if err != nil {
 					t.Errorf("Error getting the preemptor pod info: %v", err)
 				}
@@ -1975,7 +1977,8 @@ func TestReadWriteOncePodPreemption(t *testing.T) {
 			// Wait for preemption of pods and make sure the other ones are not preempted.
 			for i, p := range pods {
 				if _, found := test.preemptedPodIndexes[i]; found {
-					if err = wait.Poll(time.Second, wait.ForeverTestTimeout, podIsGettingEvicted(cs, p.Namespace, p.Name)); err != nil {
+					if err = wait.PollUntilContextTimeout(testCtx.Ctx, time.Second, wait.ForeverTestTimeout, false,
+						podIsGettingEvicted(cs, p.Namespace, p.Name)); err != nil {
 						t.Errorf("Pod %v/%v is not getting evicted.", p.Namespace, p.Name)
 					}
 				} else {
@@ -138,7 +138,7 @@ func TestSchedulingGates(t *testing.T) {
 			}

 			// Wait for the pods to be present in the scheduling queue.
-			if err := wait.Poll(time.Millisecond*200, wait.ForeverTestTimeout, func() (bool, error) {
+			if err := wait.PollUntilContextTimeout(ctx, time.Millisecond*200, wait.ForeverTestTimeout, false, func(ctx context.Context) (bool, error) {
 				pendingPods, _ := testCtx.Scheduler.SchedulingQueue.PendingPods()
 				return len(pendingPods) == len(tt.pods), nil
 			}); err != nil {
@@ -215,7 +215,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
 	}

 	// Wait for the three pods to be present in the scheduling queue.
-	if err := wait.Poll(time.Millisecond*200, wait.ForeverTestTimeout, func() (bool, error) {
+	if err := wait.PollUntilContextTimeout(ctx, time.Millisecond*200, wait.ForeverTestTimeout, false, func(ctx context.Context) (bool, error) {
 		pendingPods, _ := testCtx.Scheduler.SchedulingQueue.PendingPods()
 		return len(pendingPods) == 3, nil
 	}); err != nil {
@@ -396,7 +396,7 @@ func TestCustomResourceEnqueue(t *testing.T) {
 	}

 	// Wait for the testing Pod to be present in the scheduling queue.
-	if err := wait.Poll(time.Millisecond*200, wait.ForeverTestTimeout, func() (bool, error) {
+	if err := wait.PollUntilContextTimeout(ctx, time.Millisecond*200, wait.ForeverTestTimeout, false, func(ctx context.Context) (bool, error) {
 		pendingPods, _ := testCtx.Scheduler.SchedulingQueue.PendingPods()
 		return len(pendingPods) == 1, nil
 	}); err != nil {
@@ -489,14 +489,14 @@ func TestRequeueByBindFailure(t *testing.T) {
 	}

 	// first binding try should fail.
-	err := wait.Poll(200*time.Millisecond, wait.ForeverTestTimeout, testutils.PodSchedulingError(cs, ns, "pod-1"))
+	err := wait.PollUntilContextTimeout(ctx, 200*time.Millisecond, wait.ForeverTestTimeout, false, testutils.PodSchedulingError(cs, ns, "pod-1"))
 	if err != nil {
 		t.Fatalf("Expect pod-1 to be rejected by the bind plugin")
 	}

 	// The pod should be enqueued to activeQ/backoffQ without any event.
 	// The pod should be scheduled in the second binding try.
-	err = wait.Poll(200*time.Millisecond, wait.ForeverTestTimeout, testutils.PodScheduled(cs, ns, "pod-1"))
+	err = wait.PollUntilContextTimeout(ctx, 200*time.Millisecond, wait.ForeverTestTimeout, false, testutils.PodScheduled(cs, ns, "pod-1"))
 	if err != nil {
 		t.Fatalf("Expect pod-1 to be scheduled by the bind plugin in the second binding try")
 	}
@@ -610,20 +610,20 @@ func TestRequeueByPermitRejection(t *testing.T) {
 	})

 	// Wait for pod-2 to be scheduled.
-	err := wait.Poll(200*time.Millisecond, wait.ForeverTestTimeout, func() (done bool, err error) {
+	err := wait.PollUntilContextTimeout(ctx, 200*time.Millisecond, wait.ForeverTestTimeout, false, func(ctx context.Context) (done bool, err error) {
 		fakePermit.frameworkHandler.IterateOverWaitingPods(func(wp framework.WaitingPod) {
 			if wp.GetPod().Name == "pod-2" {
 				wp.Allow(fakePermitPluginName)
 			}
 		})

-		return testutils.PodScheduled(cs, ns, "pod-2")()
+		return testutils.PodScheduled(cs, ns, "pod-2")(ctx)
 	})
 	if err != nil {
 		t.Fatalf("Expect pod-2 to be scheduled")
 	}

-	err = wait.Poll(200*time.Millisecond, wait.ForeverTestTimeout, func() (done bool, err error) {
+	err = wait.PollUntilContextTimeout(ctx, 200*time.Millisecond, wait.ForeverTestTimeout, false, func(ctx context.Context) (done bool, err error) {
 		pod1Found := false
 		fakePermit.frameworkHandler.IterateOverWaitingPods(func(wp framework.WaitingPod) {
 			if wp.GetPod().Name == "pod-1" {
@@ -218,7 +218,8 @@ func TestReScheduling(t *testing.T) {

 			// The first time for scheduling, pod is error or unschedulable, controlled by wantFirstSchedulingError
 			if test.wantFirstSchedulingError {
-				if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+				if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
+					testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
 					t.Errorf("Expected a scheduling error, but got: %v", err)
 				}
 			} else {
@@ -238,7 +239,8 @@ func TestReScheduling(t *testing.T) {
 					t.Errorf("Didn't expect the pod to be unschedulable. error: %v", err)
 				}
 			} else if test.wantError {
-				if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+				if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
+					testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
 					t.Errorf("Expected a scheduling error, but got: %v", err)
 				}
 			} else {
@@ -144,7 +144,7 @@ func TestUnschedulableNodes(t *testing.T) {
 		if err == nil {
 			t.Errorf("Test %d: Pod scheduled successfully on unschedulable nodes", i)
 		}
-		if err != wait.ErrWaitTimeout {
+		if !wait.Interrupted(err) {
 			t.Errorf("Test %d: failed while trying to confirm the pod does not get scheduled on the node: %v", i, err)
 		} else {
 			t.Logf("Test %d: Pod did not get scheduled on an unschedulable node", i)
@@ -321,7 +321,7 @@ func TestMultipleSchedulingProfiles(t *testing.T) {
 	}

 	gotProfiles := make(map[string]string)
-	if err := wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
+	if err := wait.PollUntilContextTimeout(testCtx.Ctx, 100*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (bool, error) {
 		var ev watch.Event
 		select {
 		case ev = <-evs.ResultChan():
@@ -17,6 +17,7 @@ limitations under the License.
 package scoring

 import (
+	"context"
 	"fmt"
 	"strings"
 	"testing"
@@ -628,7 +629,8 @@ func TestPodTopologySpreadScoring(t *testing.T) {
 			if err != nil {
 				t.Fatalf("Test Failed: error while creating pod during test: %v", err)
 			}
-			err = wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodScheduled(cs, createdPod.Namespace, createdPod.Name))
+			err = wait.PollUntilContextTimeout(testCtx.Ctx, pollInterval, wait.ForeverTestTimeout, false,
+				testutils.PodScheduled(cs, createdPod.Namespace, createdPod.Name))
 			if err != nil {
 				t.Errorf("Test Failed: error while waiting for pod during test: %v", err)
 			}
@@ -640,9 +642,11 @@ func TestPodTopologySpreadScoring(t *testing.T) {
 			}

 			if tt.fits {
-				err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podScheduledIn(cs, testPod.Namespace, testPod.Name, tt.want))
+				err = wait.PollUntilContextTimeout(testCtx.Ctx, pollInterval, wait.ForeverTestTimeout, false,
+					podScheduledIn(cs, testPod.Namespace, testPod.Name, tt.want))
 			} else {
-				err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podUnschedulable(cs, testPod.Namespace, testPod.Name))
+				err = wait.PollUntilContextTimeout(testCtx.Ctx, pollInterval, wait.ForeverTestTimeout, false,
+					podUnschedulable(cs, testPod.Namespace, testPod.Name))
 			}
 			if err != nil {
 				t.Errorf("Test Failed: %v", err)
@@ -706,8 +710,8 @@ func TestDefaultPodTopologySpreadScoring(t *testing.T) {
 	}
 	var pods []v1.Pod
 	// Wait for all Pods scheduled.
-	err = wait.Poll(pollInterval, wait.ForeverTestTimeout, func() (bool, error) {
-		podList, err := cs.CoreV1().Pods(ns).List(testCtx.Ctx, metav1.ListOptions{})
+	err = wait.PollUntilContextTimeout(testCtx.Ctx, pollInterval, wait.ForeverTestTimeout, false, func(ctx context.Context) (bool, error) {
+		podList, err := cs.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
 		if err != nil {
 			t.Fatalf("Cannot list pods to verify scheduling: %v", err)
 		}
@@ -579,7 +579,7 @@ func InitTestSchedulerWithOptions(
 // WaitForPodToScheduleWithTimeout waits for a pod to get scheduled and returns
 // an error if it does not scheduled within the given timeout.
 func WaitForPodToScheduleWithTimeout(cs clientset.Interface, pod *v1.Pod, timeout time.Duration) error {
-	return wait.Poll(100*time.Millisecond, timeout, PodScheduled(cs, pod.Namespace, pod.Name))
+	return wait.PollUntilContextTimeout(context.TODO(), 100*time.Millisecond, timeout, false, PodScheduled(cs, pod.Namespace, pod.Name))
 }

 // WaitForPodToSchedule waits for a pod to get scheduled and returns an error if
@@ -589,9 +589,9 @@ func WaitForPodToSchedule(cs clientset.Interface, pod *v1.Pod) error {
 }

 // PodScheduled checks if the pod has been scheduled
-func PodScheduled(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
-	return func() (bool, error) {
-		pod, err := c.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{})
+func PodScheduled(c clientset.Interface, podNamespace, podName string) wait.ConditionWithContextFunc {
+	return func(ctx context.Context) (bool, error) {
+		pod, err := c.CoreV1().Pods(podNamespace).Get(ctx, podName, metav1.GetOptions{})
 		if err != nil {
 			// This could be a connection error so we want to retry.
 			return false, nil
@@ -899,9 +899,9 @@ func RunPodWithContainers(cs clientset.Interface, pod *v1.Pod) (*v1.Pod, error)
 }

 // PodIsGettingEvicted returns true if the pod's deletion timestamp is set.
-func PodIsGettingEvicted(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
-	return func() (bool, error) {
-		pod, err := c.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{})
+func PodIsGettingEvicted(c clientset.Interface, podNamespace, podName string) wait.ConditionWithContextFunc {
+	return func(ctx context.Context) (bool, error) {
+		pod, err := c.CoreV1().Pods(podNamespace).Get(ctx, podName, metav1.GetOptions{})
 		if err != nil {
 			return false, err
 		}
@@ -913,9 +913,9 @@ func PodIsGettingEvicted(c clientset.Interface, podNamespace, podName string) wa
 }

 // PodScheduledIn returns true if a given pod is placed onto one of the expected nodes.
-func PodScheduledIn(c clientset.Interface, podNamespace, podName string, nodeNames []string) wait.ConditionFunc {
-	return func() (bool, error) {
-		pod, err := c.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{})
+func PodScheduledIn(c clientset.Interface, podNamespace, podName string, nodeNames []string) wait.ConditionWithContextFunc {
+	return func(ctx context.Context) (bool, error) {
+		pod, err := c.CoreV1().Pods(podNamespace).Get(ctx, podName, metav1.GetOptions{})
 		if err != nil {
 			// This could be a connection error so we want to retry.
 			return false, nil
@@ -934,9 +934,9 @@ func PodScheduledIn(c clientset.Interface, podNamespace, podName string, nodeNam

 // PodUnschedulable returns a condition function that returns true if the given pod
 // gets unschedulable status of reason 'Unschedulable'.
-func PodUnschedulable(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
-	return func() (bool, error) {
-		pod, err := c.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{})
+func PodUnschedulable(c clientset.Interface, podNamespace, podName string) wait.ConditionWithContextFunc {
+	return func(ctx context.Context) (bool, error) {
+		pod, err := c.CoreV1().Pods(podNamespace).Get(ctx, podName, metav1.GetOptions{})
 		if err != nil {
 			// This could be a connection error so we want to retry.
 			return false, nil
@@ -950,9 +950,9 @@ func PodUnschedulable(c clientset.Interface, podNamespace, podName string) wait.
 // PodSchedulingError returns a condition function that returns true if the given pod
 // gets unschedulable status for reasons other than "Unschedulable". The scheduler
 // records such reasons in case of error.
-func PodSchedulingError(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
-	return func() (bool, error) {
-		pod, err := c.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{})
+func PodSchedulingError(c clientset.Interface, podNamespace, podName string) wait.ConditionWithContextFunc {
+	return func(ctx context.Context) (bool, error) {
+		pod, err := c.CoreV1().Pods(podNamespace).Get(ctx, podName, metav1.GetOptions{})
 		if err != nil {
 			// This could be a connection error so we want to retry.
 			return false, nil
@@ -981,7 +981,7 @@ func PodSchedulingGated(c clientset.Interface, podNamespace, podName string) wai
 // WaitForPodUnschedulableWithTimeout waits for a pod to fail scheduling and returns
 // an error if it does not become unschedulable within the given timeout.
 func WaitForPodUnschedulableWithTimeout(cs clientset.Interface, pod *v1.Pod, timeout time.Duration) error {
-	return wait.Poll(100*time.Millisecond, timeout, PodUnschedulable(cs, pod.Namespace, pod.Name))
+	return wait.PollUntilContextTimeout(context.TODO(), 100*time.Millisecond, timeout, false, PodUnschedulable(cs, pod.Namespace, pod.Name))
}

 // WaitForPodUnschedulable waits for a pod to fail scheduling and returns
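Two consequences of the migration are worth calling out, sketched below under stated assumptions (the condition is a stand-in that never succeeds; the wait calls are the real k8s.io/apimachinery/pkg/util/wait API). First, helpers such as PodScheduled now return wait.ConditionWithContextFunc, so invoking one directly requires a context argument, as the TestRequeueByPermitRejection hunk shows with PodScheduled(cs, ns, "pod-2")(ctx). Second, the context-based pollers surface a timeout as a context error rather than guaranteeing wait.ErrWaitTimeout, which is why the TestUnschedulableNodes hunk swaps err != wait.ErrWaitTimeout for !wait.Interrupted(err).

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx := context.Background()

	// A condition that never succeeds, so the poll runs into its timeout.
	err := wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, time.Second, false,
		func(ctx context.Context) (bool, error) { return false, nil })

	// wait.Interrupted matches the legacy wait.ErrWaitTimeout as well as
	// context cancellation/deadline errors, so it is the portable check
	// for "the wait ended without the condition becoming true".
	if wait.Interrupted(err) {
		fmt.Println("poll timed out or was cancelled:", err)
	}
}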