Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-25 12:43:23 +00:00
Merge pull request #124785 from kerthcet/cleanup/deprecate-wait-pull
Avoid using the deprecated wait.Poll in scheduler tests
commit 68091805a5
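This change replaces the deprecated wait.Poll with the context-aware wait.PollUntilContextTimeout throughout the scheduler integration tests, threading each test's context (testCtx.Ctx) into the helpers in test/integration/util. The sketch below shows only the general before/after pattern; the condition used here is a placeholder and is not taken from this commit.

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// conditionMet is a placeholder condition used only for this sketch.
func conditionMet() bool { return true }

func main() {
	ctx := context.Background()

	// Deprecated style: wait.Poll takes a wait.ConditionFunc and cannot be
	// cancelled through a context.
	// err := wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
	// 	return conditionMet(), nil
	// })

	// Replacement: PollUntilContextTimeout takes a context and a
	// wait.ConditionWithContextFunc. The fourth argument (immediate=false)
	// keeps wait.Poll's behavior of waiting one interval before the first check.
	err := wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 30*time.Second, false,
		func(ctx context.Context) (bool, error) {
			return conditionMet(), nil
		})
	fmt.Println("err:", err)
}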
@@ -436,7 +436,7 @@ func TestTaintBasedEvictions(t *testing.T) {
 }
 }

-if err := testutils.WaitForNodeTaints(cs, nodes[nodeIndex], test.nodeTaints); err != nil {
+if err := testutils.WaitForNodeTaints(testCtx.Ctx, cs, nodes[nodeIndex], test.nodeTaints); err != nil {
 t.Errorf("Failed to taint node %q, err: %v", klog.KObj(nodes[nodeIndex]), err)
 }

@@ -114,7 +114,7 @@ func TestUpdateNodeEvent(t *testing.T) {
 t.Fatalf("Creating pod error: %v", err)
 }

-if err := testutils.WaitForPodUnschedulable(testCtx.ClientSet, pod); err != nil {
+if err := testutils.WaitForPodUnschedulable(testCtx.Ctx, testCtx.ClientSet, pod); err != nil {
 t.Fatalf("Pod %v got scheduled: %v", pod.Name, err)
 }
 node, err = testCtx.ClientSet.CoreV1().Nodes().Get(testCtx.Ctx, node.Name, metav1.GetOptions{})
@@ -2158,7 +2158,7 @@ func TestUnschedulablePodBecomesSchedulable(t *testing.T) {
 if err != nil {
 t.Fatal(err)
 }
-if err := waitForPodUnschedulable(testCtx.ClientSet, pod); err != nil {
+if err := waitForPodUnschedulable(testCtx.Ctx, testCtx.ClientSet, pod); err != nil {
 t.Errorf("Pod %v got scheduled: %v", pod.Name, err)
 }
 if err := tt.update(testCtx.ClientSet, testCtx.NS.Name); err != nil {
@@ -681,7 +681,7 @@ func TestPreFilterPlugin(t *testing.T) {
 }

 if test.reject {
-if err = testutils.WaitForPodUnschedulable(testCtx.ClientSet, pod); err != nil {
+if err = testutils.WaitForPodUnschedulable(testCtx.Ctx, testCtx.ClientSet, pod); err != nil {
 t.Errorf("Didn't expect the pod to be scheduled. error: %v", err)
 }
 } else if test.fail {
@@ -1152,7 +1152,7 @@ func TestPrebindPlugin(t *testing.T) {
 t.Errorf("Expected a scheduling error, but didn't get it. error: %v", err)
 }
 } else if test.reject {
-if err = testutils.WaitForPodUnschedulable(testCtx.ClientSet, pod); err != nil {
+if err = testutils.WaitForPodUnschedulable(testCtx.Ctx, testCtx.ClientSet, pod); err != nil {
 t.Errorf("Expected the pod to be unschedulable")
 }
 } else if err = testutils.WaitForPodToSchedule(testCtx.ClientSet, pod); err != nil {
@@ -1384,7 +1384,7 @@ func TestUnReservePermitPlugins(t *testing.T) {
 }

 if test.reject {
-if err = testutils.WaitForPodUnschedulable(testCtx.ClientSet, pod); err != nil {
+if err = testutils.WaitForPodUnschedulable(testCtx.Ctx, testCtx.ClientSet, pod); err != nil {
 t.Errorf("Didn't expect the pod to be scheduled. error: %v", err)
 }

@@ -1456,7 +1456,7 @@ func TestUnReservePreBindPlugins(t *testing.T) {
 }

 if test.wantReject {
-if err = testutils.WaitForPodUnschedulable(testCtx.ClientSet, pod); err != nil {
+if err = testutils.WaitForPodUnschedulable(testCtx.Ctx, testCtx.ClientSet, pod); err != nil {
 t.Errorf("Expected a reasons other than Unschedulable, but got: %v", err)
 }

@@ -1902,7 +1902,7 @@ func TestPermitPlugin(t *testing.T) {
 }
 } else {
 if test.reject || test.timeout {
-if err = testutils.WaitForPodUnschedulable(testCtx.ClientSet, pod); err != nil {
+if err = testutils.WaitForPodUnschedulable(testCtx.Ctx, testCtx.ClientSet, pod); err != nil {
 t.Errorf("Didn't expect the pod to be scheduled. error: %v", err)
 }
 } else {
@@ -2068,10 +2068,10 @@ func TestCoSchedulingWithPermitPlugin(t *testing.T) {
 }

 if test.waitReject {
-if err = testutils.WaitForPodUnschedulable(testCtx.ClientSet, podA); err != nil {
+if err = testutils.WaitForPodUnschedulable(testCtx.Ctx, testCtx.ClientSet, podA); err != nil {
 t.Errorf("Didn't expect the first pod to be scheduled. error: %v", err)
 }
-if err = testutils.WaitForPodUnschedulable(testCtx.ClientSet, podB); err != nil {
+if err = testutils.WaitForPodUnschedulable(testCtx.Ctx, testCtx.ClientSet, podB); err != nil {
 t.Errorf("Didn't expect the second pod to be scheduled. error: %v", err)
 }
 if !((permitPlugin.waitingPod == podA.Name && permitPlugin.rejectingPod == podB.Name) ||
@@ -2263,7 +2263,7 @@ func TestPreEnqueuePlugin(t *testing.T) {
 t.Errorf("Expected the enqueuePlugin plugin to be called at least once, but got 0")
 }
 } else {
-if err := testutils.WaitForPodSchedulingGated(testCtx.ClientSet, pod, 10*time.Second); err != nil {
+if err := testutils.WaitForPodSchedulingGated(testCtx.Ctx, testCtx.ClientSet, pod, 10*time.Second); err != nil {
 t.Errorf("Expected the pod to be scheduling waiting, but got: %v", err)
 }
 // Also verify preFilterPlugin is not called.
@@ -2575,7 +2575,7 @@ func TestActivatePods(t *testing.T) {

 // Wait for the 2 executor pods to be unschedulable.
 for _, pod := range pods {
-if err := testutils.WaitForPodUnschedulable(cs, pod); err != nil {
+if err := testutils.WaitForPodUnschedulable(testCtx.Ctx, cs, pod); err != nil {
 t.Errorf("Failed to wait for Pod %v to be unschedulable: %v", pod.Name, err)
 }
 }
@@ -2721,7 +2721,7 @@ func TestSchedulingGatesPluginEventsToRegister(t *testing.T) {
 return
 }

-if err := testutils.WaitForPodSchedulingGated(testCtx.ClientSet, gatedPod, 10*time.Second); err != nil {
+if err := testutils.WaitForPodSchedulingGated(testCtx.Ctx, testCtx.ClientSet, gatedPod, 10*time.Second); err != nil {
 t.Errorf("Expected the pod to be gated, but got: %v", err)
 return
 }
@@ -2761,7 +2761,7 @@ func TestSchedulingGatesPluginEventsToRegister(t *testing.T) {
 }

 // Pod should still be unschedulable because scheduling gates still exist, theoretically, it's a waste rescheduling.
-if err := testutils.WaitForPodSchedulingGated(testCtx.ClientSet, gatedPod, 10*time.Second); err != nil {
+if err := testutils.WaitForPodSchedulingGated(testCtx.Ctx, testCtx.ClientSet, gatedPod, 10*time.Second); err != nil {
 t.Errorf("Expected the pod to be gated, but got: %v", err)
 return
 }
@@ -679,7 +679,7 @@ func TestDisablePreemption(t *testing.T) {
 t.Errorf("Error while creating high priority pod: %v", err)
 }
 // Ensure preemptor should keep unschedulable.
-if err := waitForPodUnschedulable(cs, preemptor); err != nil {
+if err := waitForPodUnschedulable(testCtx.Ctx, cs, preemptor); err != nil {
 t.Errorf("Preemptor %v should not become scheduled", preemptor.Name)
 }

@@ -223,7 +223,7 @@ func TestReScheduling(t *testing.T) {
 t.Errorf("Expected a scheduling error, but got: %v", err)
 }
 } else {
-if err = testutils.WaitForPodUnschedulable(testCtx.ClientSet, pod); err != nil {
+if err = testutils.WaitForPodUnschedulable(testCtx.Ctx, testCtx.ClientSet, pod); err != nil {
 t.Errorf("Didn't expect the pod to be scheduled. error: %v", err)
 }
 }
@@ -244,7 +244,7 @@ func TestReScheduling(t *testing.T) {
 t.Errorf("Expected a scheduling error, but got: %v", err)
 }
 } else {
-if err = testutils.WaitForPodUnschedulable(testCtx.ClientSet, pod); err != nil {
+if err = testutils.WaitForPodUnschedulable(testCtx.Ctx, testCtx.ClientSet, pod); err != nil {
 t.Errorf("Didn't expect the pod to be scheduled. error: %v", err)
 }
 }
@@ -106,7 +106,7 @@ func TestUnschedulableNodes(t *testing.T) {
 if _, err := c.CoreV1().Nodes().Update(context.TODO(), n, metav1.UpdateOptions{}); err != nil {
 t.Fatalf("Failed to update node with unschedulable=true: %v", err)
 }
-err = testutils.WaitForReflection(t, nodeLister, nodeKey, func(node interface{}) bool {
+err = testutils.WaitForReflection(testCtx.Ctx, t, nodeLister, nodeKey, func(node interface{}) bool {
 // An unschedulable node should still be present in the store
 // Nodes that are unschedulable or that are not ready or
 // have their disk full (Node.Spec.Conditions) are excluded
@@ -122,7 +122,7 @@ func TestUnschedulableNodes(t *testing.T) {
 if _, err := c.CoreV1().Nodes().Update(context.TODO(), n, metav1.UpdateOptions{}); err != nil {
 t.Fatalf("Failed to update node with unschedulable=false: %v", err)
 }
-err = testutils.WaitForReflection(t, nodeLister, nodeKey, func(node interface{}) bool {
+err = testutils.WaitForReflection(testCtx.Ctx, t, nodeLister, nodeKey, func(node interface{}) bool {
 return node != nil && node.(*v1.Node).Spec.Unschedulable == false
 })
 if err != nil {
@@ -518,7 +518,7 @@ func TestSchedulerInformers(t *testing.T) {
 }
 }
 // Ensure nodes are present in scheduler cache.
-if err := testutils.WaitForNodesInCache(testCtx.Scheduler, len(test.nodes)); err != nil {
+if err := testutils.WaitForNodesInCache(testCtx.Ctx, testCtx.Scheduler, len(test.nodes)); err != nil {
 t.Fatal(err)
 }

@@ -535,7 +535,7 @@ func TestSchedulerInformers(t *testing.T) {
 if err != nil {
 t.Errorf("Error while creating new pod: %v", err)
 }
-if err := testutils.WaitForPodUnschedulable(cs, unschedulable); err != nil {
+if err := testutils.WaitForPodUnschedulable(testCtx.Ctx, cs, unschedulable); err != nil {
 t.Errorf("Pod %v got scheduled: %v", unschedulable.Name, err)
 }

@@ -597,7 +597,7 @@ func TestNodeEvents(t *testing.T) {
 t.Fatalf("Failed to create pod %v: %v", pod2.Name, err)
 }

-if err := testutils.WaitForPodUnschedulable(testCtx.ClientSet, pod2); err != nil {
+if err := testutils.WaitForPodUnschedulable(testCtx.Ctx, testCtx.ClientSet, pod2); err != nil {
 t.Errorf("Pod %v got scheduled: %v", pod2.Name, err)
 }

@@ -630,7 +630,7 @@ func TestNodeEvents(t *testing.T) {
 }

 // 3.2 pod2 still unschedulable
-if err := testutils.WaitForPodUnschedulable(testCtx.ClientSet, pod2); err != nil {
+if err := testutils.WaitForPodUnschedulable(testCtx.Ctx, testCtx.ClientSet, pod2); err != nil {
 t.Errorf("Pod %v got scheduled: %v", pod2.Name, err)
 }

@@ -35,11 +35,6 @@ import (
 testutils "k8s.io/kubernetes/test/integration/util"
 )

-// imported from testutils
-var (
-waitForPodUnschedulable = testutils.WaitForPodUnschedulable
-)
-
 func newPod(nsName, name string, req, limit v1.ResourceList) *v1.Pod {
 return &v1.Pod{
 ObjectMeta: metav1.ObjectMeta{
@@ -526,7 +521,7 @@ func TestTaintNodeByCondition(t *testing.T) {
 if _, err := cs.CoreV1().Nodes().Create(testCtx.Ctx, node, metav1.CreateOptions{}); err != nil {
 t.Errorf("Failed to create node, err: %v", err)
 }
-if err := testutils.WaitForNodeTaints(cs, node, test.expectedTaints); err != nil {
+if err := testutils.WaitForNodeTaints(testCtx.Ctx, cs, node, test.expectedTaints); err != nil {
 node, err = cs.CoreV1().Nodes().Get(testCtx.Ctx, node.Name, metav1.GetOptions{})
 if err != nil {
 t.Errorf("Failed to get node <%s>", node.Name)
@@ -555,7 +550,7 @@ func TestTaintNodeByCondition(t *testing.T) {
 pod.Namespace, pod.Name, err)
 }
 } else {
-if err := waitForPodUnschedulable(cs, createdPod); err != nil {
+if err := testutils.WaitForPodUnschedulable(testCtx.Ctx, cs, createdPod); err != nil {
 t.Errorf("Unschedulable pod %s/%s gets scheduled on the node, err: %v",
 pod.Namespace, pod.Name, err)
 }
@@ -564,7 +559,7 @@ func TestTaintNodeByCondition(t *testing.T) {

 testutils.CleanupPods(testCtx.Ctx, cs, t, pods)
 testutils.CleanupNodes(cs, t)
-testutils.WaitForSchedulerCacheCleanup(testCtx.Scheduler, t)
+testutils.WaitForSchedulerCacheCleanup(testCtx.Ctx, testCtx.Scheduler, t)
 })
 }
 }
@@ -431,15 +431,15 @@ func RemoveTaintOffNode(cs clientset.Interface, nodeName string, taint v1.Taint)

 // WaitForNodeTaints waits for a node to have the target taints and returns
 // an error if it does not have taints within the given timeout.
-func WaitForNodeTaints(cs clientset.Interface, node *v1.Node, taints []v1.Taint) error {
-return wait.Poll(100*time.Millisecond, 30*time.Second, NodeTainted(cs, node.Name, taints))
+func WaitForNodeTaints(ctx context.Context, cs clientset.Interface, node *v1.Node, taints []v1.Taint) error {
+return wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 30*time.Second, false, NodeTainted(ctx, cs, node.Name, taints))
 }

 // NodeTainted return a condition function that returns true if the given node contains
 // the taints.
-func NodeTainted(cs clientset.Interface, nodeName string, taints []v1.Taint) wait.ConditionFunc {
-return func() (bool, error) {
-node, err := cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
+func NodeTainted(ctx context.Context, cs clientset.Interface, nodeName string, taints []v1.Taint) wait.ConditionWithContextFunc {
+return func(context.Context) (bool, error) {
+node, err := cs.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
 if err != nil {
 return false, err
 }
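Helpers such as NodeTainted now return a wait.ConditionWithContextFunc rather than a wait.ConditionFunc. The following sketch mirrors that shape with a hypothetical, client-free condition factory (counterReached is invented for illustration); as in the diff above, the returned closure can ignore the context parameter it receives and capture the ctx that was passed to the factory instead.

package main

import (
	"context"
	"fmt"
	"sync/atomic"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// counterReached mirrors the NodeTainted pattern: capture the inputs and return
// a context-aware condition for PollUntilContextTimeout to call.
func counterReached(counter *atomic.Int64, target int64) wait.ConditionWithContextFunc {
	return func(_ context.Context) (bool, error) {
		// The surrounding PollUntilContextTimeout enforces timeout and
		// cancellation; the condition only reports done or a fatal error.
		return counter.Load() >= target, nil
	}
}

func main() {
	ctx := context.Background()

	var n atomic.Int64
	go func() {
		for i := 0; i < 5; i++ {
			time.Sleep(50 * time.Millisecond)
			n.Add(1)
		}
	}()

	err := wait.PollUntilContextTimeout(ctx, 20*time.Millisecond, 5*time.Second, false, counterReached(&n, 3))
	fmt.Println("waited, err =", err)
}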
@@ -553,14 +553,14 @@ func InitTestAPIServer(t *testing.T, nsPrefix string, admission admission.Interf
 }

 // WaitForSchedulerCacheCleanup waits for cleanup of scheduler's cache to complete
-func WaitForSchedulerCacheCleanup(sched *scheduler.Scheduler, t *testing.T) {
-schedulerCacheIsEmpty := func() (bool, error) {
+func WaitForSchedulerCacheCleanup(ctx context.Context, sched *scheduler.Scheduler, t *testing.T) {
+schedulerCacheIsEmpty := func(context.Context) (bool, error) {
 dump := sched.Cache.Dump()

 return len(dump.Nodes) == 0 && len(dump.AssumedPods) == 0, nil
 }

-if err := wait.Poll(time.Second, wait.ForeverTestTimeout, schedulerCacheIsEmpty); err != nil {
+if err := wait.PollUntilContextTimeout(ctx, time.Second, wait.ForeverTestTimeout, false, schedulerCacheIsEmpty); err != nil {
 t.Errorf("Failed to wait for scheduler cache cleanup: %v", err)
 }
 }
@@ -726,10 +726,10 @@ func InitTestDisablePreemption(t *testing.T, nsPrefix string) *TestContext {

 // WaitForReflection waits till the passFunc confirms that the object it expects
 // to see is in the store. Used to observe reflected events.
-func WaitForReflection(t *testing.T, nodeLister corelisters.NodeLister, key string,
+func WaitForReflection(ctx context.Context, t *testing.T, nodeLister corelisters.NodeLister, key string,
 passFunc func(n interface{}) bool) error {
 var nodes []*v1.Node
-err := wait.Poll(time.Millisecond*100, wait.ForeverTestTimeout, func() (bool, error) {
+err := wait.PollUntilContextTimeout(ctx, time.Millisecond*100, wait.ForeverTestTimeout, false, func(context.Context) (bool, error) {
 n, err := nodeLister.Get(key)

 switch {
@@ -783,13 +783,13 @@ func CreateAndWaitForNodesInCache(testCtx *TestContext, prefix string, wrapper *
 if err != nil {
 return nodes, fmt.Errorf("cannot create nodes: %v", err)
 }
-return nodes, WaitForNodesInCache(testCtx.Scheduler, numNodes+existingNodes)
+return nodes, WaitForNodesInCache(testCtx.Ctx, testCtx.Scheduler, numNodes+existingNodes)
 }

 // WaitForNodesInCache ensures at least <nodeCount> nodes are present in scheduler cache
 // within 30 seconds; otherwise returns false.
-func WaitForNodesInCache(sched *scheduler.Scheduler, nodeCount int) error {
-err := wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
+func WaitForNodesInCache(ctx context.Context, sched *scheduler.Scheduler, nodeCount int) error {
+err := wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, wait.ForeverTestTimeout, false, func(context.Context) (bool, error) {
 return sched.Cache.NodeCount() >= nodeCount, nil
 })
 if err != nil {
@@ -1018,9 +1018,9 @@ func PodSchedulingError(c clientset.Interface, podNamespace, podName string) wai

 // PodSchedulingGated returns a condition function that returns true if the given pod
 // gets unschedulable status of reason 'SchedulingGated'.
-func PodSchedulingGated(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
-return func() (bool, error) {
-pod, err := c.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{})
+func PodSchedulingGated(ctx context.Context, c clientset.Interface, podNamespace, podName string) wait.ConditionWithContextFunc {
+return func(context.Context) (bool, error) {
+pod, err := c.CoreV1().Pods(podNamespace).Get(ctx, podName, metav1.GetOptions{})
 if err != nil {
 // This could be a connection error so we want to retry.
 return false, nil
@@ -1033,27 +1033,27 @@ func PodSchedulingGated(c clientset.Interface, podNamespace, podName string) wai

 // WaitForPodUnschedulableWithTimeout waits for a pod to fail scheduling and returns
 // an error if it does not become unschedulable within the given timeout.
-func WaitForPodUnschedulableWithTimeout(cs clientset.Interface, pod *v1.Pod, timeout time.Duration) error {
-return wait.PollUntilContextTimeout(context.TODO(), 100*time.Millisecond, timeout, false, PodUnschedulable(cs, pod.Namespace, pod.Name))
+func WaitForPodUnschedulableWithTimeout(ctx context.Context, cs clientset.Interface, pod *v1.Pod, timeout time.Duration) error {
+return wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, timeout, false, PodUnschedulable(cs, pod.Namespace, pod.Name))
 }

 // WaitForPodUnschedulable waits for a pod to fail scheduling and returns
 // an error if it does not become unschedulable within the timeout duration (30 seconds).
-func WaitForPodUnschedulable(cs clientset.Interface, pod *v1.Pod) error {
-return WaitForPodUnschedulableWithTimeout(cs, pod, 30*time.Second)
+func WaitForPodUnschedulable(ctx context.Context, cs clientset.Interface, pod *v1.Pod) error {
+return WaitForPodUnschedulableWithTimeout(ctx, cs, pod, 30*time.Second)
 }

 // WaitForPodSchedulingGated waits for a pod to be in scheduling gated state
 // and returns an error if it does not fall into this state within the given timeout.
-func WaitForPodSchedulingGated(cs clientset.Interface, pod *v1.Pod, timeout time.Duration) error {
-return wait.Poll(100*time.Millisecond, timeout, PodSchedulingGated(cs, pod.Namespace, pod.Name))
+func WaitForPodSchedulingGated(ctx context.Context, cs clientset.Interface, pod *v1.Pod, timeout time.Duration) error {
+return wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, timeout, false, PodSchedulingGated(ctx, cs, pod.Namespace, pod.Name))
 }

 // WaitForPDBsStable waits for PDBs to have "CurrentHealthy" status equal to
 // the expected values.
 func WaitForPDBsStable(testCtx *TestContext, pdbs []*policy.PodDisruptionBudget, pdbPodNum []int32) error {
-return wait.Poll(time.Second, 60*time.Second, func() (bool, error) {
-pdbList, err := testCtx.ClientSet.PolicyV1().PodDisruptionBudgets(testCtx.NS.Name).List(context.TODO(), metav1.ListOptions{})
+return wait.PollUntilContextTimeout(testCtx.Ctx, time.Second, 60*time.Second, false, func(context.Context) (bool, error) {
+pdbList, err := testCtx.ClientSet.PolicyV1().PodDisruptionBudgets(testCtx.NS.Name).List(testCtx.Ctx, metav1.ListOptions{})
 if err != nil {
 return false, err
 }
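On the caller side, each test now passes its per-test context as the first argument, so polling stops as soon as that context is cancelled, for example during test teardown. A self-contained sketch of the calling convention, using a stand-in helper rather than the real testutils functions, might look like this:

package example

import (
	"context"
	"testing"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForDone stands in for helpers like testutils.WaitForPodUnschedulable:
// the context comes first, followed by whatever the condition needs.
func waitForDone(ctx context.Context, done *bool) error {
	return wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 30*time.Second, false,
		func(context.Context) (bool, error) { return *done, nil })
}

func TestWaitHelperUsesTestContext(t *testing.T) {
	// In the scheduler integration tests this would be testCtx.Ctx, which is
	// cancelled when the test framework tears the test down.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	done := true
	if err := waitForDone(ctx, &done); err != nil {
		t.Fatalf("expected the condition to be met, got: %v", err)
	}
}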
@@ -1080,7 +1080,7 @@ func WaitForPDBsStable(testCtx *TestContext, pdbs []*policy.PodDisruptionBudget,

 // WaitCachedPodsStable waits until scheduler cache has the given pods.
 func WaitCachedPodsStable(testCtx *TestContext, pods []*v1.Pod) error {
-return wait.Poll(time.Second, 30*time.Second, func() (bool, error) {
+return wait.PollUntilContextTimeout(testCtx.Ctx, time.Second, 30*time.Second, false, func(context.Context) (bool, error) {
 cachedPods, err := testCtx.Scheduler.Cache.PodCount()
 if err != nil {
 return false, err
@@ -1089,7 +1089,7 @@ func WaitCachedPodsStable(testCtx *TestContext, pods []*v1.Pod) error {
 return false, nil
 }
 for _, p := range pods {
-actualPod, err1 := testCtx.ClientSet.CoreV1().Pods(p.Namespace).Get(context.TODO(), p.Name, metav1.GetOptions{})
+actualPod, err1 := testCtx.ClientSet.CoreV1().Pods(p.Namespace).Get(testCtx.Ctx, p.Name, metav1.GetOptions{})
 if err1 != nil {
 return false, err1
 }