Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-30 15:05:27 +00:00)
Merge pull request #120153 from SataQiu/clean-scheduler-20230824
Use wait.PollUntilContextTimeout instead of the deprecated wait.Poll/PollWithContext/PollImmediate/PollImmediateWithContext methods for the scheduler
This commit is contained in: c7a04e10a6
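Every hunk below applies the same mechanical change: a deprecated polling helper is replaced by wait.PollUntilContextTimeout, whose fourth argument (immediate) selects whether the condition is evaluated before the first interval elapses. The sketch below is illustrative only and not part of this commit; the placeholder condition stands in for the real checks, while the wait.PollUntilContextTimeout call itself uses the signature from k8s.io/apimachinery/pkg/util/wait.

// Illustrative only: a self-contained sketch of the migration pattern,
// not code from this PR. The placeholder condition stands in for the real
// checks (health endpoints, informer caches, event observations, ...).
package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx := context.Background()

	// Placeholder condition with the signature PollUntilContextTimeout expects.
	ready := func(ctx context.Context) (bool, error) {
		return time.Now().Second()%2 == 0, nil
	}

	// wait.Poll(interval, timeout, cond) and wait.PollWithContext(ctx, interval, timeout, cond)
	// become PollUntilContextTimeout with immediate=false: the first check happens
	// only after one interval has elapsed.
	if err := wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 30*time.Second, false, ready); err != nil {
		fmt.Println("poll failed:", err)
	}

	// wait.PollImmediate(...) and wait.PollImmediateWithContext(...) become
	// PollUntilContextTimeout with immediate=true: the condition is checked right away.
	if err := wait.PollUntilContextTimeout(ctx, 1*time.Second, 10*time.Second, true, ready); err != nil {
		fmt.Println("poll failed:", err)
	}
}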
@@ -132,14 +132,14 @@ func StartTestServer(ctx context.Context, customFlags []string) (result TestServ
 	if err != nil {
 		return result, fmt.Errorf("failed to create a client: %v", err)
 	}
-	err = wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
+	err = wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (bool, error) {
 		select {
 		case err := <-errCh:
 			return false, err
 		default:
 		}
 
-		result := client.CoreV1().RESTClient().Get().AbsPath("/healthz").Do(context.TODO())
+		result := client.CoreV1().RESTClient().Get().AbsPath("/healthz").Do(ctx)
 		status := 0
 		result.StatusCode(&status)
 		if status == 200 {
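As a self-contained illustration of the hunk above (the helper name waitForHealthz and its package are assumptions, not code from this commit), the health-check wait can be written as:

// Hypothetical helper illustrating the pattern above: poll /healthz with the
// caller's context until the API server answers 200, aborting early if the
// server goroutine reports an error. Names here are illustrative.
package testutil

import (
	"context"
	"net/http"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

func waitForHealthz(ctx context.Context, client kubernetes.Interface, errCh <-chan error) error {
	return wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (bool, error) {
		// Fail fast if the server under test already reported a startup error.
		select {
		case err := <-errCh:
			return false, err
		default:
		}

		// Reuse the poll's context instead of context.TODO(), as the PR does.
		result := client.CoreV1().RESTClient().Get().AbsPath("/healthz").Do(ctx)
		status := 0
		result.StatusCode(&status)
		return status == http.StatusOK, nil
	})
}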
@@ -109,7 +109,7 @@ func observeEventAfterAction(ctx context.Context, c clientset.Interface, ns stri
 	// Wait up 2 minutes polling every second.
 	timeout := 2 * time.Minute
 	interval := 1 * time.Second
-	err = wait.PollWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) {
+	err = wait.PollUntilContextTimeout(ctx, interval, timeout, false, func(ctx context.Context) (bool, error) {
 		return observedMatchingEvent, nil
 	})
 	return err == nil, err
@@ -326,7 +326,7 @@ var _ = SIGDescribe("LimitRange", func() {
 		framework.ExpectNoError(err, "failed to delete the LimitRange by Collection")
 
 		ginkgo.By(fmt.Sprintf("Confirm that the limitRange %q has been deleted", lrName))
-		err = wait.PollImmediateWithContext(ctx, 1*time.Second, 10*time.Second, checkLimitRangeListQuantity(f, patchedLabelSelector, 0))
+		err = wait.PollUntilContextTimeout(ctx, 1*time.Second, 10*time.Second, true, checkLimitRangeListQuantity(f, patchedLabelSelector, 0))
 		framework.ExpectNoError(err, "failed to count the required limitRanges")
 		framework.Logf("LimitRange %q has been deleted.", lrName)
 
@@ -364,7 +364,7 @@ func createBalancedPodForNodes(ctx context.Context, f *framework.Framework, cs c
 		if err != nil {
 			framework.Logf("Failed to delete memory balanced pods: %v.", err)
 		} else {
-			err := wait.PollImmediateWithContext(ctx, 2*time.Second, time.Minute, func(ctx context.Context) (bool, error) {
+			err := wait.PollUntilContextTimeout(ctx, 2*time.Second, time.Minute, true, func(ctx context.Context) (bool, error) {
 				podList, err := cs.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{
 					LabelSelector: labels.SelectorFromSet(labels.Set(balancePodLabel)).String(),
 				})
@@ -1312,7 +1312,7 @@ func createPods(ctx context.Context, tb testing.TB, namespace string, cpo *creat
 // namespace are scheduled. Times out after 10 minutes because even at the
 // lowest observed QPS of ~10 pods/sec, a 5000-node test should complete.
 func waitUntilPodsScheduledInNamespace(ctx context.Context, tb testing.TB, podInformer coreinformers.PodInformer, namespace string, wantCount int) error {
-	return wait.PollImmediate(1*time.Second, 10*time.Minute, func() (bool, error) {
+	return wait.PollUntilContextTimeout(ctx, 1*time.Second, 10*time.Minute, true, func(ctx context.Context) (bool, error) {
 		select {
 		case <-ctx.Done():
 			return true, ctx.Err()
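The final hunk is cut off right after the ctx.Done() check. Purely as a hedged sketch, and not a quote of the real waitUntilPodsScheduledInNamespace body, a condition of this shape might count scheduled pods through the informer's lister:

// Hypothetical sketch only: the diff above stops after the ctx.Done() check,
// so the counting logic below is an assumption about the rest of the
// condition, not a quote of the real function.
package benchutil

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/wait"
	coreinformers "k8s.io/client-go/informers/core/v1"
)

func waitUntilPodsScheduled(ctx context.Context, podInformer coreinformers.PodInformer, namespace string, wantCount int) error {
	return wait.PollUntilContextTimeout(ctx, 1*time.Second, 10*time.Minute, true, func(ctx context.Context) (bool, error) {
		// Stop polling as soon as the caller's context is cancelled.
		select {
		case <-ctx.Done():
			return true, ctx.Err()
		default:
		}

		pods, err := podInformer.Lister().Pods(namespace).List(labels.Everything())
		if err != nil {
			return false, fmt.Errorf("listing pods in %q: %w", namespace, err)
		}
		scheduled := 0
		for _, pod := range pods {
			if pod.Spec.NodeName != "" {
				scheduled++
			}
		}
		return scheduled >= wantCount, nil
	})
}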