Merge pull request #120153 from SataQiu/clean-scheduler-20230824

Use wait.PollUntilContextTimeout instead of the deprecated wait.Poll/PollWithContext/PollImmediate/PollImmediateWithContext methods in the scheduler
Kubernetes Prow Robot 2023-08-28 02:11:42 -07:00 committed by GitHub
commit c7a04e10a6
5 changed files with 6 additions and 6 deletions
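
All five call sites move to the same replacement API in k8s.io/apimachinery/pkg/util/wait. A minimal sketch of the pattern, assuming an apimachinery release that ships PollUntilContextTimeout; the only new decision at each call site is the immediate flag:

	package main

	import (
		"context"
		"fmt"
		"time"

		"k8s.io/apimachinery/pkg/util/wait"
	)

	func main() {
		ctx := context.Background()

		// Before: err := wait.Poll(100*time.Millisecond, 30*time.Second, cond)
		// After:  the caller's context is threaded through, and immediate=false
		// keeps Poll's behavior of sleeping one interval before the first check.
		err := wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 30*time.Second, false,
			func(ctx context.Context) (bool, error) {
				return true, nil // a real condition would probe state here
			})
		fmt.Println(err)
	}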


@@ -132,14 +132,14 @@ func StartTestServer(ctx context.Context, customFlags []string) (result TestServ
 	if err != nil {
 		return result, fmt.Errorf("failed to create a client: %v", err)
 	}
-	err = wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
+	err = wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (bool, error) {
 		select {
 		case err := <-errCh:
 			return false, err
 		default:
 		}
-		result := client.CoreV1().RESTClient().Get().AbsPath("/healthz").Do(context.TODO())
+		result := client.CoreV1().RESTClient().Get().AbsPath("/healthz").Do(ctx)
 		status := 0
 		result.StatusCode(&status)
 		if status == 200 {
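
Beyond the rename, this hunk also threads the poll's context into the REST call (Do(ctx) instead of Do(context.TODO())), so an abandoned poll cancels the in-flight /healthz request. A condensed sketch of the same shape, reusing the imports from the sketch above; errCh and probe are hypothetical stand-ins for the test server's wiring:

	// waitHealthy aborts if the server goroutine reports a startup error on
	// errCh, and otherwise polls until the probe reports HTTP 200.
	func waitHealthy(ctx context.Context, errCh <-chan error, probe func(context.Context) int) error {
		return wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 30*time.Second, false,
			func(ctx context.Context) (bool, error) {
				select {
				case err := <-errCh:
					return false, err // server died; a non-nil error stops the poll
				default:
				}
				return probe(ctx) == 200, nil // done once /healthz answers 200
			})
	}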


@@ -109,7 +109,7 @@ func observeEventAfterAction(ctx context.Context, c clientset.Interface, ns stri
 	// Wait up 2 minutes polling every second.
 	timeout := 2 * time.Minute
 	interval := 1 * time.Second
-	err = wait.PollWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) {
+	err = wait.PollUntilContextTimeout(ctx, interval, timeout, false, func(ctx context.Context) (bool, error) {
 		return observedMatchingEvent, nil
 	})
 	return err == nil, err


@@ -326,7 +326,7 @@ var _ = SIGDescribe("LimitRange", func() {
 		framework.ExpectNoError(err, "failed to delete the LimitRange by Collection")
 		ginkgo.By(fmt.Sprintf("Confirm that the limitRange %q has been deleted", lrName))
-		err = wait.PollImmediateWithContext(ctx, 1*time.Second, 10*time.Second, checkLimitRangeListQuantity(f, patchedLabelSelector, 0))
+		err = wait.PollUntilContextTimeout(ctx, 1*time.Second, 10*time.Second, true, checkLimitRangeListQuantity(f, patchedLabelSelector, 0))
 		framework.ExpectNoError(err, "failed to count the required limitRanges")
 		framework.Logf("LimitRange %q has been deleted.", lrName)
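
The PollImmediateWithContext call sites pass immediate=true, preserving the old behavior of running the condition once at t=0 rather than after the first interval; for a check like "the LimitRange list is now empty" that commonly succeeds immediately, this saves a full interval on the happy path. Roughly, under the current wait API (pollNow is a hypothetical wrapper):

	// immediate=true  ~ PollImmediate / PollImmediateWithContext:
	//                   condition runs at t=0, then once per interval.
	// immediate=false ~ Poll / PollWithContext:
	//                   first condition run happens at t=interval.
	func pollNow(ctx context.Context, check wait.ConditionWithContextFunc) error {
		return wait.PollUntilContextTimeout(ctx, 1*time.Second, 10*time.Second, true, check)
	}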


@@ -364,7 +364,7 @@ func createBalancedPodForNodes(ctx context.Context, f *framework.Framework, cs c
 	if err != nil {
 		framework.Logf("Failed to delete memory balanced pods: %v.", err)
 	} else {
-		err := wait.PollImmediateWithContext(ctx, 2*time.Second, time.Minute, func(ctx context.Context) (bool, error) {
+		err := wait.PollUntilContextTimeout(ctx, 2*time.Second, time.Minute, true, func(ctx context.Context) (bool, error) {
 			podList, err := cs.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{
 				LabelSelector: labels.SelectorFromSet(labels.Set(balancePodLabel)).String(),
 			})


@@ -1312,7 +1312,7 @@ func createPods(ctx context.Context, tb testing.TB, namespace string, cpo *creat
 // namespace are scheduled. Times out after 10 minutes because even at the
 // lowest observed QPS of ~10 pods/sec, a 5000-node test should complete.
 func waitUntilPodsScheduledInNamespace(ctx context.Context, tb testing.TB, podInformer coreinformers.PodInformer, namespace string, wantCount int) error {
-	return wait.PollImmediate(1*time.Second, 10*time.Minute, func() (bool, error) {
+	return wait.PollUntilContextTimeout(ctx, 1*time.Second, 10*time.Minute, true, func(ctx context.Context) (bool, error) {
 		select {
 		case <-ctx.Done():
 			return true, ctx.Err()
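
Unlike the deprecated PollImmediate, the replacement is context-aware, so cancelling the caller's ctx also unblocks the poll itself; the inner ctx.Done() check above survives as a fast path that surfaces ctx.Err() from inside the condition. A small sketch of that interaction (the exact error value reported on cancellation comes from the wait package's context handling, so treat it as an assumption):

	// pollUntilCancelled never reports done; the poll exits early because
	// the caller cancels the context well before the 10-minute timeout.
	func pollUntilCancelled() error {
		ctx, cancel := context.WithCancel(context.Background())
		go func() {
			time.Sleep(2 * time.Second)
			cancel() // caller gives up
		}()
		return wait.PollUntilContextTimeout(ctx, 1*time.Second, 10*time.Minute, true,
			func(ctx context.Context) (bool, error) {
				return false, nil // rely on cancellation to exit
			})
	}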