diff --git a/test/e2e/framework/metrics/metrics_grabber.go b/test/e2e/framework/metrics/metrics_grabber.go
index 932e26c20d9..ba3f0e8e735 100644
--- a/test/e2e/framework/metrics/metrics_grabber.go
+++ b/test/e2e/framework/metrics/metrics_grabber.go
@@ -203,7 +203,7 @@ func (g *Grabber) GrabFromScheduler() (SchedulerMetrics, error) {
 
 	var err error
 	g.waitForSchedulerReadyOnce.Do(func() {
-		if readyErr := e2epod.WaitForPodsReady(g.client, metav1.NamespaceSystem, g.kubeScheduler, 0); readyErr != nil {
+		if readyErr := e2epod.WaitTimeoutForPodReadyInNamespace(g.client, g.kubeScheduler, metav1.NamespaceSystem, 5*time.Minute); readyErr != nil {
 			err = fmt.Errorf("error waiting for kube-scheduler pod to be ready: %w", readyErr)
 		}
 	})
@@ -254,7 +254,7 @@ func (g *Grabber) GrabFromControllerManager() (ControllerManagerMetrics, error)
 
 	var err error
 	g.waitForControllerManagerReadyOnce.Do(func() {
-		if readyErr := e2epod.WaitForPodsReady(g.client, metav1.NamespaceSystem, g.kubeControllerManager, 0); readyErr != nil {
+		if readyErr := e2epod.WaitTimeoutForPodReadyInNamespace(g.client, g.kubeControllerManager, metav1.NamespaceSystem, 5*time.Minute); readyErr != nil {
 			err = fmt.Errorf("error waiting for kube-controller-manager pod to be ready: %w", readyErr)
 		}
 	})
@@ -293,7 +293,7 @@ func (g *Grabber) GrabFromSnapshotController(podName string, port int) (Snapshot
 
 	var err error
 	g.waitForSnapshotControllerReadyOnce.Do(func() {
-		if readyErr := e2epod.WaitForPodsReady(g.client, metav1.NamespaceSystem, podName, 0); readyErr != nil {
+		if readyErr := e2epod.WaitTimeoutForPodReadyInNamespace(g.client, podName, metav1.NamespaceSystem, 5*time.Minute); readyErr != nil {
 			err = fmt.Errorf("error waiting for volume-snapshot-controller pod to be ready: %w", readyErr)
 		}
 	})
diff --git a/test/e2e/framework/pod/wait.go b/test/e2e/framework/pod/wait.go
index b5723035d21..61ab7997ce6 100644
--- a/test/e2e/framework/pod/wait.go
+++ b/test/e2e/framework/pod/wait.go
@@ -513,24 +513,6 @@ func WaitForPodsWithLabelRunningReady(c clientset.Interface, ns string, label la
 	return pods, err
 }
 
-// WaitForPodsReady waits for the pods to become ready.
-func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds int) error {
-	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
-	options := metav1.ListOptions{LabelSelector: label.String()}
-	return wait.Poll(poll, 5*time.Minute, func() (bool, error) {
-		pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options)
-		if err != nil {
-			return false, nil
-		}
-		for _, pod := range pods.Items {
-			if !podutils.IsPodAvailable(&pod, int32(minReadySeconds), metav1.Now()) {
-				return false, nil
-			}
-		}
-		return true, nil
-	})
-}
-
 // WaitForNRestartablePods tries to list restarting pods using ps until it finds expect of them,
 // returning their names if it can do so before timeout.
 func WaitForNRestartablePods(ps *testutils.PodStore, expect int, timeout time.Duration) ([]string, error) {
diff --git a/test/e2e/network/kube_proxy.go b/test/e2e/network/kube_proxy.go
index 24f614a32f1..82357de0732 100644
--- a/test/e2e/network/kube_proxy.go
+++ b/test/e2e/network/kube_proxy.go
@@ -185,7 +185,7 @@ var _ = common.SIGDescribe("KubeProxy", func() {
 		fr.PodClient().CreateSync(serverPodSpec)
 
 		// The server should be listening before spawning the client pod
-		if readyErr := e2epod.WaitForPodsReady(fr.ClientSet, fr.Namespace.Name, serverPodSpec.Name, 0); readyErr != nil {
+		if readyErr := e2epod.WaitTimeoutForPodReadyInNamespace(fr.ClientSet, serverPodSpec.Name, fr.Namespace.Name, framework.PodStartTimeout); readyErr != nil {
 			framework.Failf("error waiting for server pod %s to be ready: %v", serverPodSpec.Name, readyErr)
 		}
 		// Connect to the server and leak the connection
diff --git a/test/e2e/storage/vsphere/vsphere_statefulsets.go b/test/e2e/storage/vsphere/vsphere_statefulsets.go
index 476b1216702..1e186447553 100644
--- a/test/e2e/storage/vsphere/vsphere_statefulsets.go
+++ b/test/e2e/storage/vsphere/vsphere_statefulsets.go
@@ -135,7 +135,7 @@ var _ = utils.SIGDescribe("vsphere statefulset [Feature:vsphere]", func() {
 		// After scale up, verify all vsphere volumes are attached to node VMs.
 		ginkgo.By("Verify all volumes are attached to Nodes after Statefulsets is scaled up")
 		for _, sspod := range ssPodsAfterScaleUp.Items {
-			err := e2epod.WaitForPodsReady(client, statefulset.Namespace, sspod.Name, 0)
+			err := e2epod.WaitTimeoutForPodReadyInNamespace(client, sspod.Name, statefulset.Namespace, framework.PodStartTimeout)
 			framework.ExpectNoError(err)
 			pod, err := client.CoreV1().Pods(namespace).Get(context.TODO(), sspod.Name, metav1.GetOptions{})
 			framework.ExpectNoError(err)
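Note on the call-site migration: the removed WaitForPodsReady listed pods matching the label name=<name> and polled for a hard-coded 5 minutes, while WaitTimeoutForPodReadyInNamespace waits on the single pod with that actual name and takes the timeout from the caller; the name and namespace arguments also swap positions. Below is a minimal sketch of the new call shape, assuming the pre-context-parameter e2epod API shown in the hunks above; waitForSystemPodReady is a hypothetical wrapper for illustration, not part of this patch.

package example

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"

	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// waitForSystemPodReady mirrors the updated metrics-grabber call sites:
// block until the single pod named podName in kube-system reports Ready,
// or return an error once the caller-chosen timeout expires.
func waitForSystemPodReady(c clientset.Interface, podName string) error {
	// Note the argument order: pod name before namespace, the reverse of
	// the removed WaitForPodsReady(c, ns, name, minReadySeconds).
	return e2epod.WaitTimeoutForPodReadyInNamespace(c, podName, metav1.NamespaceSystem, 5*time.Minute)
}

Making the timeout an explicit parameter lets each caller pick an appropriate bound: 5*time.Minute for the kube-system components above, framework.PodStartTimeout for the kube-proxy and vSphere test pods.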