Mirror of https://github.com/k3s-io/kubernetes.git
replace e2e WaitForPodsReady by WaitTimeoutForPodReadyInNamespace
parent 5be21c50c2
commit 34f4959633
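In call-site terms, the migration looks roughly like the sketch below. The two helper signatures are taken from the call sites in this diff; the wrapper function, its name, and the surrounding package are illustrative only, not code from the commit.

package example

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// waitForSchedulerPodReady is a hypothetical wrapper showing how call sites change.
// Note the argument order differs between the two helpers:
//
//	old: WaitForPodsReady(client, namespace, name, minReadySeconds)
//	new: WaitTimeoutForPodReadyInNamespace(client, podName, namespace, timeout)
func waitForSchedulerPodReady(c clientset.Interface, podName string) error {
	// Before this commit (helper removed further down in the diff):
	//   return e2epod.WaitForPodsReady(c, metav1.NamespaceSystem, podName, 0)

	// After this commit:
	return e2epod.WaitTimeoutForPodReadyInNamespace(c, podName, metav1.NamespaceSystem, 5*time.Minute)
}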
@@ -203,7 +203,7 @@ func (g *Grabber) GrabFromScheduler() (SchedulerMetrics, error) {
 	var err error
 
 	g.waitForSchedulerReadyOnce.Do(func() {
-		if readyErr := e2epod.WaitForPodsReady(g.client, metav1.NamespaceSystem, g.kubeScheduler, 0); readyErr != nil {
+		if readyErr := e2epod.WaitTimeoutForPodReadyInNamespace(g.client, g.kubeScheduler, metav1.NamespaceSystem, 5*time.Minute); readyErr != nil {
 			err = fmt.Errorf("error waiting for kube-scheduler pod to be ready: %w", readyErr)
 		}
 	})
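Each grabber method guards its readiness wait with a sync.Once so the potentially minutes-long wait only runs on the first scrape. Below is a minimal, standalone sketch of that pattern; every name is illustrative. As in the hunk above, a wait failure is only surfaced on the call that actually executes the Once body, since later calls skip it and leave their local err nil.

package main

import (
	"fmt"
	"sync"
	"time"
)

// grabber mirrors the once-guarded readiness wait from the hunk above.
type grabber struct {
	waitReadyOnce sync.Once
}

// waitReady is a stub standing in for the e2epod wait helper.
func waitReady(timeout time.Duration) error { return nil }

func (g *grabber) grab() error {
	var err error
	g.waitReadyOnce.Do(func() {
		if readyErr := waitReady(5 * time.Minute); readyErr != nil {
			err = fmt.Errorf("error waiting for pod to be ready: %w", readyErr)
		}
	})
	if err != nil {
		return err
	}
	// ... scrape metrics here ...
	return nil
}

func main() {
	g := &grabber{}
	fmt.Println(g.grab()) // prints <nil>; only the first call performs the wait
}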
@@ -254,7 +254,7 @@ func (g *Grabber) GrabFromControllerManager() (ControllerManagerMetrics, error)
 	var err error
 
 	g.waitForControllerManagerReadyOnce.Do(func() {
-		if readyErr := e2epod.WaitForPodsReady(g.client, metav1.NamespaceSystem, g.kubeControllerManager, 0); readyErr != nil {
+		if readyErr := e2epod.WaitTimeoutForPodReadyInNamespace(g.client, g.kubeControllerManager, metav1.NamespaceSystem, 5*time.Minute); readyErr != nil {
 			err = fmt.Errorf("error waiting for kube-controller-manager pod to be ready: %w", readyErr)
 		}
 	})
@@ -293,7 +293,7 @@ func (g *Grabber) GrabFromSnapshotController(podName string, port int) (Snapshot
 	var err error
 
 	g.waitForSnapshotControllerReadyOnce.Do(func() {
-		if readyErr := e2epod.WaitForPodsReady(g.client, metav1.NamespaceSystem, podName, 0); readyErr != nil {
+		if readyErr := e2epod.WaitTimeoutForPodReadyInNamespace(g.client, podName, metav1.NamespaceSystem, 5*time.Minute); readyErr != nil {
			err = fmt.Errorf("error waiting for volume-snapshot-controller pod to be ready: %w", readyErr)
 		}
 	})
@@ -513,24 +513,6 @@ func WaitForPodsWithLabelRunningReady(c clientset.Interface, ns string, label la
 	return pods, err
 }
 
-// WaitForPodsReady waits for the pods to become ready.
-func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds int) error {
-	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
-	options := metav1.ListOptions{LabelSelector: label.String()}
-	return wait.Poll(poll, 5*time.Minute, func() (bool, error) {
-		pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options)
-		if err != nil {
-			return false, nil
-		}
-		for _, pod := range pods.Items {
-			if !podutils.IsPodAvailable(&pod, int32(minReadySeconds), metav1.Now()) {
-				return false, nil
-			}
-		}
-		return true, nil
-	})
-}
-
 // WaitForNRestartablePods tries to list restarting pods using ps until it finds expect of them,
 // returning their names if it can do so before timeout.
 func WaitForNRestartablePods(ps *testutils.PodStore, expect int, timeout time.Duration) ([]string, error) {
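The helper removed above selected pods via the "name" label and required them to stay available for minReadySeconds; its replacement waits on one named pod's Ready condition with an explicit timeout. The following is only a rough approximation of such a single-pod wait, not the framework's actual WaitTimeoutForPodReadyInNamespace implementation; the poll interval and the decision to swallow transient Get errors are assumptions.

package example

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	podutils "k8s.io/kubernetes/pkg/api/v1/pod"
)

// waitTimeoutForPodReady is an illustrative stand-in for the framework helper
// adopted in this commit: poll one named pod until its Ready condition is true
// or the timeout expires.
func waitTimeoutForPodReady(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
	return wait.PollImmediate(2*time.Second, timeout, func() (bool, error) {
		pod, err := c.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
		if err != nil {
			// Tolerate transient API errors and keep polling, much as the removed helper did.
			return false, nil
		}
		return podutils.IsPodReady(pod), nil
	})
}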
@@ -185,7 +185,7 @@ var _ = common.SIGDescribe("KubeProxy", func() {
 		fr.PodClient().CreateSync(serverPodSpec)
 
 		// The server should be listening before spawning the client pod
-		if readyErr := e2epod.WaitForPodsReady(fr.ClientSet, fr.Namespace.Name, serverPodSpec.Name, 0); readyErr != nil {
+		if readyErr := e2epod.WaitTimeoutForPodReadyInNamespace(fr.ClientSet, serverPodSpec.Name, fr.Namespace.Name, framework.PodStartTimeout); readyErr != nil {
 			framework.Failf("error waiting for server pod %s to be ready: %v", serverPodSpec.Name, readyErr)
 		}
 		// Connect to the server and leak the connection
@@ -135,7 +135,7 @@ var _ = utils.SIGDescribe("vsphere statefulset [Feature:vsphere]", func() {
 		// After scale up, verify all vsphere volumes are attached to node VMs.
 		ginkgo.By("Verify all volumes are attached to Nodes after Statefulsets is scaled up")
 		for _, sspod := range ssPodsAfterScaleUp.Items {
-			err := e2epod.WaitForPodsReady(client, statefulset.Namespace, sspod.Name, 0)
+			err := e2epod.WaitTimeoutForPodReadyInNamespace(client, sspod.Name, statefulset.Namespace, framework.PodStartTimeout)
 			framework.ExpectNoError(err)
 			pod, err := client.CoreV1().Pods(namespace).Get(context.TODO(), sspod.Name, metav1.GetOptions{})
 			framework.ExpectNoError(err)