diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go
index 99140300b6a..85da3f26104 100644
--- a/test/e2e/framework/util.go
+++ b/test/e2e/framework/util.go
@@ -396,6 +396,14 @@ func PodRunningReady(p *api.Pod) (bool, error) {
 	return true, nil
 }
 
+func PodRunningReadyOrSucceeded(p *api.Pod) (bool, error) {
+	// Check if the phase is succeeded.
+	if p.Status.Phase == api.PodSucceeded {
+		return true, nil
+	}
+	return PodRunningReady(p)
+}
+
 // PodNotReady checks whether pod p's has a ready condition of status false.
 func PodNotReady(p *api.Pod) (bool, error) {
 	// Check the ready condition is false.
@@ -3217,16 +3225,30 @@ func GetSigner(provider string) (ssh.Signer, error) {
 	return sshutil.MakePrivateKeySignerFromFile(key)
 }
 
-// checkPodsRunning returns whether all pods whose names are listed in podNames
-// in namespace ns are running and ready, using c and waiting at most timeout.
+// CheckPodsRunningReady returns whether all pods whose names are listed in
+// podNames in namespace ns are running and ready, using c and waiting at most
+// timeout.
 func CheckPodsRunningReady(c *client.Client, ns string, podNames []string, timeout time.Duration) bool {
-	np, desc := len(podNames), "running and ready"
+	return CheckPodsCondition(c, ns, podNames, timeout, PodRunningReady, "running and ready")
+}
+
+// CheckPodsRunningReadyOrSucceeded returns whether all pods whose names are
+// listed in podNames in namespace ns are running and ready, or succeeded,
+// using c and waiting at most timeout.
+func CheckPodsRunningReadyOrSucceeded(c *client.Client, ns string, podNames []string, timeout time.Duration) bool {
+	return CheckPodsCondition(c, ns, podNames, timeout, PodRunningReadyOrSucceeded, "running and ready, or succeeded")
+}
+
+// CheckPodsCondition returns whether all pods whose names are listed in podNames
+// in namespace ns are in the condition, using c and waiting at most timeout.
+func CheckPodsCondition(c *client.Client, ns string, podNames []string, timeout time.Duration, condition podCondition, desc string) bool {
+	np := len(podNames)
 	Logf("Waiting up to %v for %d pods to be %s: %s", timeout, np, desc, podNames)
 	result := make(chan bool, len(podNames))
 	for ix := range podNames {
 		// Launch off pod readiness checkers.
 		go func(name string) {
-			err := waitForPodCondition(c, ns, name, desc, timeout, PodRunningReady)
+			err := waitForPodCondition(c, ns, name, desc, timeout, condition)
 			result <- err == nil
 		}(podNames[ix])
 	}
diff --git a/test/e2e/reboot.go b/test/e2e/reboot.go
index d048b6ee59d..a81f1b9317b 100644
--- a/test/e2e/reboot.go
+++ b/test/e2e/reboot.go
@@ -241,11 +241,11 @@ func rebootNode(c *client.Client, provider, name, rebootCmd string) bool {
 			podNames = append(podNames, p.ObjectMeta.Name)
 		}
 	}
-	framework.Logf("Node %s has %d pods: %v", name, len(podNames), podNames)
+	framework.Logf("Node %s has %d assigned pods with no liveness probes: %v", name, len(podNames), podNames)
 
 	// For each pod, we do a sanity check to ensure it's running / healthy
-	// now, as that's what we'll be checking later.
-	if !framework.CheckPodsRunningReady(c, ns, podNames, framework.PodReadyBeforeTimeout) {
+	// or succeeded now, as that's what we'll be checking later.
+	if !framework.CheckPodsRunningReadyOrSucceeded(c, ns, podNames, framework.PodReadyBeforeTimeout) {
 		printStatusAndLogsForNotReadyPods(c, ns, podNames, pods)
 		return false
 	}
@@ -267,8 +267,8 @@ func rebootNode(c *client.Client, provider, name, rebootCmd string) bool {
 	}
 
 	// Ensure all of the pods that we found on this node before the reboot are
-	// running / healthy.
-	if !framework.CheckPodsRunningReady(c, ns, podNames, rebootPodReadyAgainTimeout) {
+	// running / healthy, or succeeded.
+	if !framework.CheckPodsRunningReadyOrSucceeded(c, ns, podNames, rebootPodReadyAgainTimeout) {
 		newPods := ps.List()
 		printStatusAndLogsForNotReadyPods(c, ns, podNames, newPods)
 		return false
diff --git a/test/e2e/resize_nodes.go b/test/e2e/resize_nodes.go
index b1929eab65e..2823519099d 100644
--- a/test/e2e/resize_nodes.go
+++ b/test/e2e/resize_nodes.go
@@ -396,7 +396,7 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() {
 		// the cluster is restored to health.
 		By("waiting for system pods to successfully restart")
 
-		err := framework.WaitForPodsRunningReady(api.NamespaceSystem, systemPodsNo, framework.PodReadyBeforeTimeout, map[string]string{})
+		err := framework.WaitForPodsRunningReady(api.NamespaceSystem, systemPodsNo, framework.PodReadyBeforeTimeout, framework.ImagePullerLabels)
 		Expect(err).NotTo(HaveOccurred())
 	})
 
diff --git a/test/e2e/restart.go b/test/e2e/restart.go
index 916306ff4ca..57b50ec86fc 100644
--- a/test/e2e/restart.go
+++ b/test/e2e/restart.go
@@ -82,8 +82,8 @@ var _ = framework.KubeDescribe("Restart [Disruptive]", func() {
 			podNamesBefore[i] = p.ObjectMeta.Name
 		}
 		ns := api.NamespaceSystem
-		if !framework.CheckPodsRunningReady(f.Client, ns, podNamesBefore, framework.PodReadyBeforeTimeout) {
-			framework.Failf("At least one pod wasn't running and ready at test start.")
+		if !framework.CheckPodsRunningReadyOrSucceeded(f.Client, ns, podNamesBefore, framework.PodReadyBeforeTimeout) {
+			framework.Failf("At least one pod wasn't running and ready or succeeded at test start.")
 		}
 
 		By("restarting all of the nodes")
@@ -111,7 +111,7 @@ var _ = framework.KubeDescribe("Restart [Disruptive]", func() {
 		podNamesAfter, err := waitForNPods(ps, len(podNamesBefore), restartPodReadyAgainTimeout)
 		Expect(err).NotTo(HaveOccurred())
 		remaining := restartPodReadyAgainTimeout - time.Since(podCheckStart)
-		if !framework.CheckPodsRunningReady(f.Client, ns, podNamesAfter, remaining) {
+		if !framework.CheckPodsRunningReadyOrSucceeded(f.Client, ns, podNamesAfter, remaining) {
 			framework.Failf("At least one pod wasn't running and ready after the restart.")
 		}
 	})
diff --git a/test/e2e/scheduler_predicates.go b/test/e2e/scheduler_predicates.go
index 47785e74588..e1a41c7ecd2 100644
--- a/test/e2e/scheduler_predicates.go
+++ b/test/e2e/scheduler_predicates.go
@@ -196,7 +196,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
 		}
 	}
 
-	err = framework.WaitForPodsRunningReady(api.NamespaceSystem, int32(systemPodsNo), framework.PodReadyBeforeTimeout, map[string]string{})
+	err = framework.WaitForPodsRunningReady(api.NamespaceSystem, int32(systemPodsNo), framework.PodReadyBeforeTimeout, framework.ImagePullerLabels)
 	Expect(err).NotTo(HaveOccurred())
 
 	for _, node := range nodeList.Items {
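
For reviewers, a caller-side sketch of the generalized CheckPodsCondition hook introduced in util.go. This is illustrative only and not part of the diff: checkPodsPending and its predicate are invented, and the import paths assume the 1.3-era tree this patch targets. Because Go function literals are assignable to the unexported podCondition type, tests can pass one-off conditions directly without adding new wrappers to the framework:

package e2e

import (
	"time"

	"k8s.io/kubernetes/pkg/api"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/test/e2e/framework"
)

// checkPodsPending (hypothetical) reports whether every named pod in ns
// reaches the Pending phase, waiting at most timeout.
func checkPodsPending(c *client.Client, ns string, podNames []string, timeout time.Duration) bool {
	return framework.CheckPodsCondition(c, ns, podNames, timeout,
		// Inline predicate; its unnamed type func(*api.Pod) (bool, error)
		// is assignable to the framework's podCondition type.
		func(p *api.Pod) (bool, error) {
			return p.Status.Phase == api.PodPending, nil
		}, "pending")
}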