Merge pull request #26272 from yujuhong/fix_reboot_tests

e2e: fix tests that are broken because of the image prepull pod
Alex Mohr 2016-05-25 14:12:24 -07:00
commit 28a601571c
5 changed files with 36 additions and 14 deletions


@@ -396,6 +396,14 @@ func PodRunningReady(p *api.Pod) (bool, error) {
 	return true, nil
 }
 
+func PodRunningReadyOrSucceeded(p *api.Pod) (bool, error) {
+	// Check if the phase is succeeded.
+	if p.Status.Phase == api.PodSucceeded {
+		return true, nil
+	}
+	return PodRunningReady(p)
+}
+
 // PodNotReady checks whether pod p's has a ready condition of status false.
 func PodNotReady(p *api.Pod) (bool, error) {
 	// Check the ready condition is false.
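For context, a minimal sketch (not part of this commit) of how the new PodRunningReadyOrSucceeded helper is expected to behave; the import paths are assumptions based on the repository layout of this era:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"            // assumed path for the api types used above
	"k8s.io/kubernetes/test/e2e/framework" // assumed path for the e2e framework package
)

func main() {
	// A pod that already ran to completion: PodRunningReady alone would reject
	// it with an error, but the new wrapper short-circuits on PodSucceeded.
	succeeded := &api.Pod{Status: api.PodStatus{Phase: api.PodSucceeded}}
	ok, err := framework.PodRunningReadyOrSucceeded(succeeded)
	fmt.Println(ok, err) // expected: true <nil>

	// A pod that is neither running nor succeeded still fails, as before.
	pending := &api.Pod{Status: api.PodStatus{Phase: api.PodPending}}
	ok, err = framework.PodRunningReadyOrSucceeded(pending)
	fmt.Println(ok, err) // expected: false plus a descriptive error
}

This is exactly the case the image prepull pod hits: it runs once, exits successfully, and should no longer count as unhealthy.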
@@ -3217,16 +3225,30 @@ func GetSigner(provider string) (ssh.Signer, error) {
 	return sshutil.MakePrivateKeySignerFromFile(key)
 }
 
-// checkPodsRunning returns whether all pods whose names are listed in podNames
-// in namespace ns are running and ready, using c and waiting at most timeout.
+// CheckPodsRunningReady returns whether all pods whose names are listed in
+// podNames in namespace ns are running and ready, using c and waiting at most
+// timeout.
 func CheckPodsRunningReady(c *client.Client, ns string, podNames []string, timeout time.Duration) bool {
-	np, desc := len(podNames), "running and ready"
+	return CheckPodsCondition(c, ns, podNames, timeout, PodRunningReady, "running and ready")
+}
+
+// CheckPodsRunningReadyOrSucceeded returns whether all pods whose names are
+// listed in podNames in namespace ns are running and ready, or succeeded; use
+// c and waiting at most timeout.
+func CheckPodsRunningReadyOrSucceeded(c *client.Client, ns string, podNames []string, timeout time.Duration) bool {
+	return CheckPodsCondition(c, ns, podNames, timeout, PodRunningReadyOrSucceeded, "running and ready, or succeeded")
+}
+
+// CheckPodsCondition returns whether all pods whose names are listed in podNames
+// in namespace ns are in the condition, using c and waiting at most timeout.
+func CheckPodsCondition(c *client.Client, ns string, podNames []string, timeout time.Duration, condition podCondition, desc string) bool {
+	np := len(podNames)
 	Logf("Waiting up to %v for %d pods to be %s: %s", timeout, np, desc, podNames)
 	result := make(chan bool, len(podNames))
 	for ix := range podNames {
 		// Launch off pod readiness checkers.
 		go func(name string) {
-			err := waitForPodCondition(c, ns, name, desc, timeout, PodRunningReady)
+			err := waitForPodCondition(c, ns, name, desc, timeout, condition)
 			result <- err == nil
 		}(podNames[ix])
 	}
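A hypothetical call site, modeled on the test changes further down in this commit (the package and function names here are stand-ins, and the client import path is an assumption):

package e2etest // illustrative only

import (
	client "k8s.io/kubernetes/pkg/client/unversioned" // assumed path for the *client.Client in the signatures above
	"k8s.io/kubernetes/test/e2e/framework"            // assumed path for the e2e framework package
)

// expectPodsHealthy fails the test unless every named pod is running and
// ready, or has already run to completion (e.g. the image prepull pod).
func expectPodsHealthy(c *client.Client, ns string, podNames []string) {
	if !framework.CheckPodsRunningReadyOrSucceeded(c, ns, podNames, framework.PodReadyBeforeTimeout) {
		framework.Failf("pods in %q were neither running and ready nor succeeded: %v", ns, podNames)
	}
}

Because both exported checks now delegate to CheckPodsCondition, adding another acceptance predicate later only needs a new condition function rather than another copy of the polling loop.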


@@ -241,11 +241,11 @@ func rebootNode(c *client.Client, provider, name, rebootCmd string) bool {
 			podNames = append(podNames, p.ObjectMeta.Name)
 		}
 	}
-	framework.Logf("Node %s has %d pods: %v", name, len(podNames), podNames)
+	framework.Logf("Node %s has %d assigned pods with no liveness probes: %v", name, len(podNames), podNames)
 
 	// For each pod, we do a sanity check to ensure it's running / healthy
-	// now, as that's what we'll be checking later.
-	if !framework.CheckPodsRunningReady(c, ns, podNames, framework.PodReadyBeforeTimeout) {
+	// or succeeded now, as that's what we'll be checking later.
+	if !framework.CheckPodsRunningReadyOrSucceeded(c, ns, podNames, framework.PodReadyBeforeTimeout) {
 		printStatusAndLogsForNotReadyPods(c, ns, podNames, pods)
 		return false
 	}
@@ -267,8 +267,8 @@ func rebootNode(c *client.Client, provider, name, rebootCmd string) bool {
 	}
 
 	// Ensure all of the pods that we found on this node before the reboot are
-	// running / healthy.
-	if !framework.CheckPodsRunningReady(c, ns, podNames, rebootPodReadyAgainTimeout) {
+	// running / healthy, or succeeded.
+	if !framework.CheckPodsRunningReadyOrSucceeded(c, ns, podNames, rebootPodReadyAgainTimeout) {
 		newPods := ps.List()
 		printStatusAndLogsForNotReadyPods(c, ns, podNames, newPods)
 		return false


@@ -396,7 +396,7 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() {
 			// the cluster is restored to health.
 			By("waiting for system pods to successfully restart")
-			err := framework.WaitForPodsRunningReady(api.NamespaceSystem, systemPodsNo, framework.PodReadyBeforeTimeout, map[string]string{})
+			err := framework.WaitForPodsRunningReady(api.NamespaceSystem, systemPodsNo, framework.PodReadyBeforeTimeout, framework.ImagePullerLabels)
 			Expect(err).NotTo(HaveOccurred())
 		})
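The last argument to WaitForPodsRunningReady is a set of pod labels to ignore, so passing framework.ImagePullerLabels instead of an empty map exempts the image prepull pod from the readiness gate. Below is a minimal sketch of how such an ignore-labels map is typically applied; this is an assumption about the framework's internals for illustration, not code from this commit, and the import paths are likewise assumed:

package e2etest // illustrative only

import (
	"k8s.io/kubernetes/pkg/api"    // assumed path for the api types of this era
	"k8s.io/kubernetes/pkg/labels" // assumed path for the label-selector helpers
)

// countGatedPods returns how many pods must still become running and ready,
// skipping any pod whose labels match the ignore set (e.g. the prepull pod).
func countGatedPods(pods []api.Pod, ignoreLabels map[string]string) int {
	ignoreSelector := labels.SelectorFromSet(ignoreLabels)
	n := 0
	for _, pod := range pods {
		if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(pod.Labels)) {
			continue // exempt from the readiness gate
		}
		n++
	}
	return n
}

The same substitution appears again in the scheduler predicates test at the end of this commit.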


@@ -82,8 +82,8 @@ var _ = framework.KubeDescribe("Restart [Disruptive]", func() {
 			podNamesBefore[i] = p.ObjectMeta.Name
 		}
 		ns := api.NamespaceSystem
-		if !framework.CheckPodsRunningReady(f.Client, ns, podNamesBefore, framework.PodReadyBeforeTimeout) {
-			framework.Failf("At least one pod wasn't running and ready at test start.")
+		if !framework.CheckPodsRunningReadyOrSucceeded(f.Client, ns, podNamesBefore, framework.PodReadyBeforeTimeout) {
+			framework.Failf("At least one pod wasn't running and ready or succeeded at test start.")
 		}
 
 		By("restarting all of the nodes")
@@ -111,7 +111,7 @@ var _ = framework.KubeDescribe("Restart [Disruptive]", func() {
 		podNamesAfter, err := waitForNPods(ps, len(podNamesBefore), restartPodReadyAgainTimeout)
 		Expect(err).NotTo(HaveOccurred())
 		remaining := restartPodReadyAgainTimeout - time.Since(podCheckStart)
-		if !framework.CheckPodsRunningReady(f.Client, ns, podNamesAfter, remaining) {
+		if !framework.CheckPodsRunningReadyOrSucceeded(f.Client, ns, podNamesAfter, remaining) {
 			framework.Failf("At least one pod wasn't running and ready after the restart.")
 		}
 	})


@@ -196,7 +196,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
 			}
 		}
 
-		err = framework.WaitForPodsRunningReady(api.NamespaceSystem, int32(systemPodsNo), framework.PodReadyBeforeTimeout, map[string]string{})
+		err = framework.WaitForPodsRunningReady(api.NamespaceSystem, int32(systemPodsNo), framework.PodReadyBeforeTimeout, framework.ImagePullerLabels)
 		Expect(err).NotTo(HaveOccurred())
 
 		for _, node := range nodeList.Items {