Merge pull request #7839 from jayunit100/density-verify-param

E2E: Parameterize Density failure
This commit is contained in:
Wojciech Tyczynski 2015-05-07 10:16:31 +02:00
commit 5200aa1981


@@ -20,6 +20,7 @@ import (
 	"bytes"
 	"fmt"
 	"io/ioutil"
+	"math"
 	"math/rand"
 	"os"
 	"os/exec"
@@ -444,11 +445,14 @@ func DeleteRC(c *client.Client, ns, name string) error {
 	return nil
 }

-// Launch a Replication Controller and wait for all pods it spawns
-// to become running. The controller will need to be cleaned up external
-// to this method
+// RunRC launches (and verifies the correctness of) a Replication Controller.
+// It waits for all pods it spawns to become "Running".
+// It's the caller's responsibility to clean up externally (i.e. use the
+// namespace lifecycle for handling cleanup).
 func RunRC(c *client.Client, name string, ns, image string, replicas int) error {
 	var last int
+	maxContainerFailures := int(math.Max(1.0, float64(replicas)*.01))

 	current := 0
 	same := 0
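For context, the failure-tolerance threshold introduced above works out to roughly 1% of the replica count, with a floor of one. A minimal, standalone Go sketch of that rule (the maxFailures helper is illustrative only, not part of this change):

package main

import (
	"fmt"
	"math"
)

// maxFailures mirrors the expression int(math.Max(1.0, float64(replicas)*.01)):
// tolerate about 1% of replicas having container failures, but never fewer than one.
func maxFailures(replicas int) int {
	return int(math.Max(1.0, float64(replicas)*.01))
}

func main() {
	for _, r := range []int{30, 100, 3000} {
		fmt.Printf("replicas=%d -> tolerated container failures=%d\n", r, maxFailures(r))
	}
	// Output: 1, 1, and 30 tolerated failures respectively.
}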
@@ -517,7 +521,7 @@ func RunRC(c *client.Client, name string, ns, image string, replicas int) error
 	}
 	Logf("Controller %s: Found %d pods out of %d", name, current, replicas)

-	By("Waiting for each pod to be running")
+	By(fmt.Sprintf("Waiting for all %d replicas to be running, with a max of %d container failures", replicas, maxContainerFailures))
 	same = 0
 	last = 0
 	failCount = 10
@@ -539,7 +543,7 @@ func RunRC(c *client.Client, name string, ns, image string, replicas int) error
 		for _, p := range currentPods.Items {
 			if p.Status.Phase == api.PodRunning {
 				current++
-				if err := VerifyContainersAreNotFailed(p); err != nil {
+				if err := VerifyContainersAreNotFailed(p, maxContainerFailures); err != nil {
 					return err
 				}
 			} else if p.Status.Phase == api.PodPending {
@@ -584,7 +588,9 @@ func listPods(c *client.Client, namespace string, label labels.Selector, field f
 	return pods, err
 }

-func VerifyContainersAreNotFailed(pod api.Pod) error {
+// VerifyContainersAreNotFailed confirms that containers didn't enter an invalid state:
+// for example, too many restarts, a non-nil Termination, and so on.
+func VerifyContainersAreNotFailed(pod api.Pod, restartMax int) error {
 	var errStrings []string

 	statuses := pod.Status.ContainerStatuses
@@ -592,8 +598,17 @@ func VerifyContainersAreNotFailed(pod api.Pod) error {
 		return nil
 	} else {
 		for _, status := range statuses {
-			if status.State.Termination != nil || status.LastTerminationState.Termination != nil || status.RestartCount != 0 {
-				errStrings = append(errStrings, fmt.Sprintf("Error: Pod %s (host: %s) : Container %s was found to have terminated %d times", pod.Name, pod.Spec.Host, status.Name, status.RestartCount))
+			errormsg := ""
+			if status.State.Termination != nil {
+				errormsg = "status.State.Termination was not nil"
+			} else if status.LastTerminationState.Termination != nil {
+				errormsg = "status.LastTerminationState.Termination was not nil"
+			} else if status.RestartCount > restartMax {
+				errormsg = fmt.Sprintf("restarted more than %d times", restartMax)
+			}
+			if len(errormsg) != 0 {
+				errStrings = append(errStrings, fmt.Sprintf("Error: Pod %s (host: %s): Container %s status was bad (%s).", pod.Name, pod.Spec.Host, status.Name, errormsg))
+			}
 		}
 	}
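The new per-container check reads: a container counts as failed if it has a current or previous termination record, or if its restart count exceeds the threshold. A self-contained sketch of that logic, using a simplified stand-in type rather than api.ContainerStatus (names here are illustrative, not part of the change):

package main

import "fmt"

// containerStatus is a simplified stand-in for api.ContainerStatus.
type containerStatus struct {
	Name           string
	Terminated     bool // stands in for State.Termination != nil
	LastTerminated bool // stands in for LastTerminationState.Termination != nil
	RestartCount   int
}

// containerFailed reports whether a container is considered failed and why.
func containerFailed(s containerStatus, restartMax int) (bool, string) {
	switch {
	case s.Terminated:
		return true, "current state is terminated"
	case s.LastTerminated:
		return true, "last state is terminated"
	case s.RestartCount > restartMax:
		return true, fmt.Sprintf("restarted %d times (max %d)", s.RestartCount, restartMax)
	}
	return false, ""
}

func main() {
	statuses := []containerStatus{
		{Name: "ok", RestartCount: 0},
		{Name: "flaky", RestartCount: 5},
	}
	for _, s := range statuses {
		if failed, why := containerFailed(s, 1); failed {
			fmt.Printf("container %s failed: %s\n", s.Name, why)
		}
	}
}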