diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go
index d7ae4971c8c..293632a72de 100644
--- a/test/e2e/framework/util.go
+++ b/test/e2e/framework/util.go
@@ -2786,7 +2786,8 @@ func (config *RCConfig) start() error {
 // Simplified version of RunRC, that does not create RC, but creates plain Pods.
 // Optionally waits for pods to start running (if waitForRunning == true).
 // The number of replicas must be non-zero.
-func StartPods(c *client.Client, replicas int, namespace string, podNamePrefix string, pod api.Pod, waitForRunning bool) {
+func StartPods(c *client.Client, replicas int, namespace string, podNamePrefix string,
+	pod api.Pod, waitForRunning bool, logFunc func(fmt string, args ...interface{})) error {
 	// no pod to start
 	if replicas < 1 {
 		panic("StartPods: number of replicas must be non-zero")
@@ -2799,14 +2800,19 @@ func StartPods(c *client.Client, replicas int, namespace string, podNamePrefix s
 		pod.ObjectMeta.Labels["startPodsID"] = startPodsID
 		pod.Spec.Containers[0].Name = podName
 		_, err := c.Pods(namespace).Create(&pod)
-		ExpectNoError(err)
+		if err != nil {
+			return err
+		}
 	}
-	Logf("Waiting for running...")
+	logFunc("Waiting for running...")
 	if waitForRunning {
 		label := labels.SelectorFromSet(labels.Set(map[string]string{"startPodsID": startPodsID}))
 		err := WaitForPodsWithLabelRunning(c, namespace, label)
-		ExpectNoError(err, "Error waiting for %d pods to be running - probably a timeout", replicas)
+		if err != nil {
+			return fmt.Errorf("Error waiting for %d pods to be running - probably a timeout: %v", replicas, err)
+		}
 	}
+	return nil
 }
 
 type EventsLister func(opts v1.ListOptions, ns string) (*v1.EventList, error)
diff --git a/test/e2e/scheduler_predicates.go b/test/e2e/scheduler_predicates.go
index fe96f77fc9d..3c23fecbb73 100644
--- a/test/e2e/scheduler_predicates.go
+++ b/test/e2e/scheduler_predicates.go
@@ -126,11 +126,11 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
 		// and there is no need to create additional pods.
 		// StartPods requires at least one pod to replicate.
 		if podsNeededForSaturation > 0 {
-			framework.StartPods(c, podsNeededForSaturation, ns, "maxp",
+			framework.ExpectNoError(framework.StartPods(c, podsNeededForSaturation, ns, "maxp",
 				*initPausePod(f, pausePodConfig{
 					Name:   "",
 					Labels: map[string]string{"name": ""},
-				}), true)
+				}), true, framework.Logf))
 		}
 		podName := "additional-pod"
 		createPausePod(f, pausePodConfig{
@@ -187,7 +187,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
 		// and there is no need to create additional pods.
 		// StartPods requires at least one pod to replicate.
 		if podsNeededForSaturation > 0 {
-			framework.StartPods(c, podsNeededForSaturation, ns, "overcommit",
+			framework.ExpectNoError(framework.StartPods(c, podsNeededForSaturation, ns, "overcommit",
 				*initPausePod(f, pausePodConfig{
 					Name:   "",
 					Labels: map[string]string{"name": ""},
@@ -199,7 +199,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
 						"cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
 					},
 				},
-			}), true)
+			}), true, framework.Logf))
 		}
 		podName := "additional-pod"
 		createPausePod(f, pausePodConfig{
diff --git a/test/e2e/ubernetes_lite.go b/test/e2e/ubernetes_lite.go
index db4ce08660b..37c34930e42 100644
--- a/test/e2e/ubernetes_lite.go
+++ b/test/e2e/ubernetes_lite.go
@@ -98,7 +98,7 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string)
 	// Based on the callers, replicas is always positive number: zoneCount >= 0 implies (2*zoneCount)+1 > 0.
 	// Thus, no need to test for it. Once the precondition changes to zero number of replicas,
 	// test for replicaCount > 0. Otherwise, StartPods panics.
-	framework.StartPods(f.Client, replicaCount, f.Namespace.Name, serviceName, *podSpec, false)
+	framework.ExpectNoError(framework.StartPods(f.Client, replicaCount, f.Namespace.Name, serviceName, *podSpec, false, framework.Logf))
 
 	// Wait for all of them to be scheduled
 	selector := labels.SelectorFromSet(labels.Set(map[string]string{"service": serviceName}))