From d9f3e3c3ad70343ba76bd95d374e7a6b8f13d5b4 Mon Sep 17 00:00:00 2001 From: Jan Chaloupka Date: Fri, 6 May 2016 11:58:35 +0200 Subject: [PATCH] e2e.framework.util.StartPods: The number of pods to start must be non-zero. Otherwise the function waits for pods forever if waitForRunning is true. If the number of replicas is zero, panic so the mistake is heard all over the e2e realm. Update all callers of StartPods to test for non-zero number of replicas. --- test/e2e/framework/util.go | 6 +-- test/e2e/scheduler_predicates.go | 86 ++++++++++++++++++-------------- test/e2e/ubernetes_lite.go | 5 ++ 3 files changed, 56 insertions(+), 41 deletions(-) diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index 0071406da7c..9852c9d3870 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -2471,12 +2471,12 @@ func (config *RCConfig) start() error { } // Simplified version of RunRC, that does not create RC, but creates plain Pods. -// optionally waits for pods to start running (if waitForRunning == true) +// Optionally waits for pods to start running (if waitForRunning == true). +// The number of replicas must be non-zero. 
func StartPods(c *client.Client, replicas int, namespace string, podNamePrefix string, pod api.Pod, waitForRunning bool) { // no pod to start if replicas < 1 { - Logf("No pod to start, skipping...") - return + panic("StartPods: number of replicas must be non-zero") } startPodsID := string(util.NewUUID()) // So that we can label and find them for i := 0; i < replicas; i++ { diff --git a/test/e2e/scheduler_predicates.go b/test/e2e/scheduler_predicates.go index a822c4be6e4..0849cb1fc67 100644 --- a/test/e2e/scheduler_predicates.go +++ b/test/e2e/scheduler_predicates.go @@ -235,24 +235,29 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster max pods and trying to start another one", podsNeededForSaturation)) - framework.StartPods(c, podsNeededForSaturation, ns, "maxp", api.Pod{ - TypeMeta: unversioned.TypeMeta{ - Kind: "Pod", - }, - ObjectMeta: api.ObjectMeta{ - Name: "", - Labels: map[string]string{"name": ""}, - }, - Spec: api.PodSpec{ - Containers: []api.Container{ - { - Name: "", - Image: framework.GetPauseImageName(f.Client), + // As the pods are distributed randomly among nodes, + // it can easily happen that all nodes are saturated + // and there is no need to create additional pods. + // StartPods requires at least one pod to replicate. 
+ if podsNeededForSaturation > 0 { + framework.StartPods(c, podsNeededForSaturation, ns, "maxp", api.Pod{ + TypeMeta: unversioned.TypeMeta{ + Kind: "Pod", + }, + ObjectMeta: api.ObjectMeta{ + Name: "", + Labels: map[string]string{"name": ""}, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "", + Image: framework.GetPauseImageName(f.Client), + }, }, }, - }, - }, true) - + }, true) + } podName := "additional-pod" _, err := c.Pods(ns).Create(&api.Pod{ TypeMeta: unversioned.TypeMeta{ @@ -312,32 +317,37 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster CPU and trying to start another one", podsNeededForSaturation)) - framework.StartPods(c, podsNeededForSaturation, ns, "overcommit", api.Pod{ - TypeMeta: unversioned.TypeMeta{ - Kind: "Pod", - }, - ObjectMeta: api.ObjectMeta{ - Name: "", - Labels: map[string]string{"name": ""}, - }, - Spec: api.PodSpec{ - Containers: []api.Container{ - { - Name: "", - Image: framework.GetPauseImageName(f.Client), - Resources: api.ResourceRequirements{ - Limits: api.ResourceList{ - "cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"), - }, - Requests: api.ResourceList{ - "cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"), + // As the pods are distributed randomly among nodes, + // it can easily happen that all nodes are saturated + // and there is no need to create additional pods. + // StartPods requires at least one pod to replicate. 
+ if podsNeededForSaturation > 0 { framework.StartPods(c, podsNeededForSaturation, ns, "overcommit", api.Pod{ + TypeMeta: unversioned.TypeMeta{ + Kind: "Pod", + }, + ObjectMeta: api.ObjectMeta{ + Name: "", + Labels: map[string]string{"name": ""}, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "", + Image: framework.GetPauseImageName(f.Client), + Resources: api.ResourceRequirements{ + Limits: api.ResourceList{ + "cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"), + }, + Requests: api.ResourceList{ + "cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"), + }, }, }, }, }, - }, - }, true) - + }, true) + } podName := "additional-pod" _, err = c.Pods(ns).Create(&api.Pod{ TypeMeta: unversioned.TypeMeta{ diff --git a/test/e2e/ubernetes_lite.go b/test/e2e/ubernetes_lite.go index c3368c79706..5584a507944 100644 --- a/test/e2e/ubernetes_lite.go +++ b/test/e2e/ubernetes_lite.go @@ -93,6 +93,11 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string) }, }, } + + // Caution: StartPods requires at least one pod to replicate. + // Based on the callers, replicas is always a positive number: zoneCount >= 0 implies (2*zoneCount)+1 > 0. + // Thus, no need to test for it. Once the precondition changes to zero number of replicas, + // test for replicaCount > 0. Otherwise, StartPods panics. framework.StartPods(f.Client, replicaCount, f.Namespace.Name, serviceName, *podSpec, false) // Wait for all of them to be scheduled