Mirror of https://github.com/k3s-io/kubernetes.git
e2e.framework.util.StartPods:
The number of pods to start must be non-zero. Otherwise the function waits for pods forever if waitForRunning is true. If the number of replicas is zero, panic so the mistake is heard all over the e2e realm. Update all callers of StartPods to test for a non-zero number of replicas.
commit d9f3e3c3ad
parent 970104df31
@@ -2471,12 +2471,12 @@ func (config *RCConfig) start() error {
 }
 
 // Simplified version of RunRC, that does not create RC, but creates plain Pods.
-// optionally waits for pods to start running (if waitForRunning == true)
+// Optionally waits for pods to start running (if waitForRunning == true).
+// The number of replicas must be non-zero.
 func StartPods(c *client.Client, replicas int, namespace string, podNamePrefix string, pod api.Pod, waitForRunning bool) {
-	// no pod to start
 	if replicas < 1 {
-		Logf("No pod to start, skipping...")
-		return
+		panic("StartPods: number of replicas must be non-zero")
 	}
 	startPodsID := string(util.NewUUID()) // So that we can label and find them
 	for i := 0; i < replicas; i++ {
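The framework-side change above swaps the silent skip for a panic: a zero replica count with waitForRunning == true would otherwise block the suite forever. As a minimal, self-contained sketch of the new contract (startPods below is a hypothetical stand-in for framework.StartPods, not the real e2e code):

    package main

    import "fmt"

    // startPods mirrors the contract introduced above: a zero replica count is
    // a caller bug, so fail loudly instead of waiting forever for pods that
    // will never appear. (Simplified stand-in, not the e2e implementation.)
    func startPods(replicas int, waitForRunning bool) {
    	if replicas < 1 {
    		panic("startPods: number of replicas must be non-zero")
    	}
    	for i := 0; i < replicas; i++ {
    		fmt.Printf("starting pod %d\n", i)
    	}
    	if waitForRunning {
    		fmt.Println("waiting for all pods to be running...")
    	}
    }

    func main() {
    	// Callers guard the call, as the updated e2e tests below do.
    	replicas := 3
    	if replicas > 0 {
    		startPods(replicas, true)
    	}
    }

Panicking rather than logging and returning makes the misuse fail the run immediately and visibly, instead of hanging in a wait loop.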
@@ -235,24 +235,29 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
 
 		By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster max pods and trying to start another one", podsNeededForSaturation))
 
-		framework.StartPods(c, podsNeededForSaturation, ns, "maxp", api.Pod{
-			TypeMeta: unversioned.TypeMeta{
-				Kind: "Pod",
-			},
-			ObjectMeta: api.ObjectMeta{
-				Name:   "",
-				Labels: map[string]string{"name": ""},
-			},
-			Spec: api.PodSpec{
-				Containers: []api.Container{
-					{
-						Name:  "",
-						Image: framework.GetPauseImageName(f.Client),
+		// As the pods are distributed randomly among nodes,
+		// it can easily happen that all nodes are saturated
+		// and there is no need to create additional pods.
+		// StartPods requires at least one pod to replicate.
+		if podsNeededForSaturation > 0 {
+			framework.StartPods(c, podsNeededForSaturation, ns, "maxp", api.Pod{
+				TypeMeta: unversioned.TypeMeta{
+					Kind: "Pod",
+				},
+				ObjectMeta: api.ObjectMeta{
+					Name:   "",
+					Labels: map[string]string{"name": ""},
+				},
+				Spec: api.PodSpec{
+					Containers: []api.Container{
+						{
+							Name:  "",
+							Image: framework.GetPauseImageName(f.Client),
+						},
 					},
 				},
-			},
-		}, true)
+			}, true)
+		}
 
 		podName := "additional-pod"
 		_, err := c.Pods(ns).Create(&api.Pod{
 			TypeMeta: unversioned.TypeMeta{
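Why the new guard can legitimately trigger: pods are placed randomly, so every node may already be full and podsNeededForSaturation can come out zero. A rough sketch of such a computation, where the node type and the capacity figures are illustrative assumptions rather than the test's actual code:

    package main

    import "fmt"

    // node is an illustrative stand-in for what the scheduler e2e test inspects:
    // each node has a pod capacity and some pods already running on it.
    type node struct {
    	capacity    int
    	currentPods int
    }

    func main() {
    	// If every node is already at capacity, the sum is zero, and calling
    	// StartPods would panic; hence the `if podsNeededForSaturation > 0` guard.
    	nodes := []node{
    		{capacity: 110, currentPods: 110},
    		{capacity: 110, currentPods: 110},
    	}
    	podsNeededForSaturation := 0
    	for _, n := range nodes {
    		podsNeededForSaturation += n.capacity - n.currentPods
    	}
    	fmt.Println("podsNeededForSaturation =", podsNeededForSaturation) // 0 here
    }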
@@ -312,32 +317,37 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
 
 		By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster CPU and trying to start another one", podsNeededForSaturation))
 
-		framework.StartPods(c, podsNeededForSaturation, ns, "overcommit", api.Pod{
-			TypeMeta: unversioned.TypeMeta{
-				Kind: "Pod",
-			},
-			ObjectMeta: api.ObjectMeta{
-				Name:   "",
-				Labels: map[string]string{"name": ""},
-			},
-			Spec: api.PodSpec{
-				Containers: []api.Container{
-					{
-						Name:  "",
-						Image: framework.GetPauseImageName(f.Client),
-						Resources: api.ResourceRequirements{
-							Limits: api.ResourceList{
-								"cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
-							},
-							Requests: api.ResourceList{
-								"cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
+		// As the pods are distributed randomly among nodes,
+		// it can easily happen that all nodes are saturated
+		// and there is no need to create additional pods.
+		// StartPods requires at least one pod to replicate.
+		if podsNeededForSaturation > 0 {
+			framework.StartPods(c, podsNeededForSaturation, ns, "overcommit", api.Pod{
+				TypeMeta: unversioned.TypeMeta{
+					Kind: "Pod",
+				},
+				ObjectMeta: api.ObjectMeta{
+					Name:   "",
+					Labels: map[string]string{"name": ""},
+				},
+				Spec: api.PodSpec{
+					Containers: []api.Container{
+						{
+							Name:  "",
+							Image: framework.GetPauseImageName(f.Client),
+							Resources: api.ResourceRequirements{
+								Limits: api.ResourceList{
+									"cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
+								},
+								Requests: api.ResourceList{
+									"cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
+								},
 							},
 						},
 					},
 				},
-			},
-		}, true)
+			}, true)
+		}
 
 		podName := "additional-pod"
 		_, err = c.Pods(ns).Create(&api.Pod{
 			TypeMeta: unversioned.TypeMeta{
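The CPU-saturation variant gets the same guard; there, podsNeededForSaturation is derived from free CPU rather than free pod slots. A hedged sketch of the milli-CPU arithmetic, with made-up numbers:

    package main

    import "fmt"

    func main() {
    	// Each overcommit pod requests milliCpuPerPod of CPU, so the number of
    	// pods needed to saturate the cluster is the free milli-CPU divided by
    	// the per-pod request. On an already CPU-saturated cluster this is zero,
    	// exactly the case the `if podsNeededForSaturation > 0` guard handles.
    	freeMilliCPU := int64(0) // cluster already saturated in this example
    	milliCpuPerPod := int64(500)
    	podsNeededForSaturation := int(freeMilliCPU / milliCpuPerPod)
    	fmt.Println("podsNeededForSaturation =", podsNeededForSaturation) // 0
    }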
@@ -93,6 +93,11 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string)
 			},
 		},
 	}
+
+	// Caution: StartPods requires at least one pod to replicate.
+	// Based on the callers, replicas is always a positive number: zoneCount >= 0 implies (2*zoneCount)+1 > 0.
+	// Thus, there is no need to test for it here. If the precondition ever changes to allow
+	// zero replicas, test for replicaCount > 0 first. Otherwise, StartPods panics.
 	framework.StartPods(f.Client, replicaCount, f.Namespace.Name, serviceName, *podSpec, false)
 
 	// Wait for all of them to be scheduled
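The SpreadServiceOrFail caller deliberately skips the guard, relying on the arithmetic spelled out in its comment. A quick check of that claim (the zone counts are arbitrary):

    package main

    import "fmt"

    func main() {
    	// Mirrors the comment above: for any zoneCount >= 0,
    	// replicaCount = (2 * zoneCount) + 1 is at least 1, so the
    	// StartPods precondition holds without an explicit guard.
    	for zoneCount := 0; zoneCount <= 4; zoneCount++ {
    		replicaCount := (2 * zoneCount) + 1
    		fmt.Printf("zoneCount=%d -> replicaCount=%d (> 0: %t)\n",
    			zoneCount, replicaCount, replicaCount > 0)
    	}
    }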