Merge pull request #25255 from ingvagabund/e2e-framework-util-StartPods-panic-for-zero-replicas
Automatic merge from submit-queue

e2e.framework.util.StartPods: panic if the number of replicas is zero

The number of pods to start must be non-zero. Otherwise, the function waits for pods forever if ``waitForRunning`` is true. If the number of replicas is zero, panic so the mistake is heard all over the e2e realm.

Update all callers of StartPods to test for a non-zero number of replicas.
This commit is contained in: commit e0b8595f92
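To make the new contract concrete, here is a minimal, dependency-free sketch of the guard pattern the callers adopt. `startPods`, `podsNeeded`, and the printed messages are illustrative stand-ins, not the real framework code:

```go
package main

import "fmt"

// startPods is a simplified sketch of the contract this PR gives the e2e
// helper: a replica count below one is treated as a caller bug and panics,
// instead of silently returning (or waiting forever when waitForRunning is
// true). The real helper lives in the e2e framework and creates pods through
// the API client; everything below is illustrative only.
func startPods(replicas int, namePrefix string, waitForRunning bool) {
	if replicas < 1 {
		panic("startPods: number of replicas must be non-zero")
	}
	for i := 0; i < replicas; i++ {
		fmt.Printf("creating pod %s%d\n", namePrefix, i)
	}
	if waitForRunning {
		fmt.Println("waiting for all pods to be running")
	}
}

func main() {
	// Callers now guard a possibly-zero computed count, mirroring the
	// scheduler predicate tests changed in this PR.
	podsNeeded := 0 // e.g. the cluster is already saturated
	if podsNeeded > 0 {
		startPods(podsNeeded, "maxp-", true)
	}
}
```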
@@ -2590,12 +2590,12 @@ func (config *RCConfig) start() error {
 }
 
 // Simplified version of RunRC, that does not create RC, but creates plain Pods.
-// optionally waits for pods to start running (if waitForRunning == true)
+// Optionally waits for pods to start running (if waitForRunning == true).
+// The number of replicas must be non-zero.
 func StartPods(c *client.Client, replicas int, namespace string, podNamePrefix string, pod api.Pod, waitForRunning bool) {
 	// no pod to start
 	if replicas < 1 {
-		Logf("No pod to start, skipping...")
-		return
+		panic("StartPods: number of replicas must be non-zero")
 	}
 	startPodsID := string(util.NewUUID()) // So that we can label and find them
 	for i := 0; i < replicas; i++ {
@@ -252,24 +252,29 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
 
 		By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster max pods and trying to start another one", podsNeededForSaturation))
 
-		framework.StartPods(c, podsNeededForSaturation, ns, "maxp", api.Pod{
-			TypeMeta: unversioned.TypeMeta{
-				Kind: "Pod",
-			},
-			ObjectMeta: api.ObjectMeta{
-				Name: "",
-				Labels: map[string]string{"name": ""},
-			},
-			Spec: api.PodSpec{
-				Containers: []api.Container{
-					{
-						Name: "",
-						Image: framework.GetPauseImageName(f.Client),
-					},
-				},
-			},
-		}, true)
+		// As the pods are distributed randomly among nodes,
+		// it can easily happen that all nodes are saturated
+		// and there is no need to create additional pods.
+		// StartPods requires at least one pod to replicate.
+		if podsNeededForSaturation > 0 {
+			framework.StartPods(c, podsNeededForSaturation, ns, "maxp", api.Pod{
+				TypeMeta: unversioned.TypeMeta{
+					Kind: "Pod",
+				},
+				ObjectMeta: api.ObjectMeta{
+					Name: "",
+					Labels: map[string]string{"name": ""},
+				},
+				Spec: api.PodSpec{
+					Containers: []api.Container{
+						{
+							Name: "",
+							Image: framework.GetPauseImageName(f.Client),
+						},
+					},
+				},
+			}, true)
+		}
 
 		podName := "additional-pod"
 		_, err := c.Pods(ns).Create(&api.Pod{
 			TypeMeta: unversioned.TypeMeta{
@@ -329,32 +334,37 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
 
 		By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster CPU and trying to start another one", podsNeededForSaturation))
 
-		framework.StartPods(c, podsNeededForSaturation, ns, "overcommit", api.Pod{
-			TypeMeta: unversioned.TypeMeta{
-				Kind: "Pod",
-			},
-			ObjectMeta: api.ObjectMeta{
-				Name: "",
-				Labels: map[string]string{"name": ""},
-			},
-			Spec: api.PodSpec{
-				Containers: []api.Container{
-					{
-						Name: "",
-						Image: framework.GetPauseImageName(f.Client),
-						Resources: api.ResourceRequirements{
-							Limits: api.ResourceList{
-								"cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
-							},
-							Requests: api.ResourceList{
-								"cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
-							},
-						},
-					},
-				},
-			},
-		}, true)
+		// As the pods are distributed randomly among nodes,
+		// it can easily happen that all nodes are saturated
+		// and there is no need to create additional pods.
+		// StartPods requires at least one pod to replicate.
+		if podsNeededForSaturation > 0 {
+			framework.StartPods(c, podsNeededForSaturation, ns, "overcommit", api.Pod{
+				TypeMeta: unversioned.TypeMeta{
+					Kind: "Pod",
+				},
+				ObjectMeta: api.ObjectMeta{
+					Name: "",
+					Labels: map[string]string{"name": ""},
+				},
+				Spec: api.PodSpec{
+					Containers: []api.Container{
+						{
+							Name: "",
+							Image: framework.GetPauseImageName(f.Client),
+							Resources: api.ResourceRequirements{
+								Limits: api.ResourceList{
+									"cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
+								},
+								Requests: api.ResourceList{
+									"cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
+								},
+							},
+						},
+					},
+				},
+			}, true)
+		}
 
 		podName := "additional-pod"
 		_, err = c.Pods(ns).Create(&api.Pod{
 			TypeMeta: unversioned.TypeMeta{
@@ -93,6 +93,11 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string)
 			},
 		},
 	}
 
+	// Caution: StartPods requires at least one pod to replicate.
+	// Based on the callers, replicas is always a positive number: zoneCount >= 0 implies (2*zoneCount)+1 > 0.
+	// Thus, there is no need to test for it here. Once the precondition changes and zero replicas become possible,
+	// test for replicaCount > 0 first. Otherwise, StartPods panics.
 	framework.StartPods(f.Client, replicaCount, f.Namespace.Name, serviceName, *podSpec, false)
 
 	// Wait for all of them to be scheduled