Merge pull request #131211 from BenTheElder/max-pods

fix flaky garbage collector tests
commit d6e3f34f70
Kubernetes Prow Robot authored on 2025-04-14 08:19:06 -07:00; committed by GitHub
2 changed files with 18 additions and 9 deletions

test/conformance/testdata/conformance.yaml

@@ -511,7 +511,7 @@
   file: test/e2e/apimachinery/garbage_collector.go
 - testname: Garbage Collector, delete replication controller, after owned pods
   codename: '[sig-api-machinery] Garbage collector should keep the rc around until
-    all its pods are deleted if the deleteOptions says so [Conformance]'
+    all its pods are deleted if the deleteOptions says so [Serial] [Conformance]'
   description: Create a replication controller with maximum allocatable Pods between
     10 and 100 replicas. Once RC is created and the all Pods are created, delete RC
     with deleteOptions.PropagationPolicy set to Foreground. Deleting the Replication
@@ -528,7 +528,8 @@
   file: test/e2e/apimachinery/garbage_collector.go
 - testname: Garbage Collector, multiple owners
   codename: '[sig-api-machinery] Garbage collector should not delete dependents that
-    have both valid owner and owner that''s waiting for dependents to be deleted [Conformance]'
+    have both valid owner and owner that''s waiting for dependents to be deleted [Serial]
+    [Conformance]'
   description: Create a replication controller RC1, with maximum allocatable Pods
     between 10 and 100 replicas. Create second replication controller RC2 and set
     RC2 as owner for half of those replicas. Once RC1 is created and the all Pods
@@ -549,7 +550,7 @@
   file: test/e2e/apimachinery/garbage_collector.go
 - testname: Garbage Collector, delete replication controller, propagation policy orphan
   codename: '[sig-api-machinery] Garbage collector should orphan pods created by rc
-    if delete options say so [Conformance]'
+    if delete options say so [Serial] [Conformance]'
   description: Create a replication controller with maximum allocatable Pods between
     10 and 100 replicas. Once RC is created and the all Pods are created, delete RC
     with deleteOptions.PropagationPolicy set to Orphan. Deleting the Replication Controller

test/e2e/apimachinery/garbage_collector.go

@@ -54,23 +54,31 @@ import (
 // estimateMaximumPods estimates how many pods the cluster can handle
 // with some wiggle room, to prevent pods being unable to schedule due
 // to max pod constraints.
+//
+// Tests that call this should use framework.WithSerial() because they're not
+// safe to run concurrently as they consume a large number of pods.
 func estimateMaximumPods(ctx context.Context, c clientset.Interface, min, max int32) int32 {
     nodes, err := e2enode.GetReadySchedulableNodes(ctx, c)
     framework.ExpectNoError(err)
     availablePods := int32(0)
+    // estimate some reasonable overhead per-node for pods that are non-test
+    const daemonSetReservedPods = 10
     for _, node := range nodes.Items {
         if q, ok := node.Status.Allocatable["pods"]; ok {
             if num, ok := q.AsInt64(); ok {
-                availablePods += int32(num)
+                if num > daemonSetReservedPods {
+                    availablePods += int32(num - daemonSetReservedPods)
+                }
                 continue
             }
         }
-        // best guess per node, since default maxPerCore is 10 and most nodes have at least
+        // Only when we fail to obtain the number, we fall back to a best guess
+        // per node. Since default maxPerCore is 10 and most nodes have at least
         // one core.
         availablePods += 10
     }
-    //avoid creating exactly max pods
+    // avoid creating exactly max pods
     availablePods = int32(float32(availablePods) * 0.5)
     // bound the top and bottom
     if availablePods > max {
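To make the new sizing concrete, here is a standalone sketch of the same arithmetic (the estimate helper and the hard-coded node values are illustrative, not part of this PR): subtract a per-node daemonset allowance, sum the remainder, halve it, and clamp to [min, max].

    package main

    import "fmt"

    // estimate mirrors the arithmetic of estimateMaximumPods: subtract a
    // per-node daemonset allowance, sum the remainder, halve it to avoid
    // creating exactly the maximum, then clamp to [min, max].
    func estimate(allocatablePerNode []int64, min, max int32) int32 {
        const daemonSetReservedPods = 10
        available := int32(0)
        for _, num := range allocatablePerNode {
            if num > daemonSetReservedPods {
                available += int32(num - daemonSetReservedPods)
            }
        }
        available = int32(float32(available) * 0.5)
        if available > max {
            return max
        }
        if available < min {
            return min
        }
        return available
    }

    func main() {
        // Three nodes at the default of 110 allocatable pods each:
        // (110-10)*3 = 300, halved to 150, clamped to max -> 100.
        fmt.Println(estimate([]int64{110, 110, 110}, 10, 100)) // prints 100
    }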
@@ -377,7 +385,7 @@ var _ = SIGDescribe("Garbage collector", func() {
         Testname: Garbage Collector, delete replication controller, propagation policy orphan
         Description: Create a replication controller with maximum allocatable Pods between 10 and 100 replicas. Once RC is created and the all Pods are created, delete RC with deleteOptions.PropagationPolicy set to Orphan. Deleting the Replication Controller MUST cause pods created by that RC to be orphaned.
     */
-    framework.ConformanceIt("should orphan pods created by rc if delete options say so", func(ctx context.Context) {
+    framework.ConformanceIt("should orphan pods created by rc if delete options say so", framework.WithSerial(), func(ctx context.Context) {
         clientSet := f.ClientSet
         rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name)
         podClient := clientSet.CoreV1().Pods(f.Namespace.Name)
@@ -636,7 +644,7 @@ var _ = SIGDescribe("Garbage collector", func() {
         Testname: Garbage Collector, delete replication controller, after owned pods
         Description: Create a replication controller with maximum allocatable Pods between 10 and 100 replicas. Once RC is created and the all Pods are created, delete RC with deleteOptions.PropagationPolicy set to Foreground. Deleting the Replication Controller MUST cause pods created by that RC to be deleted before the RC is deleted.
     */
-    framework.ConformanceIt("should keep the rc around until all its pods are deleted if the deleteOptions says so", func(ctx context.Context) {
+    framework.ConformanceIt("should keep the rc around until all its pods are deleted if the deleteOptions says so", framework.WithSerial(), func(ctx context.Context) {
         clientSet := f.ClientSet
         rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name)
         podClient := clientSet.CoreV1().Pods(f.Namespace.Name)
@@ -711,7 +719,7 @@ var _ = SIGDescribe("Garbage collector", func() {
         Testname: Garbage Collector, multiple owners
         Description: Create a replication controller RC1, with maximum allocatable Pods between 10 and 100 replicas. Create second replication controller RC2 and set RC2 as owner for half of those replicas. Once RC1 is created and the all Pods are created, delete RC1 with deleteOptions.PropagationPolicy set to Foreground. Half of the Pods that has RC2 as owner MUST not be deleted or have a deletion timestamp. Deleting the Replication Controller MUST not delete Pods that are owned by multiple replication controllers.
     */
-    framework.ConformanceIt("should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted", func(ctx context.Context) {
+    framework.ConformanceIt("should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted", framework.WithSerial(), func(ctx context.Context) {
         clientSet := f.ClientSet
         rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name)
         podClient := clientSet.CoreV1().Pods(f.Namespace.Name)
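Taken together, the two halves of the fix follow a single pattern: a spec that sizes its workload from shared cluster capacity must also be marked Serial, so that two capacity-hungry specs never race for the same pod headroom. A minimal sketch of that pattern, assuming the framework context of the file above (the spec name and the createRCWithReplicas helper are hypothetical stand-ins for the real RC setup):

    framework.ConformanceIt("should exercise the garbage collector at scale", framework.WithSerial(), func(ctx context.Context) {
        // Sizing off live cluster capacity is safe only because WithSerial()
        // keeps any other capacity-hungry spec from running at the same time.
        replicas := estimateMaximumPods(ctx, f.ClientSet, 10, 100)
        createRCWithReplicas(ctx, f, replicas) // hypothetical helper standing in for the real RC setup
    })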