diff --git a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go
index 9eb0ca5f521..6790f895603 100644
--- a/test/e2e/autoscaling/cluster_size_autoscaling.go
+++ b/test/e2e/autoscaling/cluster_size_autoscaling.go
@@ -1007,7 +1007,8 @@ var _ = SIGDescribe("Cluster size autoscaling", framework.WithSlow(), func() {
 		// 70% of allocatable memory of a single node * replica count, forcing a scale up in case of normal pods
 		replicaCount := 2 * nodeCount
 		reservedMemory := int(float64(replicaCount) * float64(0.7) * float64(memAllocatableMb))
-		ginkgo.DeferCleanup(ReserveMemoryWithSchedulerName(ctx, f, "memory-reservation", replicaCount, reservedMemory, false, 1, nonExistingBypassedSchedulerName))
+		cleanupFunc := ReserveMemoryWithSchedulerName(ctx, f, "memory-reservation", replicaCount, reservedMemory, false, 1, nonExistingBypassedSchedulerName)
+		defer cleanupFunc()
 		// Verify that cluster size is increased
 		ginkgo.By("Waiting for cluster scale-up")
 		sizeFunc := func(size int) bool {
@@ -1020,7 +1021,8 @@
 		// 50% of allocatable memory of a single node, so that no scale up would trigger in normal cases
 		replicaCount := 1
 		reservedMemory := int(float64(0.5) * float64(memAllocatableMb))
-		ginkgo.DeferCleanup(ReserveMemoryWithSchedulerName(ctx, f, "memory-reservation", replicaCount, reservedMemory, false, 1, nonExistingBypassedSchedulerName))
+		cleanupFunc := ReserveMemoryWithSchedulerName(ctx, f, "memory-reservation", replicaCount, reservedMemory, false, 1, nonExistingBypassedSchedulerName)
+		defer cleanupFunc()
 		// Verify that cluster size is the same
 		ginkgo.By(fmt.Sprintf("Waiting for scale up hoping it won't happen, polling cluster size for %s", scaleUpTimeout.String()))
 		sizeFunc := func(size int) bool {
@@ -1035,7 +1037,8 @@
 		replicaCount := 2 * nodeCount
 		reservedMemory := int(float64(replicaCount) * float64(0.7) * float64(memAllocatableMb))
 		schedulerName := "non-existent-scheduler-" + f.UniqueName
-		ginkgo.DeferCleanup(ReserveMemoryWithSchedulerName(ctx, f, "memory-reservation", replicaCount, reservedMemory, false, 1, schedulerName))
+		cleanupFunc := ReserveMemoryWithSchedulerName(ctx, f, "memory-reservation", replicaCount, reservedMemory, false, 1, schedulerName)
+		defer cleanupFunc()
 		// Verify that cluster size is the same
 		ginkgo.By(fmt.Sprintf("Waiting for scale up hoping it won't happen, polling cluster size for %s", scaleUpTimeout.String()))
 		sizeFunc := func(size int) bool {