Merge pull request #112351 from piotrnosek/fixbehaviortests

Fix flaky and failing HPA E2E Behavior tests
commit 46e1718462
Authored by Kubernetes Prow Robot on 2022-09-12 10:55:37 -07:00; committed by GitHub


@@ -209,12 +209,16 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
})
ginkgo.Describe("with scale limited by number of Pods rate", func() {
podCPURequest := 200
targetCPUUtilizationPercent := 25
usageForSingleReplica := 45
ginkgo.It("should scale up no more than given number of Pods per minute", func() {
ginkgo.By("setting up resource consumer and HPA")
initPods := 1
initCPUUsageTotal := initPods * usageForSingleReplica
limitWindowLength := 1 * time.Minute
podsLimitPerMinute := 2
podsLimitPerMinute := 1
rc := e2eautoscaling.NewDynamicResourceConsumer(
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
@@ -230,33 +234,33 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)
ginkgo.By("triggering scale up by increasing consumption")
rc.ConsumeCPU(5 * usageForSingleReplica)
rc.ConsumeCPU(3 * usageForSingleReplica)
waitStart := time.Now()
rc.WaitForReplicas(2, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor2 := time.Now().Sub(waitStart)
waitStart = time.Now()
rc.WaitForReplicas(3, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor3 := time.Now().Sub(waitStart)
waitStart = time.Now()
rc.WaitForReplicas(5, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor5 := time.Now().Sub(waitStart)
ginkgo.By("verifying time waited for a scale up to 3 replicas")
ginkgo.By("verifying time waited for a scale up to 2 replicas")
deadline := limitWindowLength + maxHPAReactionTime + maxResourceConsumerDelay
// First scale event can happen right away, as there were no scale events in the past.
framework.ExpectEqual(timeWaitedFor3 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor3, deadline)
framework.ExpectEqual(timeWaitedFor2 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor2, deadline)
ginkgo.By("verifying time waited for a scale up to 5 replicas")
ginkgo.By("verifying time waited for a scale up to 3 replicas")
// Second scale event needs to respect limit window.
framework.ExpectEqual(timeWaitedFor5 > limitWindowLength, true, "waited %s, wanted to wait more than %s", timeWaitedFor5, limitWindowLength)
framework.ExpectEqual(timeWaitedFor5 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor5, deadline)
framework.ExpectEqual(timeWaitedFor3 > limitWindowLength, true, "waited %s, wanted to wait more than %s", timeWaitedFor3, limitWindowLength)
framework.ExpectEqual(timeWaitedFor3 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor3, deadline)
})
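To follow the numbers in the test above: with podCPURequest = 200 (mCPU) and a 25% utilization target, each replica accounts for 50 mCPU of target usage, so consuming 3 × 45 = 135 mCPU should settle the HPA at 3 replicas, while the Pods-type limit of one Pod per one-minute window forces the 2 → 3 step to wait out a full window. A minimal sketch of that arithmetic, using the standard HPA sizing formula and ignoring the tolerance band (not code from the commit):

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	podCPURequest := 200.0           // mCPU requested per Pod (from the test)
	targetUtilizationPercent := 25.0 // HPA target (from the test)
	usageForSingleReplica := 45.0    // mCPU consumed per "replica unit" (from the test)

	targetPerPod := podCPURequest * targetUtilizationPercent / 100 // 50 mCPU
	totalUsage := 3 * usageForSingleReplica                        // 135 mCPU

	// Standard HPA sizing: desired = ceil(totalUsage / targetPerPod), tolerance aside.
	desired := math.Ceil(totalUsage / targetPerPod)
	fmt.Println(desired) // 3

	// With a Pods policy of 1 Pod per 60s window, the HPA may go 1 -> 2 right away,
	// but the 2 -> 3 step must wait at least one full window — hence the assertions
	// on timeWaitedFor2 and timeWaitedFor3 above.
}
```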
ginkgo.It("should scale down no more than given number of Pods per minute", func() {
ginkgo.By("setting up resource consumer and HPA")
initPods := 6
initPods := 3
initCPUUsageTotal := initPods * usageForSingleReplica
limitWindowLength := 1 * time.Minute
podsLimitPerMinute := 2
podsLimitPerMinute := 1
rc := e2eautoscaling.NewDynamicResourceConsumer(
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
@@ -275,29 +279,33 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
rc.ConsumeCPU(1 * usageForSingleReplica)
waitStart := time.Now()
rc.WaitForReplicas(4, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor4 := time.Now().Sub(waitStart)
waitStart = time.Now()
rc.WaitForReplicas(2, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor2 := time.Now().Sub(waitStart)
ginkgo.By("verifying time waited for a scale down to 4 replicas")
deadline := limitWindowLength + maxHPAReactionTime + maxResourceConsumerDelay
// First scale event can happen right away, as there were no scale events in the past.
framework.ExpectEqual(timeWaitedFor4 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor4, deadline)
waitStart = time.Now()
rc.WaitForReplicas(1, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor1 := time.Now().Sub(waitStart)
ginkgo.By("verifying time waited for a scale down to 2 replicas")
// Second scale event needs to respect limit window.
framework.ExpectEqual(timeWaitedFor2 > limitWindowLength, true, "waited %s, wanted more than %s", timeWaitedFor2, limitWindowLength)
deadline := limitWindowLength + maxHPAReactionTime + maxResourceConsumerDelay
// First scale event can happen right away, as there were no scale events in the past.
framework.ExpectEqual(timeWaitedFor2 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor2, deadline)
ginkgo.By("verifying time waited for a scale down to 1 replicas")
// Second scale event needs to respect limit window.
framework.ExpectEqual(timeWaitedFor1 > limitWindowLength, true, "waited %s, wanted more than %s", timeWaitedFor1, limitWindowLength)
framework.ExpectEqual(timeWaitedFor1 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor1, deadline)
})
})
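Both rate-limited tests configure the HPA behavior through a Pods-type scaling policy; the last hunk in this diff shows the helper call explicitly: e2eautoscaling.HPAScalingRuleWithScalingPolicy(autoscalingv2.PodsScalingPolicy, int32(podsLimitPerMinute), int32(limitWindowLength.Seconds())). A minimal sketch of the kind of rule such a helper would produce, written directly against the autoscaling/v2 API (the helper's exact output is an assumption):

```go
package sketch

import autoscalingv2 "k8s.io/api/autoscaling/v2"

// podsRateLimitRule limits replica changes to `pods` Pods per `periodSeconds`
// window; with pods=1 and periodSeconds=60 it matches the tests above.
func podsRateLimitRule(pods, periodSeconds int32) *autoscalingv2.HPAScalingRules {
	return &autoscalingv2.HPAScalingRules{
		Policies: []autoscalingv2.HPAScalingPolicy{{
			Type:          autoscalingv2.PodsScalingPolicy, // "Pods"
			Value:         pods,
			PeriodSeconds: periodSeconds,
		}},
	}
}
```

The same HPAScalingRules value can be attached to either the ScaleUp or the ScaleDown side of a HorizontalPodAutoscalerBehavior, which is presumably why one helper serves both the scale-up and scale-down variants of these tests.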
ginkgo.Describe("with scale limited by percentage", func() {
podCPURequest := 200
targetCPUUtilizationPercent := 25
usageForSingleReplica := 45
ginkgo.It("should scale up no more than given percentage of current Pods per minute", func() {
ginkgo.By("setting up resource consumer and HPA")
initPods := 4
initPods := 2
initCPUUsageTotal := initPods * usageForSingleReplica
limitWindowLength := 1 * time.Minute
percentageLimitPerMinute := 50
@@ -316,33 +324,34 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)
ginkgo.By("triggering scale up by increasing consumption")
rc.ConsumeCPU(10 * usageForSingleReplica)
rc.ConsumeCPU(8 * usageForSingleReplica)
waitStart := time.Now()
rc.WaitForReplicas(6, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor6 := time.Now().Sub(waitStart)
rc.WaitForReplicas(3, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor3 := time.Now().Sub(waitStart)
waitStart = time.Now()
rc.WaitForReplicas(9, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor9 := time.Now().Sub(waitStart)
// Scale up limited by percentage takes ceiling, so new replicas number is ceil(3 * 1.5) = ceil(4.5) = 5
rc.WaitForReplicas(5, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor5 := time.Now().Sub(waitStart)
ginkgo.By("verifying time waited for a scale up to 6 replicas")
ginkgo.By("verifying time waited for a scale up to 3 replicas")
deadline := limitWindowLength + maxHPAReactionTime + maxResourceConsumerDelay
// First scale event can happen right away, as there were no scale events in the past.
framework.ExpectEqual(timeWaitedFor6 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor6, deadline)
framework.ExpectEqual(timeWaitedFor3 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor3, deadline)
ginkgo.By("verifying time waited for a scale up to 9 replicas")
ginkgo.By("verifying time waited for a scale up to 5 replicas")
// Second scale event needs to respect limit window.
framework.ExpectEqual(timeWaitedFor9 > limitWindowLength, true, "waited %s, wanted to wait more than %s", timeWaitedFor9, limitWindowLength)
framework.ExpectEqual(timeWaitedFor9 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor9, deadline)
framework.ExpectEqual(timeWaitedFor5 > limitWindowLength, true, "waited %s, wanted to wait more than %s", timeWaitedFor5, limitWindowLength)
framework.ExpectEqual(timeWaitedFor5 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor5, deadline)
})
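The ceiling arithmetic in the comment above determines the intermediate replica counts the test waits for. A quick check of the rule exactly as the comment states it (mirroring the comment, not the controller source):

```go
package main

import (
	"fmt"
	"math"
)

// capAfterScaleUp is the per-window cap described in the test comment:
// ceil(current * (1 + percent/100)).
func capAfterScaleUp(current, percent float64) int {
	return int(math.Ceil(current * (1 + percent/100)))
}

func main() {
	fmt.Println(capAfterScaleUp(2, 50)) // first step:  ceil(2 * 1.5) = 3
	fmt.Println(capAfterScaleUp(3, 50)) // second step: ceil(3 * 1.5) = ceil(4.5) = 5
}
```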
ginkgo.It("should scale down no more than given percentage of current Pods per minute", func() {
ginkgo.By("setting up resource consumer and HPA")
initPods := 8
initPods := 7
initCPUUsageTotal := initPods * usageForSingleReplica
limitWindowLength := 1 * time.Minute
percentageLimitPerMinute := 50
percentageLimitPerMinute := 25
rc := e2eautoscaling.NewDynamicResourceConsumer(
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
@@ -361,26 +370,29 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
rc.ConsumeCPU(1 * usageForSingleReplica)
waitStart := time.Now()
rc.WaitForReplicas(4, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor4 := time.Now().Sub(waitStart)
rc.WaitForReplicas(5, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor5 := time.Now().Sub(waitStart)
waitStart = time.Now()
rc.WaitForReplicas(2, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor2 := time.Now().Sub(waitStart)
// Scale down limited by percentage takes floor, so new replicas number is floor(5 * 0.75) = floor(3.75) = 3
rc.WaitForReplicas(3, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor3 := time.Now().Sub(waitStart)
ginkgo.By("verifying time waited for a scale down to 4 replicas")
ginkgo.By("verifying time waited for a scale down to 5 replicas")
deadline := limitWindowLength + maxHPAReactionTime + maxResourceConsumerDelay
// First scale event can happen right away, as there were no scale events in the past.
framework.ExpectEqual(timeWaitedFor4 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor4, deadline)
framework.ExpectEqual(timeWaitedFor5 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor5, deadline)
ginkgo.By("verifying time waited for a scale down to 2 replicas")
ginkgo.By("verifying time waited for a scale down to 3 replicas")
// Second scale event needs to respect limit window.
framework.ExpectEqual(timeWaitedFor2 > limitWindowLength, true, "waited %s, wanted more than %s", timeWaitedFor2, limitWindowLength)
framework.ExpectEqual(timeWaitedFor2 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor2, deadline)
framework.ExpectEqual(timeWaitedFor3 > limitWindowLength, true, "waited %s, wanted more than %s", timeWaitedFor3, limitWindowLength)
framework.ExpectEqual(timeWaitedFor3 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor3, deadline)
})
})
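The scale-down counterpart takes a floor: with the new initPods = 7 and a 25% per-minute limit, the expected intermediate counts work out to 5 and then 3. A quick check of the rule as stated in the test comment (again mirroring the comment, not the controller source):

```go
package main

import (
	"fmt"
	"math"
)

// floorAfterScaleDown is the per-window floor described in the test comment:
// floor(current * (1 - percent/100)).
func floorAfterScaleDown(current, percent float64) int {
	return int(math.Floor(current * (1 - percent/100)))
}

func main() {
	fmt.Println(floorAfterScaleDown(7, 25)) // first step:  floor(7 * 0.75) = floor(5.25) = 5
	fmt.Println(floorAfterScaleDown(5, 25)) // second step: floor(5 * 0.75) = floor(3.75) = 3
}
```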
ginkgo.Describe("with both scale up and down controls configured", func() {
waitBuffer := 2 * time.Minute
ginkgo.It("should keep recommendation within the range over two stabilization windows", func() {
ginkgo.By("setting up resource consumer and HPA")
initPods := 2
@@ -396,13 +408,13 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
defer rc.CleanUp()
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
rc, int32(targetCPUUtilizationPercent), 2, 10,
rc, int32(targetCPUUtilizationPercent), 2, 5,
e2eautoscaling.HPABehaviorWithStabilizationWindows(upScaleStabilization, downScaleStabilization),
)
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)
ginkgo.By("triggering scale up by increasing consumption")
rc.ConsumeCPU(5 * usageForSingleReplica)
rc.ConsumeCPU(4 * usageForSingleReplica)
waitDeadline := upScaleStabilization
ginkgo.By("verifying number of replicas stay in desired range within stabilisation window")
@@ -450,7 +462,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
scaleUpRule := e2eautoscaling.HPAScalingRuleWithScalingPolicy(autoscalingv2.PodsScalingPolicy, int32(podsLimitPerMinute), int32(limitWindowLength.Seconds()))
scaleDownRule := e2eautoscaling.HPAScalingRuleWithStabilizationWindow(int32(downScaleStabilization.Seconds()))
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
rc, int32(targetCPUUtilizationPercent), 2, 10,
rc, int32(targetCPUUtilizationPercent), 2, 5,
e2eautoscaling.HPABehaviorWithScaleUpAndDownRules(scaleUpRule, scaleDownRule),
)
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)
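The final hunk mixes both mechanisms: a Pods-type rate limit on scale-up and a stabilization window on scale-down, handed to e2eautoscaling.HPABehaviorWithScaleUpAndDownRules(scaleUpRule, scaleDownRule). A minimal sketch of the combined behavior that helper is presumably building from the two rules (an assumption about the helper, expressed with autoscaling/v2 types):

```go
package sketch

import autoscalingv2 "k8s.io/api/autoscaling/v2"

// combinedBehavior pairs a rate-limited scale-up rule with a stabilized
// scale-down rule, as the last test in this diff configures.
func combinedBehavior(podsPerWindow, windowSeconds, downStabilization int32) autoscalingv2.HorizontalPodAutoscalerBehavior {
	return autoscalingv2.HorizontalPodAutoscalerBehavior{
		ScaleUp: &autoscalingv2.HPAScalingRules{
			Policies: []autoscalingv2.HPAScalingPolicy{{
				Type:          autoscalingv2.PodsScalingPolicy,
				Value:         podsPerWindow,
				PeriodSeconds: windowSeconds,
			}},
		},
		ScaleDown: &autoscalingv2.HPAScalingRules{
			StabilizationWindowSeconds: &downStabilization,
		},
	}
}
```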