Merge pull request #111253 from piotrnosek/hpa-tests-1

Add e2e HPA Behavior tests: scale down stabilisation, scale up disabled, scale down disabled
Kubernetes Prow Robot 2022-07-21 12:59:42 -07:00 committed by GitHub
commit f93a37e314
2 changed files with 187 additions and 22 deletions
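For context (editorial note, not part of the diff): the tests below exercise the autoscaling/v2 `behavior` field of the HorizontalPodAutoscaler. A minimal, illustrative sketch of the two behavior shapes these tests rely on, assuming only the standard `k8s.io/api/autoscaling/v2` types, might look like:

package main

import (
	"fmt"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
)

func main() {
	// Stabilization windows: how long the HPA must see a consistent
	// recommendation before acting in the given direction.
	up := int32(3 * 60) // 3 minutes, as in the "long upscale stabilization window" test
	down := int32(60)   // 1 minute, as in the "short downscale stabilization window" test
	withWindows := &autoscalingv2.HorizontalPodAutoscalerBehavior{
		ScaleUp:   &autoscalingv2.HPAScalingRules{StabilizationWindowSeconds: &up},
		ScaleDown: &autoscalingv2.HPAScalingRules{StabilizationWindowSeconds: &down},
	}

	// Disabling one scaling direction entirely via selectPolicy: Disabled.
	disabled := autoscalingv2.DisabledPolicySelect
	scaleUpDisabled := &autoscalingv2.HorizontalPodAutoscalerBehavior{
		ScaleUp: &autoscalingv2.HPAScalingRules{SelectPolicy: &disabled},
	}

	fmt.Printf("%+v\n%+v\n", withWindows, scaleUpDisabled)
}

The new test-framework helpers added in this PR (HPABehaviorWithStabilizationWindows, HPABehaviorWithScaleUpDisabled, HPABehaviorWithScaleDownDisabled) build exactly these structures.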

View File

@@ -30,6 +30,21 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
f := framework.NewDefaultFramework("horizontal-pod-autoscaling")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
fullWindowOfNewUsage := 30 * time.Second
windowWithOldUsagePasses := 30 * time.Second
newPodMetricsDelay := 15 * time.Second
metricsAvailableDelay := fullWindowOfNewUsage + windowWithOldUsagePasses + newPodMetricsDelay
hpaReconciliationInterval := 15 * time.Second
actuationDelay := 10 * time.Second
maxHPAReactionTime := metricsAvailableDelay + hpaReconciliationInterval + actuationDelay
maxConsumeCPUDelay := 30 * time.Second
waitForReplicasPollInterval := 20 * time.Second
maxResourceConsumerDelay := maxConsumeCPUDelay + waitForReplicasPollInterval
waitBuffer := 1 * time.Minute
ginkgo.Describe("with short downscale stabilization window", func() {
ginkgo.It("should scale down soon after the stabilization period", func() {
ginkgo.By("setting up resource consumer and HPA")
@@ -38,6 +53,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
usageForSingleReplica := 110
initPods := 1
initCPUUsageTotal := initPods * usageForSingleReplica
upScaleStabilization := 0 * time.Minute
downScaleStabilization := 1 * time.Minute
rc := e2eautoscaling.NewDynamicResourceConsumer(
@@ -48,25 +64,11 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
defer rc.CleanUp()
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
rc, int32(targetCPUUtilizationPercent), 1, 5,
e2eautoscaling.HPABehaviorWithStabilizationWindows(upScaleStabilization, downScaleStabilization),
)
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)
// making sure HPA is ready, doing its job and already has a recommendation recorded
// for stabilization logic before lowering the consumption
ginkgo.By("triggering scale up to record a recommendation")
@@ -86,4 +88,130 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
framework.ExpectEqual(timeWaited < deadline, true, "waited %s, wanted less than %s", timeWaited, deadline)
})
})
ginkgo.Describe("with long upscale stabilization window", func() {
ginkgo.It("should scale up only after the stabilization period", func() {
ginkgo.By("setting up resource consumer and HPA")
podCPURequest := 500
targetCPUUtilizationPercent := 25
usageForSingleReplica := 110
initPods := 2
initCPUUsageTotal := initPods * usageForSingleReplica
upScaleStabilization := 3 * time.Minute
downScaleStabilization := 0 * time.Minute
rc := e2eautoscaling.NewDynamicResourceConsumer(
"consumer", f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
)
defer rc.CleanUp()
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
rc, int32(targetCPUUtilizationPercent), 1, 10,
e2eautoscaling.HPABehaviorWithStabilizationWindows(upScaleStabilization, downScaleStabilization),
)
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)
// making sure HPA is ready, doing its job and already has a recommendation recorded
// for stabilization logic before increasing the consumption
ginkgo.By("triggering scale down to record a recommendation")
rc.ConsumeCPU(1 * usageForSingleReplica)
rc.WaitForReplicas(1, maxHPAReactionTime+maxResourceConsumerDelay+waitBuffer)
ginkgo.By("triggering scale up by increasing consumption")
rc.ConsumeCPU(3 * usageForSingleReplica)
waitStart := time.Now()
rc.WaitForReplicas(3, upScaleStabilization+maxHPAReactionTime+maxResourceConsumerDelay+waitBuffer)
timeWaited := time.Now().Sub(waitStart)
ginkgo.By("verifying time waited for a scale up")
framework.Logf("time waited for scale up: %s", timeWaited)
framework.ExpectEqual(timeWaited > upScaleStabilization, true, "waited %s, wanted more than %s", timeWaited, upScaleStabilization)
deadline := upScaleStabilization + maxHPAReactionTime + maxResourceConsumerDelay
framework.ExpectEqual(timeWaited < deadline, true, "waited %s, wanted less than %s", timeWaited, deadline)
})
})
ginkgo.Describe("with upscale autoscaling disabled", func() {
ginkgo.It("shouldn't scale up", func() {
ginkgo.By("setting up resource consumer and HPA")
podCPURequest := 500
targetCPUUtilizationPercent := 25
usageForSingleReplica := 110
initPods := 1
initCPUUsageTotal := initPods * usageForSingleReplica
rc := e2eautoscaling.NewDynamicResourceConsumer(
"consumer", f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
)
defer rc.CleanUp()
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
rc, int32(targetCPUUtilizationPercent), 1, 10, e2eautoscaling.HPABehaviorWithScaleUpDisabled(),
)
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)
waitDeadline := maxHPAReactionTime + maxResourceConsumerDelay + waitBuffer
ginkgo.By("trying to trigger scale up")
rc.ConsumeCPU(8 * usageForSingleReplica)
waitStart := time.Now()
rc.EnsureDesiredReplicasInRange(initPods, initPods, waitDeadline, hpa.Name)
timeWaited := time.Now().Sub(waitStart)
ginkgo.By("verifying time waited for a scale up")
framework.Logf("time waited for scale up: %s", timeWaited)
framework.ExpectEqual(timeWaited > waitDeadline, true, "waited %s, wanted to wait more than %s", timeWaited, waitDeadline)
ginkgo.By("verifying number of replicas")
replicas := rc.GetReplicas()
framework.ExpectEqual(replicas == initPods, true, "had %s replicas, still have %s replicas after time deadline", initPods, replicas)
})
})
ginkgo.Describe("with downscale autoscaling disabled", func() {
ginkgo.It("shouldn't scale down", func() {
ginkgo.By("setting up resource consumer and HPA")
podCPURequest := 500
targetCPUUtilizationPercent := 25
usageForSingleReplica := 110
initPods := 3
initCPUUsageTotal := initPods * usageForSingleReplica
rc := e2eautoscaling.NewDynamicResourceConsumer(
"consumer", f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
)
defer rc.CleanUp()
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
rc, int32(targetCPUUtilizationPercent), 1, 10, e2eautoscaling.HPABehaviorWithScaleDownDisabled(),
)
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)
defaultDownscaleStabilisation := 5 * time.Minute
waitDeadline := maxHPAReactionTime + maxResourceConsumerDelay + defaultDownscaleStabilisation
ginkgo.By("trying to trigger scale down")
rc.ConsumeCPU(1 * usageForSingleReplica)
waitStart := time.Now()
rc.EnsureDesiredReplicasInRange(initPods, initPods, waitDeadline, hpa.Name)
timeWaited := time.Now().Sub(waitStart)
ginkgo.By("verifying time waited for a scale down")
framework.Logf("time waited for scale down: %s", timeWaited)
framework.ExpectEqual(timeWaited > waitDeadline, true, "waited %s, wanted to wait more than %s", timeWaited, waitDeadline)
ginkgo.By("verifying number of replicas")
replicas := rc.GetReplicas()
framework.ExpectEqual(replicas == initPods, true, "had %s replicas, still have %s replicas after time deadline", initPods, replicas)
})
})
})

View File

@@ -684,7 +684,7 @@ func DeleteContainerResourceHPA(rc *ResourceConsumer, autoscalerName string) {
rc.clientSet.AutoscalingV2().HorizontalPodAutoscalers(rc.nsName).Delete(context.TODO(), autoscalerName, metav1.DeleteOptions{})
}
func CreateCPUHorizontalPodAutoscalerWithBehavior(rc *ResourceConsumer, cpu int32, minReplicas int32, maxRepl int32, behavior *autoscalingv2.HorizontalPodAutoscalerBehavior) *autoscalingv2.HorizontalPodAutoscaler {
hpa := &autoscalingv2.HorizontalPodAutoscaler{
ObjectMeta: metav1.ObjectMeta{
Name: rc.name,
@@ -710,11 +710,7 @@ func CreateCPUHorizontalPodAutoscalerWithBehavior(rc *ResourceConsumer, cpu, min
},
},
},
Behavior: behavior,
},
}
hpa, errHPA := rc.clientSet.AutoscalingV2().HorizontalPodAutoscalers(rc.nsName).Create(context.TODO(), hpa, metav1.CreateOptions{})
@@ -722,6 +718,47 @@ func CreateCPUHorizontalPodAutoscalerWithBehavior(rc *ResourceConsumer, cpu, min
return hpa
}
func HPABehaviorWithScaleUpAndDownRules(scaleUpRule, scaleDownRule *autoscalingv2.HPAScalingRules) *autoscalingv2.HorizontalPodAutoscalerBehavior {
return &autoscalingv2.HorizontalPodAutoscalerBehavior{
ScaleUp: scaleUpRule,
ScaleDown: scaleDownRule,
}
}
func HPAStabilizationWindowScalingRule(stabilizationDuration int32) *autoscalingv2.HPAScalingRules {
return &autoscalingv2.HPAScalingRules{
StabilizationWindowSeconds: &stabilizationDuration,
}
}
func HPAPolicyDisabledScalingRule() *autoscalingv2.HPAScalingRules {
disabledPolicy := autoscalingv2.DisabledPolicySelect
return &autoscalingv2.HPAScalingRules{
SelectPolicy: &disabledPolicy,
}
}
func HPABehaviorWithStabilizationWindows(upscaleStabilization, downscaleStabilization time.Duration) *autoscalingv2.HorizontalPodAutoscalerBehavior {
return HPABehaviorWithScaleUpAndDownRules(
/*scaleUpRule=*/ HPAStabilizationWindowScalingRule(int32(upscaleStabilization.Seconds())),
/*scaleDownRule=*/ HPAStabilizationWindowScalingRule(int32(downscaleStabilization.Seconds())),
)
}
func HPABehaviorWithScaleUpDisabled() *autoscalingv2.HorizontalPodAutoscalerBehavior {
return HPABehaviorWithScaleUpAndDownRules(
/*scaleUpRule=*/ HPAPolicyDisabledScalingRule(),
/*scaleDownRule=*/ nil,
)
}
func HPABehaviorWithScaleDownDisabled() *autoscalingv2.HorizontalPodAutoscalerBehavior {
return HPABehaviorWithScaleUpAndDownRules(
/*scaleUpRule=*/ nil,
/*scaleDownRule=*/ HPAPolicyDisabledScalingRule(),
)
}
func DeleteHPAWithBehavior(rc *ResourceConsumer, autoscalerName string) {
rc.clientSet.AutoscalingV2().HorizontalPodAutoscalers(rc.nsName).Delete(context.TODO(), autoscalerName, metav1.DeleteOptions{})
}