mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #111253 from piotrnosek/hpa-tests-1
Add e2e HPA Behavior tests: scale down stabilisation, scale up disabled, scale down disabled
commit f93a37e314
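For context, the tests in this diff configure scaling behavior through the behavior field of the autoscaling/v2 HPA spec instead of a single downscale-stabilization parameter. Below is a minimal sketch (not part of the PR) of the two behavior shapes the new helpers build: explicit stabilization windows, and a scaling direction disabled via a select policy. It assumes only the k8s.io/api/autoscaling/v2 types; the variable names and values are illustrative.

// Sketch only: the autoscaling/v2 behavior shapes exercised by the tests below.
package main

import (
    "fmt"

    autoscalingv2 "k8s.io/api/autoscaling/v2"
)

func main() {
    // Behavior with explicit stabilization windows (what
    // HPABehaviorWithStabilizationWindows builds in the diff).
    up, down := int32(0), int32(60) // seconds; illustrative values
    withWindows := &autoscalingv2.HorizontalPodAutoscalerBehavior{
        ScaleUp:   &autoscalingv2.HPAScalingRules{StabilizationWindowSeconds: &up},
        ScaleDown: &autoscalingv2.HPAScalingRules{StabilizationWindowSeconds: &down},
    }

    // Behavior with scale-down disabled entirely (what
    // HPABehaviorWithScaleDownDisabled builds).
    disabled := autoscalingv2.DisabledPolicySelect
    downDisabled := &autoscalingv2.HorizontalPodAutoscalerBehavior{
        ScaleDown: &autoscalingv2.HPAScalingRules{SelectPolicy: &disabled},
    }

    fmt.Println(*withWindows.ScaleDown.StabilizationWindowSeconds, *downDisabled.ScaleDown.SelectPolicy) // 60 Disabled
}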
@@ -30,6 +30,21 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
    f := framework.NewDefaultFramework("horizontal-pod-autoscaling")
    f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged

+   fullWindowOfNewUsage := 30 * time.Second
+   windowWithOldUsagePasses := 30 * time.Second
+   newPodMetricsDelay := 15 * time.Second
+   metricsAvailableDelay := fullWindowOfNewUsage + windowWithOldUsagePasses + newPodMetricsDelay
+
+   hpaReconciliationInterval := 15 * time.Second
+   actuationDelay := 10 * time.Second
+   maxHPAReactionTime := metricsAvailableDelay + hpaReconciliationInterval + actuationDelay
+
+   maxConsumeCPUDelay := 30 * time.Second
+   waitForReplicasPollInterval := 20 * time.Second
+   maxResourceConsumerDelay := maxConsumeCPUDelay + waitForReplicasPollInterval
+
+   waitBuffer := 1 * time.Minute
+
    ginkgo.Describe("with short downscale stabilization window", func() {
        ginkgo.It("should scale down soon after the stabilization period", func() {
            ginkgo.By("setting up resource consumer and HPA")
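The constants added above set the timing budget shared by every test in this file. Worked out (not part of the diff, just the arithmetic): metricsAvailableDelay = 30s + 30s + 15s = 75s, maxHPAReactionTime = 75s + 15s + 10s = 100s, and maxResourceConsumerDelay = 30s + 20s = 50s, so a typical wait of maxHPAReactionTime + maxResourceConsumerDelay + waitBuffer comes to 3m30s. A small self-contained sketch of the same sums:

// Sketch only: reproduces the timing arithmetic from the constants above.
package main

import (
    "fmt"
    "time"
)

func main() {
    metricsAvailableDelay := 30*time.Second + 30*time.Second + 15*time.Second    // 75s
    maxHPAReactionTime := metricsAvailableDelay + 15*time.Second + 10*time.Second // 100s
    maxResourceConsumerDelay := 30*time.Second + 20*time.Second                   // 50s
    waitBuffer := 1 * time.Minute

    fmt.Println(maxHPAReactionTime + maxResourceConsumerDelay + waitBuffer) // 3m30s
}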
@@ -38,6 +53,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
            usageForSingleReplica := 110
            initPods := 1
            initCPUUsageTotal := initPods * usageForSingleReplica
+           upScaleStabilization := 0 * time.Minute
            downScaleStabilization := 1 * time.Minute

            rc := e2eautoscaling.NewDynamicResourceConsumer(
@@ -48,25 +64,11 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
            defer rc.CleanUp()

            hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
-               rc, int32(targetCPUUtilizationPercent), 1, 5, int32(downScaleStabilization.Seconds()),
+               rc, int32(targetCPUUtilizationPercent), 1, 5,
+               e2eautoscaling.HPABehaviorWithStabilizationWindows(upScaleStabilization, downScaleStabilization),
            )
            defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)

-           fullWindowOfNewUsage := 30 * time.Second
-           windowWithOldUsagePasses := 30 * time.Second
-           newPodMetricsDelay := 15 * time.Second
-           metricsAvailableDelay := fullWindowOfNewUsage + windowWithOldUsagePasses + newPodMetricsDelay
-
-           hpaReconciliationInterval := 15 * time.Second
-           actuationDelay := 10 * time.Second
-           maxHPAReactionTime := metricsAvailableDelay + hpaReconciliationInterval + actuationDelay
-
-           maxConsumeCPUDelay := 30 * time.Second
-           waitForReplicasPollInterval := 20 * time.Second
-           maxResourceConsumerDelay := maxConsumeCPUDelay + waitForReplicasPollInterval
-
-           waitBuffer := 1 * time.Minute
-
            // making sure HPA is ready, doing its job and already has a recommendation recorded
            // for stabilization logic before lowering the consumption
            ginkgo.By("triggering scale up to record a recommendation")
@@ -86,4 +88,130 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
            framework.ExpectEqual(timeWaited < deadline, true, "waited %s, wanted less than %s", timeWaited, deadline)
        })
    })
+
+   ginkgo.Describe("with long upscale stabilization window", func() {
+       ginkgo.It("should scale up only after the stabilization period", func() {
+           ginkgo.By("setting up resource consumer and HPA")
+           podCPURequest := 500
+           targetCPUUtilizationPercent := 25
+           usageForSingleReplica := 110
+           initPods := 2
+           initCPUUsageTotal := initPods * usageForSingleReplica
+           upScaleStabilization := 3 * time.Minute
+           downScaleStabilization := 0 * time.Minute
+
+           rc := e2eautoscaling.NewDynamicResourceConsumer(
+               "consumer", f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
+               initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
+               f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
+           )
+           defer rc.CleanUp()
+
+           hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
+               rc, int32(targetCPUUtilizationPercent), 1, 10,
+               e2eautoscaling.HPABehaviorWithStabilizationWindows(upScaleStabilization, downScaleStabilization),
+           )
+           defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)
+
+           // making sure HPA is ready, doing its job and already has a recommendation recorded
+           // for stabilization logic before increasing the consumption
+           ginkgo.By("triggering scale down to record a recommendation")
+           rc.ConsumeCPU(1 * usageForSingleReplica)
+           rc.WaitForReplicas(1, maxHPAReactionTime+maxResourceConsumerDelay+waitBuffer)
+
+           ginkgo.By("triggering scale up by increasing consumption")
+           rc.ConsumeCPU(3 * usageForSingleReplica)
+           waitStart := time.Now()
+           rc.WaitForReplicas(3, upScaleStabilization+maxHPAReactionTime+maxResourceConsumerDelay+waitBuffer)
+           timeWaited := time.Now().Sub(waitStart)
+
+           ginkgo.By("verifying time waited for a scale up")
+           framework.Logf("time waited for scale up: %s", timeWaited)
+           framework.ExpectEqual(timeWaited > upScaleStabilization, true, "waited %s, wanted more than %s", timeWaited, upScaleStabilization)
+           deadline := upScaleStabilization + maxHPAReactionTime + maxResourceConsumerDelay
+           framework.ExpectEqual(timeWaited < deadline, true, "waited %s, wanted less than %s", timeWaited, deadline)
+       })
+   })
+
+   ginkgo.Describe("with upscale autoscaling disabled", func() {
+       ginkgo.It("shouldn't scale up", func() {
+           ginkgo.By("setting up resource consumer and HPA")
+           podCPURequest := 500
+           targetCPUUtilizationPercent := 25
+           usageForSingleReplica := 110
+           initPods := 1
+           initCPUUsageTotal := initPods * usageForSingleReplica
+
+           rc := e2eautoscaling.NewDynamicResourceConsumer(
+               "consumer", f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
+               initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
+               f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
+           )
+           defer rc.CleanUp()
+
+           hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
+               rc, int32(targetCPUUtilizationPercent), 1, 10, e2eautoscaling.HPABehaviorWithScaleUpDisabled(),
+           )
+           defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)
+
+           waitDeadline := maxHPAReactionTime + maxResourceConsumerDelay + waitBuffer
+
+           ginkgo.By("trying to trigger scale up")
+           rc.ConsumeCPU(8 * usageForSingleReplica)
+           waitStart := time.Now()
+
+           rc.EnsureDesiredReplicasInRange(initPods, initPods, waitDeadline, hpa.Name)
+           timeWaited := time.Now().Sub(waitStart)
+
+           ginkgo.By("verifying time waited for a scale up")
+           framework.Logf("time waited for scale up: %s", timeWaited)
+           framework.ExpectEqual(timeWaited > waitDeadline, true, "waited %s, wanted to wait more than %s", timeWaited, waitDeadline)
+
+           ginkgo.By("verifying number of replicas")
+           replicas := rc.GetReplicas()
+           framework.ExpectEqual(replicas == initPods, true, "had %d replicas, still have %d replicas after time deadline", initPods, replicas)
+       })
+   })
+
+   ginkgo.Describe("with downscale autoscaling disabled", func() {
+       ginkgo.It("shouldn't scale down", func() {
+           ginkgo.By("setting up resource consumer and HPA")
+           podCPURequest := 500
+           targetCPUUtilizationPercent := 25
+           usageForSingleReplica := 110
+           initPods := 3
+           initCPUUsageTotal := initPods * usageForSingleReplica
+
+           rc := e2eautoscaling.NewDynamicResourceConsumer(
+               "consumer", f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
+               initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
+               f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
+           )
+           defer rc.CleanUp()
+
+           hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
+               rc, int32(targetCPUUtilizationPercent), 1, 10, e2eautoscaling.HPABehaviorWithScaleDownDisabled(),
+           )
+           defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)
+
+           defaultDownscaleStabilisation := 5 * time.Minute
+           waitDeadline := maxHPAReactionTime + maxResourceConsumerDelay + defaultDownscaleStabilisation
+
+           ginkgo.By("trying to trigger scale down")
+           rc.ConsumeCPU(1 * usageForSingleReplica)
+           waitStart := time.Now()
+
+           rc.EnsureDesiredReplicasInRange(initPods, initPods, waitDeadline, hpa.Name)
+           timeWaited := time.Now().Sub(waitStart)
+
+           ginkgo.By("verifying time waited for a scale down")
+           framework.Logf("time waited for scale down: %s", timeWaited)
+           framework.ExpectEqual(timeWaited > waitDeadline, true, "waited %s, wanted to wait more than %s", timeWaited, waitDeadline)
+
+           ginkgo.By("verifying number of replicas")
+           replicas := rc.GetReplicas()
+           framework.ExpectEqual(replicas == initPods, true, "had %d replicas, still have %d replicas after time deadline", initPods, replicas)
+       })
+   })
+
 })
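With the shared budgets above, the deadlines in the two "autoscaling disabled" tests work out to fixed values, and the 5-minute defaultDownscaleStabilisation matches the HPA controller's default downscale stabilization window. A quick check of the sums (not part of the diff; constants copied from the test code):

// Sketch only: the wait deadlines used by the "autoscaling disabled" tests above.
package main

import (
    "fmt"
    "time"
)

func main() {
    maxHPAReactionTime := 100 * time.Second      // 75s metrics delay + 15s reconcile + 10s actuation
    maxResourceConsumerDelay := 50 * time.Second // 30s consume-CPU delay + 20s poll interval
    waitBuffer := 1 * time.Minute
    defaultDownscaleStabilisation := 5 * time.Minute

    fmt.Println(maxHPAReactionTime + maxResourceConsumerDelay + waitBuffer)                    // 3m30s: scale-up-disabled deadline
    fmt.Println(maxHPAReactionTime + maxResourceConsumerDelay + defaultDownscaleStabilisation) // 7m30s: scale-down-disabled deadline
}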
@@ -684,7 +684,7 @@ func DeleteContainerResourceHPA(rc *ResourceConsumer, autoscalerName string) {
    rc.clientSet.AutoscalingV2().HorizontalPodAutoscalers(rc.nsName).Delete(context.TODO(), autoscalerName, metav1.DeleteOptions{})
 }

-func CreateCPUHorizontalPodAutoscalerWithBehavior(rc *ResourceConsumer, cpu, minReplicas, maxRepl, downscaleStabilizationSeconds int32) *autoscalingv2.HorizontalPodAutoscaler {
+func CreateCPUHorizontalPodAutoscalerWithBehavior(rc *ResourceConsumer, cpu int32, minReplicas int32, maxRepl int32, behavior *autoscalingv2.HorizontalPodAutoscalerBehavior) *autoscalingv2.HorizontalPodAutoscaler {
    hpa := &autoscalingv2.HorizontalPodAutoscaler{
        ObjectMeta: metav1.ObjectMeta{
            Name: rc.name,
@@ -710,11 +710,7 @@ func CreateCPUHorizontalPodAutoscalerWithBehavior(rc *ResourceConsumer, cpu, min
                    },
                },
            },
-           Behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{
-               ScaleDown: &autoscalingv2.HPAScalingRules{
-                   StabilizationWindowSeconds: &downscaleStabilizationSeconds,
-               },
-           },
+           Behavior: behavior,
        },
    }
    hpa, errHPA := rc.clientSet.AutoscalingV2().HorizontalPodAutoscalers(rc.nsName).Create(context.TODO(), hpa, metav1.CreateOptions{})
@@ -722,6 +718,47 @@ func CreateCPUHorizontalPodAutoscalerWithBehavior(rc *ResourceConsumer, cpu, min
    return hpa
 }

+func HPABehaviorWithScaleUpAndDownRules(scaleUpRule, scaleDownRule *autoscalingv2.HPAScalingRules) *autoscalingv2.HorizontalPodAutoscalerBehavior {
+   return &autoscalingv2.HorizontalPodAutoscalerBehavior{
+       ScaleUp:   scaleUpRule,
+       ScaleDown: scaleDownRule,
+   }
+}
+
+func HPAStabilizationWindowScalingRule(stabilizationDuration int32) *autoscalingv2.HPAScalingRules {
+   return &autoscalingv2.HPAScalingRules{
+       StabilizationWindowSeconds: &stabilizationDuration,
+   }
+}
+
+func HPAPolicyDisabledScalingRule() *autoscalingv2.HPAScalingRules {
+   disabledPolicy := autoscalingv2.DisabledPolicySelect
+   return &autoscalingv2.HPAScalingRules{
+       SelectPolicy: &disabledPolicy,
+   }
+}
+
+func HPABehaviorWithStabilizationWindows(upscaleStabilization, downscaleStabilization time.Duration) *autoscalingv2.HorizontalPodAutoscalerBehavior {
+   return HPABehaviorWithScaleUpAndDownRules(
+       /*scaleUpRule=*/ HPAStabilizationWindowScalingRule(int32(upscaleStabilization.Seconds())),
+       /*scaleDownRule=*/ HPAStabilizationWindowScalingRule(int32(downscaleStabilization.Seconds())),
+   )
+}
+
+func HPABehaviorWithScaleUpDisabled() *autoscalingv2.HorizontalPodAutoscalerBehavior {
+   return HPABehaviorWithScaleUpAndDownRules(
+       /*scaleUpRule=*/ HPAPolicyDisabledScalingRule(),
+       /*scaleDownRule=*/ nil,
+   )
+}
+
+func HPABehaviorWithScaleDownDisabled() *autoscalingv2.HorizontalPodAutoscalerBehavior {
+   return HPABehaviorWithScaleUpAndDownRules(
+       /*scaleUpRule=*/ nil,
+       /*scaleDownRule=*/ HPAPolicyDisabledScalingRule(),
+   )
+}
+
 func DeleteHPAWithBehavior(rc *ResourceConsumer, autoscalerName string) {
    rc.clientSet.AutoscalingV2().HorizontalPodAutoscalers(rc.nsName).Delete(context.TODO(), autoscalerName, metav1.DeleteOptions{})
 }
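The new helpers compose, so callers are not limited to the three canned behaviors used in the tests. A hypothetical sketch (not in this PR) that builds a behavior with scale-up disabled and a 2-minute downscale stabilization window; it assumes the patched helpers live in the e2e framework autoscaling package imported as e2eautoscaling, as in the test file above, and the values are illustrative only:

// Sketch only: composing the new helpers; import path assumed from the
// e2eautoscaling alias used in the tests above.
package main

import (
    "fmt"
    "time"

    e2eautoscaling "k8s.io/kubernetes/test/e2e/framework/autoscaling"
)

func main() {
    behavior := e2eautoscaling.HPABehaviorWithScaleUpAndDownRules(
        /*scaleUpRule=*/ e2eautoscaling.HPAPolicyDisabledScalingRule(),
        /*scaleDownRule=*/ e2eautoscaling.HPAStabilizationWindowScalingRule(int32((2 * time.Minute).Seconds())),
    )
    // Prints: Disabled 120
    fmt.Println(*behavior.ScaleUp.SelectPolicy, *behavior.ScaleDown.StabilizationWindowSeconds)
}

The behavior value would then be passed straight to CreateCPUHorizontalPodAutoscalerWithBehavior in place of the old downscaleStabilizationSeconds argument, which is exactly the migration the first hunk of the test file performs.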