Merge pull request #111346 from piotrnosek/hpa-tests-2

Add e2e HPA Behavior tests: scale up/down limited by number of Pods / min, scale up/down limited by percentage / min
Kubernetes Prow Robot 2022-08-09 03:14:50 -07:00 committed by GitHub
commit f0bd02ca5e
2 changed files with 237 additions and 37 deletions
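For context, the new tests drive the behavior stanza of an autoscaling/v2 HorizontalPodAutoscaler. Below is a minimal sketch, with illustrative values that are not taken from this commit, of the kind of scale-up rule the new HPABehaviorWithScaleLimitedByNumberOfPods helper assembles from the autoscaling/v2 API types: at most 2 Pods added per 60-second window, stabilization disabled.

package main

import (
	"fmt"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
)

func main() {
	// Illustrative values only: allow at most 2 Pods to be added per 60s window,
	// pick the policy that permits the largest change, and skip stabilization.
	selectPolicy := autoscalingv2.MaxChangePolicySelect
	noStabilization := int32(0)
	behavior := &autoscalingv2.HorizontalPodAutoscalerBehavior{
		ScaleUp: &autoscalingv2.HPAScalingRules{
			Policies: []autoscalingv2.HPAScalingPolicy{
				{Type: autoscalingv2.PodsScalingPolicy, Value: 2, PeriodSeconds: 60},
			},
			SelectPolicy:               &selectPolicy,
			StabilizationWindowSeconds: &noStabilization,
		},
	}
	fmt.Printf("scale-up rule: %+v\n", behavior.ScaleUp)
}

With a rule like this, the first scale-up can happen as soon as the HPA reacts, but growing from 1 to 5 replicas needs at least two policy windows, which is what the timing assertions in the new tests verify.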


@@ -30,6 +30,12 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
f := framework.NewDefaultFramework("horizontal-pod-autoscaling")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
hpaName := "consumer"
podCPURequest := 500
targetCPUUtilizationPercent := 25
usageForSingleReplica := 110
fullWindowOfNewUsage := 30 * time.Second
windowWithOldUsagePasses := 30 * time.Second
newPodMetricsDelay := 15 * time.Second
@@ -48,16 +54,13 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
ginkgo.Describe("with short downscale stabilization window", func() {
ginkgo.It("should scale down soon after the stabilization period", func() {
ginkgo.By("setting up resource consumer and HPA")
-podCPURequest := 500
-targetCPUUtilizationPercent := 25
-usageForSingleReplica := 110
initPods := 1
initCPUUsageTotal := initPods * usageForSingleReplica
upScaleStabilization := 0 * time.Minute
downScaleStabilization := 1 * time.Minute
rc := e2eautoscaling.NewDynamicResourceConsumer(
-"consumer", f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
)
@@ -92,16 +95,13 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
ginkgo.Describe("with long upscale stabilization window", func() {
ginkgo.It("should scale up only after the stabilization period", func() {
ginkgo.By("setting up resource consumer and HPA")
-podCPURequest := 500
-targetCPUUtilizationPercent := 25
-usageForSingleReplica := 110
initPods := 2
initCPUUsageTotal := initPods * usageForSingleReplica
upScaleStabilization := 3 * time.Minute
downScaleStabilization := 0 * time.Minute
rc := e2eautoscaling.NewDynamicResourceConsumer(
-"consumer", f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
)
@@ -133,24 +133,21 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
})
})
-ginkgo.Describe("with upscale autoscaling disabled", func() {
ginkgo.Describe("with autoscaling disabled", func() {
ginkgo.It("shouldn't scale up", func() {
ginkgo.By("setting up resource consumer and HPA")
-podCPURequest := 500
-targetCPUUtilizationPercent := 25
-usageForSingleReplica := 110
initPods := 1
initCPUUsageTotal := initPods * usageForSingleReplica
rc := e2eautoscaling.NewDynamicResourceConsumer(
-"consumer", f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
)
defer rc.CleanUp()
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
-rc, int32(targetCPUUtilizationPercent), 1, 10, e2eautoscaling.HPABehaviorWithScaleUpDisabled(),
rc, int32(targetCPUUtilizationPercent), 1, 10, e2eautoscaling.HPABehaviorWithScaleDisabled(e2eautoscaling.ScaleUpDirection),
)
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)
@@ -171,26 +168,21 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
replicas := rc.GetReplicas()
framework.ExpectEqual(replicas == initPods, true, "had %s replicas, still have %s replicas after time deadline", initPods, replicas)
})
-})
-ginkgo.Describe("with downscale autoscaling disabled", func() {
ginkgo.It("shouldn't scale down", func() {
ginkgo.By("setting up resource consumer and HPA")
-podCPURequest := 500
-targetCPUUtilizationPercent := 25
-usageForSingleReplica := 110
initPods := 3
initCPUUsageTotal := initPods * usageForSingleReplica
rc := e2eautoscaling.NewDynamicResourceConsumer(
-"consumer", f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
)
defer rc.CleanUp()
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
-rc, int32(targetCPUUtilizationPercent), 1, 10, e2eautoscaling.HPABehaviorWithScaleDownDisabled(),
rc, int32(targetCPUUtilizationPercent), 1, 10, e2eautoscaling.HPABehaviorWithScaleDisabled(e2eautoscaling.ScaleDownDirection),
)
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)
@@ -212,6 +204,178 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
replicas := rc.GetReplicas()
framework.ExpectEqual(replicas == initPods, true, "had %s replicas, still have %s replicas after time deadline", initPods, replicas)
})
})
ginkgo.Describe("with scale limited by number of Pods rate", func() {
ginkgo.It("should scale up no more than given number of Pods per minute", func() {
ginkgo.By("setting up resource consumer and HPA")
initPods := 1
initCPUUsageTotal := initPods * usageForSingleReplica
limitWindowLength := 1 * time.Minute
podsLimitPerMinute := 2
rc := e2eautoscaling.NewDynamicResourceConsumer(
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
)
defer rc.CleanUp()
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
rc, int32(targetCPUUtilizationPercent), 1, 10,
e2eautoscaling.HPABehaviorWithScaleLimitedByNumberOfPods(e2eautoscaling.ScaleUpDirection, int32(podsLimitPerMinute), int32(limitWindowLength.Seconds())),
)
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)
ginkgo.By("triggering scale up by increasing consumption")
rc.ConsumeCPU(5 * usageForSingleReplica)
waitStart := time.Now()
rc.WaitForReplicas(3, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor3 := time.Now().Sub(waitStart)
waitStart = time.Now()
rc.WaitForReplicas(5, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor5 := time.Now().Sub(waitStart)
ginkgo.By("verifying time waited for a scale up to 3 replicas")
deadline := limitWindowLength + maxHPAReactionTime + maxResourceConsumerDelay
// First scale event can happen right away, as there were no scale events in the past.
framework.ExpectEqual(timeWaitedFor3 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor3, deadline)
ginkgo.By("verifying time waited for a scale up to 5 replicas")
// Second scale event needs to respect limit window.
framework.ExpectEqual(timeWaitedFor5 > limitWindowLength, true, "waited %s, wanted to wait more than %s", timeWaitedFor5, limitWindowLength)
framework.ExpectEqual(timeWaitedFor5 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor5, deadline)
})
ginkgo.It("should scale down no more than given number of Pods per minute", func() {
ginkgo.By("setting up resource consumer and HPA")
initPods := 6
initCPUUsageTotal := initPods * usageForSingleReplica
limitWindowLength := 1 * time.Minute
podsLimitPerMinute := 2
rc := e2eautoscaling.NewDynamicResourceConsumer(
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
)
defer rc.CleanUp()
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
rc, int32(targetCPUUtilizationPercent), 1, 10,
e2eautoscaling.HPABehaviorWithScaleLimitedByNumberOfPods(e2eautoscaling.ScaleDownDirection, int32(podsLimitPerMinute), int32(limitWindowLength.Seconds())),
)
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)
ginkgo.By("triggering scale down by lowering consumption")
rc.ConsumeCPU(1 * usageForSingleReplica)
waitStart := time.Now()
rc.WaitForReplicas(4, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor4 := time.Now().Sub(waitStart)
waitStart = time.Now()
rc.WaitForReplicas(2, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor2 := time.Now().Sub(waitStart)
ginkgo.By("verifying time waited for a scale down to 4 replicas")
deadline := limitWindowLength + maxHPAReactionTime + maxResourceConsumerDelay
// First scale event can happen right away, as there were no scale events in the past.
framework.ExpectEqual(timeWaitedFor4 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor4, deadline)
ginkgo.By("verifying time waited for a scale down to 2 replicas")
// Second scale event needs to respect limit window.
framework.ExpectEqual(timeWaitedFor2 > limitWindowLength, true, "waited %s, wanted more than %s", timeWaitedFor2, limitWindowLength)
framework.ExpectEqual(timeWaitedFor2 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor2, deadline)
})
})
ginkgo.Describe("with scale limited by percentage", func() {
ginkgo.It("should scale up no more than given percentage of current Pods per minute", func() {
ginkgo.By("setting up resource consumer and HPA")
initPods := 4
initCPUUsageTotal := initPods * usageForSingleReplica
limitWindowLength := 1 * time.Minute
percentageLimitPerMinute := 50
rc := e2eautoscaling.NewDynamicResourceConsumer(
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
)
defer rc.CleanUp()
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
rc, int32(targetCPUUtilizationPercent), 1, 10,
e2eautoscaling.HPABehaviorWithScaleLimitedByPercentage(e2eautoscaling.ScaleUpDirection, int32(percentageLimitPerMinute), int32(limitWindowLength.Seconds())),
)
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)
ginkgo.By("triggering scale up by increasing consumption")
rc.ConsumeCPU(10 * usageForSingleReplica)
waitStart := time.Now()
rc.WaitForReplicas(6, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor6 := time.Now().Sub(waitStart)
waitStart = time.Now()
rc.WaitForReplicas(9, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor9 := time.Now().Sub(waitStart)
ginkgo.By("verifying time waited for a scale up to 6 replicas")
deadline := limitWindowLength + maxHPAReactionTime + maxResourceConsumerDelay
// First scale event can happen right away, as there were no scale events in the past.
framework.ExpectEqual(timeWaitedFor6 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor6, deadline)
ginkgo.By("verifying time waited for a scale up to 9 replicas")
// Second scale event needs to respect limit window.
framework.ExpectEqual(timeWaitedFor9 > limitWindowLength, true, "waited %s, wanted to wait more than %s", timeWaitedFor9, limitWindowLength)
framework.ExpectEqual(timeWaitedFor9 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor9, deadline)
})
ginkgo.It("should scale down no more than given percentage of current Pods per minute", func() {
ginkgo.By("setting up resource consumer and HPA")
initPods := 8
initCPUUsageTotal := initPods * usageForSingleReplica
limitWindowLength := 1 * time.Minute
percentageLimitPerMinute := 50
rc := e2eautoscaling.NewDynamicResourceConsumer(
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
)
defer rc.CleanUp()
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
rc, int32(targetCPUUtilizationPercent), 1, 10,
e2eautoscaling.HPABehaviorWithScaleLimitedByPercentage(e2eautoscaling.ScaleDownDirection, int32(percentageLimitPerMinute), int32(limitWindowLength.Seconds())),
)
defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)
ginkgo.By("triggering scale down by lowering consumption")
rc.ConsumeCPU(1 * usageForSingleReplica)
waitStart := time.Now()
rc.WaitForReplicas(4, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor4 := time.Now().Sub(waitStart)
waitStart = time.Now()
rc.WaitForReplicas(2, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
timeWaitedFor2 := time.Now().Sub(waitStart)
ginkgo.By("verifying time waited for a scale down to 4 replicas")
deadline := limitWindowLength + maxHPAReactionTime + maxResourceConsumerDelay
// First scale event can happen right away, as there were no scale events in the past.
framework.ExpectEqual(timeWaitedFor4 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor4, deadline)
ginkgo.By("verifying time waited for a scale down to 2 replicas")
// Second scale event needs to respect limit window.
framework.ExpectEqual(timeWaitedFor2 > limitWindowLength, true, "waited %s, wanted more than %s", timeWaitedFor2, limitWindowLength)
framework.ExpectEqual(timeWaitedFor2 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor2, deadline)
})
})
})


@@ -78,6 +78,15 @@ var (
KindReplicaSet = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "ReplicaSet"}
)
// ScalingDirection identifies the scale direction for HPA Behavior.
type ScalingDirection int
const (
DirectionUnknown ScalingDirection = iota
ScaleUpDirection
ScaleDownDirection
)
/*
ResourceConsumer is a tool for testing. It helps create specified usage of CPU or memory (Warning: memory not supported)
typical use case:
@@ -725,38 +734,65 @@ func HPABehaviorWithScaleUpAndDownRules(scaleUpRule, scaleDownRule *autoscalingv
}
}
-func HPAStabilizationWindowScalingRule(stabilizationDuration int32) *autoscalingv2.HPAScalingRules {
func HPABehaviorWithScalingRuleInDirection(scalingDirection ScalingDirection, rule *autoscalingv2.HPAScalingRules) *autoscalingv2.HorizontalPodAutoscalerBehavior {
var scaleUpRule, scaleDownRule *autoscalingv2.HPAScalingRules
if scalingDirection == ScaleUpDirection {
scaleUpRule = rule
}
if scalingDirection == ScaleDownDirection {
scaleDownRule = rule
}
return HPABehaviorWithScaleUpAndDownRules(scaleUpRule, scaleDownRule)
}
func HPAScalingRuleWithStabilizationWindow(stabilizationDuration int32) *autoscalingv2.HPAScalingRules {
return &autoscalingv2.HPAScalingRules{
StabilizationWindowSeconds: &stabilizationDuration,
}
}
-func HPAPolicyDisabledScalingRule() *autoscalingv2.HPAScalingRules {
func HPAScalingRuleWithPolicyDisabled() *autoscalingv2.HPAScalingRules {
disabledPolicy := autoscalingv2.DisabledPolicySelect
return &autoscalingv2.HPAScalingRules{
SelectPolicy: &disabledPolicy,
}
}
func HPAScalingRuleWithScalingPolicy(policyType autoscalingv2.HPAScalingPolicyType, value, periodSeconds int32) *autoscalingv2.HPAScalingRules {
stabilizationWindowDisabledDuration := int32(0)
selectPolicy := autoscalingv2.MaxChangePolicySelect
return &autoscalingv2.HPAScalingRules{
Policies: []autoscalingv2.HPAScalingPolicy{
{
Type: policyType,
Value: value,
PeriodSeconds: periodSeconds,
},
},
SelectPolicy: &selectPolicy,
StabilizationWindowSeconds: &stabilizationWindowDisabledDuration,
}
}
func HPABehaviorWithStabilizationWindows(upscaleStabilization, downscaleStabilization time.Duration) *autoscalingv2.HorizontalPodAutoscalerBehavior {
-return HPABehaviorWithScaleUpAndDownRules(
-/*scaleUpRule=*/ HPAStabilizationWindowScalingRule(int32(upscaleStabilization.Seconds())),
-/*scaleDownRule=*/ HPAStabilizationWindowScalingRule(int32(downscaleStabilization.Seconds())),
-)
scaleUpRule := HPAScalingRuleWithStabilizationWindow(int32(upscaleStabilization.Seconds()))
scaleDownRule := HPAScalingRuleWithStabilizationWindow(int32(downscaleStabilization.Seconds()))
return HPABehaviorWithScaleUpAndDownRules(scaleUpRule, scaleDownRule)
}
-func HPABehaviorWithScaleUpDisabled() *autoscalingv2.HorizontalPodAutoscalerBehavior {
-return HPABehaviorWithScaleUpAndDownRules(
-/*scaleUpRule=*/ HPAPolicyDisabledScalingRule(),
-/*scaleDownRule=*/ nil,
-)
func HPABehaviorWithScaleDisabled(scalingDirection ScalingDirection) *autoscalingv2.HorizontalPodAutoscalerBehavior {
scalingRule := HPAScalingRuleWithPolicyDisabled()
return HPABehaviorWithScalingRuleInDirection(scalingDirection, scalingRule)
}
-func HPABehaviorWithScaleDownDisabled() *autoscalingv2.HorizontalPodAutoscalerBehavior {
-return HPABehaviorWithScaleUpAndDownRules(
-/*scaleUpRule=*/ nil,
-/*scaleDownRule=*/ HPAPolicyDisabledScalingRule(),
-)
func HPABehaviorWithScaleLimitedByNumberOfPods(scalingDirection ScalingDirection, numberOfPods, periodSeconds int32) *autoscalingv2.HorizontalPodAutoscalerBehavior {
scalingRule := HPAScalingRuleWithScalingPolicy(autoscalingv2.PodsScalingPolicy, numberOfPods, periodSeconds)
return HPABehaviorWithScalingRuleInDirection(scalingDirection, scalingRule)
}
func HPABehaviorWithScaleLimitedByPercentage(scalingDirection ScalingDirection, percentage, periodSeconds int32) *autoscalingv2.HorizontalPodAutoscalerBehavior {
scalingRule := HPAScalingRuleWithScalingPolicy(autoscalingv2.PercentScalingPolicy, percentage, periodSeconds)
return HPABehaviorWithScalingRuleInDirection(scalingDirection, scalingRule)
}
func DeleteHPAWithBehavior(rc *ResourceConsumer, autoscalerName string) {