From 3c07d3a20c45201324bd88478be84c01f5e9949e Mon Sep 17 00:00:00 2001
From: Olivier Michaelis <38879457+oliviermichaelis@users.noreply.github.com>
Date: Sun, 27 Mar 2022 12:34:32 +0200
Subject: [PATCH] Fix replica calculation at start of HPA scaling policy
 period

When calculating the scale-up/scale-down limit, the number of replicas at
the start of the scaling policy period is now calculated correctly by
taking into account the number of scaled-up and scaled-down replicas.

Signed-off-by: Olivier Michaelis <38879457+oliviermichaelis@users.noreply.github.com>
---
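Note: everything below the three-dash line is dropped by git am, so this is
commentary rather than part of the commit. As a worked example of the
corrected calculation, take the values from the new scale-up test case
below, reading generateScalingRules(0, 0, 300, 300, 0) as a single
300-percent policy over a 300s period (an assumption about the test
helper's argument order). With currentReplicas=6, 4 replicas added and
2 removed within that period:

    periodStartReplicas = currentReplicas
                          - replicasAddedInCurrentPeriod
                          + replicasDeletedInCurrentPeriod
                        = 6 - 4 + 2 = 4
    scaleUpLimit        = periodStartReplicas * (1 + 300/100)
                        = 4 * 4 = 16

which matches the test's expectedReplicas of 16. The old formula ignored
scale-down events, so the period would have appeared to start at
6 - 4 = 2 replicas, capping the scale-up at 2 * 4 = 8. The new scale-down
test case mirrors this: 10 - 2 + 4 = 12 replicas at period start, and a
50-percent policy over 180s allows scaling down to 12 * 0.5 = 6.
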
 pkg/controller/podautoscaler/horizontal.go      | 18 ++++++------
 .../podautoscaler/horizontal_test.go            | 28 +++++++++++++++++--
 2 files changed, 36 insertions(+), 10 deletions(-)

diff --git a/pkg/controller/podautoscaler/horizontal.go b/pkg/controller/podautoscaler/horizontal.go
index 008ba8f6b54..37800108c30 100644
--- a/pkg/controller/podautoscaler/horizontal.go
+++ b/pkg/controller/podautoscaler/horizontal.go
@@ -929,7 +929,7 @@ func (a *HorizontalController) convertDesiredReplicasWithBehaviorRate(args Norma
 	var possibleLimitingReason, possibleLimitingMessage string
 
 	if args.DesiredReplicas > args.CurrentReplicas {
-		scaleUpLimit := calculateScaleUpLimitWithScalingRules(args.CurrentReplicas, a.scaleUpEvents[args.Key], args.ScaleUpBehavior)
+		scaleUpLimit := calculateScaleUpLimitWithScalingRules(args.CurrentReplicas, a.scaleUpEvents[args.Key], a.scaleDownEvents[args.Key], args.ScaleUpBehavior)
 		if scaleUpLimit < args.CurrentReplicas {
 			// We shouldn't scale up further until the scaleUpEvents will be cleaned up
 			scaleUpLimit = args.CurrentReplicas
@@ -947,7 +947,7 @@ func (a *HorizontalController) convertDesiredReplicasWithBehaviorRate(args Norma
 			return maximumAllowedReplicas, possibleLimitingReason, possibleLimitingMessage
 		}
 	} else if args.DesiredReplicas < args.CurrentReplicas {
-		scaleDownLimit := calculateScaleDownLimitWithBehaviors(args.CurrentReplicas, a.scaleDownEvents[args.Key], args.ScaleDownBehavior)
+		scaleDownLimit := calculateScaleDownLimitWithBehaviors(args.CurrentReplicas, a.scaleUpEvents[args.Key], a.scaleDownEvents[args.Key], args.ScaleDownBehavior)
 		if scaleDownLimit > args.CurrentReplicas {
 			// We shouldn't scale down further until the scaleDownEvents will be cleaned up
 			scaleDownLimit = args.CurrentReplicas
@@ -1032,7 +1032,7 @@ func getLongestPolicyPeriod(scalingRules *autoscalingv2.HPAScalingRules) int32 {
 }
 
 // calculateScaleUpLimitWithScalingRules returns the maximum number of pods that could be added for the given HPAScalingRules
-func calculateScaleUpLimitWithScalingRules(currentReplicas int32, scaleEvents []timestampedScaleEvent, scalingRules *autoscalingv2.HPAScalingRules) int32 {
+func calculateScaleUpLimitWithScalingRules(currentReplicas int32, scaleUpEvents, scaleDownEvents []timestampedScaleEvent, scalingRules *autoscalingv2.HPAScalingRules) int32 {
 	var result int32
 	var proposed int32
 	var selectPolicyFn func(int32, int32) int32
@@ -1046,8 +1046,9 @@ func calculateScaleUpLimitWithScalingRules(currentReplicas int32, scaleEvents []
 		selectPolicyFn = max // Use the default policy otherwise to produce a highest possible change
 	}
 	for _, policy := range scalingRules.Policies {
-		replicasAddedInCurrentPeriod := getReplicasChangePerPeriod(policy.PeriodSeconds, scaleEvents)
-		periodStartReplicas := currentReplicas - replicasAddedInCurrentPeriod
+		replicasAddedInCurrentPeriod := getReplicasChangePerPeriod(policy.PeriodSeconds, scaleUpEvents)
+		replicasDeletedInCurrentPeriod := getReplicasChangePerPeriod(policy.PeriodSeconds, scaleDownEvents)
+		periodStartReplicas := currentReplicas - replicasAddedInCurrentPeriod + replicasDeletedInCurrentPeriod
 		if policy.Type == autoscalingv2.PodsScalingPolicy {
 			proposed = periodStartReplicas + policy.Value
 		} else if policy.Type == autoscalingv2.PercentScalingPolicy {
@@ -1060,7 +1061,7 @@ func calculateScaleUpLimitWithScalingRules(currentReplicas int32, scaleEvents []
 }
 
 // calculateScaleDownLimitWithBehavior returns the maximum number of pods that could be deleted for the given HPAScalingRules
-func calculateScaleDownLimitWithBehaviors(currentReplicas int32, scaleEvents []timestampedScaleEvent, scalingRules *autoscalingv2.HPAScalingRules) int32 {
+func calculateScaleDownLimitWithBehaviors(currentReplicas int32, scaleUpEvents, scaleDownEvents []timestampedScaleEvent, scalingRules *autoscalingv2.HPAScalingRules) int32 {
 	var result int32
 	var proposed int32
 	var selectPolicyFn func(int32, int32) int32
@@ -1074,8 +1075,9 @@ func calculateScaleDownLimitWithBehaviors(currentReplicas int32, scaleEvents []t
 		selectPolicyFn = min // Use the default policy otherwise to produce a highest possible change
 	}
 	for _, policy := range scalingRules.Policies {
-		replicasDeletedInCurrentPeriod := getReplicasChangePerPeriod(policy.PeriodSeconds, scaleEvents)
-		periodStartReplicas := currentReplicas + replicasDeletedInCurrentPeriod
+		replicasAddedInCurrentPeriod := getReplicasChangePerPeriod(policy.PeriodSeconds, scaleUpEvents)
+		replicasDeletedInCurrentPeriod := getReplicasChangePerPeriod(policy.PeriodSeconds, scaleDownEvents)
+		periodStartReplicas := currentReplicas - replicasAddedInCurrentPeriod + replicasDeletedInCurrentPeriod
 		if policy.Type == autoscalingv2.PodsScalingPolicy {
 			proposed = periodStartReplicas - policy.Value
 		} else if policy.Type == autoscalingv2.PercentScalingPolicy {
diff --git a/pkg/controller/podautoscaler/horizontal_test.go b/pkg/controller/podautoscaler/horizontal_test.go
index dbc664ee1bc..5adc863dcad 100644
--- a/pkg/controller/podautoscaler/horizontal_test.go
+++ b/pkg/controller/podautoscaler/horizontal_test.go
@@ -3029,7 +3029,7 @@ func TestConvertDesiredReplicasWithRules(t *testing.T) {
 
 func TestCalculateScaleUpLimitWithScalingRules(t *testing.T) {
 	policy := autoscalingv2.MinChangePolicySelect
-	calculated := calculateScaleUpLimitWithScalingRules(1, []timestampedScaleEvent{}, &autoscalingv2.HPAScalingRules{
+	calculated := calculateScaleUpLimitWithScalingRules(1, []timestampedScaleEvent{}, []timestampedScaleEvent{}, &autoscalingv2.HPAScalingRules{
 		StabilizationWindowSeconds: utilpointer.Int32Ptr(300),
 		SelectPolicy:               &policy,
 		Policies: []autoscalingv2.HPAScalingPolicy{
@@ -3051,7 +3051,7 @@ func TestCalculateScaleDownLimitWithBehaviors(t *testing.T) {
 	policy := autoscalingv2.MinChangePolicySelect
-	calculated := calculateScaleDownLimitWithBehaviors(5, []timestampedScaleEvent{}, &autoscalingv2.HPAScalingRules{
+	calculated := calculateScaleDownLimitWithBehaviors(5, []timestampedScaleEvent{}, []timestampedScaleEvent{}, &autoscalingv2.HPAScalingRules{
 		StabilizationWindowSeconds: utilpointer.Int32Ptr(300),
 		SelectPolicy:               &policy,
 		Policies: []autoscalingv2.HPAScalingPolicy{
@@ -3480,6 +3480,18 @@ func TestScalingWithRules(t *testing.T) {
 			expectedReplicas:             255, // (100 - 15) + 200%
 			expectedCondition:            "ScaleUpLimit",
 		},
+		{
+			name:                         "scaleUp with percent policy and previous scale up and down events",
+			scaleUpEvents:                generateEventsUniformDistribution([]int{4}, 120),
+			scaleDownEvents:              generateEventsUniformDistribution([]int{2}, 120),
+			specMinReplicas:              1,
+			specMaxReplicas:              1000,
+			scaleUpRules:                 generateScalingRules(0, 0, 300, 300, 0),
+			currentReplicas:              6,
+			prenormalizedDesiredReplicas: 24,
+			expectedReplicas:             16,
+			expectedCondition:            "ScaleUpLimit",
+		},
 		// ScaleDown with PeriodSeconds usage
 		{
 			name: "scaleDown with default policy and previous events",
@@ -3546,6 +3558,18 @@ func TestScalingWithRules(t *testing.T) {
 			expectedReplicas:             56, // (100 + 12) - 50%
 			expectedCondition:            "ScaleDownLimit",
 		},
+		{
+			name:                         "scaleDown with percent policy and previous scale up and down events",
+			scaleUpEvents:                generateEventsUniformDistribution([]int{2}, 120),
+			scaleDownEvents:              generateEventsUniformDistribution([]int{4}, 120),
+			specMinReplicas:              1,
+			specMaxReplicas:              1000,
+			scaleDownRules:               generateScalingRules(0, 0, 50, 180, 0),
+			currentReplicas:              10,
+			prenormalizedDesiredReplicas: 1,
+			expectedReplicas:             6,
+			expectedCondition:            "ScaleDownLimit",
+		},
 		{
 			// corner case for calculating the scaleDownLimit, when we changed pod or percent policy after a lot of scaleDown events
 			// in this case we shouldn't allow scale down, though, the naive formula will suggest that scaleDownlimit is more then CurrentReplicas (100+30-10% > 100)