Merge pull request #95647 from JoshuaAndrew/master

Horizontal Pod Autoscaler doesn't automatically scale the number of pods correctly
Kubernetes Prow Robot 2020-10-23 04:05:59 -07:00 committed by GitHub
commit 106ee38796
2 changed files with 50 additions and 2 deletions

pkg/controller/podautoscaler/horizontal.go

@@ -998,15 +998,17 @@ func calculateScaleUpLimitWithScalingRules(currentReplicas int32, scaleEvents []
 	if *scalingRules.SelectPolicy == autoscalingv2.DisabledPolicySelect {
 		return currentReplicas // Scaling is disabled
 	} else if *scalingRules.SelectPolicy == autoscalingv2.MinPolicySelect {
+		result = math.MaxInt32
 		selectPolicyFn = min // For scaling up, the lowest change ('min' policy) produces a minimum value
 	} else {
+		result = math.MinInt32
 		selectPolicyFn = max // Use the default policy otherwise to produce a highest possible change
 	}
 	for _, policy := range scalingRules.Policies {
 		replicasAddedInCurrentPeriod := getReplicasChangePerPeriod(policy.PeriodSeconds, scaleEvents)
 		periodStartReplicas := currentReplicas - replicasAddedInCurrentPeriod
 		if policy.Type == autoscalingv2.PodsScalingPolicy {
-			proposed = int32(periodStartReplicas + policy.Value)
+			proposed = periodStartReplicas + policy.Value
 		} else if policy.Type == autoscalingv2.PercentScalingPolicy {
 			// the proposal has to be rounded up because the proposed change might not increase the replica count causing the target to never scale up
 			proposed = int32(math.Ceil(float64(periodStartReplicas) * (1 + float64(policy.Value)/100)))
@@ -1018,14 +1020,16 @@ func calculateScaleUpLimitWithScalingRules(currentReplicas int32, scaleEvents []
 // calculateScaleDownLimitWithBehavior returns the maximum number of pods that could be deleted for the given HPAScalingRules
 func calculateScaleDownLimitWithBehaviors(currentReplicas int32, scaleEvents []timestampedScaleEvent, scalingRules *autoscalingv2.HPAScalingRules) int32 {
-	var result int32 = math.MaxInt32
+	var result int32
 	var proposed int32
 	var selectPolicyFn func(int32, int32) int32
 	if *scalingRules.SelectPolicy == autoscalingv2.DisabledPolicySelect {
 		return currentReplicas // Scaling is disabled
 	} else if *scalingRules.SelectPolicy == autoscalingv2.MinPolicySelect {
+		result = math.MinInt32
 		selectPolicyFn = max // For scaling down, the lowest change ('min' policy) produces a maximum value
 	} else {
+		result = math.MaxInt32
 		selectPolicyFn = min // Use the default policy otherwise to produce a highest possible change
 	}
 	for _, policy := range scalingRules.Policies {
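
The bug is visible in the removed line above: `result` was seeded with `math.MaxInt32` unconditionally, but with `MinPolicySelect` the loop folds proposals with `max`, and `math.MaxInt32` is the absorbing element of `max`, so `selectPolicyFn(result, proposed)` always returned the seed and the configured policies never took effect (the scale-up path had the mirror-image problem). The fix moves the seeding into the policy branches so the seed is always the identity element of the chosen fold. Below is a minimal standalone sketch of the seeding issue; `foldProposals` and the sample values are illustrative, not code from the controller.

// Standalone sketch (illustrative only): a min/max fold over proposed limits
// must be seeded with the identity of the chosen function, otherwise the seed
// absorbs every proposal.
package main

import (
	"fmt"
	"math"
)

func min(a, b int32) int32 {
	if a < b {
		return a
	}
	return b
}

// foldProposals is a hypothetical helper mirroring the loop in the HPA limit calculation.
func foldProposals(seed int32, pick func(int32, int32) int32, proposals []int32) int32 {
	result := seed
	for _, proposed := range proposals {
		result = pick(result, proposed)
	}
	return result
}

func main() {
	proposals := []int32{3, 2} // e.g. limits proposed by a Pods policy and a Percent policy

	// Buggy seeding: math.MinInt32 is absorbing for min, so the result is always MinInt32.
	fmt.Println(foldProposals(math.MinInt32, min, proposals)) // -2147483648

	// Correct seeding: math.MaxInt32 is the identity for min, so the real minimum survives.
	fmt.Println(foldProposals(math.MaxInt32, min, proposals)) // 2
}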

pkg/controller/podautoscaler/horizontal_test.go

@@ -2993,6 +2993,50 @@ func TestConvertDesiredReplicasWithRules(t *testing.T) {
 	}
 }
 
+func TestCalculateScaleUpLimitWithScalingRules(t *testing.T) {
+	policy := autoscalingv2.MinPolicySelect
+	calculated := calculateScaleUpLimitWithScalingRules(1, []timestampedScaleEvent{}, &autoscalingv2.HPAScalingRules{
+		StabilizationWindowSeconds: utilpointer.Int32Ptr(300),
+		SelectPolicy:               &policy,
+		Policies: []autoscalingv2.HPAScalingPolicy{
+			{
+				Type:          autoscalingv2.PodsScalingPolicy,
+				Value:         2,
+				PeriodSeconds: 60,
+			},
+			{
+				Type:          autoscalingv2.PercentScalingPolicy,
+				Value:         50,
+				PeriodSeconds: 60,
+			},
+		},
+	})
+	assert.Equal(t, calculated, int32(2))
+}
+
+func TestCalculateScaleDownLimitWithBehaviors(t *testing.T) {
+	policy := autoscalingv2.MinPolicySelect
+	calculated := calculateScaleDownLimitWithBehaviors(5, []timestampedScaleEvent{}, &autoscalingv2.HPAScalingRules{
+		StabilizationWindowSeconds: utilpointer.Int32Ptr(300),
+		SelectPolicy:               &policy,
+		Policies: []autoscalingv2.HPAScalingPolicy{
+			{
+				Type:          autoscalingv2.PodsScalingPolicy,
+				Value:         2,
+				PeriodSeconds: 60,
+			},
+			{
+				Type:          autoscalingv2.PercentScalingPolicy,
+				Value:         50,
+				PeriodSeconds: 60,
+			},
+		},
+	})
+	assert.Equal(t, calculated, int32(3))
+}
+
 func generateScalingRules(pods, podsPeriod, percent, percentPeriod, stabilizationWindow int32) *autoscalingv2.HPAScalingRules {
 	policy := autoscalingv2.MaxPolicySelect
 	directionBehavior := autoscalingv2.HPAScalingRules{
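
For reference, working through the expected values (with no prior scale events, periodStartReplicas equals currentReplicas): in the scale-up test the Pods policy proposes 1 + 2 = 3 and the Percent policy proposes ceil(1 * 1.5) = 2, and MinPolicySelect keeps the smaller limit, 2. In the scale-down test, assuming the scale-down loop mirrors the scale-up one (Pods subtracts Value, Percent scales by 1 - Value/100 with truncation), the proposals are 5 - 2 = 3 and int(5 * 0.5) = 2, and MinPolicySelect, meaning the smallest change, keeps the higher limit, 3. Before this fix both calls would have returned the seed value (math.MinInt32 and math.MaxInt32 respectively) instead, which is what these tests guard against.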