mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-24 19:12:02 +00:00
Fix replica calculation at start of HPA scaling policy period
When calculating the scale-up/scale-down limit, the number of replicas at the start of the scaling policy period is now calculated correctly by taking into account both the replicas scaled up and the replicas scaled down within that period.

Signed-off-by: Olivier Michaelis <38879457+oliviermichaelis@users.noreply.github.com>
parent 898443f405
commit 3c07d3a20c
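The core of the change: when reconstructing how many replicas existed at the start of a scaling policy period, the controller must undo scale-downs as well as scale-ups that happened within the period. A minimal standalone Go sketch of that calculation (simplified for illustration: the real controller keeps timestamped events per HPA and filters them by policy.PeriodSeconds):

    package main

    import "fmt"

    // periodStartReplicas reconstructs the replica count at the start of the
    // policy period: subtract replicas added during the period and add back
    // replicas deleted during it.
    func periodStartReplicas(currentReplicas, replicasAdded, replicasDeleted int32) int32 {
        return currentReplicas - replicasAdded + replicasDeleted
    }

    func main() {
        // 6 replicas now, 4 added and 2 deleted within the period:
        // the period started at 6 - 4 + 2 = 4 replicas.
        fmt.Println(periodStartReplicas(6, 4, 2)) // prints 4
    }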
@@ -929,7 +929,7 @@ func (a *HorizontalController) convertDesiredReplicasWithBehaviorRate(args Norma
     var possibleLimitingReason, possibleLimitingMessage string
 
     if args.DesiredReplicas > args.CurrentReplicas {
-        scaleUpLimit := calculateScaleUpLimitWithScalingRules(args.CurrentReplicas, a.scaleUpEvents[args.Key], args.ScaleUpBehavior)
+        scaleUpLimit := calculateScaleUpLimitWithScalingRules(args.CurrentReplicas, a.scaleUpEvents[args.Key], a.scaleDownEvents[args.Key], args.ScaleUpBehavior)
         if scaleUpLimit < args.CurrentReplicas {
             // We shouldn't scale up further until the scaleUpEvents will be cleaned up
             scaleUpLimit = args.CurrentReplicas
@@ -947,7 +947,7 @@ func (a *HorizontalController) convertDesiredReplicasWithBehaviorRate(args Norma
             return maximumAllowedReplicas, possibleLimitingReason, possibleLimitingMessage
         }
     } else if args.DesiredReplicas < args.CurrentReplicas {
-        scaleDownLimit := calculateScaleDownLimitWithBehaviors(args.CurrentReplicas, a.scaleDownEvents[args.Key], args.ScaleDownBehavior)
+        scaleDownLimit := calculateScaleDownLimitWithBehaviors(args.CurrentReplicas, a.scaleUpEvents[args.Key], a.scaleDownEvents[args.Key], args.ScaleDownBehavior)
         if scaleDownLimit > args.CurrentReplicas {
             // We shouldn't scale down further until the scaleDownEvents will be cleaned up
             scaleDownLimit = args.CurrentReplicas
@@ -1032,7 +1032,7 @@ func getLongestPolicyPeriod(scalingRules *autoscalingv2.HPAScalingRules) int32 {
 }
 
 // calculateScaleUpLimitWithScalingRules returns the maximum number of pods that could be added for the given HPAScalingRules
-func calculateScaleUpLimitWithScalingRules(currentReplicas int32, scaleEvents []timestampedScaleEvent, scalingRules *autoscalingv2.HPAScalingRules) int32 {
+func calculateScaleUpLimitWithScalingRules(currentReplicas int32, scaleUpEvents, scaleDownEvents []timestampedScaleEvent, scalingRules *autoscalingv2.HPAScalingRules) int32 {
     var result int32
     var proposed int32
     var selectPolicyFn func(int32, int32) int32
@@ -1046,8 +1046,9 @@ func calculateScaleUpLimitWithScalingRules(currentReplicas int32, scaleEvents []
         selectPolicyFn = max // Use the default policy otherwise to produce a highest possible change
     }
     for _, policy := range scalingRules.Policies {
-        replicasAddedInCurrentPeriod := getReplicasChangePerPeriod(policy.PeriodSeconds, scaleEvents)
-        periodStartReplicas := currentReplicas - replicasAddedInCurrentPeriod
+        replicasAddedInCurrentPeriod := getReplicasChangePerPeriod(policy.PeriodSeconds, scaleUpEvents)
+        replicasDeletedInCurrentPeriod := getReplicasChangePerPeriod(policy.PeriodSeconds, scaleDownEvents)
+        periodStartReplicas := currentReplicas - replicasAddedInCurrentPeriod + replicasDeletedInCurrentPeriod
         if policy.Type == autoscalingv2.PodsScalingPolicy {
             proposed = periodStartReplicas + policy.Value
         } else if policy.Type == autoscalingv2.PercentScalingPolicy {
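Working through the numbers of the new scale-up test case added below: with currentReplicas = 6, 4 replicas scaled up and 2 scaled down within the period, periodStartReplicas = 6 - 4 + 2 = 4; the 300% percent policy then allows 4 + 4*300/100 = 16 replicas, matching expectedReplicas: 16. The old formula (period-start count of 6 - 4 = 2) would have capped the scale-up at 8.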
@@ -1060,7 +1061,7 @@ func calculateScaleUpLimitWithScalingRules(currentReplicas int32, scaleEvents []
 }
 
 // calculateScaleDownLimitWithBehavior returns the maximum number of pods that could be deleted for the given HPAScalingRules
-func calculateScaleDownLimitWithBehaviors(currentReplicas int32, scaleEvents []timestampedScaleEvent, scalingRules *autoscalingv2.HPAScalingRules) int32 {
+func calculateScaleDownLimitWithBehaviors(currentReplicas int32, scaleUpEvents, scaleDownEvents []timestampedScaleEvent, scalingRules *autoscalingv2.HPAScalingRules) int32 {
     var result int32
     var proposed int32
     var selectPolicyFn func(int32, int32) int32
@@ -1074,8 +1075,9 @@ func calculateScaleDownLimitWithBehaviors(currentReplicas int32, scaleEvents []t
         selectPolicyFn = min // Use the default policy otherwise to produce a highest possible change
     }
     for _, policy := range scalingRules.Policies {
-        replicasDeletedInCurrentPeriod := getReplicasChangePerPeriod(policy.PeriodSeconds, scaleEvents)
-        periodStartReplicas := currentReplicas + replicasDeletedInCurrentPeriod
+        replicasAddedInCurrentPeriod := getReplicasChangePerPeriod(policy.PeriodSeconds, scaleUpEvents)
+        replicasDeletedInCurrentPeriod := getReplicasChangePerPeriod(policy.PeriodSeconds, scaleDownEvents)
+        periodStartReplicas := currentReplicas - replicasAddedInCurrentPeriod + replicasDeletedInCurrentPeriod
         if policy.Type == autoscalingv2.PodsScalingPolicy {
             proposed = periodStartReplicas - policy.Value
         } else if policy.Type == autoscalingv2.PercentScalingPolicy {
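Symmetrically for scale-down, using the new test case added below: periodStartReplicas = 10 - 2 + 4 = 12, and the 50% policy permits scaling down to 12 - 12*50/100 = 6 replicas, matching expectedReplicas: 6. The old formula (period-start count of 10 + 4 = 14) would have allowed scaling down to 7.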
@@ -3029,7 +3029,7 @@ func TestConvertDesiredReplicasWithRules(t *testing.T) {
 func TestCalculateScaleUpLimitWithScalingRules(t *testing.T) {
     policy := autoscalingv2.MinChangePolicySelect
 
-    calculated := calculateScaleUpLimitWithScalingRules(1, []timestampedScaleEvent{}, &autoscalingv2.HPAScalingRules{
+    calculated := calculateScaleUpLimitWithScalingRules(1, []timestampedScaleEvent{}, []timestampedScaleEvent{}, &autoscalingv2.HPAScalingRules{
         StabilizationWindowSeconds: utilpointer.Int32Ptr(300),
         SelectPolicy:               &policy,
         Policies: []autoscalingv2.HPAScalingPolicy{
@@ -3051,7 +3051,7 @@ func TestCalculateScaleUpLimitWithScalingRules(t *testing.T) {
 func TestCalculateScaleDownLimitWithBehaviors(t *testing.T) {
     policy := autoscalingv2.MinChangePolicySelect
 
-    calculated := calculateScaleDownLimitWithBehaviors(5, []timestampedScaleEvent{}, &autoscalingv2.HPAScalingRules{
+    calculated := calculateScaleDownLimitWithBehaviors(5, []timestampedScaleEvent{}, []timestampedScaleEvent{}, &autoscalingv2.HPAScalingRules{
         StabilizationWindowSeconds: utilpointer.Int32Ptr(300),
         SelectPolicy:               &policy,
         Policies: []autoscalingv2.HPAScalingPolicy{
@@ -3480,6 +3480,18 @@ func TestScalingWithRules(t *testing.T) {
             expectedReplicas:             255, // (100 - 15) + 200%
             expectedCondition:            "ScaleUpLimit",
         },
+        {
+            name:                         "scaleUp with percent policy and previous scale up and down events",
+            scaleUpEvents:                generateEventsUniformDistribution([]int{4}, 120),
+            scaleDownEvents:              generateEventsUniformDistribution([]int{2}, 120),
+            specMinReplicas:              1,
+            specMaxReplicas:              1000,
+            scaleUpRules:                 generateScalingRules(0, 0, 300, 300, 0),
+            currentReplicas:              6,
+            prenormalizedDesiredReplicas: 24,
+            expectedReplicas:             16,
+            expectedCondition:            "ScaleUpLimit",
+        },
         // ScaleDown with PeriodSeconds usage
         {
             name: "scaleDown with default policy and previous events",
@@ -3546,6 +3558,18 @@ func TestScalingWithRules(t *testing.T) {
             expectedReplicas:             56, // (100 + 12) - 50%
             expectedCondition:            "ScaleDownLimit",
         },
+        {
+            name:                         "scaleDown with percent policy and previous scale up and down events",
+            scaleUpEvents:                generateEventsUniformDistribution([]int{2}, 120),
+            scaleDownEvents:              generateEventsUniformDistribution([]int{4}, 120),
+            specMinReplicas:              1,
+            specMaxReplicas:              1000,
+            scaleDownRules:               generateScalingRules(0, 0, 50, 180, 0),
+            currentReplicas:              10,
+            prenormalizedDesiredReplicas: 1,
+            expectedReplicas:             6,
+            expectedCondition:            "ScaleDownLimit",
+        },
         {
             // corner case for calculating the scaleDownLimit, when we changed pod or percent policy after a lot of scaleDown events
             // in this case we shouldn't allow scale down, though, the naive formula will suggest that scaleDownlimit is more then CurrentReplicas (100+30-10% > 100)
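As the corner-case comment above explains, the reconstructed period-start count can push the computed limit past the current replica count: with 100 current replicas and 30 recently scaled down, the period started at 130 replicas, and a 10% policy yields a limit of 117 > 100. The `if scaleDownLimit > args.CurrentReplicas` guard shown in the first hunk clamps the limit to currentReplicas in that case, so no further scale-down is allowed until the old events age out.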