Merge pull request #53690 from mattjmcnaughton/mattjmcnaughton/53670-fix-hpa-scaling-above-max-replicas
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Fix hpa scaling above max replicas w/ scaleUpLimit

**What this PR does / why we need it**: Fixes a bug where `desiredReplicas` could end up greater than `maxReplicas` when the original `desiredReplicas > scaleUpLimit` and `scaleUpLimit > maxReplicas`. Previously, when that happened, the controller would scale up to `scaleUpLimit` and then, on the next autoscaling run, scale back down to `maxReplicas`. This change addresses that issue and adds a regression test.

**Which issue this PR fixes**: fixes #53670

**Release note**:

```release-note
Address a bug which allowed the horizontal pod autoscaler to allocate `desiredReplicas` > `maxReplicas` in certain instances.
```
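Stripped of the controller plumbing, the ordering the fix enforces can be sketched as a small standalone Go program. This is a minimal sketch, not the actual controller code: `clampDesiredReplicas` and the numbers in `main` are purely illustrative, while the real logic lives in `HorizontalController.reconcileAutoscaler` and also records `ScalingLimited` conditions.

```go
package main

import "fmt"

// clampDesiredReplicas mirrors the ordering the fix enforces: first limit the
// desired count by the scale-up rate limit, then make sure the result can
// never exceed maxReplicas, even when scaleUpLimit itself is above the max.
// (Hypothetical helper for illustration only.)
func clampDesiredReplicas(desiredReplicas, scaleUpLimit, maxReplicas int32) int32 {
	if desiredReplicas > scaleUpLimit {
		desiredReplicas = scaleUpLimit
	}
	if desiredReplicas > maxReplicas {
		desiredReplicas = maxReplicas
	}
	return desiredReplicas
}

func main() {
	// Illustrative values: the metrics ask for far more replicas than the
	// scale-up limit allows, and the scale-up limit is above maxReplicas.
	fmt.Println(clampDesiredReplicas(60, 24, 20)) // prints 20, not 24
}
```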
This commit is contained in: commit 93b3469a1f
```diff
@@ -422,6 +422,15 @@ func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.Ho
 		case desiredReplicas > scaleUpLimit:
 			setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, "ScaleUpLimit", "the desired replica count is increasing faster than the maximum scale rate")
 			desiredReplicas = scaleUpLimit
+
+			// Ensure that even if the scaleUpLimit is greater
+			// than the maximum number of replicas, we only
+			// set the max number of replicas as desired.
+			if scaleUpLimit > hpa.Spec.MaxReplicas {
+				setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, "TooManyReplicas", "the desired replica count was more than the maximum replica count")
+				desiredReplicas = hpa.Spec.MaxReplicas
+			}
+
 		case hpa.Spec.MinReplicas != nil && desiredReplicas < *hpa.Spec.MinReplicas:
 			// make sure we aren't below our minimum
 			var statusMsg string
```
```diff
@@ -1219,6 +1219,30 @@ func TestUpscaleCap(t *testing.T) {
 	tc.runTest(t)
 }
 
+func TestUpscaleCapGreaterThanMaxReplicas(t *testing.T) {
+	tc := testCase{
+		minReplicas:     1,
+		maxReplicas:     20,
+		initialReplicas: 3,
+		// desiredReplicas would be 24 without maxReplicas
+		desiredReplicas:     20,
+		CPUTarget:           10,
+		reportedLevels:      []uint64{100, 200, 300},
+		reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
+		useMetricsAPI:       true,
+		expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
+			Type:   autoscalingv2.ScalingLimited,
+			Status: v1.ConditionTrue,
+			Reason: "ScaleUpLimit",
+		}, autoscalingv2.HorizontalPodAutoscalerCondition{
+			Type:   autoscalingv2.ScalingLimited,
+			Status: v1.ConditionTrue,
+			Reason: "TooManyReplicas",
+		}),
+	}
+	tc.runTest(t)
+}
+
 func TestConditionInvalidSelectorMissing(t *testing.T) {
 	tc := testCase{
 		minReplicas: 1,
```
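The new `TestUpscaleCapGreaterThanMaxReplicas` case pins this behavior: even though the reported CPU levels would push the desired count past both limits, the test expects exactly 20 replicas along with both `ScalingLimited` reasons. Assuming the HPA controller tests live under `pkg/controller/podautoscaler` as in upstream Kubernetes, the regression test can be run on its own with `go test ./pkg/controller/podautoscaler/ -run TestUpscaleCapGreaterThanMaxReplicas`.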