Merge pull request #130797 from jm-franc/configurable-tolerance

Add support for HPA configurable tolerance
Commit 34e80be133 by Kubernetes Prow Robot, 2025-03-24 14:20:32 -07:00 (committed by GitHub)
GPG Key ID: B5690EEEBB952194
36 changed files with 1656 additions and 305 deletions

View File

@@ -3934,10 +3934,10 @@
"type": "object"
},
"io.k8s.api.autoscaling.v2.HPAScalingRules": {
-"description": "HPAScalingRules configures the scaling behavior for one direction. These Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.",
+"description": "HPAScalingRules configures the scaling behavior for one direction via scaling Policy Rules and a configurable metric tolerance.\n\nScaling Policy Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.\n\nThe tolerance is applied to the metric values and prevents scaling too eagerly for small metric variations. (Note that setting a tolerance requires enabling the alpha HPAConfigurableTolerance feature gate.)",
"properties": {
"policies": {
-"description": "policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid",
+"description": "policies is a list of potential scaling polices which can be used during scaling. If not set, use the default values: - For scale up: allow doubling the number of pods, or an absolute change of 4 pods in a 15s window. - For scale down: allow all pods to be removed in a 15s window.",
"items": {
"$ref": "#/definitions/io.k8s.api.autoscaling.v2.HPAScalingPolicy"
},
@@ -3952,6 +3952,10 @@
"description": "stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long).",
"format": "int32",
"type": "integer"
},
"tolerance": {
"$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity",
"description": "tolerance is the tolerance on the ratio between the current and desired metric value under which no updates are made to the desired number of replicas (e.g. 0.01 for 1%). Must be greater than or equal to zero. If not set, the default cluster-wide tolerance is applied (by default 10%).\n\nFor example, if autoscaling is configured with a memory consumption target of 100Mi, and scale-down and scale-up tolerances of 5% and 1% respectively, scaling will be triggered when the actual consumption falls below 95Mi or exceeds 101Mi.\n\nThis is an alpha field and requires enabling the HPAConfigurableTolerance feature gate."
}
},
"type": "object"

View File

@@ -170,10 +170,10 @@
"type": "object"
},
"io.k8s.api.autoscaling.v2.HPAScalingRules": {
-"description": "HPAScalingRules configures the scaling behavior for one direction. These Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.",
+"description": "HPAScalingRules configures the scaling behavior for one direction via scaling Policy Rules and a configurable metric tolerance.\n\nScaling Policy Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.\n\nThe tolerance is applied to the metric values and prevents scaling too eagerly for small metric variations. (Note that setting a tolerance requires enabling the alpha HPAConfigurableTolerance feature gate.)",
"properties": {
"policies": {
-"description": "policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid",
+"description": "policies is a list of potential scaling polices which can be used during scaling. If not set, use the default values: - For scale up: allow doubling the number of pods, or an absolute change of 4 pods in a 15s window. - For scale down: allow all pods to be removed in a 15s window.",
"items": {
"allOf": [
{
@@ -193,6 +193,14 @@
"description": "stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long).",
"format": "int32",
"type": "integer"
},
"tolerance": {
"allOf": [
{
"$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity"
}
],
"description": "tolerance is the tolerance on the ratio between the current and desired metric value under which no updates are made to the desired number of replicas (e.g. 0.01 for 1%). Must be greater than or equal to zero. If not set, the default cluster-wide tolerance is applied (by default 10%).\n\nFor example, if autoscaling is configured with a memory consumption target of 100Mi, and scale-down and scale-up tolerances of 5% and 1% respectively, scaling will be triggered when the actual consumption falls below 95Mi or exceeds 101Mi.\n\nThis is an alpha field and requires enabling the HPAConfigurableTolerance feature gate."
}
},
"type": "object"

View File

@@ -36,3 +36,11 @@ const DefaultCPUUtilization = 80
// BehaviorSpecsAnnotation is the annotation which holds the HPA constraints specs
// when converting the `Behavior` field from autoscaling/v2beta2
const BehaviorSpecsAnnotation = "autoscaling.alpha.kubernetes.io/behavior"
// ToleranceScaleDownAnnotation is the annotation which holds the HPA tolerance specs
// when converting the `ScaleDown.Tolerance` field from autoscaling/v2
const ToleranceScaleDownAnnotation = "autoscaling.alpha.kubernetes.io/scale-down-tolerance"
// ToleranceScaleUpAnnotation is the annotation which holds the HPA tolerance specs
// when converting the `ScaleUp.Tolerance` field from autoscaling/v2
const ToleranceScaleUpAnnotation = "autoscaling.alpha.kubernetes.io/scale-up-tolerance"

View File

@@ -16,8 +16,10 @@ limitations under the License.
package autoscaling
-// DropRoundTripHorizontalPodAutoscalerAnnotations removes any annotations used to serialize round-tripped fields from later API versions,
+// DropRoundTripHorizontalPodAutoscalerAnnotations removes any annotations used to
// serialize round-tripped fields from HorizontalPodAutoscaler later API versions,
// and returns false if no changes were made and the original input object was returned.
//
// It should always be called when converting internal -> external versions, prior
// to setting any of the custom annotations:
//
@@ -34,12 +36,16 @@ package autoscaling
func DropRoundTripHorizontalPodAutoscalerAnnotations(in map[string]string) (out map[string]string, copied bool) {
_, hasMetricsSpecs := in[MetricSpecsAnnotation]
_, hasBehaviorSpecs := in[BehaviorSpecsAnnotation]
_, hasToleranceScaleDown := in[ToleranceScaleDownAnnotation]
_, hasToleranceScaleUp := in[ToleranceScaleUpAnnotation]
_, hasMetricsStatuses := in[MetricStatusesAnnotation]
_, hasConditions := in[HorizontalPodAutoscalerConditionsAnnotation]
-if hasMetricsSpecs || hasBehaviorSpecs || hasMetricsStatuses || hasConditions {
+if hasMetricsSpecs || hasBehaviorSpecs || hasToleranceScaleDown || hasToleranceScaleUp || hasMetricsStatuses || hasConditions {
out = DeepCopyStringMap(in)
delete(out, MetricSpecsAnnotation)
delete(out, BehaviorSpecsAnnotation)
delete(out, ToleranceScaleDownAnnotation)
delete(out, ToleranceScaleUpAnnotation)
delete(out, MetricStatusesAnnotation)
delete(out, HorizontalPodAutoscalerConditionsAnnotation)
return out, true

View File

@@ -138,12 +138,18 @@ const (
DisabledPolicySelect ScalingPolicySelect = "Disabled"
)
-// HPAScalingRules configures the scaling behavior for one direction.
-// These Rules are applied after calculating DesiredReplicas from metrics for the HPA.
+// HPAScalingRules configures the scaling behavior for one direction via
+// scaling Policy Rules and a configurable metric tolerance.
//
// Scaling Policy Rules are applied after calculating DesiredReplicas from metrics for the HPA.
// They can limit the scaling velocity by specifying scaling policies.
// They can prevent flapping by specifying the stabilization window, so that the
// number of replicas is not set instantly, instead, the safest value from the stabilization
// window is chosen.
//
// The tolerance is applied to the metric values and prevents scaling too
// eagerly for small metric variations. (Note that setting a tolerance requires
// enabling the alpha HPAConfigurableTolerance feature gate.)
type HPAScalingRules struct {
// StabilizationWindowSeconds is the number of seconds for which past recommendations should be
// considered while scaling up or scaling down.
@@ -157,10 +163,27 @@ type HPAScalingRules struct {
// If not set, the default value MaxPolicySelect is used.
// +optional
SelectPolicy *ScalingPolicySelect
-// policies is a list of potential scaling polices which can used during scaling.
-// At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid
+// policies is a list of potential scaling polices which can be used during scaling.
+// If not set, use the default values:
// - For scale up: allow doubling the number of pods, or an absolute change of 4 pods in a 15s window.
// - For scale down: allow all pods to be removed in a 15s window.
// +optional
Policies []HPAScalingPolicy
// tolerance is the tolerance on the ratio between the current and desired
// metric value under which no updates are made to the desired number of
// replicas (e.g. 0.01 for 1%). Must be greater than or equal to zero. If not
// set, the default cluster-wide tolerance is applied (by default 10%).
//
// For example, if autoscaling is configured with a memory consumption target of 100Mi,
// and scale-down and scale-up tolerances of 5% and 1% respectively, scaling will be
// triggered when the actual consumption falls below 95Mi or exceeds 101Mi.
//
// This is an alpha field and requires enabling the HPAConfigurableTolerance
// feature gate.
//
// +featureGate=HPAConfigurableTolerance
// +optional
Tolerance *resource.Quantity
}
// HPAScalingPolicyType is the type of the policy which could be used while making scaling decisions.
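As an illustration of the new field, here is a minimal sketch of how a client could set per-direction tolerances (added for this write-up, not code from the PR; it assumes a k8s.io/api/autoscaling/v2 version that already contains the Tolerance field introduced here, and a cluster with the alpha HPAConfigurableTolerance gate enabled):

package main

import (
	"fmt"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Hypothetical per-direction tolerances: react to >1% deviations when
	// scaling up, but require >5% deviations before scaling down.
	scaleUpTol := resource.MustParse("0.01")
	scaleDownTol := resource.MustParse("0.05")

	behavior := &autoscalingv2.HorizontalPodAutoscalerBehavior{
		ScaleUp:   &autoscalingv2.HPAScalingRules{Tolerance: &scaleUpTol},
		ScaleDown: &autoscalingv2.HPAScalingRules{Tolerance: &scaleDownTol},
	}
	fmt.Println(behavior.ScaleUp.Tolerance, behavior.ScaleDown.Tolerance)
}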

View File

@@ -91,9 +91,12 @@ func SetDefaults_HorizontalPodAutoscaler(obj *autoscalingv2.HorizontalPodAutosca
SetDefaults_HorizontalPodAutoscalerBehavior(obj)
}
-// SetDefaults_HorizontalPodAutoscalerBehavior fills the behavior if it is not null
+// SetDefaults_HorizontalPodAutoscalerBehavior fills the behavior if it contains
// at least one scaling rule policy (for scale-up or scale-down)
func SetDefaults_HorizontalPodAutoscalerBehavior(obj *autoscalingv2.HorizontalPodAutoscaler) {
-// if behavior is specified, we should fill all the 'nil' values with the default ones
+// If behavior contains a scaling rule policy (either for scale-up, scale-down, or both), we
// should fill all the unset scaling policy fields (i.e. StabilizationWindowSeconds,
// SelectPolicy, Policies) with default values
if obj.Spec.Behavior != nil {
obj.Spec.Behavior.ScaleUp = GenerateHPAScaleUpRules(obj.Spec.Behavior.ScaleUp)
obj.Spec.Behavior.ScaleDown = GenerateHPAScaleDownRules(obj.Spec.Behavior.ScaleDown)
@@ -129,5 +132,8 @@ func copyHPAScalingRules(from, to *autoscalingv2.HPAScalingRules) *autoscalingv2
if from.Policies != nil {
to.Policies = from.Policies
}
if from.Tolerance != nil {
to.Tolerance = from.Tolerance
}
return to
}

View File

@@ -20,8 +20,12 @@ import (
"reflect"
"testing"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/features"
"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/assert"
@@ -132,14 +136,17 @@ func TestGenerateScaleUpRules(t *testing.T) {
rateUpPercentPeriodSeconds int32
stabilizationSeconds *int32
selectPolicy *autoscalingv2.ScalingPolicySelect
tolerance *resource.Quantity
expectedPolicies []autoscalingv2.HPAScalingPolicy
expectedStabilization *int32
expectedSelectPolicy string
expectedTolerance *resource.Quantity
annotation string
}
maxPolicy := autoscalingv2.MaxChangePolicySelect
minPolicy := autoscalingv2.MinChangePolicySelect
sampleTolerance := resource.MustParse("0.5")
tests := []TestCase{
{
annotation: "Default values",
@@ -208,12 +215,25 @@
expectedStabilization: utilpointer.Int32(25),
expectedSelectPolicy: string(autoscalingv2.MaxChangePolicySelect),
},
{
annotation: "Percent policy and tolerance is specified",
rateUpPercent: 7,
rateUpPercentPeriodSeconds: 10,
tolerance: &sampleTolerance,
expectedPolicies: []autoscalingv2.HPAScalingPolicy{
{Type: autoscalingv2.PercentScalingPolicy, Value: 7, PeriodSeconds: 10},
},
expectedStabilization: utilpointer.Int32(0),
expectedSelectPolicy: string(autoscalingv2.MaxChangePolicySelect),
expectedTolerance: &sampleTolerance,
},
}
for _, tc := range tests {
t.Run(tc.annotation, func(t *testing.T) {
scaleUpRules := &autoscalingv2.HPAScalingRules{
StabilizationWindowSeconds: tc.stabilizationSeconds,
SelectPolicy: tc.selectPolicy,
Tolerance: tc.tolerance,
}
if tc.rateUpPods != 0 || tc.rateUpPodsPeriodSeconds != 0 {
scaleUpRules.Policies = append(scaleUpRules.Policies, autoscalingv2.HPAScalingPolicy{
@@ -234,10 +254,138 @@
}
assert.Equal(t, autoscalingv2.ScalingPolicySelect(tc.expectedSelectPolicy), *up.SelectPolicy)
assert.Equal(t, tc.expectedTolerance, up.Tolerance)
})
}
}
func TestSetBehaviorDefaults(t *testing.T) {
sampleTolerance := resource.MustParse("0.5")
maxPolicy := autoscalingv2.MaxChangePolicySelect
policies := []autoscalingv2.HPAScalingPolicy{
{Type: autoscalingv2.PercentScalingPolicy, Value: 7, PeriodSeconds: 10},
}
type TestCase struct {
behavior *autoscalingv2.HorizontalPodAutoscalerBehavior
expectedBehavior *autoscalingv2.HorizontalPodAutoscalerBehavior
annotation string
}
tests := []TestCase{
{
annotation: "Nil behavior",
behavior: nil,
expectedBehavior: nil,
},
{
annotation: "Behavior with stabilizationWindowSeconds and tolerance",
behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{
ScaleUp: &autoscalingv2.HPAScalingRules{
StabilizationWindowSeconds: utilpointer.Int32(100),
Tolerance: &sampleTolerance,
},
},
expectedBehavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{
ScaleDown: &autoscalingv2.HPAScalingRules{
SelectPolicy: &maxPolicy,
Policies: []autoscalingv2.HPAScalingPolicy{
{Type: autoscalingv2.PercentScalingPolicy, Value: 100, PeriodSeconds: 15},
},
},
ScaleUp: &autoscalingv2.HPAScalingRules{
StabilizationWindowSeconds: utilpointer.Int32(100),
SelectPolicy: &maxPolicy,
Policies: []autoscalingv2.HPAScalingPolicy{
{Type: autoscalingv2.PodsScalingPolicy, Value: 4, PeriodSeconds: 15},
{Type: autoscalingv2.PercentScalingPolicy, Value: 100, PeriodSeconds: 15},
},
Tolerance: &sampleTolerance,
},
},
},
{
annotation: "Behavior with policy, without tolerance",
behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{
ScaleDown: &autoscalingv2.HPAScalingRules{
Policies: policies,
},
},
expectedBehavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{
ScaleDown: &autoscalingv2.HPAScalingRules{
SelectPolicy: &maxPolicy,
Policies: policies,
},
ScaleUp: &autoscalingv2.HPAScalingRules{
StabilizationWindowSeconds: utilpointer.Int32(0),
SelectPolicy: &maxPolicy,
Policies: []autoscalingv2.HPAScalingPolicy{
{Type: autoscalingv2.PodsScalingPolicy, Value: 4, PeriodSeconds: 15},
{Type: autoscalingv2.PercentScalingPolicy, Value: 100, PeriodSeconds: 15},
},
},
},
},
}
for _, tc := range tests {
t.Run(tc.annotation, func(t *testing.T) {
hpa := autoscalingv2.HorizontalPodAutoscaler{
Spec: autoscalingv2.HorizontalPodAutoscalerSpec{
Behavior: tc.behavior,
},
}
expectedHPA := autoscalingv2.HorizontalPodAutoscaler{
Spec: autoscalingv2.HorizontalPodAutoscalerSpec{
Behavior: tc.expectedBehavior,
},
}
SetDefaults_HorizontalPodAutoscalerBehavior(&hpa)
assert.Equal(t, expectedHPA, hpa)
})
}
}
func TestSetBehaviorDefaultsConfigurableToleranceEnabled(t *testing.T) {
// Enable HPAConfigurableTolerance feature gate.
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.HPAConfigurableTolerance, true)
// Verify that the tolerance field is left unset.
maxPolicy := autoscalingv2.MaxChangePolicySelect
hpa := autoscalingv2.HorizontalPodAutoscaler{
Spec: autoscalingv2.HorizontalPodAutoscalerSpec{
Behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{
ScaleUp: &autoscalingv2.HPAScalingRules{
StabilizationWindowSeconds: utilpointer.Int32(100),
},
},
},
}
expectedHPA := autoscalingv2.HorizontalPodAutoscaler{
Spec: autoscalingv2.HorizontalPodAutoscalerSpec{
Behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{
ScaleDown: &autoscalingv2.HPAScalingRules{
SelectPolicy: &maxPolicy,
Policies: []autoscalingv2.HPAScalingPolicy{
{Type: autoscalingv2.PercentScalingPolicy, Value: 100, PeriodSeconds: 15},
},
},
ScaleUp: &autoscalingv2.HPAScalingRules{
StabilizationWindowSeconds: utilpointer.Int32(100),
SelectPolicy: &maxPolicy,
Policies: []autoscalingv2.HPAScalingPolicy{
{Type: autoscalingv2.PodsScalingPolicy, Value: 4, PeriodSeconds: 15},
{Type: autoscalingv2.PercentScalingPolicy, Value: 100, PeriodSeconds: 15},
},
},
},
},
}
SetDefaults_HorizontalPodAutoscalerBehavior(&hpa)
assert.Equal(t, expectedHPA, hpa)
}
func TestHorizontalPodAutoscalerAnnotations(t *testing.T) {
tests := []struct {
hpa autoscalingv2.HorizontalPodAutoscaler

View File

@@ -452,6 +452,7 @@ func autoConvert_v2_HPAScalingRules_To_autoscaling_HPAScalingRules(in *autoscali
out.StabilizationWindowSeconds = (*int32)(unsafe.Pointer(in.StabilizationWindowSeconds))
out.SelectPolicy = (*autoscaling.ScalingPolicySelect)(unsafe.Pointer(in.SelectPolicy))
out.Policies = *(*[]autoscaling.HPAScalingPolicy)(unsafe.Pointer(&in.Policies))
out.Tolerance = (*resource.Quantity)(unsafe.Pointer(in.Tolerance))
return nil
}
@@ -464,6 +465,7 @@ func autoConvert_autoscaling_HPAScalingRules_To_v2_HPAScalingRules(in *autoscali
out.StabilizationWindowSeconds = (*int32)(unsafe.Pointer(in.StabilizationWindowSeconds))
out.SelectPolicy = (*autoscalingv2.ScalingPolicySelect)(unsafe.Pointer(in.SelectPolicy))
out.Policies = *(*[]autoscalingv2.HPAScalingPolicy)(unsafe.Pointer(&in.Policies))
out.Tolerance = (*resource.Quantity)(unsafe.Pointer(in.Tolerance))
return nil
}

View File

@@ -17,8 +17,11 @@ limitations under the License.
package v2beta2
import (
"fmt"
autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kubernetes/pkg/apis/autoscaling"
)
@@ -27,8 +30,29 @@ func Convert_autoscaling_HorizontalPodAutoscaler_To_v2beta2_HorizontalPodAutosca
if err := autoConvert_autoscaling_HorizontalPodAutoscaler_To_v2beta2_HorizontalPodAutoscaler(in, out, s); err != nil {
return err
}
-// v2beta2 round-trips to internal without any serialized annotations, make sure any from other versions don't get serialized
-out.Annotations, _ = autoscaling.DropRoundTripHorizontalPodAutoscalerAnnotations(out.Annotations)
+// Ensure old round-trips annotations are discarded
+annotations, copiedAnnotations := autoscaling.DropRoundTripHorizontalPodAutoscalerAnnotations(out.Annotations)
out.Annotations = annotations
behavior := in.Spec.Behavior
if behavior == nil {
return nil
}
// Save the tolerance fields in annotations for round-trip
if behavior.ScaleDown != nil && behavior.ScaleDown.Tolerance != nil {
if !copiedAnnotations {
copiedAnnotations = true
out.Annotations = autoscaling.DeepCopyStringMap(out.Annotations)
}
out.Annotations[autoscaling.ToleranceScaleDownAnnotation] = behavior.ScaleDown.Tolerance.String()
}
if behavior.ScaleUp != nil && behavior.ScaleUp.Tolerance != nil {
if !copiedAnnotations {
copiedAnnotations = true
out.Annotations = autoscaling.DeepCopyStringMap(out.Annotations)
}
out.Annotations[autoscaling.ToleranceScaleUpAnnotation] = behavior.ScaleUp.Tolerance.String()
}
return nil
}
@@ -36,7 +60,44 @@ func Convert_v2beta2_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutosca
if err := autoConvert_v2beta2_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in, out, s); err != nil {
return err
}
-// v2beta2 round-trips to internal without any serialized annotations, make sure any from other versions don't get serialized
+// Restore the tolerance fields from annotations for round-trip
if tolerance, ok := out.Annotations[autoscaling.ToleranceScaleDownAnnotation]; ok {
if out.Spec.Behavior == nil {
out.Spec.Behavior = &autoscaling.HorizontalPodAutoscalerBehavior{}
}
if out.Spec.Behavior.ScaleDown == nil {
out.Spec.Behavior.ScaleDown = &autoscaling.HPAScalingRules{}
}
q, err := resource.ParseQuantity(tolerance)
if err != nil {
return fmt.Errorf("failed to parse annotation %q: %w", autoscaling.ToleranceScaleDownAnnotation, err)
}
out.Spec.Behavior.ScaleDown.Tolerance = &q
}
if tolerance, ok := out.Annotations[autoscaling.ToleranceScaleUpAnnotation]; ok {
if out.Spec.Behavior == nil {
out.Spec.Behavior = &autoscaling.HorizontalPodAutoscalerBehavior{}
}
if out.Spec.Behavior.ScaleUp == nil {
out.Spec.Behavior.ScaleUp = &autoscaling.HPAScalingRules{}
}
q, err := resource.ParseQuantity(tolerance)
if err != nil {
return fmt.Errorf("failed to parse annotation %q: %w", autoscaling.ToleranceScaleUpAnnotation, err)
}
out.Spec.Behavior.ScaleUp.Tolerance = &q
}
// Do not save round-trip annotations in internal resource
out.Annotations, _ = autoscaling.DropRoundTripHorizontalPodAutoscalerAnnotations(out.Annotations)
return nil
}
func Convert_v2beta2_HPAScalingRules_To_autoscaling_HPAScalingRules(in *autoscalingv2beta2.HPAScalingRules, out *autoscaling.HPAScalingRules, s conversion.Scope) error {
// Tolerance field is handled in the HorizontalPodAutoscaler conversion function.
return autoConvert_v2beta2_HPAScalingRules_To_autoscaling_HPAScalingRules(in, out, s)
}
func Convert_autoscaling_HPAScalingRules_To_v2beta2_HPAScalingRules(in *autoscaling.HPAScalingRules, out *autoscalingv2beta2.HPAScalingRules, s conversion.Scope) error {
// Tolerance field is handled in the HorizontalPodAutoscaler conversion function.
return autoConvert_autoscaling_HPAScalingRules_To_v2beta2_HPAScalingRules(in, out, s)
}
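To summarize what the converters above and the regenerated code below achieve, here is a sketch of the round-trip (illustrative only, modeled on the TestConvertRoundTrip test added in the next file; the helper name exampleRoundTrip is made up and assumes it lives in the same v2beta2 conversion package):

package v2beta2

import (
	autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kubernetes/pkg/apis/autoscaling"
)

// exampleRoundTrip is an illustrative sketch, not part of this PR.
func exampleRoundTrip() (*autoscalingv2beta2.HorizontalPodAutoscaler, *autoscaling.HorizontalPodAutoscaler, error) {
	tol := resource.MustParse("0.1")
	internal := &autoscaling.HorizontalPodAutoscaler{
		Spec: autoscaling.HorizontalPodAutoscalerSpec{
			MaxReplicas: 3,
			Behavior: &autoscaling.HorizontalPodAutoscalerBehavior{
				ScaleUp: &autoscaling.HPAScalingRules{Tolerance: &tol},
			},
		},
	}

	// Internal -> v2beta2: v2beta2 has no tolerance field, so the value is
	// carried in the ToleranceScaleUpAnnotation set by the converter above.
	external := &autoscalingv2beta2.HorizontalPodAutoscaler{}
	if err := Convert_autoscaling_HorizontalPodAutoscaler_To_v2beta2_HorizontalPodAutoscaler(internal, external, nil); err != nil {
		return nil, nil, err
	}

	// v2beta2 -> internal: the annotation is parsed back into
	// Spec.Behavior.ScaleUp.Tolerance and then dropped from the annotations.
	restored := &autoscaling.HorizontalPodAutoscaler{}
	if err := Convert_v2beta2_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(external, restored, nil); err != nil {
		return nil, nil, err
	}
	return external, restored, nil
}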

View File

@@ -0,0 +1,124 @@
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v2beta2
import (
"testing"
"github.com/google/go-cmp/cmp"
autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/resource"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/apis/autoscaling"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/utils/ptr"
)
func TestConvertRoundTrip(t *testing.T) {
tolerance1 := resource.MustParse("0.1")
tolerance2 := resource.MustParse("0.2")
tests := []struct {
name string
internalHPA *autoscaling.HorizontalPodAutoscaler
}{
{
"Complete HPA with scale-up tolerance",
&autoscaling.HorizontalPodAutoscaler{
ObjectMeta: v1.ObjectMeta{
Name: "hpa",
Namespace: "hpa-ns",
Annotations: map[string]string{"key": "value"},
},
Spec: autoscaling.HorizontalPodAutoscalerSpec{
MinReplicas: ptr.To(int32(1)),
MaxReplicas: 3,
Metrics: []autoscaling.MetricSpec{
{
Type: autoscaling.ResourceMetricSourceType,
Resource: &autoscaling.ResourceMetricSource{
Name: api.ResourceCPU,
Target: autoscaling.MetricTarget{
Type: autoscaling.AverageValueMetricType,
AverageValue: resource.NewMilliQuantity(300, resource.DecimalSI),
},
},
},
},
Behavior: &autoscaling.HorizontalPodAutoscalerBehavior{
ScaleUp: &autoscaling.HPAScalingRules{
Policies: []autoscaling.HPAScalingPolicy{
{
Type: autoscaling.PodsScalingPolicy,
Value: 1,
PeriodSeconds: 2,
},
},
Tolerance: &tolerance1,
},
},
},
},
},
{
"Scale-down and scale-up tolerances",
&autoscaling.HorizontalPodAutoscaler{
Spec: autoscaling.HorizontalPodAutoscalerSpec{
MinReplicas: ptr.To(int32(1)),
MaxReplicas: 3,
Behavior: &autoscaling.HorizontalPodAutoscalerBehavior{
ScaleUp: &autoscaling.HPAScalingRules{
Tolerance: &tolerance1,
},
ScaleDown: &autoscaling.HPAScalingRules{
Tolerance: &tolerance2,
},
},
},
},
},
{
"Scale-down tolerance only",
&autoscaling.HorizontalPodAutoscaler{
Spec: autoscaling.HorizontalPodAutoscalerSpec{
MinReplicas: ptr.To(int32(1)),
MaxReplicas: 3,
Behavior: &autoscaling.HorizontalPodAutoscalerBehavior{
ScaleDown: &autoscaling.HPAScalingRules{
Tolerance: &tolerance2,
},
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
v2beta2HPA := &autoscalingv2beta2.HorizontalPodAutoscaler{}
if err := Convert_autoscaling_HorizontalPodAutoscaler_To_v2beta2_HorizontalPodAutoscaler(tt.internalHPA, v2beta2HPA, nil); err != nil {
t.Errorf("Convert_autoscaling_HorizontalPodAutoscaler_To_v2beta2_HorizontalPodAutoscaler() error = %v", err)
}
roundtripHPA := &autoscaling.HorizontalPodAutoscaler{}
if err := Convert_v2beta2_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(v2beta2HPA, roundtripHPA, nil); err != nil {
t.Errorf("Convert_v2beta2_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler() error = %v", err)
}
if !apiequality.Semantic.DeepEqual(tt.internalHPA, roundtripHPA) {
t.Errorf("HPA is not equivalent after round-trip: mismatch (-want +got):\n%s", cmp.Diff(tt.internalHPA, roundtripHPA))
}
})
}
}

View File

@@ -101,16 +101,6 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
-if err := s.AddGeneratedConversionFunc((*autoscalingv2beta2.HPAScalingRules)(nil), (*autoscaling.HPAScalingRules)(nil), func(a, b interface{}, scope conversion.Scope) error {
-return Convert_v2beta2_HPAScalingRules_To_autoscaling_HPAScalingRules(a.(*autoscalingv2beta2.HPAScalingRules), b.(*autoscaling.HPAScalingRules), scope)
-}); err != nil {
-return err
-}
-if err := s.AddGeneratedConversionFunc((*autoscaling.HPAScalingRules)(nil), (*autoscalingv2beta2.HPAScalingRules)(nil), func(a, b interface{}, scope conversion.Scope) error {
-return Convert_autoscaling_HPAScalingRules_To_v2beta2_HPAScalingRules(a.(*autoscaling.HPAScalingRules), b.(*autoscalingv2beta2.HPAScalingRules), scope)
-}); err != nil {
-return err
-}
if err := s.AddGeneratedConversionFunc((*autoscalingv2beta2.HorizontalPodAutoscalerBehavior)(nil), (*autoscaling.HorizontalPodAutoscalerBehavior)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta2_HorizontalPodAutoscalerBehavior_To_autoscaling_HorizontalPodAutoscalerBehavior(a.(*autoscalingv2beta2.HorizontalPodAutoscalerBehavior), b.(*autoscaling.HorizontalPodAutoscalerBehavior), scope)
}); err != nil {
@@ -271,11 +261,21 @@
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscaling.HPAScalingRules)(nil), (*autoscalingv2beta2.HPAScalingRules)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_HPAScalingRules_To_v2beta2_HPAScalingRules(a.(*autoscaling.HPAScalingRules), b.(*autoscalingv2beta2.HPAScalingRules), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscaling.HorizontalPodAutoscaler)(nil), (*autoscalingv2beta2.HorizontalPodAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_HorizontalPodAutoscaler_To_v2beta2_HorizontalPodAutoscaler(a.(*autoscaling.HorizontalPodAutoscaler), b.(*autoscalingv2beta2.HorizontalPodAutoscaler), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscalingv2beta2.HPAScalingRules)(nil), (*autoscaling.HPAScalingRules)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta2_HPAScalingRules_To_autoscaling_HPAScalingRules(a.(*autoscalingv2beta2.HPAScalingRules), b.(*autoscaling.HPAScalingRules), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscalingv2beta2.HorizontalPodAutoscaler)(nil), (*autoscaling.HorizontalPodAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v2beta2_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(a.(*autoscalingv2beta2.HorizontalPodAutoscaler), b.(*autoscaling.HorizontalPodAutoscaler), scope)
}); err != nil {
@@ -455,23 +455,14 @@ func autoConvert_v2beta2_HPAScalingRules_To_autoscaling_HPAScalingRules(in *auto
return nil
}
-// Convert_v2beta2_HPAScalingRules_To_autoscaling_HPAScalingRules is an autogenerated conversion function.
-func Convert_v2beta2_HPAScalingRules_To_autoscaling_HPAScalingRules(in *autoscalingv2beta2.HPAScalingRules, out *autoscaling.HPAScalingRules, s conversion.Scope) error {
-return autoConvert_v2beta2_HPAScalingRules_To_autoscaling_HPAScalingRules(in, out, s)
-}
func autoConvert_autoscaling_HPAScalingRules_To_v2beta2_HPAScalingRules(in *autoscaling.HPAScalingRules, out *autoscalingv2beta2.HPAScalingRules, s conversion.Scope) error {
out.StabilizationWindowSeconds = (*int32)(unsafe.Pointer(in.StabilizationWindowSeconds))
out.SelectPolicy = (*autoscalingv2beta2.ScalingPolicySelect)(unsafe.Pointer(in.SelectPolicy))
out.Policies = *(*[]autoscalingv2beta2.HPAScalingPolicy)(unsafe.Pointer(&in.Policies))
// WARNING: in.Tolerance requires manual conversion: does not exist in peer-type
return nil
}
-// Convert_autoscaling_HPAScalingRules_To_v2beta2_HPAScalingRules is an autogenerated conversion function.
-func Convert_autoscaling_HPAScalingRules_To_v2beta2_HPAScalingRules(in *autoscaling.HPAScalingRules, out *autoscalingv2beta2.HPAScalingRules, s conversion.Scope) error {
-return autoConvert_autoscaling_HPAScalingRules_To_v2beta2_HPAScalingRules(in, out, s)
-}
func autoConvert_v2beta2_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *autoscalingv2beta2.HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v2beta2_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil {
@@ -495,8 +486,24 @@ func autoConvert_autoscaling_HorizontalPodAutoscaler_To_v2beta2_HorizontalPodAut
}
func autoConvert_v2beta2_HorizontalPodAutoscalerBehavior_To_autoscaling_HorizontalPodAutoscalerBehavior(in *autoscalingv2beta2.HorizontalPodAutoscalerBehavior, out *autoscaling.HorizontalPodAutoscalerBehavior, s conversion.Scope) error {
-out.ScaleUp = (*autoscaling.HPAScalingRules)(unsafe.Pointer(in.ScaleUp))
-out.ScaleDown = (*autoscaling.HPAScalingRules)(unsafe.Pointer(in.ScaleDown))
+if in.ScaleUp != nil {
+in, out := &in.ScaleUp, &out.ScaleUp
*out = new(autoscaling.HPAScalingRules)
if err := Convert_v2beta2_HPAScalingRules_To_autoscaling_HPAScalingRules(*in, *out, s); err != nil {
return err
}
} else {
out.ScaleUp = nil
}
if in.ScaleDown != nil {
in, out := &in.ScaleDown, &out.ScaleDown
*out = new(autoscaling.HPAScalingRules)
if err := Convert_v2beta2_HPAScalingRules_To_autoscaling_HPAScalingRules(*in, *out, s); err != nil {
return err
}
} else {
out.ScaleDown = nil
}
return nil
}
@@ -506,8 +513,24 @@ func Convert_v2beta2_HorizontalPodAutoscalerBehavior_To_autoscaling_HorizontalPo
}
func autoConvert_autoscaling_HorizontalPodAutoscalerBehavior_To_v2beta2_HorizontalPodAutoscalerBehavior(in *autoscaling.HorizontalPodAutoscalerBehavior, out *autoscalingv2beta2.HorizontalPodAutoscalerBehavior, s conversion.Scope) error {
-out.ScaleUp = (*autoscalingv2beta2.HPAScalingRules)(unsafe.Pointer(in.ScaleUp))
-out.ScaleDown = (*autoscalingv2beta2.HPAScalingRules)(unsafe.Pointer(in.ScaleDown))
+if in.ScaleUp != nil {
+in, out := &in.ScaleUp, &out.ScaleUp
*out = new(autoscalingv2beta2.HPAScalingRules)
if err := Convert_autoscaling_HPAScalingRules_To_v2beta2_HPAScalingRules(*in, *out, s); err != nil {
return err
}
} else {
out.ScaleUp = nil
}
if in.ScaleDown != nil {
in, out := &in.ScaleDown, &out.ScaleDown
*out = new(autoscalingv2beta2.HPAScalingRules)
if err := Convert_autoscaling_HPAScalingRules_To_v2beta2_HPAScalingRules(*in, *out, s); err != nil {
return err
}
} else {
out.ScaleDown = nil
}
return nil
}
@@ -603,7 +626,15 @@ func autoConvert_v2beta2_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPo
} else {
out.Metrics = nil
}
-out.Behavior = (*autoscaling.HorizontalPodAutoscalerBehavior)(unsafe.Pointer(in.Behavior))
+if in.Behavior != nil {
in, out := &in.Behavior, &out.Behavior
*out = new(autoscaling.HorizontalPodAutoscalerBehavior)
if err := Convert_v2beta2_HorizontalPodAutoscalerBehavior_To_autoscaling_HorizontalPodAutoscalerBehavior(*in, *out, s); err != nil {
return err
}
} else {
out.Behavior = nil
}
return nil
}
@@ -629,7 +660,15 @@ func autoConvert_autoscaling_HorizontalPodAutoscalerSpec_To_v2beta2_HorizontalPo
} else {
out.Metrics = nil
}
-out.Behavior = (*autoscalingv2beta2.HorizontalPodAutoscalerBehavior)(unsafe.Pointer(in.Behavior))
+if in.Behavior != nil {
in, out := &in.Behavior, &out.Behavior
*out = new(autoscalingv2beta2.HorizontalPodAutoscalerBehavior)
if err := Convert_autoscaling_HorizontalPodAutoscalerBehavior_To_v2beta2_HorizontalPodAutoscalerBehavior(*in, *out, s); err != nil {
return err
}
} else {
out.Behavior = nil
}
return nil
}

View File

@@ -23,11 +23,9 @@ import (
pathvalidation "k8s.io/apimachinery/pkg/api/validation/path"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation/field"
-utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/apis/autoscaling"
corevalidation "k8s.io/kubernetes/pkg/apis/core/v1/validation"
apivalidation "k8s.io/kubernetes/pkg/apis/core/validation"
-"k8s.io/kubernetes/pkg/features"
)
const (
@@ -53,12 +51,12 @@
// Prefix indicates this name will be used as part of generation, in which case trailing dashes are allowed.
var ValidateHorizontalPodAutoscalerName = apivalidation.ValidateReplicationControllerName
-func validateHorizontalPodAutoscalerSpec(autoscaler autoscaling.HorizontalPodAutoscalerSpec, fldPath *field.Path, minReplicasLowerBound int32) field.ErrorList {
+func validateHorizontalPodAutoscalerSpec(autoscaler autoscaling.HorizontalPodAutoscalerSpec, fldPath *field.Path, opts HorizontalPodAutoscalerSpecValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
-if autoscaler.MinReplicas != nil && *autoscaler.MinReplicas < minReplicasLowerBound {
+if autoscaler.MinReplicas != nil && *autoscaler.MinReplicas < opts.MinReplicasLowerBound {
allErrs = append(allErrs, field.Invalid(fldPath.Child("minReplicas"), *autoscaler.MinReplicas,
-fmt.Sprintf("must be greater than or equal to %d", minReplicasLowerBound)))
+fmt.Sprintf("must be greater than or equal to %d", opts.MinReplicasLowerBound)))
}
if autoscaler.MaxReplicas < 1 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("maxReplicas"), autoscaler.MaxReplicas, "must be greater than 0"))
@@ -72,7 +70,7 @@ func validateHorizontalPodAutoscalerSpec(autoscaler autoscaling.HorizontalPodAut
if refErrs := validateMetrics(autoscaler.Metrics, fldPath.Child("metrics"), autoscaler.MinReplicas); len(refErrs) > 0 {
allErrs = append(allErrs, refErrs...)
}
-if refErrs := validateBehavior(autoscaler.Behavior, fldPath.Child("behavior")); len(refErrs) > 0 {
+if refErrs := validateBehavior(autoscaler.Behavior, fldPath.Child("behavior"), opts); len(refErrs) > 0 {
allErrs = append(allErrs, refErrs...)
}
return allErrs
@@ -103,38 +101,17 @@ func ValidateCrossVersionObjectReference(ref autoscaling.CrossVersionObjectRefer
// ValidateHorizontalPodAutoscaler validates a HorizontalPodAutoscaler and returns an
// ErrorList with any errors.
-func ValidateHorizontalPodAutoscaler(autoscaler *autoscaling.HorizontalPodAutoscaler) field.ErrorList {
+func ValidateHorizontalPodAutoscaler(autoscaler *autoscaling.HorizontalPodAutoscaler, opts HorizontalPodAutoscalerSpecValidationOptions) field.ErrorList {
allErrs := apivalidation.ValidateObjectMeta(&autoscaler.ObjectMeta, true, ValidateHorizontalPodAutoscalerName, field.NewPath("metadata"))
allErrs = append(allErrs, validateHorizontalPodAutoscalerSpec(autoscaler.Spec, field.NewPath("spec"), opts)...)
-// MinReplicasLowerBound represents a minimum value for minReplicas
-// 0 when HPA scale-to-zero feature is enabled
-var minReplicasLowerBound int32
-if utilfeature.DefaultFeatureGate.Enabled(features.HPAScaleToZero) {
-minReplicasLowerBound = 0
-} else {
-minReplicasLowerBound = 1
-}
-allErrs = append(allErrs, validateHorizontalPodAutoscalerSpec(autoscaler.Spec, field.NewPath("spec"), minReplicasLowerBound)...)
return allErrs
}
// ValidateHorizontalPodAutoscalerUpdate validates an update to a HorizontalPodAutoscaler and returns an
// ErrorList with any errors.
-func ValidateHorizontalPodAutoscalerUpdate(newAutoscaler, oldAutoscaler *autoscaling.HorizontalPodAutoscaler) field.ErrorList {
+func ValidateHorizontalPodAutoscalerUpdate(newAutoscaler, oldAutoscaler *autoscaling.HorizontalPodAutoscaler, opts HorizontalPodAutoscalerSpecValidationOptions) field.ErrorList {
allErrs := apivalidation.ValidateObjectMetaUpdate(&newAutoscaler.ObjectMeta, &oldAutoscaler.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, validateHorizontalPodAutoscalerSpec(newAutoscaler.Spec, field.NewPath("spec"), opts)...)
-// minReplicasLowerBound represents a minimum value for minReplicas
-// 0 when HPA scale-to-zero feature is enabled or HPA object already has minReplicas=0
-var minReplicasLowerBound int32
-if utilfeature.DefaultFeatureGate.Enabled(features.HPAScaleToZero) || (oldAutoscaler.Spec.MinReplicas != nil && *oldAutoscaler.Spec.MinReplicas == 0) {
-minReplicasLowerBound = 0
-} else {
-minReplicasLowerBound = 1
-}
-allErrs = append(allErrs, validateHorizontalPodAutoscalerSpec(newAutoscaler.Spec, field.NewPath("spec"), minReplicasLowerBound)...)
return allErrs
}
@@ -148,6 +125,13 @@ func ValidateHorizontalPodAutoscalerStatusUpdate(newAutoscaler, oldAutoscaler *a
return allErrs
}
// HorizontalPodAutoscalerSpecValidationOptions contains the different settings for
// HorizontalPodAutoscaler spec validation.
type HorizontalPodAutoscalerSpecValidationOptions struct {
// The minimum value for minReplicas.
MinReplicasLowerBound int32
}
func validateMetrics(metrics []autoscaling.MetricSpec, fldPath *field.Path, minReplicas *int32) field.ErrorList {
allErrs := field.ErrorList{}
hasObjectMetrics := false
@@ -175,13 +159,13 @@ func validateMetrics(metrics []autoscaling.MetricSpec, fldPath *field.Path, minR
return allErrs
}
-func validateBehavior(behavior *autoscaling.HorizontalPodAutoscalerBehavior, fldPath *field.Path) field.ErrorList {
+func validateBehavior(behavior *autoscaling.HorizontalPodAutoscalerBehavior, fldPath *field.Path, opts HorizontalPodAutoscalerSpecValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
if behavior != nil {
-if scaleUpErrs := validateScalingRules(behavior.ScaleUp, fldPath.Child("scaleUp")); len(scaleUpErrs) > 0 {
+if scaleUpErrs := validateScalingRules(behavior.ScaleUp, fldPath.Child("scaleUp"), opts); len(scaleUpErrs) > 0 {
allErrs = append(allErrs, scaleUpErrs...)
}
-if scaleDownErrs := validateScalingRules(behavior.ScaleDown, fldPath.Child("scaleDown")); len(scaleDownErrs) > 0 {
+if scaleDownErrs := validateScalingRules(behavior.ScaleDown, fldPath.Child("scaleDown"), opts); len(scaleDownErrs) > 0 {
allErrs = append(allErrs, scaleDownErrs...)
}
}
@@ -191,7 +175,7 @@ func validateBehavior(behavior *autoscaling.HorizontalPodAutoscalerBehavior, fld
var validSelectPolicyTypes = sets.NewString(string(autoscaling.MaxPolicySelect), string(autoscaling.MinPolicySelect), string(autoscaling.DisabledPolicySelect))
var validSelectPolicyTypesList = validSelectPolicyTypes.List()
-func validateScalingRules(rules *autoscaling.HPAScalingRules, fldPath *field.Path) field.ErrorList {
+func validateScalingRules(rules *autoscaling.HPAScalingRules, fldPath *field.Path, opts HorizontalPodAutoscalerSpecValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
if rules != nil {
if rules.StabilizationWindowSeconds != nil && *rules.StabilizationWindowSeconds < 0 {
@@ -214,6 +198,9 @@ func validateScalingRules(rules *autoscaling.HPAScalingRules, fldPath *field.Pat
allErrs = append(allErrs, policyErrs...)
}
}
if rules.Tolerance != nil {
allErrs = append(allErrs, apivalidation.ValidateNonnegativeQuantity(*rules.Tolerance, fldPath.Child("tolerance"))...)
}
}
return allErrs
}
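Since validation no longer consults feature gates directly, the caller is now responsible for building the options. A minimal sketch of that wiring (an assumption about caller-side code such as the registry strategy, not part of this diff; validateHPAExample and the example package are made-up names):

package example

import (
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/apimachinery/pkg/util/validation/field"
	"k8s.io/kubernetes/pkg/apis/autoscaling"
	"k8s.io/kubernetes/pkg/apis/autoscaling/validation"
	"k8s.io/kubernetes/pkg/features"
)

// validateHPAExample shows how a caller could derive the minReplicas lower
// bound from the HPAScaleToZero gate and pass it in, instead of validation
// reading the gate itself as it did before this change.
func validateHPAExample(hpa *autoscaling.HorizontalPodAutoscaler) field.ErrorList {
	opts := validation.HorizontalPodAutoscalerSpecValidationOptions{MinReplicasLowerBound: 1}
	if utilfeature.DefaultFeatureGate.Enabled(features.HPAScaleToZero) {
		opts.MinReplicasLowerBound = 0
	}
	return validation.ValidateHorizontalPodAutoscaler(hpa, opts)
}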

View File

@@ -22,12 +22,19 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-utilfeature "k8s.io/apiserver/pkg/util/feature"
-featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/kubernetes/pkg/apis/autoscaling"
api "k8s.io/kubernetes/pkg/apis/core"
-"k8s.io/kubernetes/pkg/features"
utilpointer "k8s.io/utils/pointer"
"k8s.io/utils/ptr"
)
var (
hpaSpecValidationOpts = HorizontalPodAutoscalerSpecValidationOptions{
MinReplicasLowerBound: 1,
}
hpaScaleToZeroSpecValidationOpts = HorizontalPodAutoscalerSpecValidationOptions{
MinReplicasLowerBound: 0,
}
)
func TestValidateScale(t *testing.T) {
@@ -162,7 +169,7 @@ func TestValidateBehavior(t *testing.T) {
}}
for _, behavior := range successCases {
hpa := prepareHPAWithBehavior(behavior)
-if errs := ValidateHorizontalPodAutoscaler(&hpa); len(errs) != 0 {
+if errs := ValidateHorizontalPodAutoscaler(&hpa, hpaSpecValidationOpts); len(errs) != 0 {
t.Errorf("expected success: %v", errs)
}
}
@@ -356,7 +363,7 @@
}}
for _, c := range errorCases {
hpa := prepareHPAWithBehavior(c.behavior)
-if errs := ValidateHorizontalPodAutoscaler(&hpa); len(errs) == 0 {
+if errs := ValidateHorizontalPodAutoscaler(&hpa, hpaSpecValidationOpts); len(errs) == 0 {
t.Errorf("expected failure for %s", c.msg)
} else if !strings.Contains(errs[0].Error(), c.msg) {
t.Errorf("unexpected error: %v, expected: %s", errs[0], c.msg)
@@ -367,8 +374,9 @@
func prepareHPAWithBehavior(b autoscaling.HorizontalPodAutoscalerBehavior) autoscaling.HorizontalPodAutoscaler {
return autoscaling.HorizontalPodAutoscaler{
ObjectMeta: metav1.ObjectMeta{
Name: "myautoscaler",
Namespace: metav1.NamespaceDefault,
ResourceVersion: "1",
},
Spec: autoscaling.HorizontalPodAutoscalerSpec{
ScaleTargetRef: autoscaling.CrossVersionObjectReference{
@@ -613,7 +621,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) {
},
}}
for _, successCase := range successCases {
-if errs := ValidateHorizontalPodAutoscaler(&successCase); len(errs) != 0 {
+if errs := ValidateHorizontalPodAutoscaler(&successCase, hpaSpecValidationOpts); len(errs) != 0 {
t.Errorf("expected success: %v", errs)
}
}
@@ -1417,7 +1425,7 @@
}
for _, c := range errorCases {
-errs := ValidateHorizontalPodAutoscaler(&c.horizontalPodAutoscaler)
+errs := ValidateHorizontalPodAutoscaler(&c.horizontalPodAutoscaler, hpaSpecValidationOpts)
if len(errs) == 0 {
t.Errorf("expected failure for %q", c.msg)
} else if !strings.Contains(errs[0].Error(), c.msg) {
@@ -1488,7 +1496,7 @@
MinReplicas: utilpointer.Int32(1),
MaxReplicas: 5, Metrics: []autoscaling.MetricSpec{spec},
},
-})
+}, hpaSpecValidationOpts)
expectedMsg := "must populate information for the given metric source"
@@ -1568,26 +1576,20 @@ func prepareMinReplicasCases(t *testing.T, minReplicas int32) []autoscaling.Hori
}
func TestValidateHorizontalPodAutoscalerScaleToZeroEnabled(t *testing.T) {
-// Enable HPAScaleToZero feature gate.
-featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.HPAScaleToZero, true)
zeroMinReplicasCases := prepareMinReplicasCases(t, 0)
for _, successCase := range zeroMinReplicasCases {
-if errs := ValidateHorizontalPodAutoscaler(&successCase); len(errs) != 0 {
+if errs := ValidateHorizontalPodAutoscaler(&successCase, hpaScaleToZeroSpecValidationOpts); len(errs) != 0 {
t.Errorf("expected success: %v", errs)
}
}
}
func TestValidateHorizontalPodAutoscalerScaleToZeroDisabled(t *testing.T) {
-// Disable HPAScaleToZero feature gate.
-featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.HPAScaleToZero, false)
zeroMinReplicasCases := prepareMinReplicasCases(t, 0)
errorMsg := "must be greater than or equal to 1"
for _, errorCase := range zeroMinReplicasCases {
-errs := ValidateHorizontalPodAutoscaler(&errorCase)
+errs := ValidateHorizontalPodAutoscaler(&errorCase, hpaSpecValidationOpts)
if len(errs) == 0 {
t.Errorf("expected failure for %q", errorMsg)
} else if !strings.Contains(errs[0].Error(), errorMsg) {
@@ -1599,43 +1601,37 @@ func TestValidateHorizontalPodAutoscalerScaleToZeroDisabled(t *testing.T) {
for _, successCase := range nonZeroMinReplicasCases {
successCase.Spec.MinReplicas = utilpointer.Int32(1)
-if errs := ValidateHorizontalPodAutoscaler(&successCase); len(errs) != 0 {
+if errs := ValidateHorizontalPodAutoscaler(&successCase, hpaSpecValidationOpts); len(errs) != 0 {
t.Errorf("expected success: %v", errs)
}
}
}
func TestValidateHorizontalPodAutoscalerUpdateScaleToZeroEnabled(t *testing.T) {
-// Enable HPAScaleToZero feature gate.
-featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.HPAScaleToZero, true)
zeroMinReplicasCases := prepareMinReplicasCases(t, 0)
nonZeroMinReplicasCases := prepareMinReplicasCases(t, 1)
for i, zeroCase := range zeroMinReplicasCases {
nonZeroCase := nonZeroMinReplicasCases[i]
-if errs := ValidateHorizontalPodAutoscalerUpdate(&nonZeroCase, &zeroCase); len(errs) != 0 {
+if errs := ValidateHorizontalPodAutoscalerUpdate(&nonZeroCase, &zeroCase, hpaScaleToZeroSpecValidationOpts); len(errs) != 0 {
t.Errorf("expected success: %v", errs)
}
-if errs := ValidateHorizontalPodAutoscalerUpdate(&zeroCase, &nonZeroCase); len(errs) != 0 {
+if errs := ValidateHorizontalPodAutoscalerUpdate(&zeroCase, &nonZeroCase, hpaScaleToZeroSpecValidationOpts); len(errs) != 0 {
t.Errorf("expected success: %v", errs)
}
}
}
func TestValidateHorizontalPodAutoscalerScaleToZeroUpdateDisabled(t *testing.T) {
-// Disable HPAScaleToZero feature gate.
-featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.HPAScaleToZero, false)
zeroMinReplicasCases := prepareMinReplicasCases(t, 0)
nonZeroMinReplicasCases := prepareMinReplicasCases(t, 1)
errorMsg := "must be greater than or equal to 1"
for i, zeroCase := range zeroMinReplicasCases {
nonZeroCase := nonZeroMinReplicasCases[i]
-errs := ValidateHorizontalPodAutoscalerUpdate(&zeroCase, &nonZeroCase)
+errs := ValidateHorizontalPodAutoscalerUpdate(&zeroCase, &nonZeroCase, hpaSpecValidationOpts)
if len(errs) == 0 {
t.Errorf("expected failure for %q", errorMsg)
@@ -1643,12 +1639,188 @@ func TestValidateHorizontalPodAutoscalerScaleToZeroUpdateDisabled(t *testing.T)
t.Errorf("unexpected error: %q, expected: %q", errs[0], errorMsg)
}
-if errs := ValidateHorizontalPodAutoscalerUpdate(&zeroCase, &zeroCase); len(errs) != 0 {
-t.Errorf("expected success: %v", errs)
-}
-if errs := ValidateHorizontalPodAutoscalerUpdate(&nonZeroCase, &zeroCase); len(errs) != 0 {
+if errs := ValidateHorizontalPodAutoscalerUpdate(&nonZeroCase, &zeroCase, hpaSpecValidationOpts); len(errs) != 0 {
t.Errorf("expected success: %v", errs)
}
}
}
func TestValidateHorizontalPodAutoscalerConfigurableToleranceEnabled(t *testing.T) {
policiesList := []autoscaling.HPAScalingPolicy{{
Type: autoscaling.PodsScalingPolicy,
Value: 1,
PeriodSeconds: 1800,
}}
successCases := []autoscaling.HPAScalingRules{
{
Policies: policiesList,
Tolerance: ptr.To(resource.MustParse("0.1")),
},
{
Policies: policiesList,
Tolerance: ptr.To(resource.MustParse("10")),
},
{
Policies: policiesList,
Tolerance: ptr.To(resource.MustParse("0")),
},
{
Policies: policiesList,
Tolerance: resource.NewMilliQuantity(100, resource.DecimalSI),
},
{
Policies: policiesList,
Tolerance: resource.NewScaledQuantity(1, resource.Milli),
},
{
Policies: policiesList,
},
}
for _, c := range successCases {
b := autoscaling.HorizontalPodAutoscalerBehavior{
ScaleDown: &c,
}
hpa := prepareHPAWithBehavior(b)
if errs := ValidateHorizontalPodAutoscaler(&hpa, hpaScaleToZeroSpecValidationOpts); len(errs) != 0 {
t.Errorf("expected success: %v", errs)
}
}
failureCases := []struct {
rule autoscaling.HPAScalingRules
msg string
}{
{
rule: autoscaling.HPAScalingRules{},
msg: "at least one Policy",
},
{
rule: autoscaling.HPAScalingRules{
Policies: policiesList,
Tolerance: ptr.To(resource.MustParse("-0.001")),
},
msg: "greater than or equal to 0",
},
{
rule: autoscaling.HPAScalingRules{
Policies: policiesList,
Tolerance: resource.NewMilliQuantity(-10, resource.DecimalSI),
},
msg: "greater than or equal to 0",
},
{
rule: autoscaling.HPAScalingRules{
StabilizationWindowSeconds: utilpointer.Int32(60),
},
msg: "at least one Policy",
},
{
rule: autoscaling.HPAScalingRules{
Tolerance: resource.NewMilliQuantity(1, resource.DecimalSI),
StabilizationWindowSeconds: utilpointer.Int32(60),
},
msg: "at least one Policy",
},
}
for _, c := range failureCases {
b := autoscaling.HorizontalPodAutoscalerBehavior{
ScaleUp: &c.rule,
}
hpa := prepareHPAWithBehavior(b)
errs := ValidateHorizontalPodAutoscaler(&hpa, hpaScaleToZeroSpecValidationOpts)
if len(errs) != 1 {
t.Fatalf("expected exactly one error, got: %v", errs)
}
if !strings.Contains(errs[0].Error(), c.msg) {
t.Errorf("unexpected error: %q, expected: %q", errs[0], c.msg)
}
}
}
func TestValidateHorizontalPodAutoscalerConfigurableToleranceDisabled(t *testing.T) {
maxPolicy := autoscaling.MaxPolicySelect
policiesList := []autoscaling.HPAScalingPolicy{{
Type: autoscaling.PodsScalingPolicy,
Value: 1,
PeriodSeconds: 1800,
}}
successCases := []autoscaling.HPAScalingRules{
{
Policies: policiesList,
},
{
SelectPolicy: &maxPolicy,
Policies: policiesList,
},
{
StabilizationWindowSeconds: utilpointer.Int32(60),
Policies: policiesList,
},
}
for _, c := range successCases {
b := autoscaling.HorizontalPodAutoscalerBehavior{
ScaleDown: &c,
}
hpa := prepareHPAWithBehavior(b)
if errs := ValidateHorizontalPodAutoscaler(&hpa, hpaSpecValidationOpts); len(errs) != 0 {
t.Errorf("expected success: %v", errs)
}
}
failureCases := []struct {
rule autoscaling.HPAScalingRules
msg string
}{
{
rule: autoscaling.HPAScalingRules{},
msg: "at least one Policy",
},
{
rule: autoscaling.HPAScalingRules{
StabilizationWindowSeconds: utilpointer.Int32(60),
},
msg: "at least one Policy",
},
}
for _, c := range failureCases {
b := autoscaling.HorizontalPodAutoscalerBehavior{
ScaleUp: &c.rule,
}
hpa := prepareHPAWithBehavior(b)
errs := ValidateHorizontalPodAutoscaler(&hpa, hpaSpecValidationOpts)
if len(errs) != 1 {
t.Fatalf("expected exactly one error, got: %v", errs)
}
if !strings.Contains(errs[0].Error(), c.msg) {
t.Errorf("unexpected error: %q, expected: %q", errs[0], c.msg)
}
}
}
func TestValidateHorizontalPodAutoscalerUpdateConfigurableToleranceEnabled(t *testing.T) {
policiesList := []autoscaling.HPAScalingPolicy{{
Type: autoscaling.PodsScalingPolicy,
Value: 1,
PeriodSeconds: 1800,
}}
withToleranceHPA := prepareHPAWithBehavior(autoscaling.HorizontalPodAutoscalerBehavior{
ScaleUp: &autoscaling.HPAScalingRules{
Policies: policiesList,
Tolerance: resource.NewMilliQuantity(10, resource.DecimalSI),
}})
withoutToleranceHPA := prepareHPAWithBehavior(autoscaling.HorizontalPodAutoscalerBehavior{
ScaleUp: &autoscaling.HPAScalingRules{
Policies: policiesList,
}})
if errs := ValidateHorizontalPodAutoscalerUpdate(&withToleranceHPA, &withoutToleranceHPA, hpaScaleToZeroSpecValidationOpts); len(errs) != 0 {
t.Errorf("expected success: %v", errs)
}
if errs := ValidateHorizontalPodAutoscalerUpdate(&withoutToleranceHPA, &withToleranceHPA, hpaScaleToZeroSpecValidationOpts); len(errs) != 0 {
t.Errorf("expected success: %v", errs)
}
}

View File

@@ -146,6 +146,11 @@ func (in *HPAScalingRules) DeepCopyInto(out *HPAScalingRules) {
*out = make([]HPAScalingPolicy, len(*in))
copy(*out, *in)
}
+if in.Tolerance != nil {
+in, out := &in.Tolerance, &out.Tolerance
+x := (*in).DeepCopy()
+*out = &x
+}
return
}

View File

@@ -37,6 +37,7 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
+utilfeature "k8s.io/apiserver/pkg/util/feature"
autoscalinginformers "k8s.io/client-go/informers/autoscaling/v2"
coreinformers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes/scheme"
@@ -53,6 +54,7 @@ import (
metricsclient "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
"k8s.io/kubernetes/pkg/controller/podautoscaler/monitor"
"k8s.io/kubernetes/pkg/controller/util/selectors"
+"k8s.io/kubernetes/pkg/features"
)
var (
@@ -86,6 +88,7 @@ type HorizontalController struct {
hpaNamespacer autoscalingclient.HorizontalPodAutoscalersGetter
mapper apimeta.RESTMapper
+tolerance float64
replicaCalc *ReplicaCalculator
eventRecorder record.EventRecorder
@@ -146,6 +149,7 @@ func NewHorizontalController(
eventRecorder: recorder,
scaleNamespacer: scaleNamespacer,
hpaNamespacer: hpaNamespacer,
+tolerance: tolerance,
downscaleStabilisationWindow: downscaleStabilisationWindow,
monitor: monitor.New(),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
@@ -181,7 +185,6 @@ func NewHorizontalController(
replicaCalc := NewReplicaCalculator(
metricsClient,
hpaController.podLister,
-tolerance,
cpuInitializationPeriod,
delayOfInitialReadinessStatus,
)
@@ -539,8 +542,9 @@ func (a *HorizontalController) computeStatusForObjectMetric(specReplicas, status
},
},
}
+tolerances := a.tolerancesForHpa(hpa)
if metricSpec.Object.Target.Type == autoscalingv2.ValueMetricType && metricSpec.Object.Target.Value != nil {
-replicaCountProposal, usageProposal, timestampProposal, err := a.replicaCalc.GetObjectMetricReplicas(specReplicas, metricSpec.Object.Target.Value.MilliValue(), metricSpec.Object.Metric.Name, hpa.Namespace, &metricSpec.Object.DescribedObject, selector, metricSelector)
+replicaCountProposal, usageProposal, timestampProposal, err := a.replicaCalc.GetObjectMetricReplicas(specReplicas, metricSpec.Object.Target.Value.MilliValue(), metricSpec.Object.Metric.Name, tolerances, hpa.Namespace, &metricSpec.Object.DescribedObject, selector, metricSelector)
if err != nil {
condition := a.getUnableComputeReplicaCountCondition(hpa, "FailedGetObjectMetric", err)
return 0, timestampProposal, "", condition, err
@@ -549,7 +553,7 @@ func (a *HorizontalController) computeStatusForObjectMetric(specReplicas, status
*status = metricStatus
return replicaCountProposal, timestampProposal, fmt.Sprintf("%s metric %s", metricSpec.Object.DescribedObject.Kind, metricSpec.Object.Metric.Name), autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
} else if metricSpec.Object.Target.Type == autoscalingv2.AverageValueMetricType && metricSpec.Object.Target.AverageValue != nil {
-replicaCountProposal, usageProposal, timestampProposal, err := a.replicaCalc.GetObjectPerPodMetricReplicas(statusReplicas, metricSpec.Object.Target.AverageValue.MilliValue(), metricSpec.Object.Metric.Name, hpa.Namespace, &metricSpec.Object.DescribedObject, metricSelector)
+replicaCountProposal, usageProposal, timestampProposal, err := a.replicaCalc.GetObjectPerPodMetricReplicas(statusReplicas, metricSpec.Object.Target.AverageValue.MilliValue(), metricSpec.Object.Metric.Name, tolerances, hpa.Namespace, &metricSpec.Object.DescribedObject, metricSelector)
if err != nil {
condition := a.getUnableComputeReplicaCountCondition(hpa, "FailedGetObjectMetric", err)
return 0, time.Time{}, "", condition, fmt.Errorf("failed to get %s object metric: %v", metricSpec.Object.Metric.Name, err)
@@ -566,7 +570,8 @@ func (a *HorizontalController) computeStatusForObjectMetric(specReplicas, status
// computeStatusForPodsMetric computes the desired number of replicas for the specified metric of type PodsMetricSourceType.
func (a *HorizontalController) computeStatusForPodsMetric(currentReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler, selector labels.Selector, status *autoscalingv2.MetricStatus, metricSelector labels.Selector) (replicaCountProposal int32, timestampProposal time.Time, metricNameProposal string, condition autoscalingv2.HorizontalPodAutoscalerCondition, err error) {
-replicaCountProposal, usageProposal, timestampProposal, err := a.replicaCalc.GetMetricReplicas(currentReplicas, metricSpec.Pods.Target.AverageValue.MilliValue(), metricSpec.Pods.Metric.Name, hpa.Namespace, selector, metricSelector)
+tolerances := a.tolerancesForHpa(hpa)
+replicaCountProposal, usageProposal, timestampProposal, err := a.replicaCalc.GetMetricReplicas(currentReplicas, metricSpec.Pods.Target.AverageValue.MilliValue(), metricSpec.Pods.Metric.Name, tolerances, hpa.Namespace, selector, metricSelector)
if err != nil {
condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetPodsMetric", err)
return 0, timestampProposal, "", condition, err
@@ -588,12 +593,14 @@ func (a *HorizontalController) computeStatusForPodsMetric(currentReplicas int32,
}
func (a *HorizontalController) computeStatusForResourceMetricGeneric(ctx context.Context, currentReplicas int32, target autoscalingv2.MetricTarget,
-resourceName v1.ResourceName, namespace string, container string, selector labels.Selector, sourceType autoscalingv2.MetricSourceType) (replicaCountProposal int32,
+resourceName v1.ResourceName, hpa *autoscalingv2.HorizontalPodAutoscaler, container string, selector labels.Selector, sourceType autoscalingv2.MetricSourceType) (replicaCountProposal int32,
metricStatus *autoscalingv2.MetricValueStatus, timestampProposal time.Time, metricNameProposal string,
condition autoscalingv2.HorizontalPodAutoscalerCondition, err error) {
+namespace := hpa.Namespace
+tolerances := a.tolerancesForHpa(hpa)
if target.AverageValue != nil {
var rawProposal int64
-replicaCountProposal, rawProposal, timestampProposal, err := a.replicaCalc.GetRawResourceReplicas(ctx, currentReplicas, target.AverageValue.MilliValue(), resourceName, namespace, selector, container)
+replicaCountProposal, rawProposal, timestampProposal, err := a.replicaCalc.GetRawResourceReplicas(ctx, currentReplicas, target.AverageValue.MilliValue(), resourceName, tolerances, namespace, selector, container)
if err != nil {
return 0, nil, time.Time{}, "", condition, fmt.Errorf("failed to get %s usage: %v", resourceName, err)
}
@@ -610,7 +617,7 @@ func (a *HorizontalController) computeStatusForResourceMetricGeneric(ctx context
}
targetUtilization := *target.AverageUtilization
-replicaCountProposal, percentageProposal, rawProposal, timestampProposal, err := a.replicaCalc.GetResourceReplicas(ctx, currentReplicas, targetUtilization, resourceName, namespace, selector, container)
+replicaCountProposal, percentageProposal, rawProposal, timestampProposal, err := a.replicaCalc.GetResourceReplicas(ctx, currentReplicas, targetUtilization, resourceName, tolerances, namespace, selector, container)
if err != nil {
return 0, nil, time.Time{}, "", condition, fmt.Errorf("failed to get %s utilization: %v", resourceName, err)
}
@@ -630,7 +637,7 @@ func (a *HorizontalController) computeStatusForResourceMetricGeneric(ctx context
func (a *HorizontalController) computeStatusForResourceMetric(ctx context.Context, currentReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler,
selector labels.Selector, status *autoscalingv2.MetricStatus) (replicaCountProposal int32, timestampProposal time.Time,
metricNameProposal string, condition autoscalingv2.HorizontalPodAutoscalerCondition, err error) {
-replicaCountProposal, metricValueStatus, timestampProposal, metricNameProposal, condition, err := a.computeStatusForResourceMetricGeneric(ctx, currentReplicas, metricSpec.Resource.Target, metricSpec.Resource.Name, hpa.Namespace, "", selector, autoscalingv2.ResourceMetricSourceType)
+replicaCountProposal, metricValueStatus, timestampProposal, metricNameProposal, condition, err := a.computeStatusForResourceMetricGeneric(ctx, currentReplicas, metricSpec.Resource.Target, metricSpec.Resource.Name, hpa, "", selector, autoscalingv2.ResourceMetricSourceType)
if err != nil {
condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetResourceMetric", err)
return replicaCountProposal, timestampProposal, metricNameProposal, condition, err
@@ -649,7 +656,7 @@ func (a *HorizontalController) computeStatusForResourceMetric(ctx context.Contex
func (a *HorizontalController) computeStatusForContainerResourceMetric(ctx context.Context, currentReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler,
selector labels.Selector, status *autoscalingv2.MetricStatus) (replicaCountProposal int32, timestampProposal time.Time,
metricNameProposal string, condition autoscalingv2.HorizontalPodAutoscalerCondition, err error) {
-replicaCountProposal, metricValueStatus, timestampProposal, metricNameProposal, condition, err := a.computeStatusForResourceMetricGeneric(ctx, currentReplicas, metricSpec.ContainerResource.Target, metricSpec.ContainerResource.Name, hpa.Namespace, metricSpec.ContainerResource.Container, selector, autoscalingv2.ContainerResourceMetricSourceType)
+replicaCountProposal, metricValueStatus, timestampProposal, metricNameProposal, condition, err := a.computeStatusForResourceMetricGeneric(ctx, currentReplicas, metricSpec.ContainerResource.Target, metricSpec.ContainerResource.Name, hpa, metricSpec.ContainerResource.Container, selector, autoscalingv2.ContainerResourceMetricSourceType)
if err != nil {
condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetContainerResourceMetric", err)
return replicaCountProposal, timestampProposal, metricNameProposal, condition, err
@@ -667,8 +674,9 @@ func (a *HorizontalController) computeStatusForContainerResourceMetric(ctx conte
// computeStatusForExternalMetric computes the desired number of replicas for the specified metric of type ExternalMetricSourceType.
func (a *HorizontalController) computeStatusForExternalMetric(specReplicas, statusReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler, selector labels.Selector, status *autoscalingv2.MetricStatus) (replicaCountProposal int32, timestampProposal time.Time, metricNameProposal string, condition autoscalingv2.HorizontalPodAutoscalerCondition, err error) {
+tolerances := a.tolerancesForHpa(hpa)
if metricSpec.External.Target.AverageValue != nil {
-replicaCountProposal, usageProposal, timestampProposal, err := a.replicaCalc.GetExternalPerPodMetricReplicas(statusReplicas, metricSpec.External.Target.AverageValue.MilliValue(), metricSpec.External.Metric.Name, hpa.Namespace, metricSpec.External.Metric.Selector)
+replicaCountProposal, usageProposal, timestampProposal, err := a.replicaCalc.GetExternalPerPodMetricReplicas(statusReplicas, metricSpec.External.Target.AverageValue.MilliValue(), metricSpec.External.Metric.Name, tolerances, hpa.Namespace, metricSpec.External.Metric.Selector)
if err != nil {
condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetExternalMetric", err)
return 0, time.Time{}, "", condition, fmt.Errorf("failed to get %s external metric: %v", metricSpec.External.Metric.Name, err)
@@ -688,7 +696,7 @@ func (a *HorizontalController) computeStatusForExternalMetric(specReplicas, stat
return replicaCountProposal, timestampProposal, fmt.Sprintf("external metric %s(%+v)", metricSpec.External.Metric.Name, metricSpec.External.Metric.Selector), autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
}
if metricSpec.External.Target.Value != nil {
-replicaCountProposal, usageProposal, timestampProposal, err := a.replicaCalc.GetExternalMetricReplicas(specReplicas, metricSpec.External.Target.Value.MilliValue(), metricSpec.External.Metric.Name, hpa.Namespace, metricSpec.External.Metric.Selector, selector)
+replicaCountProposal, usageProposal, timestampProposal, err := a.replicaCalc.GetExternalMetricReplicas(specReplicas, metricSpec.External.Target.Value.MilliValue(), metricSpec.External.Metric.Name, tolerances, hpa.Namespace, metricSpec.External.Metric.Selector, selector)
if err != nil {
condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetExternalMetric", err)
return 0, time.Time{}, "", condition, fmt.Errorf("failed to get external metric %s: %v", metricSpec.External.Metric.Name, err)
@@ -835,6 +843,7 @@ func (a *HorizontalController) reconcileAutoscaler(ctx context.Context, hpaShare
logger.V(4).Info("Proposing desired replicas",
"desiredReplicas", metricDesiredReplicas,
"metric", metricName,
+"tolerances", a.tolerancesForHpa(hpa),
"timestamp", metricTimestamp,
"scaleTarget", reference)
@@ -1384,6 +1393,25 @@ func (a *HorizontalController) updateStatus(ctx context.Context, hpa *autoscalin
return nil
}
// tolerancesForHpa returns the metrics usage ratio tolerances for a given HPA.
// It ignores configurable tolerances set in the HPA spec.behavior field if the
// HPAConfigurableTolerance feature gate is disabled.
func (a *HorizontalController) tolerancesForHpa(hpa *autoscalingv2.HorizontalPodAutoscaler) Tolerances {
t := Tolerances{a.tolerance, a.tolerance}
behavior := hpa.Spec.Behavior
allowConfigurableTolerances := utilfeature.DefaultFeatureGate.Enabled(features.HPAConfigurableTolerance)
if behavior == nil || !allowConfigurableTolerances {
return t
}
if behavior.ScaleDown != nil && behavior.ScaleDown.Tolerance != nil {
t.scaleDown = behavior.ScaleDown.Tolerance.AsApproximateFloat64()
}
if behavior.ScaleUp != nil && behavior.ScaleUp.Tolerance != nil {
t.scaleUp = behavior.ScaleUp.Tolerance.AsApproximateFloat64()
}
return t
}
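The resolution above reads as: start from the cluster-wide default for both directions, then let spec.behavior override each direction independently, and only when the HPAConfigurableTolerance gate is on. The following standalone Go sketch mirrors that selection for illustration only; it is not part of the patch, and the helper name resolvedTolerances and the 0.1 default are assumptions.

package main

import (
	"fmt"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
	"k8s.io/apimachinery/pkg/api/resource"
)

// resolvedTolerances mirrors the selection logic of tolerancesForHpa: the
// cluster-wide default applies to both directions unless a per-direction
// tolerance is set in the behavior (the real controller additionally checks
// the feature gate before honoring the per-HPA values).
func resolvedTolerances(defaultTolerance float64, b *autoscalingv2.HorizontalPodAutoscalerBehavior) (scaleDown, scaleUp float64) {
	scaleDown, scaleUp = defaultTolerance, defaultTolerance
	if b == nil {
		return scaleDown, scaleUp
	}
	if b.ScaleDown != nil && b.ScaleDown.Tolerance != nil {
		scaleDown = b.ScaleDown.Tolerance.AsApproximateFloat64()
	}
	if b.ScaleUp != nil && b.ScaleUp.Tolerance != nil {
		scaleUp = b.ScaleUp.Tolerance.AsApproximateFloat64()
	}
	return scaleDown, scaleUp
}

func main() {
	// Only the scale-up direction is overridden; scale-down keeps the default.
	behavior := &autoscalingv2.HorizontalPodAutoscalerBehavior{
		ScaleUp: &autoscalingv2.HPAScalingRules{
			Tolerance: resource.NewMilliQuantity(10, resource.DecimalSI), // 10m == 1%
		},
	}
	scaleDown, scaleUp := resolvedTolerances(0.1, behavior)
	fmt.Printf("scaleDown=%.2f scaleUp=%.2f\n", scaleDown, scaleUp) // scaleDown=0.10 scaleUp=0.01
}

With only a scale-up tolerance set, scale-down keeps the cluster-wide default, which matches how a partially populated behavior is treated above.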
// setCondition sets the specific condition type on the given HPA to the specified value with the given reason
// and message. The message and args are treated like a format string. The condition will be added if it is
// not present.

View File

@@ -37,16 +37,19 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
+utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
scalefake "k8s.io/client-go/scale/fake"
core "k8s.io/client-go/testing"
+featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/kubernetes/pkg/api/legacyscheme"
autoscalingapiv2 "k8s.io/kubernetes/pkg/apis/autoscaling/v2"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
"k8s.io/kubernetes/pkg/controller/podautoscaler/monitor"
"k8s.io/kubernetes/pkg/controller/util/selectors"
+"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/test/utils/ktesting"
cmapi "k8s.io/metrics/pkg/apis/custom_metrics/v1beta2"
emapi "k8s.io/metrics/pkg/apis/external_metrics/v1beta1"
@@ -2216,6 +2219,107 @@ func TestTolerance(t *testing.T) {
tc.runTest(t)
}
func TestConfigurableTolerance(t *testing.T) {
onePercentQuantity := resource.MustParse("0.01")
ninetyPercentQuantity := resource.MustParse("0.9")
testCases := []struct {
name string
configurableToleranceGate bool
replicas int32
scaleUpRules *autoscalingv2.HPAScalingRules
scaleDownRules *autoscalingv2.HPAScalingRules
reportedLevels []uint64
reportedCPURequests []resource.Quantity
expectedDesiredReplicas int32
expectedConditionReason string
expectedActionLabel monitor.ActionLabel
}{
{
name: "Scaling up because of a 1% configurable tolerance",
configurableToleranceGate: true,
replicas: 3,
scaleUpRules: &autoscalingv2.HPAScalingRules{
Tolerance: &onePercentQuantity,
},
reportedLevels: []uint64{1010, 1030, 1020},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
expectedDesiredReplicas: 4,
expectedConditionReason: "SucceededRescale",
expectedActionLabel: monitor.ActionLabelScaleUp,
},
{
name: "No scale-down because of a 90% configurable tolerance",
configurableToleranceGate: true,
replicas: 3,
scaleDownRules: &autoscalingv2.HPAScalingRules{
Tolerance: &ninetyPercentQuantity,
},
reportedLevels: []uint64{300, 300, 300},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
expectedDesiredReplicas: 3,
expectedConditionReason: "ReadyForNewScale",
expectedActionLabel: monitor.ActionLabelNone,
},
{
name: "No scaling because of the large default tolerance",
configurableToleranceGate: true,
replicas: 3,
reportedLevels: []uint64{1010, 1030, 1020},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
expectedDesiredReplicas: 3,
expectedConditionReason: "ReadyForNewScale",
expectedActionLabel: monitor.ActionLabelNone,
},
{
name: "No scaling because the configurable tolerance is ignored as the feature gate is disabled",
configurableToleranceGate: false,
replicas: 3,
scaleUpRules: &autoscalingv2.HPAScalingRules{
Tolerance: &onePercentQuantity,
},
reportedLevels: []uint64{1010, 1030, 1020},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
expectedDesiredReplicas: 3,
expectedConditionReason: "ReadyForNewScale",
expectedActionLabel: monitor.ActionLabelNone,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.HPAConfigurableTolerance, tc.configurableToleranceGate)
tc := testCase{
minReplicas: 1,
maxReplicas: 5,
specReplicas: tc.replicas,
statusReplicas: tc.replicas,
scaleDownRules: tc.scaleDownRules,
scaleUpRules: tc.scaleUpRules,
expectedDesiredReplicas: tc.expectedDesiredReplicas,
CPUTarget: 100,
reportedLevels: tc.reportedLevels,
reportedCPURequests: tc.reportedCPURequests,
useMetricsAPI: true,
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
Type: autoscalingv2.AbleToScale,
Status: v1.ConditionTrue,
Reason: tc.expectedConditionReason,
}),
expectedReportedReconciliationActionLabel: tc.expectedActionLabel,
expectedReportedReconciliationErrorLabel: monitor.ErrorLabelNone,
expectedReportedMetricComputationActionLabels: map[autoscalingv2.MetricSourceType]monitor.ActionLabel{
autoscalingv2.ResourceMetricSourceType: tc.expectedActionLabel,
},
expectedReportedMetricComputationErrorLabels: map[autoscalingv2.MetricSourceType]monitor.ErrorLabel{
autoscalingv2.ResourceMetricSourceType: monitor.ErrorLabelNone,
},
}
tc.runTest(t)
})
}
}
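The expectations in the first two cases can be checked by hand: utilization is the summed reported level divided by the summed CPU request, compared against the 100% target. The sketch below reproduces that arithmetic for illustration only; it ignores pod readiness and stabilization, which these cases do not exercise.

package main

import (
	"fmt"
	"math"
)

func main() {
	// "Scaling up because of a 1% configurable tolerance":
	// usage ratio = sum(reported levels) / sum(CPU requests) against a 100% target.
	usageRatio := (1010.0 + 1030.0 + 1020.0) / (900.0 + 1000.0 + 1100.0) // 1.02
	scaleUpTolerance := 0.01
	if usageRatio > 1.0+scaleUpTolerance {
		fmt.Println("desired replicas:", int(math.Ceil(usageRatio*3))) // ceil(3.06) = 4
	}

	// "No scale-down because of a 90% configurable tolerance":
	usageRatio = (300.0 + 300.0 + 300.0) / (1000.0 + 1000.0 + 1000.0) // 0.30
	scaleDownTolerance := 0.9
	fmt.Println("within tolerance:", 1.0-scaleDownTolerance <= usageRatio) // true, so replicas stay at 3
}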
func TestToleranceCM(t *testing.T) {
averageValue := resource.MustParse("20.0")
tc := testCase{

View File

@@ -40,21 +40,33 @@ const (
defaultTestingDelayOfInitialReadinessStatus = 10 * time.Second
)
// Tolerances contains metric usage ratio scale-up and scale-down tolerances.
type Tolerances struct {
scaleDown float64
scaleUp float64
}
func (t Tolerances) String() string {
return fmt.Sprintf("[down:%.1f%%, up:%.1f%%]", t.scaleDown*100., t.scaleUp*100.)
}
func (t Tolerances) isWithin(usageRatio float64) bool {
return (1.0-t.scaleDown) <= usageRatio && usageRatio <= (1.0+t.scaleUp)
}
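Read literally, isWithin leaves the replica count untouched while the usage ratio stays inside the closed interval [1-scaleDown, 1+scaleUp]. A tiny standalone illustration follows, with assumed tolerances of 5% down and 1% up (values chosen for the example, not taken from the patch).

package main

import "fmt"

// within is the same predicate as isWithin above, inlined so the example is
// self-contained.
func within(ratio, scaleDown, scaleUp float64) bool {
	return 1.0-scaleDown <= ratio && ratio <= 1.0+scaleUp
}

func main() {
	scaleDown, scaleUp := 0.05, 0.01
	for _, ratio := range []float64{0.94, 0.96, 1.005, 1.02} {
		// 0.94 and 1.02 fall outside the band and would trigger a resize;
		// 0.96 and 1.005 are absorbed by the tolerance.
		fmt.Printf("ratio=%.3f within=%v\n", ratio, within(ratio, scaleDown, scaleUp))
	}
}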
// ReplicaCalculator bundles all needed information to calculate the target amount of replicas
type ReplicaCalculator struct {
metricsClient metricsclient.MetricsClient
podLister corelisters.PodLister
-tolerance float64
cpuInitializationPeriod time.Duration
delayOfInitialReadinessStatus time.Duration
}
// NewReplicaCalculator creates a new ReplicaCalculator and passes all necessary information to the new instance
-func NewReplicaCalculator(metricsClient metricsclient.MetricsClient, podLister corelisters.PodLister, tolerance float64, cpuInitializationPeriod, delayOfInitialReadinessStatus time.Duration) *ReplicaCalculator {
+func NewReplicaCalculator(metricsClient metricsclient.MetricsClient, podLister corelisters.PodLister, cpuInitializationPeriod, delayOfInitialReadinessStatus time.Duration) *ReplicaCalculator {
return &ReplicaCalculator{
metricsClient: metricsClient,
podLister: podLister,
-tolerance: tolerance,
cpuInitializationPeriod: cpuInitializationPeriod,
delayOfInitialReadinessStatus: delayOfInitialReadinessStatus,
}
@@ -62,7 +74,7 @@ func NewReplicaCalculator(metricsClient metricsclient.MetricsClient, podLister c
// GetResourceReplicas calculates the desired replica count based on a target resource utilization percentage
// of the given resource for pods matching the given selector in the given namespace, and the current replica count
-func (c *ReplicaCalculator) GetResourceReplicas(ctx context.Context, currentReplicas int32, targetUtilization int32, resource v1.ResourceName, namespace string, selector labels.Selector, container string) (replicaCount int32, utilization int32, rawUtilization int64, timestamp time.Time, err error) {
+func (c *ReplicaCalculator) GetResourceReplicas(ctx context.Context, currentReplicas int32, targetUtilization int32, resource v1.ResourceName, tolerances Tolerances, namespace string, selector labels.Selector, container string) (replicaCount int32, utilization int32, rawUtilization int64, timestamp time.Time, err error) {
metrics, timestamp, err := c.metricsClient.GetResourceMetric(ctx, resource, namespace, selector, container)
if err != nil {
return 0, 0, 0, time.Time{}, fmt.Errorf("unable to get metrics for resource %s: %v", resource, err)
@@ -94,7 +106,7 @@ func (c *ReplicaCalculator) GetResourceReplicas(ctx context.Context, currentRepl
scaleUpWithUnready := len(unreadyPods) > 0 && usageRatio > 1.0
if !scaleUpWithUnready && len(missingPods) == 0 {
-if math.Abs(1.0-usageRatio) <= c.tolerance {
+if tolerances.isWithin(usageRatio) {
// return the current replicas if the change would be too small
return currentReplicas, utilization, rawUtilization, timestamp, nil
}
@@ -132,7 +144,7 @@ func (c *ReplicaCalculator) GetResourceReplicas(ctx context.Context, currentRepl
return 0, utilization, rawUtilization, time.Time{}, err
}
-if math.Abs(1.0-newUsageRatio) <= c.tolerance || (usageRatio < 1.0 && newUsageRatio > 1.0) || (usageRatio > 1.0 && newUsageRatio < 1.0) {
+if tolerances.isWithin(newUsageRatio) || (usageRatio < 1.0 && newUsageRatio > 1.0) || (usageRatio > 1.0 && newUsageRatio < 1.0) {
// return the current replicas if the change would be too small,
// or if the new usage ratio would cause a change in scale direction
return currentReplicas, utilization, rawUtilization, timestamp, nil
@@ -151,31 +163,31 @@ func (c *ReplicaCalculator) GetResourceReplicas(ctx context.Context, currentRepl
// GetRawResourceReplicas calculates the desired replica count based on a target resource usage (as a raw milli-value)
// for pods matching the given selector in the given namespace, and the current replica count
-func (c *ReplicaCalculator) GetRawResourceReplicas(ctx context.Context, currentReplicas int32, targetUsage int64, resource v1.ResourceName, namespace string, selector labels.Selector, container string) (replicaCount int32, usage int64, timestamp time.Time, err error) {
+func (c *ReplicaCalculator) GetRawResourceReplicas(ctx context.Context, currentReplicas int32, targetUsage int64, resource v1.ResourceName, tolerances Tolerances, namespace string, selector labels.Selector, container string) (replicaCount int32, usage int64, timestamp time.Time, err error) {
metrics, timestamp, err := c.metricsClient.GetResourceMetric(ctx, resource, namespace, selector, container)
if err != nil {
return 0, 0, time.Time{}, fmt.Errorf("unable to get metrics for resource %s: %v", resource, err)
}
-replicaCount, usage, err = c.calcPlainMetricReplicas(metrics, currentReplicas, targetUsage, namespace, selector, resource)
+replicaCount, usage, err = c.calcPlainMetricReplicas(metrics, currentReplicas, targetUsage, tolerances, namespace, selector, resource)
return replicaCount, usage, timestamp, err
}
// GetMetricReplicas calculates the desired replica count based on a target metric usage
// (as a milli-value) for pods matching the given selector in the given namespace, and the
// current replica count
-func (c *ReplicaCalculator) GetMetricReplicas(currentReplicas int32, targetUsage int64, metricName string, namespace string, selector labels.Selector, metricSelector labels.Selector) (replicaCount int32, usage int64, timestamp time.Time, err error) {
+func (c *ReplicaCalculator) GetMetricReplicas(currentReplicas int32, targetUsage int64, metricName string, tolerances Tolerances, namespace string, selector labels.Selector, metricSelector labels.Selector) (replicaCount int32, usage int64, timestamp time.Time, err error) {
metrics, timestamp, err := c.metricsClient.GetRawMetric(metricName, namespace, selector, metricSelector)
if err != nil {
return 0, 0, time.Time{}, fmt.Errorf("unable to get metric %s: %v", metricName, err)
}
-replicaCount, usage, err = c.calcPlainMetricReplicas(metrics, currentReplicas, targetUsage, namespace, selector, v1.ResourceName(""))
+replicaCount, usage, err = c.calcPlainMetricReplicas(metrics, currentReplicas, targetUsage, tolerances, namespace, selector, v1.ResourceName(""))
return replicaCount, usage, timestamp, err
}
// calcPlainMetricReplicas calculates the desired replicas for plain (i.e. non-utilization percentage) metrics.
-func (c *ReplicaCalculator) calcPlainMetricReplicas(metrics metricsclient.PodMetricsInfo, currentReplicas int32, targetUsage int64, namespace string, selector labels.Selector, resource v1.ResourceName) (replicaCount int32, usage int64, err error) {
+func (c *ReplicaCalculator) calcPlainMetricReplicas(metrics metricsclient.PodMetricsInfo, currentReplicas int32, targetUsage int64, tolerances Tolerances, namespace string, selector labels.Selector, resource v1.ResourceName) (replicaCount int32, usage int64, err error) {
podList, err := c.podLister.Pods(namespace).List(selector)
if err != nil {
@@ -199,7 +211,7 @@ func (c *ReplicaCalculator) calcPlainMetricReplicas(metrics metricsclient.PodMet
scaleUpWithUnready := len(unreadyPods) > 0 && usageRatio > 1.0
if !scaleUpWithUnready && len(missingPods) == 0 {
-if math.Abs(1.0-usageRatio) <= c.tolerance {
+if tolerances.isWithin(usageRatio) {
// return the current replicas if the change would be too small
return currentReplicas, usage, nil
}
@@ -232,7 +244,7 @@ func (c *ReplicaCalculator) calcPlainMetricReplicas(metrics metricsclient.PodMet
// re-run the usage calculation with our new numbers
newUsageRatio, _ := metricsclient.GetMetricUsageRatio(metrics, targetUsage)
-if math.Abs(1.0-newUsageRatio) <= c.tolerance || (usageRatio < 1.0 && newUsageRatio > 1.0) || (usageRatio > 1.0 && newUsageRatio < 1.0) {
+if tolerances.isWithin(newUsageRatio) || (usageRatio < 1.0 && newUsageRatio > 1.0) || (usageRatio > 1.0 && newUsageRatio < 1.0) {
// return the current replicas if the change would be too small,
// or if the new usage ratio would cause a change in scale direction
return currentReplicas, usage, nil
@@ -251,22 +263,22 @@ func (c *ReplicaCalculator) calcPlainMetricReplicas(metrics metricsclient.PodMet
// GetObjectMetricReplicas calculates the desired replica count based on a target metric usage (as a milli-value)
// for the given object in the given namespace, and the current replica count.
-func (c *ReplicaCalculator) GetObjectMetricReplicas(currentReplicas int32, targetUsage int64, metricName string, namespace string, objectRef *autoscaling.CrossVersionObjectReference, selector labels.Selector, metricSelector labels.Selector) (replicaCount int32, usage int64, timestamp time.Time, err error) {
+func (c *ReplicaCalculator) GetObjectMetricReplicas(currentReplicas int32, targetUsage int64, metricName string, tolerances Tolerances, namespace string, objectRef *autoscaling.CrossVersionObjectReference, selector labels.Selector, metricSelector labels.Selector) (replicaCount int32, usage int64, timestamp time.Time, err error) {
usage, _, err = c.metricsClient.GetObjectMetric(metricName, namespace, objectRef, metricSelector)
if err != nil {
return 0, 0, time.Time{}, fmt.Errorf("unable to get metric %s: %v on %s %s/%s", metricName, objectRef.Kind, namespace, objectRef.Name, err)
}
usageRatio := float64(usage) / float64(targetUsage)
-replicaCount, timestamp, err = c.getUsageRatioReplicaCount(currentReplicas, usageRatio, namespace, selector)
+replicaCount, timestamp, err = c.getUsageRatioReplicaCount(currentReplicas, usageRatio, tolerances, namespace, selector)
return replicaCount, usage, timestamp, err
}
// getUsageRatioReplicaCount calculates the desired replica count based on usageRatio and ready pods count.
// For currentReplicas=0 doesn't take into account ready pods count and tolerance to support scaling to zero pods.
-func (c *ReplicaCalculator) getUsageRatioReplicaCount(currentReplicas int32, usageRatio float64, namespace string, selector labels.Selector) (replicaCount int32, timestamp time.Time, err error) {
+func (c *ReplicaCalculator) getUsageRatioReplicaCount(currentReplicas int32, usageRatio float64, tolerances Tolerances, namespace string, selector labels.Selector) (replicaCount int32, timestamp time.Time, err error) {
if currentReplicas != 0 {
-if math.Abs(1.0-usageRatio) <= c.tolerance {
+if tolerances.isWithin(usageRatio) {
// return the current replicas if the change would be too small
return currentReplicas, timestamp, nil
}
@@ -286,7 +298,7 @@ func (c *ReplicaCalculator) getUsageRatioReplicaCount(currentReplicas int32, usa
// GetObjectPerPodMetricReplicas calculates the desired replica count based on a target metric usage (as a milli-value)
// for the given object in the given namespace, and the current replica count.
-func (c *ReplicaCalculator) GetObjectPerPodMetricReplicas(statusReplicas int32, targetAverageUsage int64, metricName string, namespace string, objectRef *autoscaling.CrossVersionObjectReference, metricSelector labels.Selector) (replicaCount int32, usage int64, timestamp time.Time, err error) {
+func (c *ReplicaCalculator) GetObjectPerPodMetricReplicas(statusReplicas int32, targetAverageUsage int64, metricName string, tolerances Tolerances, namespace string, objectRef *autoscaling.CrossVersionObjectReference, metricSelector labels.Selector) (replicaCount int32, usage int64, timestamp time.Time, err error) {
usage, timestamp, err = c.metricsClient.GetObjectMetric(metricName, namespace, objectRef, metricSelector)
if err != nil {
return 0, 0, time.Time{}, fmt.Errorf("unable to get metric %s: %v on %s %s/%s", metricName, objectRef.Kind, namespace, objectRef.Name, err)
@@ -294,7 +306,7 @@ func (c *ReplicaCalculator) GetObjectPerPodMetricReplicas(statusReplicas int32,
replicaCount = statusReplicas
usageRatio := float64(usage) / (float64(targetAverageUsage) * float64(replicaCount))
-if math.Abs(1.0-usageRatio) > c.tolerance {
+if !tolerances.isWithin(usageRatio) {
// update number of replicas if change is large enough
replicaCount = int32(math.Ceil(float64(usage) / float64(targetAverageUsage)))
}
@@ -329,7 +341,7 @@ func (c *ReplicaCalculator) getReadyPodsCount(namespace string, selector labels.
// GetExternalMetricReplicas calculates the desired replica count based on a
// target metric value (as a milli-value) for the external metric in the given
// namespace, and the current replica count.
-func (c *ReplicaCalculator) GetExternalMetricReplicas(currentReplicas int32, targetUsage int64, metricName, namespace string, metricSelector *metav1.LabelSelector, podSelector labels.Selector) (replicaCount int32, usage int64, timestamp time.Time, err error) {
+func (c *ReplicaCalculator) GetExternalMetricReplicas(currentReplicas int32, targetUsage int64, metricName string, tolerances Tolerances, namespace string, metricSelector *metav1.LabelSelector, podSelector labels.Selector) (replicaCount int32, usage int64, timestamp time.Time, err error) {
metricLabelSelector, err := metav1.LabelSelectorAsSelector(metricSelector)
if err != nil {
return 0, 0, time.Time{}, err
@@ -344,14 +356,14 @@ func (c *ReplicaCalculator) GetExternalMetricReplicas(currentReplicas int32, tar
}
usageRatio := float64(usage) / float64(targetUsage)
-replicaCount, timestamp, err = c.getUsageRatioReplicaCount(currentReplicas, usageRatio, namespace, podSelector)
+replicaCount, timestamp, err = c.getUsageRatioReplicaCount(currentReplicas, usageRatio, tolerances, namespace, podSelector)
return replicaCount, usage, timestamp, err
}
// GetExternalPerPodMetricReplicas calculates the desired replica count based on a
// target metric value per pod (as a milli-value) for the external metric in the
// given namespace, and the current replica count.
-func (c *ReplicaCalculator) GetExternalPerPodMetricReplicas(statusReplicas int32, targetUsagePerPod int64, metricName, namespace string, metricSelector *metav1.LabelSelector) (replicaCount int32, usage int64, timestamp time.Time, err error) {
+func (c *ReplicaCalculator) GetExternalPerPodMetricReplicas(statusReplicas int32, targetUsagePerPod int64, metricName string, tolerances Tolerances, namespace string, metricSelector *metav1.LabelSelector) (replicaCount int32, usage int64, timestamp time.Time, err error) {
metricLabelSelector, err := metav1.LabelSelectorAsSelector(metricSelector)
if err != nil {
return 0, 0, time.Time{}, err
@@ -367,7 +379,7 @@ func (c *ReplicaCalculator) GetExternalPerPodMetricReplicas(statusReplicas int32
replicaCount = statusReplicas
usageRatio := float64(usage) / (float64(targetUsagePerPod) * float64(replicaCount))
-if math.Abs(1.0-usageRatio) > c.tolerance {
+if !tolerances.isWithin(usageRatio) {
// update number of replicas if the change is large enough
replicaCount = int32(math.Ceil(float64(usage) / float64(targetUsagePerPod)))
}
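A worked example of this per-pod path, with assumed numbers rather than values from the patch: a total external metric of 26000m against a 2000m per-pod target and 10 current replicas gives a ratio of 1.3, which falls outside the default 10% tolerance, so the count is recomputed as ceil(26000/2000) = 13.

package main

import (
	"fmt"
	"math"
)

func main() {
	// Assumed inputs: total metric 26000m, per-pod target 2000m, 10 replicas.
	usage, targetPerPod, replicas := 26000.0, 2000.0, 10.0
	usageRatio := usage / (targetPerPod * replicas) // 1.3
	tolerance := 0.10                               // cluster-wide default
	if !(1.0-tolerance <= usageRatio && usageRatio <= 1.0+tolerance) {
		replicas = math.Ceil(usage / targetPerPod) // 13
	}
	fmt.Println("desired replicas:", int(replicas))
}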

View File

@@ -90,9 +90,10 @@ type replicaCalcTestCase struct {
timestamp time.Time
+tolerances *Tolerances
resource *resourceInfo
metric *metricInfo
container string
podReadiness []v1.ConditionStatus
podStartTime []metav1.Time
@@ -343,7 +344,7 @@ func (tc *replicaCalcTestCase) runTest(t *testing.T) {
informerFactory := informers.NewSharedInformerFactory(testClient, controller.NoResyncPeriodFunc())
informer := informerFactory.Core().V1().Pods()
-replicaCalc := NewReplicaCalculator(metricsClient, informer.Lister(), defaultTestingTolerance, defaultTestingCPUInitializationPeriod, defaultTestingDelayOfInitialReadinessStatus)
+replicaCalc := NewReplicaCalculator(metricsClient, informer.Lister(), defaultTestingCPUInitializationPeriod, defaultTestingDelayOfInitialReadinessStatus)
stop := make(chan struct{})
defer close(stop)
@@ -357,8 +358,14 @@ func (tc *replicaCalcTestCase) runTest(t *testing.T) {
})
require.NoError(t, err, "something went horribly wrong...")
// Use default if tolerances are not specified in the test case.
tolerances := Tolerances{defaultTestingTolerance, defaultTestingTolerance}
if tc.tolerances != nil {
tolerances = *tc.tolerances
}
if tc.resource != nil { if tc.resource != nil {
outReplicas, outUtilization, outRawValue, outTimestamp, err := replicaCalc.GetResourceReplicas(context.TODO(), tc.currentReplicas, tc.resource.targetUtilization, tc.resource.name, testNamespace, selector, tc.container) outReplicas, outUtilization, outRawValue, outTimestamp, err := replicaCalc.GetResourceReplicas(context.TODO(), tc.currentReplicas, tc.resource.targetUtilization, tc.resource.name, tolerances, testNamespace, selector, tc.container)
if tc.expectedError != nil { if tc.expectedError != nil {
require.Error(t, err, "there should be an error calculating the replica count") require.Error(t, err, "there should be an error calculating the replica count")
@ -381,12 +388,12 @@ func (tc *replicaCalcTestCase) runTest(t *testing.T) {
if tc.metric.singleObject == nil { if tc.metric.singleObject == nil {
t.Fatal("Metric specified as objectMetric but metric.singleObject is nil.") t.Fatal("Metric specified as objectMetric but metric.singleObject is nil.")
} }
outReplicas, outUsage, outTimestamp, err = replicaCalc.GetObjectMetricReplicas(tc.currentReplicas, tc.metric.targetUsage, tc.metric.name, testNamespace, tc.metric.singleObject, selector, nil) outReplicas, outUsage, outTimestamp, err = replicaCalc.GetObjectMetricReplicas(tc.currentReplicas, tc.metric.targetUsage, tc.metric.name, tolerances, testNamespace, tc.metric.singleObject, selector, nil)
case objectPerPodMetric: case objectPerPodMetric:
if tc.metric.singleObject == nil { if tc.metric.singleObject == nil {
t.Fatal("Metric specified as objectMetric but metric.singleObject is nil.") t.Fatal("Metric specified as objectMetric but metric.singleObject is nil.")
} }
outReplicas, outUsage, outTimestamp, err = replicaCalc.GetObjectPerPodMetricReplicas(tc.currentReplicas, tc.metric.perPodTargetUsage, tc.metric.name, testNamespace, tc.metric.singleObject, nil) outReplicas, outUsage, outTimestamp, err = replicaCalc.GetObjectPerPodMetricReplicas(tc.currentReplicas, tc.metric.perPodTargetUsage, tc.metric.name, tolerances, testNamespace, tc.metric.singleObject, nil)
case externalMetric: case externalMetric:
if tc.metric.selector == nil { if tc.metric.selector == nil {
t.Fatal("Metric specified as externalMetric but metric.selector is nil.") t.Fatal("Metric specified as externalMetric but metric.selector is nil.")
@ -394,7 +401,7 @@ func (tc *replicaCalcTestCase) runTest(t *testing.T) {
if tc.metric.targetUsage <= 0 { if tc.metric.targetUsage <= 0 {
t.Fatalf("Metric specified as externalMetric but metric.targetUsage is %d which is <=0.", tc.metric.targetUsage) t.Fatalf("Metric specified as externalMetric but metric.targetUsage is %d which is <=0.", tc.metric.targetUsage)
} }
outReplicas, outUsage, outTimestamp, err = replicaCalc.GetExternalMetricReplicas(tc.currentReplicas, tc.metric.targetUsage, tc.metric.name, testNamespace, tc.metric.selector, selector) outReplicas, outUsage, outTimestamp, err = replicaCalc.GetExternalMetricReplicas(tc.currentReplicas, tc.metric.targetUsage, tc.metric.name, tolerances, testNamespace, tc.metric.selector, selector)
case externalPerPodMetric: case externalPerPodMetric:
if tc.metric.selector == nil { if tc.metric.selector == nil {
t.Fatal("Metric specified as externalPerPodMetric but metric.selector is nil.") t.Fatal("Metric specified as externalPerPodMetric but metric.selector is nil.")
@ -403,9 +410,9 @@ func (tc *replicaCalcTestCase) runTest(t *testing.T) {
t.Fatalf("Metric specified as externalPerPodMetric but metric.perPodTargetUsage is %d which is <=0.", tc.metric.perPodTargetUsage) t.Fatalf("Metric specified as externalPerPodMetric but metric.perPodTargetUsage is %d which is <=0.", tc.metric.perPodTargetUsage)
} }
outReplicas, outUsage, outTimestamp, err = replicaCalc.GetExternalPerPodMetricReplicas(tc.currentReplicas, tc.metric.perPodTargetUsage, tc.metric.name, testNamespace, tc.metric.selector) outReplicas, outUsage, outTimestamp, err = replicaCalc.GetExternalPerPodMetricReplicas(tc.currentReplicas, tc.metric.perPodTargetUsage, tc.metric.name, tolerances, testNamespace, tc.metric.selector)
case podMetric: case podMetric:
outReplicas, outUsage, outTimestamp, err = replicaCalc.GetMetricReplicas(tc.currentReplicas, tc.metric.targetUsage, tc.metric.name, testNamespace, selector, nil) outReplicas, outUsage, outTimestamp, err = replicaCalc.GetMetricReplicas(tc.currentReplicas, tc.metric.targetUsage, tc.metric.name, tolerances, testNamespace, selector, nil)
default: default:
t.Fatalf("Unknown metric type: %d", tc.metric.metricType) t.Fatalf("Unknown metric type: %d", tc.metric.metricType)
} }
@ -1263,6 +1270,188 @@ func TestReplicaCalcTolerancePerPodCMExternal(t *testing.T) {
tc.runTest(t) tc.runTest(t)
} }
func TestReplicaCalcConfigurableTolerance(t *testing.T) {
testCases := []struct {
name string
replicaCalcTestCase
}{
{
name: "Outside of a 0% tolerance",
replicaCalcTestCase: replicaCalcTestCase{
tolerances: &Tolerances{0., 0.},
currentReplicas: 3,
expectedReplicas: 4,
resource: &resourceInfo{
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
levels: makePodMetricLevels(909, 1010, 1111),
targetUtilization: 100,
expectedUtilization: 101,
expectedValue: numContainersPerPod * 1010,
},
},
},
{
name: "Within a 200% scale-up tolerance",
replicaCalcTestCase: replicaCalcTestCase{
tolerances: &Tolerances{defaultTestingTolerance, 2.},
currentReplicas: 3,
expectedReplicas: 3,
resource: &resourceInfo{
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
levels: makePodMetricLevels(1890, 1910, 1900),
targetUtilization: 100,
expectedUtilization: 190,
expectedValue: numContainersPerPod * 1900,
},
},
},
{
name: "Outside 8% scale-up tolerance (and superfluous scale-down tolerance)",
replicaCalcTestCase: replicaCalcTestCase{
tolerances: &Tolerances{2., .08},
currentReplicas: 3,
expectedReplicas: 4,
resource: &resourceInfo{
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
levels: makePodMetricLevels(1100, 1080, 1090),
targetUtilization: 100,
expectedUtilization: 109,
expectedValue: numContainersPerPod * 1090,
},
},
},
{
name: "Within a 36% scale-down tolerance",
replicaCalcTestCase: replicaCalcTestCase{
tolerances: &Tolerances{.36, defaultTestingTolerance},
currentReplicas: 3,
expectedReplicas: 3,
resource: &resourceInfo{
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
levels: makePodMetricLevels(660, 640, 650),
targetUtilization: 100,
expectedUtilization: 65,
expectedValue: numContainersPerPod * 650,
},
},
},
{
name: "Outside a 34% scale-down tolerance",
replicaCalcTestCase: replicaCalcTestCase{
tolerances: &Tolerances{.34, defaultTestingTolerance},
currentReplicas: 3,
expectedReplicas: 2,
resource: &resourceInfo{
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
levels: makePodMetricLevels(660, 640, 650),
targetUtilization: 100,
expectedUtilization: 65,
expectedValue: numContainersPerPod * 650,
},
},
},
}
for _, tc := range testCases {
t.Run(tc.name, tc.runTest)
}
}
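These cases exercise the usage-ratio arithmetic directly. In the 8% scale-up case, for example, measured utilization averages 109% of the 100% target, so the ratio 1.09 exceeds 1 + 0.08 and the calculator rounds 3 replicas up to ceil(3 * 1.09) = 4. A minimal restatement of that arithmetic (illustrative only, using the standard math package):

usageRatio := 1.09                               // 109% observed utilization vs. a 100% target
outsideScaleUpTolerance := usageRatio > 1.0+0.08 // true: 1.09 lies outside the 8% band
newReplicas := int32(math.Ceil(3 * usageRatio))  // ceil(3.27) == 4, matching expectedReplicas
_, _ = outsideScaleUpTolerance, newReplicas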
func TestReplicaCalcConfigurableToleranceCM(t *testing.T) {
tc := replicaCalcTestCase{
tolerances: &Tolerances{defaultTestingTolerance, .01},
currentReplicas: 3,
expectedReplicas: 4,
metric: &metricInfo{
name: "qps",
levels: []int64{20000, 21000, 21000},
targetUsage: 20000,
expectedUsage: 20666,
metricType: podMetric,
},
}
tc.runTest(t)
}
func TestReplicaCalcConfigurableToleranceCMObject(t *testing.T) {
tc := replicaCalcTestCase{
tolerances: &Tolerances{defaultTestingTolerance, .01},
currentReplicas: 3,
expectedReplicas: 4,
metric: &metricInfo{
name: "qps",
levels: []int64{20666},
targetUsage: 20000,
expectedUsage: 20666,
singleObject: &autoscalingv2.CrossVersionObjectReference{
Kind: "Deployment",
APIVersion: "apps/v1",
Name: "some-deployment",
},
},
}
tc.runTest(t)
}
func TestReplicaCalcConfigurableTolerancePerPodCMObject(t *testing.T) {
tc := replicaCalcTestCase{
tolerances: &Tolerances{defaultTestingTolerance, .01},
currentReplicas: 4,
expectedReplicas: 5,
metric: &metricInfo{
metricType: objectPerPodMetric,
name: "qps",
levels: []int64{20208},
perPodTargetUsage: 5000,
expectedUsage: 5052,
singleObject: &autoscalingv2.CrossVersionObjectReference{
Kind: "Deployment",
APIVersion: "apps/v1",
Name: "some-deployment",
},
},
}
tc.runTest(t)
}
func TestReplicaCalcConfigurableToleranceCMExternal(t *testing.T) {
tc := replicaCalcTestCase{
tolerances: &Tolerances{defaultTestingTolerance, .01},
currentReplicas: 3,
expectedReplicas: 4,
metric: &metricInfo{
name: "qps",
levels: []int64{8900},
targetUsage: 8800,
expectedUsage: 8900,
selector: &metav1.LabelSelector{MatchLabels: map[string]string{"label": "value"}},
metricType: externalMetric,
},
}
tc.runTest(t)
}
func TestReplicaCalcConfigurableTolerancePerPodCMExternal(t *testing.T) {
tc := replicaCalcTestCase{
tolerances: &Tolerances{defaultTestingTolerance, .01},
currentReplicas: 3,
expectedReplicas: 4,
metric: &metricInfo{
name: "qps",
levels: []int64{8600},
perPodTargetUsage: 2800,
expectedUsage: 2867,
selector: &metav1.LabelSelector{MatchLabels: map[string]string{"label": "value"}},
metricType: externalPerPodMetric,
},
}
tc.runTest(t)
}
func TestReplicaCalcSuperfluousMetrics(t *testing.T) { func TestReplicaCalcSuperfluousMetrics(t *testing.T) {
tc := replicaCalcTestCase{ tc := replicaCalcTestCase{
currentReplicas: 4, currentReplicas: 4,

View File

@ -293,6 +293,12 @@ const (
// Make the kubelet use shutdown configuration based on pod priority values for graceful shutdown. // Make the kubelet use shutdown configuration based on pod priority values for graceful shutdown.
GracefulNodeShutdownBasedOnPodPriority featuregate.Feature = "GracefulNodeShutdownBasedOnPodPriority" GracefulNodeShutdownBasedOnPodPriority featuregate.Feature = "GracefulNodeShutdownBasedOnPodPriority"
// owner: @jm-franc
// kep: https://kep.k8s.io/4951
//
// Enables support of configurable HPA scale-up and scale-down tolerances.
HPAConfigurableTolerance featuregate.Feature = "HPAConfigurableTolerance"
// owner: @dxist // owner: @dxist
// //
// Enables support of HPA scaling to zero pods when an object or custom metric is configured. // Enables support of HPA scaling to zero pods when an object or custom metric is configured.
@ -1347,6 +1353,10 @@ var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate
{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.36 {Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.36
}, },
HPAConfigurableTolerance: {
{Version: version.MustParse("1.33"), Default: false, PreRelease: featuregate.Alpha},
},
HPAScaleToZero: { HPAScaleToZero: {
{Version: version.MustParse("1.16"), Default: false, PreRelease: featuregate.Alpha}, {Version: version.MustParse("1.16"), Default: false, PreRelease: featuregate.Alpha},
}, },

View File

@ -14693,7 +14693,7 @@ func schema_k8sio_api_autoscaling_v2_HPAScalingRules(ref common.ReferenceCallbac
return common.OpenAPIDefinition{ return common.OpenAPIDefinition{
Schema: spec.Schema{ Schema: spec.Schema{
SchemaProps: spec.SchemaProps{ SchemaProps: spec.SchemaProps{
Description: "HPAScalingRules configures the scaling behavior for one direction. These Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.", Description: "HPAScalingRules configures the scaling behavior for one direction via scaling Policy Rules and a configurable metric tolerance.\n\nScaling Policy Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.\n\nThe tolerance is applied to the metric values and prevents scaling too eagerly for small metric variations. (Note that setting a tolerance requires enabling the alpha HPAConfigurableTolerance feature gate.)",
Type: []string{"object"}, Type: []string{"object"},
Properties: map[string]spec.Schema{ Properties: map[string]spec.Schema{
"stabilizationWindowSeconds": { "stabilizationWindowSeconds": {
@ -14717,7 +14717,7 @@ func schema_k8sio_api_autoscaling_v2_HPAScalingRules(ref common.ReferenceCallbac
}, },
}, },
SchemaProps: spec.SchemaProps{ SchemaProps: spec.SchemaProps{
Description: "policies is a list of potential scaling policies which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid", Description: "policies is a list of potential scaling policies which can be used during scaling. If not set, use the default values: - For scale up: allow doubling the number of pods, or an absolute change of 4 pods in a 15s window. - For scale down: allow all pods to be removed in a 15s window.",
Type: []string{"array"}, Type: []string{"array"},
Items: &spec.SchemaOrArray{ Items: &spec.SchemaOrArray{
Schema: &spec.Schema{ Schema: &spec.Schema{
@ -14729,11 +14729,17 @@ func schema_k8sio_api_autoscaling_v2_HPAScalingRules(ref common.ReferenceCallbac
}, },
}, },
}, },
"tolerance": {
SchemaProps: spec.SchemaProps{
Description: "tolerance is the tolerance on the ratio between the current and desired metric value under which no updates are made to the desired number of replicas (e.g. 0.01 for 1%). Must be greater than or equal to zero. If not set, the default cluster-wide tolerance is applied (by default 10%).\n\nFor example, if autoscaling is configured with a memory consumption target of 100Mi, and scale-down and scale-up tolerances of 5% and 1% respectively, scaling will be triggered when the actual consumption falls below 95Mi or exceeds 101Mi.\n\nThis is an alpha field and requires enabling the HPAConfigurableTolerance feature gate.",
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
}, },
}, },
}, },
Dependencies: []string{ Dependencies: []string{
"k8s.io/api/autoscaling/v2.HPAScalingPolicy"}, "k8s.io/api/autoscaling/v2.HPAScalingPolicy", "k8s.io/apimachinery/pkg/api/resource.Quantity"},
} }
} }

View File

@ -22,9 +22,11 @@ import (
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/apiserver/pkg/storage/names" "k8s.io/apiserver/pkg/storage/names"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/autoscaling" "k8s.io/kubernetes/pkg/apis/autoscaling"
"k8s.io/kubernetes/pkg/apis/autoscaling/validation" "k8s.io/kubernetes/pkg/apis/autoscaling/validation"
"k8s.io/kubernetes/pkg/features"
"sigs.k8s.io/structured-merge-diff/v4/fieldpath" "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
) )
@ -70,12 +72,15 @@ func (autoscalerStrategy) PrepareForCreate(ctx context.Context, obj runtime.Obje
// create cannot set status // create cannot set status
newHPA.Status = autoscaling.HorizontalPodAutoscalerStatus{} newHPA.Status = autoscaling.HorizontalPodAutoscalerStatus{}
dropDisabledFields(newHPA, nil)
} }
// Validate validates a new autoscaler. // Validate validates a new autoscaler.
func (autoscalerStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList { func (autoscalerStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList {
autoscaler := obj.(*autoscaling.HorizontalPodAutoscaler) autoscaler := obj.(*autoscaling.HorizontalPodAutoscaler)
return validation.ValidateHorizontalPodAutoscaler(autoscaler) opts := validationOptionsForHorizontalPodAutoscaler(autoscaler, nil)
return validation.ValidateHorizontalPodAutoscaler(autoscaler, opts)
} }
// WarningsOnCreate returns warnings for the creation of the given object. // WarningsOnCreate returns warnings for the creation of the given object.
@ -98,11 +103,16 @@ func (autoscalerStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime
oldHPA := old.(*autoscaling.HorizontalPodAutoscaler) oldHPA := old.(*autoscaling.HorizontalPodAutoscaler)
// Update is not allowed to set status // Update is not allowed to set status
newHPA.Status = oldHPA.Status newHPA.Status = oldHPA.Status
dropDisabledFields(newHPA, oldHPA)
} }
// ValidateUpdate is the default update validation for an end user. // ValidateUpdate is the default update validation for an end user.
func (autoscalerStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList { func (autoscalerStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {
return validation.ValidateHorizontalPodAutoscalerUpdate(obj.(*autoscaling.HorizontalPodAutoscaler), old.(*autoscaling.HorizontalPodAutoscaler)) newHPA := obj.(*autoscaling.HorizontalPodAutoscaler)
oldHPA := old.(*autoscaling.HorizontalPodAutoscaler)
opts := validationOptionsForHorizontalPodAutoscaler(newHPA, oldHPA)
return validation.ValidateHorizontalPodAutoscalerUpdate(newHPA, oldHPA, opts)
} }
// WarningsOnUpdate returns warnings for the given update. // WarningsOnUpdate returns warnings for the given update.
@ -157,3 +167,48 @@ func (autoscalerStatusStrategy) ValidateUpdate(ctx context.Context, obj, old run
func (autoscalerStatusStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string { func (autoscalerStatusStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string {
return nil return nil
} }
func validationOptionsForHorizontalPodAutoscaler(newHPA, oldHPA *autoscaling.HorizontalPodAutoscaler) validation.HorizontalPodAutoscalerSpecValidationOptions {
opts := validation.HorizontalPodAutoscalerSpecValidationOptions{
MinReplicasLowerBound: 1,
}
oldHasZeroMinReplicas := oldHPA != nil && (oldHPA.Spec.MinReplicas != nil && *oldHPA.Spec.MinReplicas == 0)
if utilfeature.DefaultFeatureGate.Enabled(features.HPAScaleToZero) || oldHasZeroMinReplicas {
opts.MinReplicasLowerBound = 0
}
return opts
}
// dropDisabledFields will drop any disabled fields that have not previously been
// set on the old HPA. oldHPA is ignored if nil.
func dropDisabledFields(newHPA, oldHPA *autoscaling.HorizontalPodAutoscaler) {
if utilfeature.DefaultFeatureGate.Enabled(features.HPAConfigurableTolerance) {
return
}
if toleranceInUse(oldHPA) {
return
}
newBehavior := newHPA.Spec.Behavior
if newBehavior == nil {
return
}
for _, sr := range []*autoscaling.HPAScalingRules{newBehavior.ScaleDown, newBehavior.ScaleUp} {
if sr != nil {
sr.Tolerance = nil
}
}
}
func toleranceInUse(hpa *autoscaling.HorizontalPodAutoscaler) bool {
if hpa == nil || hpa.Spec.Behavior == nil {
return false
}
for _, sr := range []*autoscaling.HPAScalingRules{hpa.Spec.Behavior.ScaleDown, hpa.Spec.Behavior.ScaleUp} {
if sr != nil && sr.Tolerance != nil {
return true
}
}
return false
}
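Together, dropDisabledFields and toleranceInUse implement the usual ratcheting pattern for alpha fields: a write keeps the tolerance only when the feature gate is enabled or the object being replaced already used it. A compact restatement of that rule (hypothetical helper, not part of this change):

// keepTolerance mirrors the logic above: tolerance survives a write when the
// gate is enabled or the pre-existing object already set it.
func keepTolerance(gateEnabled, oldHadTolerance bool) bool {
	return gateEnabled || oldHadTolerance
}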

View File

@ -0,0 +1,161 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package horizontalpodautoscaler
import (
"context"
"testing"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/kubernetes/pkg/apis/autoscaling"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/features"
"k8s.io/utils/ptr"
)
type toleranceSet bool
type zeroMinReplicasSet bool
const (
withTolerance toleranceSet = true
withoutTolerance = false
zeroMinReplicas zeroMinReplicasSet = true
oneMinReplicas = false
)
func TestPrepareForCreateConfigurableToleranceEnabled(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.HPAConfigurableTolerance, true)
hpa := prepareHPA(oneMinReplicas, withTolerance)
Strategy.PrepareForCreate(context.Background(), &hpa)
if hpa.Spec.Behavior.ScaleUp.Tolerance == nil {
t.Error("Expected tolerance field, got none")
}
}
func TestPrepareForCreateConfigurableToleranceDisabled(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.HPAConfigurableTolerance, false)
hpa := prepareHPA(oneMinReplicas, withTolerance)
Strategy.PrepareForCreate(context.Background(), &hpa)
if hpa.Spec.Behavior.ScaleUp.Tolerance != nil {
t.Errorf("Expected tolerance field wiped out, got %v", hpa.Spec.Behavior.ScaleUp.Tolerance)
}
}
func TestPrepareForUpdateConfigurableToleranceEnabled(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.HPAConfigurableTolerance, true)
newHPA := prepareHPA(oneMinReplicas, withTolerance)
oldHPA := prepareHPA(oneMinReplicas, withTolerance)
Strategy.PrepareForUpdate(context.Background(), &newHPA, &oldHPA)
if newHPA.Spec.Behavior.ScaleUp.Tolerance == nil {
t.Error("Expected tolerance field, got none")
}
}
func TestPrepareForUpdateConfigurableToleranceDisabled(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.HPAConfigurableTolerance, false)
newHPA := prepareHPA(oneMinReplicas, withTolerance)
oldHPA := prepareHPA(oneMinReplicas, withoutTolerance)
Strategy.PrepareForUpdate(context.Background(), &newHPA, &oldHPA)
if newHPA.Spec.Behavior.ScaleUp.Tolerance != nil {
t.Errorf("Expected tolerance field wiped out, got %v", newHPA.Spec.Behavior.ScaleUp.Tolerance)
}
newHPA = prepareHPA(oneMinReplicas, withTolerance)
oldHPA = prepareHPA(oneMinReplicas, withTolerance)
Strategy.PrepareForUpdate(context.Background(), &newHPA, &oldHPA)
if newHPA.Spec.Behavior.ScaleUp.Tolerance == nil {
t.Errorf("Expected tolerance field not wiped out, got nil")
}
}
func TestValidateOptionsScaleToZeroEnabled(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.HPAScaleToZero, true)
oneReplicasHPA := prepareHPA(oneMinReplicas, withoutTolerance)
opts := validationOptionsForHorizontalPodAutoscaler(&oneReplicasHPA, &oneReplicasHPA)
if opts.MinReplicasLowerBound != 0 {
t.Errorf("Expected zero minReplicasLowerBound, got %v", opts.MinReplicasLowerBound)
}
}
func TestValidateOptionsScaleToZeroDisabled(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.HPAScaleToZero, false)
zeroReplicasHPA := prepareHPA(zeroMinReplicas, withoutTolerance)
oneReplicasHPA := prepareHPA(oneMinReplicas, withoutTolerance)
// The minReplicas lower bound should be 0 despite the gate being disabled,
// since the old HPA already had minReplicas set to 0.
opts := validationOptionsForHorizontalPodAutoscaler(&zeroReplicasHPA, &zeroReplicasHPA)
if opts.MinReplicasLowerBound != 0 {
t.Errorf("Expected zero minReplicasLowerBound, got %v", opts.MinReplicasLowerBound)
}
opts = validationOptionsForHorizontalPodAutoscaler(&zeroReplicasHPA, &oneReplicasHPA)
if opts.MinReplicasLowerBound == 0 {
t.Errorf("Expected non-zero minReplicasLowerBound, got 0")
}
}
func prepareHPA(hasZeroMinReplicas zeroMinReplicasSet, hasTolerance toleranceSet) autoscaling.HorizontalPodAutoscaler {
tolerance := ptr.To(resource.MustParse("0.1"))
if !hasTolerance {
tolerance = nil
}
minReplicas := int32(0)
if !hasZeroMinReplicas {
minReplicas = 1
}
return autoscaling.HorizontalPodAutoscaler{
ObjectMeta: metav1.ObjectMeta{
Name: "myautoscaler",
Namespace: metav1.NamespaceDefault,
ResourceVersion: "1",
},
Spec: autoscaling.HorizontalPodAutoscalerSpec{
ScaleTargetRef: autoscaling.CrossVersionObjectReference{
Kind: "ReplicationController",
Name: "myrc",
},
MinReplicas: &minReplicas,
MaxReplicas: 5,
Metrics: []autoscaling.MetricSpec{{
Type: autoscaling.ResourceMetricSourceType,
Resource: &autoscaling.ResourceMetricSource{
Name: api.ResourceCPU,
Target: autoscaling.MetricTarget{
Type: autoscaling.UtilizationMetricType,
AverageUtilization: ptr.To(int32(70)),
},
},
}},
Behavior: &autoscaling.HorizontalPodAutoscalerBehavior{
ScaleUp: &autoscaling.HPAScalingRules{
Tolerance: tolerance,
},
},
},
}
}

View File

@ -751,115 +751,116 @@ func init() {
} }
var fileDescriptor_4d5f2c8767749221 = []byte{ var fileDescriptor_4d5f2c8767749221 = []byte{
// 1742 bytes of a gzipped FileDescriptorProto (regenerated; previously 1722 bytes)
} }
func (m *ContainerResourceMetricSource) Marshal() (dAtA []byte, err error) { func (m *ContainerResourceMetricSource) Marshal() (dAtA []byte, err error) {
@ -1126,6 +1127,18 @@ func (m *HPAScalingRules) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i _ = i
var l int var l int
_ = l _ = l
if m.Tolerance != nil {
{
size, err := m.Tolerance.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x22
}
if m.StabilizationWindowSeconds != nil { if m.StabilizationWindowSeconds != nil {
i = encodeVarintGenerated(dAtA, i, uint64(*m.StabilizationWindowSeconds)) i = encodeVarintGenerated(dAtA, i, uint64(*m.StabilizationWindowSeconds))
i-- i--
@ -2203,6 +2216,10 @@ func (m *HPAScalingRules) Size() (n int) {
if m.StabilizationWindowSeconds != nil { if m.StabilizationWindowSeconds != nil {
n += 1 + sovGenerated(uint64(*m.StabilizationWindowSeconds)) n += 1 + sovGenerated(uint64(*m.StabilizationWindowSeconds))
} }
if m.Tolerance != nil {
l = m.Tolerance.Size()
n += 1 + l + sovGenerated(uint64(l))
}
return n return n
} }
@ -2619,6 +2636,7 @@ func (this *HPAScalingRules) String() string {
`SelectPolicy:` + valueToStringGenerated(this.SelectPolicy) + `,`, `SelectPolicy:` + valueToStringGenerated(this.SelectPolicy) + `,`,
`Policies:` + repeatedStringForPolicies + `,`, `Policies:` + repeatedStringForPolicies + `,`,
`StabilizationWindowSeconds:` + valueToStringGenerated(this.StabilizationWindowSeconds) + `,`, `StabilizationWindowSeconds:` + valueToStringGenerated(this.StabilizationWindowSeconds) + `,`,
`Tolerance:` + strings.Replace(fmt.Sprintf("%v", this.Tolerance), "Quantity", "resource.Quantity", 1) + `,`,
`}`, `}`,
}, "") }, "")
return s return s
@ -3770,6 +3788,42 @@ func (m *HPAScalingRules) Unmarshal(dAtA []byte) error {
} }
} }
m.StabilizationWindowSeconds = &v m.StabilizationWindowSeconds = &v
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Tolerance", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Tolerance == nil {
m.Tolerance = &resource.Quantity{}
}
if err := m.Tolerance.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default: default:
iNdEx = preIndex iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:]) skippy, err := skipGenerated(dAtA[iNdEx:])
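In the hand-written codec above, the new field is bound to protobuf field number 4 with the length-delimited wire type: the tag byte emitted by MarshalToSizedBuffer is 0x22 because (4 << 3) | 2 == 0x22, which is exactly what the case 4 / wireType != 2 checks in Unmarshal expect. A one-line sanity check of that arithmetic:

const toleranceFieldTag = 4<<3 | 2 // == 0x22: field number 4, wire type 2 (length-delimited)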

View File

@ -112,12 +112,18 @@ message HPAScalingPolicy {
optional int32 periodSeconds = 3; optional int32 periodSeconds = 3;
} }
// HPAScalingRules configures the scaling behavior for one direction. // HPAScalingRules configures the scaling behavior for one direction via
// These Rules are applied after calculating DesiredReplicas from metrics for the HPA. // scaling Policy Rules and a configurable metric tolerance.
//
// Scaling Policy Rules are applied after calculating DesiredReplicas from metrics for the HPA.
// They can limit the scaling velocity by specifying scaling policies. // They can limit the scaling velocity by specifying scaling policies.
// They can prevent flapping by specifying the stabilization window, so that the // They can prevent flapping by specifying the stabilization window, so that the
// number of replicas is not set instantly, instead, the safest value from the stabilization // number of replicas is not set instantly, instead, the safest value from the stabilization
// window is chosen. // window is chosen.
//
// The tolerance is applied to the metric values and prevents scaling too
// eagerly for small metric variations. (Note that setting a tolerance requires
// enabling the alpha HPAConfigurableTolerance feature gate.)
message HPAScalingRules { message HPAScalingRules {
// stabilizationWindowSeconds is the number of seconds for which past recommendations should be // stabilizationWindowSeconds is the number of seconds for which past recommendations should be
// considered while scaling up or scaling down. // considered while scaling up or scaling down.
@ -134,10 +140,28 @@ message HPAScalingRules {
optional string selectPolicy = 1; optional string selectPolicy = 1;
// policies is a list of potential scaling policies which can be used during scaling. // policies is a list of potential scaling policies which can be used during scaling.
// At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid // If not set, use the default values:
// - For scale up: allow doubling the number of pods, or an absolute change of 4 pods in a 15s window.
// - For scale down: allow all pods to be removed in a 15s window.
// +listType=atomic // +listType=atomic
// +optional // +optional
repeated HPAScalingPolicy policies = 2; repeated HPAScalingPolicy policies = 2;
// tolerance is the tolerance on the ratio between the current and desired
// metric value under which no updates are made to the desired number of
// replicas (e.g. 0.01 for 1%). Must be greater than or equal to zero. If not
// set, the default cluster-wide tolerance is applied (by default 10%).
//
// For example, if autoscaling is configured with a memory consumption target of 100Mi,
// and scale-down and scale-up tolerances of 5% and 1% respectively, scaling will be
// triggered when the actual consumption falls below 95Mi or exceeds 101Mi.
//
// This is an alpha field and requires enabling the HPAConfigurableTolerance
// feature gate.
//
// +featureGate=HPAConfigurableTolerance
// +optional
optional .k8s.io.apimachinery.pkg.api.resource.Quantity tolerance = 4;
} }
// HorizontalPodAutoscaler is the configuration for a horizontal pod // HorizontalPodAutoscaler is the configuration for a horizontal pod

View File

@ -171,12 +171,18 @@ const (
DisabledPolicySelect ScalingPolicySelect = "Disabled" DisabledPolicySelect ScalingPolicySelect = "Disabled"
) )
// HPAScalingRules configures the scaling behavior for one direction. // HPAScalingRules configures the scaling behavior for one direction via
// These Rules are applied after calculating DesiredReplicas from metrics for the HPA. // scaling Policy Rules and a configurable metric tolerance.
//
// Scaling Policy Rules are applied after calculating DesiredReplicas from metrics for the HPA.
// They can limit the scaling velocity by specifying scaling policies. // They can limit the scaling velocity by specifying scaling policies.
// They can prevent flapping by specifying the stabilization window, so that the // They can prevent flapping by specifying the stabilization window, so that the
// number of replicas is not set instantly, instead, the safest value from the stabilization // number of replicas is not set instantly, instead, the safest value from the stabilization
// window is chosen. // window is chosen.
//
// The tolerance is applied to the metric values and prevents scaling too
// eagerly for small metric variations. (Note that setting a tolerance requires
// enabling the alpha HPAConfigurableTolerance feature gate.)
type HPAScalingRules struct { type HPAScalingRules struct {
// stabilizationWindowSeconds is the number of seconds for which past recommendations should be // stabilizationWindowSeconds is the number of seconds for which past recommendations should be
// considered while scaling up or scaling down. // considered while scaling up or scaling down.
@ -193,10 +199,28 @@ type HPAScalingRules struct {
SelectPolicy *ScalingPolicySelect `json:"selectPolicy,omitempty" protobuf:"bytes,1,opt,name=selectPolicy"` SelectPolicy *ScalingPolicySelect `json:"selectPolicy,omitempty" protobuf:"bytes,1,opt,name=selectPolicy"`
// policies is a list of potential scaling policies which can be used during scaling. // policies is a list of potential scaling policies which can be used during scaling.
// At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid // If not set, use the default values:
// - For scale up: allow doubling the number of pods, or an absolute change of 4 pods in a 15s window.
// - For scale down: allow all pods to be removed in a 15s window.
// +listType=atomic // +listType=atomic
// +optional // +optional
Policies []HPAScalingPolicy `json:"policies,omitempty" listType:"atomic" protobuf:"bytes,2,rep,name=policies"` Policies []HPAScalingPolicy `json:"policies,omitempty" listType:"atomic" protobuf:"bytes,2,rep,name=policies"`
// tolerance is the tolerance on the ratio between the current and desired
// metric value under which no updates are made to the desired number of
// replicas (e.g. 0.01 for 1%). Must be greater than or equal to zero. If not
// set, the default cluster-wide tolerance is applied (by default 10%).
//
// For example, if autoscaling is configured with a memory consumption target of 100Mi,
// and scale-down and scale-up tolerances of 5% and 1% respectively, scaling will be
// triggered when the actual consumption falls below 95Mi or exceeds 101Mi.
//
// This is an alpha field and requires enabling the HPAConfigurableTolerance
// feature gate.
//
// +featureGate=HPAConfigurableTolerance
// +optional
Tolerance *resource.Quantity `json:"tolerance,omitempty" protobuf:"bytes,4,opt,name=tolerance"`
} }
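For illustration only (not part of this change), populating the new field on the typed API looks like any other Quantity pointer; the snippet assumes the k8s.io/api/autoscaling/v2, k8s.io/apimachinery/pkg/api/resource and k8s.io/utils/ptr packages and mirrors the 100Mi example from the field documentation:

// 5% scale-down and 1% scale-up tolerances, as in the doc comment above.
behavior := &autoscalingv2.HorizontalPodAutoscalerBehavior{
	ScaleDown: &autoscalingv2.HPAScalingRules{Tolerance: ptr.To(resource.MustParse("0.05"))},
	ScaleUp:   &autoscalingv2.HPAScalingRules{Tolerance: ptr.To(resource.MustParse("0.01"))},
}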
// HPAScalingPolicyType is the type of the policy which could be used while making scaling decisions. // HPAScalingPolicyType is the type of the policy which could be used while making scaling decisions.

View File

@ -92,10 +92,11 @@ func (HPAScalingPolicy) SwaggerDoc() map[string]string {
} }
var map_HPAScalingRules = map[string]string{ var map_HPAScalingRules = map[string]string{
"": "HPAScalingRules configures the scaling behavior for one direction. These Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.", "": "HPAScalingRules configures the scaling behavior for one direction via scaling Policy Rules and a configurable metric tolerance.\n\nScaling Policy Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.\n\nThe tolerance is applied to the metric values and prevents scaling too eagerly for small metric variations. (Note that setting a tolerance requires enabling the alpha HPAConfigurableTolerance feature gate.)",
"stabilizationWindowSeconds": "stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long).", "stabilizationWindowSeconds": "stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long).",
"selectPolicy": "selectPolicy is used to specify which policy should be used. If not set, the default value Max is used.", "selectPolicy": "selectPolicy is used to specify which policy should be used. If not set, the default value Max is used.",
"policies": "policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid", "policies": "policies is a list of potential scaling polices which can be used during scaling. If not set, use the default values: - For scale up: allow doubling the number of pods, or an absolute change of 4 pods in a 15s window. - For scale down: allow all pods to be removed in a 15s window.",
"tolerance": "tolerance is the tolerance on the ratio between the current and desired metric value under which no updates are made to the desired number of replicas (e.g. 0.01 for 1%). Must be greater than or equal to zero. If not set, the default cluster-wide tolerance is applied (by default 10%).\n\nFor example, if autoscaling is configured with a memory consumption target of 100Mi, and scale-down and scale-up tolerances of 5% and 1% respectively, scaling will be triggered when the actual consumption falls below 95Mi or exceeds 101Mi.\n\nThis is an alpha field and requires enabling the HPAConfigurableTolerance feature gate.",
} }
func (HPAScalingRules) SwaggerDoc() map[string]string { func (HPAScalingRules) SwaggerDoc() map[string]string {

View File

@ -146,6 +146,11 @@ func (in *HPAScalingRules) DeepCopyInto(out *HPAScalingRules) {
*out = make([]HPAScalingPolicy, len(*in)) *out = make([]HPAScalingPolicy, len(*in))
copy(*out, *in) copy(*out, *in)
} }
if in.Tolerance != nil {
in, out := &in.Tolerance, &out.Tolerance
x := (*in).DeepCopy()
*out = &x
}
return return
} }
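As an aside (mine, not from the diff): the explicit block above is needed because Tolerance is a pointer, so the copy must duplicate the pointed-to Quantity rather than share it. A small sketch of the aliasing this prevents, with made-up values:

package main

import (
	"fmt"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	orig := autoscalingv2.HPAScalingRules{
		Tolerance: resource.NewMilliQuantity(50, resource.DecimalSI), // 5%
	}
	cp := orig.DeepCopy() // calls the generated DeepCopyInto shown above

	cp.Tolerance.Add(resource.MustParse("1")) // mutate only the copy
	fmt.Println(orig.Tolerance.String())      // still "50m"
	fmt.Println(cp.Tolerance.String())        // "1050m"
}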

View File

@ -165,7 +165,8 @@
"value": 2, "value": 2,
"periodSeconds": 3 "periodSeconds": 3
} }
] ],
"tolerance": "0"
}, },
"scaleDown": { "scaleDown": {
"stabilizationWindowSeconds": 3, "stabilizationWindowSeconds": 3,
@ -176,7 +177,8 @@
"value": 2, "value": 2,
"periodSeconds": 3 "periodSeconds": 3
} }
] ],
"tolerance": "0"
} }
} }
}, },

View File

@ -41,6 +41,7 @@ spec:
value: 2 value: 2
selectPolicy: selectPolicyValue selectPolicy: selectPolicyValue
stabilizationWindowSeconds: 3 stabilizationWindowSeconds: 3
tolerance: "0"
scaleUp: scaleUp:
policies: policies:
- periodSeconds: 3 - periodSeconds: 3
@ -48,6 +49,7 @@ spec:
value: 2 value: 2
selectPolicy: selectPolicyValue selectPolicy: selectPolicyValue
stabilizationWindowSeconds: 3 stabilizationWindowSeconds: 3
tolerance: "0"
maxReplicas: 3 maxReplicas: 3
metrics: metrics:
- containerResource: - containerResource:

View File

@ -20,6 +20,7 @@ package v2
import ( import (
autoscalingv2 "k8s.io/api/autoscaling/v2" autoscalingv2 "k8s.io/api/autoscaling/v2"
resource "k8s.io/apimachinery/pkg/api/resource"
) )
// HPAScalingRulesApplyConfiguration represents a declarative configuration of the HPAScalingRules type for use // HPAScalingRulesApplyConfiguration represents a declarative configuration of the HPAScalingRules type for use
@ -28,6 +29,7 @@ type HPAScalingRulesApplyConfiguration struct {
StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty"` StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty"`
SelectPolicy *autoscalingv2.ScalingPolicySelect `json:"selectPolicy,omitempty"` SelectPolicy *autoscalingv2.ScalingPolicySelect `json:"selectPolicy,omitempty"`
Policies []HPAScalingPolicyApplyConfiguration `json:"policies,omitempty"` Policies []HPAScalingPolicyApplyConfiguration `json:"policies,omitempty"`
Tolerance *resource.Quantity `json:"tolerance,omitempty"`
} }
// HPAScalingRulesApplyConfiguration constructs a declarative configuration of the HPAScalingRules type for use with // HPAScalingRulesApplyConfiguration constructs a declarative configuration of the HPAScalingRules type for use with
@ -64,3 +66,11 @@ func (b *HPAScalingRulesApplyConfiguration) WithPolicies(values ...*HPAScalingPo
} }
return b return b
} }
// WithTolerance sets the Tolerance field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Tolerance field is set to the value of the last call.
func (b *HPAScalingRulesApplyConfiguration) WithTolerance(value resource.Quantity) *HPAScalingRulesApplyConfiguration {
b.Tolerance = &value
return b
}
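A hedged usage sketch (not from this change): the generated builder above plugs into client-go's server-side apply, so a client could patch just the scale-down tolerance of an existing HPA. The names "web", "default", and "tolerance-example", plus the kubeconfig location, are placeholders, and the HPAConfigurableTolerance gate must be enabled server-side for the field to be persisted.

package main

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	autoscalingv2apply "k8s.io/client-go/applyconfigurations/autoscaling/v2"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// Build an apply configuration that owns only behavior.scaleDown.tolerance.
	hpa := autoscalingv2apply.HorizontalPodAutoscaler("web", "default").
		WithSpec(autoscalingv2apply.HorizontalPodAutoscalerSpec().
			WithBehavior(autoscalingv2apply.HorizontalPodAutoscalerBehavior().
				WithScaleDown(autoscalingv2apply.HPAScalingRules().
					WithTolerance(resource.MustParse("0.05")))))

	out, err := client.AutoscalingV2().HorizontalPodAutoscalers("default").
		Apply(context.TODO(), hpa, metav1.ApplyOptions{FieldManager: "tolerance-example"})
	if err != nil {
		panic(err)
	}
	fmt.Println("applied", out.Name)
}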

View File

@ -2934,6 +2934,9 @@ var schemaYAML = typed.YAMLObject(`types:
- name: stabilizationWindowSeconds - name: stabilizationWindowSeconds
type: type:
scalar: numeric scalar: numeric
- name: tolerance
type:
namedType: io.k8s.apimachinery.pkg.api.resource.Quantity
- name: io.k8s.api.autoscaling.v2.HorizontalPodAutoscaler - name: io.k8s.api.autoscaling.v2.HorizontalPodAutoscaler
map: map:
fields: fields:

View File

@ -539,6 +539,12 @@
lockToDefault: true lockToDefault: true
preRelease: GA preRelease: GA
version: "1.33" version: "1.33"
- name: HPAConfigurableTolerance
versionedSpecs:
- default: false
lockToDefault: false
preRelease: Alpha
version: "1.33"
- name: HPAScaleToZero - name: HPAScaleToZero
versionedSpecs: versionedSpecs:
- default: false - default: false

View File

@ -21,6 +21,7 @@ import (
"time" "time"
autoscalingv2 "k8s.io/api/autoscaling/v2" autoscalingv2 "k8s.io/api/autoscaling/v2"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/feature"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2eautoscaling "k8s.io/kubernetes/test/e2e/framework/autoscaling" e2eautoscaling "k8s.io/kubernetes/test/e2e/framework/autoscaling"
@ -30,38 +31,30 @@ import (
"github.com/onsi/gomega" "github.com/onsi/gomega"
) )
const (
hpaName = "consumer"
podCPURequest = 500
targetCPUUtilizationPercent = 25
fullWindowOfNewUsage = 30 * time.Second
windowWithOldUsagePasses = 30 * time.Second
newPodMetricsDelay = 15 * time.Second
metricsAvailableDelay = fullWindowOfNewUsage + windowWithOldUsagePasses + newPodMetricsDelay
hpaReconciliationInterval = 15 * time.Second
actuationDelay = 10 * time.Second
maxHPAReactionTime = metricsAvailableDelay + hpaReconciliationInterval + actuationDelay
maxConsumeCPUDelay = 30 * time.Second
waitForReplicasPollInterval = 20 * time.Second
maxResourceConsumerDelay = maxConsumeCPUDelay + waitForReplicasPollInterval
)
var _ = SIGDescribe(feature.HPA, framework.WithSerial(), framework.WithSlow(), "Horizontal pod autoscaling (non-default behavior)", func() { var _ = SIGDescribe(feature.HPA, framework.WithSerial(), framework.WithSlow(), "Horizontal pod autoscaling (non-default behavior)", func() {
f := framework.NewDefaultFramework("horizontal-pod-autoscaling") f := framework.NewDefaultFramework("horizontal-pod-autoscaling")
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
hpaName := "consumer"
podCPURequest := 500
targetCPUUtilizationPercent := 25
// usageForReplicas returns usage for (n - 0.5) replicas as if they would consume all CPU
// under the target. The 0.5 replica reduction accommodates the deviation between
// the actual CPU consumed and the usage requested from the ResourceConsumer.
// HPA rounds up the recommendations. So, if the usage is e.g. for 3.5 replicas,
// the recommended replica number will be 4.
usageForReplicas := func(replicas int) int {
usagePerReplica := podCPURequest * targetCPUUtilizationPercent / 100
return replicas*usagePerReplica - usagePerReplica/2
}
fullWindowOfNewUsage := 30 * time.Second
windowWithOldUsagePasses := 30 * time.Second
newPodMetricsDelay := 15 * time.Second
metricsAvailableDelay := fullWindowOfNewUsage + windowWithOldUsagePasses + newPodMetricsDelay
hpaReconciliationInterval := 15 * time.Second
actuationDelay := 10 * time.Second
maxHPAReactionTime := metricsAvailableDelay + hpaReconciliationInterval + actuationDelay
maxConsumeCPUDelay := 30 * time.Second
waitForReplicasPollInterval := 20 * time.Second
maxResourceConsumerDelay := maxConsumeCPUDelay + waitForReplicasPollInterval
waitBuffer := 1 * time.Minute waitBuffer := 1 * time.Minute
ginkgo.Describe("with short downscale stabilization window", func() { ginkgo.Describe("with short downscale stabilization window", func() {
@ -505,3 +498,61 @@ var _ = SIGDescribe(feature.HPA, framework.WithSerial(), framework.WithSlow(), "
}) })
}) })
}) })
var _ = SIGDescribe(feature.HPAConfigurableTolerance, framework.WithFeatureGate(features.HPAConfigurableTolerance),
framework.WithSerial(), framework.WithSlow(), "Horizontal pod autoscaling (configurable tolerance)", func() {
f := framework.NewDefaultFramework("horizontal-pod-autoscaling")
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
waitBuffer := 1 * time.Minute
ginkgo.Describe("with large configurable tolerance", func() {
ginkgo.It("should not scale", func(ctx context.Context) {
ginkgo.By("setting up resource consumer and HPA")
initPods := 1
initCPUUsageTotal := usageForReplicas(initPods)
rc := e2eautoscaling.NewDynamicResourceConsumer(ctx,
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
)
ginkgo.DeferCleanup(rc.CleanUp)
scaleRule := e2eautoscaling.HPAScalingRuleWithToleranceMilli(10000)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx,
rc, int32(targetCPUUtilizationPercent), 1, 10,
e2eautoscaling.HPABehaviorWithScaleUpAndDownRules(scaleRule, scaleRule),
)
ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name)
waitDeadline := maxHPAReactionTime + maxResourceConsumerDelay + waitBuffer
ginkgo.By("trying to trigger scale up")
rc.ConsumeCPU(usageForReplicas(8))
waitStart := time.Now()
rc.EnsureDesiredReplicasInRange(ctx, initPods, initPods, waitDeadline, hpa.Name)
timeWaited := time.Since(waitStart)
ginkgo.By("verifying time waited for a scale up")
framework.Logf("time waited for scale up: %s", timeWaited)
gomega.Expect(timeWaited).To(gomega.BeNumerically(">", waitDeadline), "waited %s, wanted to wait more than %s", timeWaited, waitDeadline)
ginkgo.By("verifying number of replicas")
replicas, err := rc.GetReplicas(ctx)
framework.ExpectNoError(err)
gomega.Expect(replicas).To(gomega.BeNumerically("==", initPods), "expected to keep %d replicas, have %d replicas after the time deadline", initPods, replicas)
})
})
})
// usageForReplicas returns usage for (n - 0.5) replicas as if they would consume all CPU
// under the target. The 0.5 replica reduction accommodates the deviation between
// the actual CPU consumed and the usage requested from the ResourceConsumer.
// HPA rounds up the recommendations. So, if the usage is e.g. for 3.5 replicas,
// the recommended replica number will be 4.
func usageForReplicas(replicas int) int {
usagePerReplica := podCPURequest * targetCPUUtilizationPercent / 100
return replicas*usagePerReplica - usagePerReplica/2
}
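For example, with podCPURequest = 500 and targetCPUUtilizationPercent = 25, usagePerReplica is 125 millicores, so usageForReplicas(8) = 8*125 - 125/2 = 1000 - 62 = 938 millicores, i.e. the load of 7.5 replicas, which the HPA would normally round up to a recommendation of 8 replicas; in the configurable-tolerance test above that same load triggers no scale-up because the 1000% tolerance swallows the metric deviation.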

View File

@ -219,6 +219,10 @@ var (
// TODO: document the feature (owning SIG, when to use this feature for a test) // TODO: document the feature (owning SIG, when to use this feature for a test)
HPA = framework.WithFeature(framework.ValidFeatures.Add("HPA")) HPA = framework.WithFeature(framework.ValidFeatures.Add("HPA"))
// OWNER: sig-autoscaling
// Marks tests that require HPA configurable tolerance (https://kep.k8s.io/4951).
HPAConfigurableTolerance = framework.WithFeature(framework.ValidFeatures.Add("HPAConfigurableTolerance"))
// owner: sig-node // owner: sig-node
HostAccess = framework.WithFeature(framework.ValidFeatures.Add("HostAccess")) HostAccess = framework.WithFeature(framework.ValidFeatures.Add("HostAccess"))

View File

@ -880,6 +880,13 @@ func HPAScalingRuleWithScalingPolicy(policyType autoscalingv2.HPAScalingPolicyTy
} }
} }
func HPAScalingRuleWithToleranceMilli(toleranceMilli int64) *autoscalingv2.HPAScalingRules {
quantity := resource.NewMilliQuantity(toleranceMilli, resource.DecimalSI)
return &autoscalingv2.HPAScalingRules{
Tolerance: quantity,
}
}
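Note that the argument is expressed in thousandths of the ratio: HPAScalingRuleWithToleranceMilli(10000) builds a tolerance of 10 (i.e. 1000%), which is what the configurable-tolerance e2e test above relies on to suppress scaling altogether, while HPAScalingRuleWithToleranceMilli(50) would correspond to the 5% value used as an example in the API documentation.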
func HPABehaviorWithStabilizationWindows(upscaleStabilization, downscaleStabilization time.Duration) *autoscalingv2.HorizontalPodAutoscalerBehavior { func HPABehaviorWithStabilizationWindows(upscaleStabilization, downscaleStabilization time.Duration) *autoscalingv2.HorizontalPodAutoscalerBehavior {
scaleUpRule := HPAScalingRuleWithStabilizationWindow(int32(upscaleStabilization.Seconds())) scaleUpRule := HPAScalingRuleWithStabilizationWindow(int32(upscaleStabilization.Seconds()))
scaleDownRule := HPAScalingRuleWithStabilizationWindow(int32(downscaleStabilization.Seconds())) scaleDownRule := HPAScalingRuleWithStabilizationWindow(int32(downscaleStabilization.Seconds()))