Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-22 03:11:40 +00:00
HPA scale-to-zero for custom object/external metrics
Add support for scaling to zero pods
minReplicas is allowed to be zero
condition is set once
Based on https://github.com/kubernetes/kubernetes/pull/61423
set original valid condition
add scale to/from zero and invalid metric tests
Scaling up from zero pods ignores tolerance
validate metrics when minReplicas is 0
Document HPA behaviour when minReplicas is 0
Documented minReplicas field in autoscaling APIs
This commit is contained in:
parent 1bf7103ea0
commit d55f037b7d
@@ -76,8 +76,11 @@ type HorizontalPodAutoscalerSpec struct {
	// ScaleTargetRef points to the target resource to scale, and is used to locate the pods for which metrics
	// should be collected, as well as to actually change the replica count.
	ScaleTargetRef CrossVersionObjectReference
	// MinReplicas is the lower limit for the number of replicas to which the autoscaler can scale down.
	// It defaults to 1 pod.
	// minReplicas is the lower limit for the number of replicas to which the autoscaler
	// can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the
	// alpha feature gate HPAScaleToZero is enabled and at least one Object or External
	// metric is configured. Scaling is active as long as at least one metric value is
	// available.
	// +optional
	MinReplicas *int32
	// MaxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up.
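For illustration, a minimal sketch (not part of this commit) of the kind of spec the new comment describes: minReplicas set to 0 together with an External metric, built from the internal autoscaling types touched above. The target name "worker" and metric name "queue-depth" are made up, and such a spec is only accepted when the alpha HPAScaleToZero feature gate is enabled on the API server.

package example

import (
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/apis/autoscaling"
	utilpointer "k8s.io/utils/pointer"
)

// scaleToZeroSpec is a hypothetical HPA spec that scales a Deployment named "worker"
// between 0 and 5 replicas based on an external "queue-depth" metric.
func scaleToZeroSpec() autoscaling.HorizontalPodAutoscalerSpec {
	return autoscaling.HorizontalPodAutoscalerSpec{
		ScaleTargetRef: autoscaling.CrossVersionObjectReference{Kind: "Deployment", Name: "worker"},
		MinReplicas:    utilpointer.Int32Ptr(0), // requires the HPAScaleToZero gate
		MaxReplicas:    5,
		Metrics: []autoscaling.MetricSpec{{
			Type: autoscaling.ExternalMetricSourceType,
			External: &autoscaling.ExternalMetricSource{
				Metric: autoscaling.MetricIdentifier{Name: "queue-depth", Selector: &metav1.LabelSelector{}},
				Target: autoscaling.MetricTarget{
					Type:         autoscaling.AverageValueMetricType,
					AverageValue: resource.NewMilliQuantity(30000, resource.DecimalSI),
				},
			},
		}},
	}
}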
@@ -17,14 +17,17 @@ limitations under the License.
package validation

import (
	"fmt"
	"strings"

	apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
	pathvalidation "k8s.io/apimachinery/pkg/api/validation/path"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/validation/field"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/apis/autoscaling"
	apivalidation "k8s.io/kubernetes/pkg/apis/core/validation"
	"k8s.io/kubernetes/pkg/features"
)

func ValidateScale(scale *autoscaling.Scale) field.ErrorList {
@@ -42,10 +45,12 @@ func ValidateScale(scale *autoscaling.Scale) field.ErrorList {
// Prefix indicates this name will be used as part of generation, in which case trailing dashes are allowed.
var ValidateHorizontalPodAutoscalerName = apivalidation.ValidateReplicationControllerName

func validateHorizontalPodAutoscalerSpec(autoscaler autoscaling.HorizontalPodAutoscalerSpec, fldPath *field.Path) field.ErrorList {
func validateHorizontalPodAutoscalerSpec(autoscaler autoscaling.HorizontalPodAutoscalerSpec, fldPath *field.Path, minReplicasLowerBound int32) field.ErrorList {
	allErrs := field.ErrorList{}
	if autoscaler.MinReplicas != nil && *autoscaler.MinReplicas < 1 {
		allErrs = append(allErrs, field.Invalid(fldPath.Child("minReplicas"), *autoscaler.MinReplicas, "must be greater than 0"))
	if autoscaler.MinReplicas != nil && *autoscaler.MinReplicas < minReplicasLowerBound {
		allErrs = append(allErrs, field.Invalid(fldPath.Child("minReplicas"), *autoscaler.MinReplicas,
			fmt.Sprintf("must be greater than or equal to %d", minReplicasLowerBound)))
	}
	if autoscaler.MaxReplicas < 1 {
		allErrs = append(allErrs, field.Invalid(fldPath.Child("maxReplicas"), autoscaler.MaxReplicas, "must be greater than 0"))
@@ -56,7 +61,7 @@ func validateHorizontalPodAutoscalerSpec(autoscaler autoscaling.HorizontalPodAut
	if refErrs := ValidateCrossVersionObjectReference(autoscaler.ScaleTargetRef, fldPath.Child("scaleTargetRef")); len(refErrs) > 0 {
		allErrs = append(allErrs, refErrs...)
	}
	if refErrs := validateMetrics(autoscaler.Metrics, fldPath.Child("metrics")); len(refErrs) > 0 {
	if refErrs := validateMetrics(autoscaler.Metrics, fldPath.Child("metrics"), autoscaler.MinReplicas); len(refErrs) > 0 {
		allErrs = append(allErrs, refErrs...)
	}
	return allErrs
@@ -85,13 +90,34 @@ func ValidateCrossVersionObjectReference(ref autoscaling.CrossVersionObjectRefer
func ValidateHorizontalPodAutoscaler(autoscaler *autoscaling.HorizontalPodAutoscaler) field.ErrorList {
	allErrs := apivalidation.ValidateObjectMeta(&autoscaler.ObjectMeta, true, ValidateHorizontalPodAutoscalerName, field.NewPath("metadata"))
	allErrs = append(allErrs, validateHorizontalPodAutoscalerSpec(autoscaler.Spec, field.NewPath("spec"))...)

	// MinReplicasLowerBound represents a minimum value for minReplicas
	// 0 when HPA scale-to-zero feature is enabled
	var minReplicasLowerBound int32

	if utilfeature.DefaultFeatureGate.Enabled(features.HPAScaleToZero) {
		minReplicasLowerBound = 0
	} else {
		minReplicasLowerBound = 1
	}
	allErrs = append(allErrs, validateHorizontalPodAutoscalerSpec(autoscaler.Spec, field.NewPath("spec"), minReplicasLowerBound)...)
	return allErrs
}

func ValidateHorizontalPodAutoscalerUpdate(newAutoscaler, oldAutoscaler *autoscaling.HorizontalPodAutoscaler) field.ErrorList {
	allErrs := apivalidation.ValidateObjectMetaUpdate(&newAutoscaler.ObjectMeta, &oldAutoscaler.ObjectMeta, field.NewPath("metadata"))
	allErrs = append(allErrs, validateHorizontalPodAutoscalerSpec(newAutoscaler.Spec, field.NewPath("spec"))...)

	// minReplicasLowerBound represents a minimum value for minReplicas
	// 0 when HPA scale-to-zero feature is enabled or HPA object already has minReplicas=0
	var minReplicasLowerBound int32

	if utilfeature.DefaultFeatureGate.Enabled(features.HPAScaleToZero) || (oldAutoscaler.Spec.MinReplicas != nil && *oldAutoscaler.Spec.MinReplicas == 0) {
		minReplicasLowerBound = 0
	} else {
		minReplicasLowerBound = 1
	}

	allErrs = append(allErrs, validateHorizontalPodAutoscalerSpec(newAutoscaler.Spec, field.NewPath("spec"), minReplicasLowerBound)...)
	return allErrs
}
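A condensed restatement of the bound selection above, as a hypothetical helper (the commit inlines this logic in both functions rather than factoring it out). Note the update path: an object that already has minReplicas=0 stays valid even if the gate has since been turned off, so stored objects do not suddenly start failing validation.

// minReplicasLowerBoundFor is illustrative only; gateEnabled mirrors
// utilfeature.DefaultFeatureGate.Enabled(features.HPAScaleToZero).
func minReplicasLowerBoundFor(gateEnabled bool, oldMinReplicas *int32) int32 {
	if gateEnabled || (oldMinReplicas != nil && *oldMinReplicas == 0) {
		return 0 // scale-to-zero permitted
	}
	return 1
}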
@@ -103,14 +129,28 @@ func ValidateHorizontalPodAutoscalerStatusUpdate(newAutoscaler, oldAutoscaler *a
	return allErrs
}

func validateMetrics(metrics []autoscaling.MetricSpec, fldPath *field.Path) field.ErrorList {
func validateMetrics(metrics []autoscaling.MetricSpec, fldPath *field.Path, minReplicas *int32) field.ErrorList {
	allErrs := field.ErrorList{}
	hasObjectMetrics := false
	hasExternalMetrics := false

	for i, metricSpec := range metrics {
		idxPath := fldPath.Index(i)
		if targetErrs := validateMetricSpec(metricSpec, idxPath); len(targetErrs) > 0 {
			allErrs = append(allErrs, targetErrs...)
		}
		if metricSpec.Type == autoscaling.ObjectMetricSourceType {
			hasObjectMetrics = true
		}
		if metricSpec.Type == autoscaling.ExternalMetricSourceType {
			hasExternalMetrics = true
		}
	}

	if minReplicas != nil && *minReplicas == 0 {
		if !hasObjectMetrics && !hasExternalMetrics {
			allErrs = append(allErrs, field.Forbidden(fldPath, "must specify at least one Object or External metric to support scaling to zero replicas"))
		}
	}

	return allErrs
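The Forbidden error above encodes why scale-to-zero is limited to Object and External metrics: Pods and Resource metrics are sampled from the workload's own pods, so with zero replicas there is nothing to measure and no signal to scale back up. A hedged sketch of the same rule as a standalone predicate (the commit performs this check inline in validateMetrics):

// supportsScaleToZero is illustrative only.
func supportsScaleToZero(metrics []autoscaling.MetricSpec) bool {
	for _, m := range metrics {
		if m.Type == autoscaling.ObjectMetricSourceType || m.Type == autoscaling.ExternalMetricSourceType {
			return true
		}
	}
	return false
}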
@ -22,8 +22,11 @@ import (
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
featuregatetesting "k8s.io/component-base/featuregate/testing"
|
||||
"k8s.io/kubernetes/pkg/apis/autoscaling"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
utilpointer "k8s.io/utils/pointer"
|
||||
)
|
||||
|
||||
@ -96,6 +99,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Errorf("unable to parse label selector: %v", err)
|
||||
}
|
||||
|
||||
successCases := []autoscaling.HorizontalPodAutoscaler{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -396,7 +400,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) {
|
||||
MaxReplicas: 5,
|
||||
},
|
||||
},
|
||||
msg: "must be greater than 0",
|
||||
msg: "must be greater than or equal to 1",
|
||||
},
|
||||
{
|
||||
horizontalPodAutoscaler: autoscaling.HorizontalPodAutoscaler{
|
||||
@ -1002,3 +1006,162 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func prepareMinReplicasCases(t *testing.T, minReplicas int32) []autoscaling.HorizontalPodAutoscaler {
|
||||
metricLabelSelector, err := metav1.ParseToLabelSelector("label=value")
|
||||
if err != nil {
|
||||
t.Errorf("unable to parse label selector: %v", err)
|
||||
}
|
||||
minReplicasCases := []autoscaling.HorizontalPodAutoscaler{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "myautoscaler",
|
||||
Namespace: metav1.NamespaceDefault,
|
||||
ResourceVersion: "theversion",
|
||||
},
|
||||
Spec: autoscaling.HorizontalPodAutoscalerSpec{
|
||||
ScaleTargetRef: autoscaling.CrossVersionObjectReference{
|
||||
Kind: "ReplicationController",
|
||||
Name: "myrc",
|
||||
},
|
||||
MinReplicas: utilpointer.Int32Ptr(minReplicas),
|
||||
MaxReplicas: 5,
|
||||
Metrics: []autoscaling.MetricSpec{
|
||||
{
|
||||
Type: autoscaling.ObjectMetricSourceType,
|
||||
Object: &autoscaling.ObjectMetricSource{
|
||||
DescribedObject: autoscaling.CrossVersionObjectReference{
|
||||
Kind: "ReplicationController",
|
||||
Name: "myrc",
|
||||
},
|
||||
Metric: autoscaling.MetricIdentifier{
|
||||
Name: "somemetric",
|
||||
},
|
||||
Target: autoscaling.MetricTarget{
|
||||
Type: autoscaling.ValueMetricType,
|
||||
Value: resource.NewMilliQuantity(300, resource.DecimalSI),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "myautoscaler",
|
||||
Namespace: metav1.NamespaceDefault,
|
||||
ResourceVersion: "theversion",
|
||||
},
|
||||
Spec: autoscaling.HorizontalPodAutoscalerSpec{
|
||||
ScaleTargetRef: autoscaling.CrossVersionObjectReference{
|
||||
Kind: "ReplicationController",
|
||||
Name: "myrc",
|
||||
},
|
||||
MinReplicas: utilpointer.Int32Ptr(minReplicas),
|
||||
MaxReplicas: 5,
|
||||
Metrics: []autoscaling.MetricSpec{
|
||||
{
|
||||
Type: autoscaling.ExternalMetricSourceType,
|
||||
External: &autoscaling.ExternalMetricSource{
|
||||
Metric: autoscaling.MetricIdentifier{
|
||||
Name: "somemetric",
|
||||
Selector: metricLabelSelector,
|
||||
},
|
||||
Target: autoscaling.MetricTarget{
|
||||
Type: autoscaling.AverageValueMetricType,
|
||||
AverageValue: resource.NewMilliQuantity(300, resource.DecimalSI),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
return minReplicasCases
|
||||
}
|
||||
|
||||
func TestValidateHorizontalPodAutoscalerScaleToZeroEnabled(t *testing.T) {
|
||||
// Enable HPAScaleToZero feature gate.
|
||||
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.HPAScaleToZero, true)()
|
||||
|
||||
zeroMinReplicasCases := prepareMinReplicasCases(t, 0)
|
||||
for _, successCase := range zeroMinReplicasCases {
|
||||
if errs := ValidateHorizontalPodAutoscaler(&successCase); len(errs) != 0 {
|
||||
t.Errorf("expected success: %v", errs)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateHorizontalPodAutoscalerScaleToZeroDisabled(t *testing.T) {
|
||||
// Disable HPAScaleToZero feature gate.
|
||||
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.HPAScaleToZero, false)()
|
||||
|
||||
zeroMinReplicasCases := prepareMinReplicasCases(t, 0)
|
||||
errorMsg := "must be greater than or equal to 1"
|
||||
|
||||
for _, errorCase := range zeroMinReplicasCases {
|
||||
errs := ValidateHorizontalPodAutoscaler(&errorCase)
|
||||
if len(errs) == 0 {
|
||||
t.Errorf("expected failure for %q", errorMsg)
|
||||
} else if !strings.Contains(errs[0].Error(), errorMsg) {
|
||||
t.Errorf("unexpected error: %q, expected: %q", errs[0], errorMsg)
|
||||
}
|
||||
}
|
||||
|
||||
nonZeroMinReplicasCases := prepareMinReplicasCases(t, 1)
|
||||
|
||||
for _, successCase := range nonZeroMinReplicasCases {
|
||||
successCase.Spec.MinReplicas = utilpointer.Int32Ptr(1)
|
||||
if errs := ValidateHorizontalPodAutoscaler(&successCase); len(errs) != 0 {
|
||||
t.Errorf("expected success: %v", errs)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateHorizontalPodAutoscalerUpdateScaleToZeroEnabled(t *testing.T) {
|
||||
// Enable HPAScaleToZero feature gate.
|
||||
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.HPAScaleToZero, true)()
|
||||
|
||||
zeroMinReplicasCases := prepareMinReplicasCases(t, 0)
|
||||
nonZeroMinReplicasCases := prepareMinReplicasCases(t, 1)
|
||||
|
||||
for i, zeroCase := range zeroMinReplicasCases {
|
||||
nonZeroCase := nonZeroMinReplicasCases[i]
|
||||
|
||||
if errs := ValidateHorizontalPodAutoscalerUpdate(&nonZeroCase, &zeroCase); len(errs) != 0 {
|
||||
t.Errorf("expected success: %v", errs)
|
||||
}
|
||||
|
||||
if errs := ValidateHorizontalPodAutoscalerUpdate(&zeroCase, &nonZeroCase); len(errs) != 0 {
|
||||
t.Errorf("expected success: %v", errs)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateHorizontalPodAutoscalerScaleToZeroUpdateDisabled(t *testing.T) {
|
||||
// Disable HPAScaleToZero feature gate.
|
||||
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.HPAScaleToZero, false)()
|
||||
|
||||
zeroMinReplicasCases := prepareMinReplicasCases(t, 0)
|
||||
nonZeroMinReplicasCases := prepareMinReplicasCases(t, 1)
|
||||
errorMsg := "must be greater than or equal to 1"
|
||||
|
||||
for i, zeroCase := range zeroMinReplicasCases {
|
||||
nonZeroCase := nonZeroMinReplicasCases[i]
|
||||
errs := ValidateHorizontalPodAutoscalerUpdate(&zeroCase, &nonZeroCase)
|
||||
|
||||
if len(errs) == 0 {
|
||||
t.Errorf("expected failure for %q", errorMsg)
|
||||
} else if !strings.Contains(errs[0].Error(), errorMsg) {
|
||||
t.Errorf("unexpected error: %q, expected: %q", errs[0], errorMsg)
|
||||
}
|
||||
|
||||
if errs := ValidateHorizontalPodAutoscalerUpdate(&zeroCase, &zeroCase); len(errs) != 0 {
|
||||
t.Errorf("expected success: %v", errs)
|
||||
}
|
||||
|
||||
if errs := ValidateHorizontalPodAutoscalerUpdate(&nonZeroCase, &zeroCase); len(errs) != 0 {
|
||||
t.Errorf("expected success: %v", errs)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -230,16 +230,11 @@ func (a *HorizontalController) processNextWorkItem() bool {
}

// computeReplicasForMetrics computes the desired number of replicas for the metric specifications listed in the HPA,
// returning the maximum of the computed replica counts, a description of the associated metric, and the statuses of
// all metrics computed.
func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.HorizontalPodAutoscaler, scale *autoscalingv1.Scale,
	metricSpecs []autoscalingv2.MetricSpec) (replicas int32, metric string, statuses []autoscalingv2.MetricStatus, timestamp time.Time, err error) {

	specReplicas := scale.Spec.Replicas
	statusReplicas := scale.Status.Replicas

	statuses = make([]autoscalingv2.MetricStatus, len(metricSpecs))

	if scale.Status.Selector == "" {
		errMsg := "selector is required"
		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "SelectorRequired", errMsg)
@ -255,15 +250,23 @@ func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.Hori
|
||||
return 0, "", nil, time.Time{}, fmt.Errorf(errMsg)
|
||||
}
|
||||
|
||||
specReplicas := scale.Spec.Replicas
|
||||
statusReplicas := scale.Status.Replicas
|
||||
statuses = make([]autoscalingv2.MetricStatus, len(metricSpecs))
|
||||
|
||||
invalidMetricsCount := 0
|
||||
var invalidMetricError error
|
||||
var invalidMetricCondition autoscalingv2.HorizontalPodAutoscalerCondition
|
||||
|
||||
for i, metricSpec := range metricSpecs {
|
||||
replicaCountProposal, metricNameProposal, timestampProposal, err := a.computeReplicasForMetric(hpa, metricSpec, specReplicas, statusReplicas, selector, &statuses[i])
|
||||
replicaCountProposal, metricNameProposal, timestampProposal, condition, err := a.computeReplicasForMetric(hpa, metricSpec, specReplicas, statusReplicas, selector, &statuses[i])
|
||||
|
||||
if err != nil {
|
||||
if invalidMetricsCount <= 0 {
|
||||
invalidMetricCondition = condition
|
||||
invalidMetricError = err
|
||||
}
|
||||
invalidMetricsCount++
|
||||
invalidMetricError = err
|
||||
}
|
||||
if err == nil && (replicas == 0 || replicaCountProposal > replicas) {
|
||||
timestamp = timestampProposal
|
||||
@ -272,62 +275,59 @@ func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.Hori
|
||||
}
|
||||
}
|
||||
|
||||
// If all metrics are invalid or some are invalid and we would scale down,
|
||||
// return an error and set the condition of the hpa based on the first invalid metric.
|
||||
// Otherwise set the condition as scaling active as we're going to scale
|
||||
if invalidMetricsCount >= len(metricSpecs) || (invalidMetricsCount > 0 && replicas < specReplicas) {
|
||||
return 0, "", statuses, time.Time{}, fmt.Errorf("Invalid metrics (%v invalid out of %v), last error was: %v", invalidMetricsCount, len(metricSpecs), invalidMetricError)
|
||||
} else {
|
||||
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionTrue, "ValidMetricFound", "the HPA was able to successfully calculate a replica count from %v", metric)
|
||||
// If all metrics are invalid return error and set condition on hpa based on first invalid metric.
|
||||
if invalidMetricsCount >= len(metricSpecs) {
|
||||
setCondition(hpa, invalidMetricCondition.Type, invalidMetricCondition.Status, invalidMetricCondition.Reason, invalidMetricCondition.Message)
|
||||
return 0, "", statuses, time.Time{}, fmt.Errorf("Invalid metrics (%v invalid out of %v), first error is: %v", invalidMetricsCount, len(metricSpecs), invalidMetricError)
|
||||
}
|
||||
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionTrue, "ValidMetricFound", "the HPA was able to successfully calculate a replica count from %s", metric)
|
||||
return replicas, metric, statuses, timestamp, nil
|
||||
}
|
||||
|
||||
// computeReplicasForMetric computes the desired number of replicas for a specific hpa and single metric specification.
// Computes the desired number of replicas for a specific hpa and metric specification,
// returning the metric status and a proposed condition to be set on the HPA object.
|
||||
func (a *HorizontalController) computeReplicasForMetric(hpa *autoscalingv2.HorizontalPodAutoscaler, spec autoscalingv2.MetricSpec,
|
||||
specReplicas, statusReplicas int32, selector labels.Selector, status *autoscalingv2.MetricStatus) (replicaCountProposal int32, metricNameProposal string,
|
||||
timestampProposal time.Time, err error) {
|
||||
timestampProposal time.Time, condition autoscalingv2.HorizontalPodAutoscalerCondition, err error) {
|
||||
|
||||
switch spec.Type {
|
||||
case autoscalingv2.ObjectMetricSourceType:
|
||||
metricSelector, err := metav1.LabelSelectorAsSelector(spec.Object.Metric.Selector)
|
||||
if err != nil {
|
||||
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetObjectMetric", err.Error())
|
||||
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetObjectMetric", "the HPA was unable to compute the replica count: %v", err)
|
||||
return 0, "", time.Time{}, fmt.Errorf("failed to get object metric value: %v", err)
|
||||
condition := a.getUnableComputeReplicaCountCondition(hpa, "FailedGetObjectMetric", err)
|
||||
return 0, "", time.Time{}, condition, fmt.Errorf("failed to get object metric value: %v", err)
|
||||
}
|
||||
replicaCountProposal, timestampProposal, metricNameProposal, err = a.computeStatusForObjectMetric(specReplicas, statusReplicas, spec, hpa, selector, status, metricSelector)
|
||||
replicaCountProposal, timestampProposal, metricNameProposal, condition, err = a.computeStatusForObjectMetric(specReplicas, statusReplicas, spec, hpa, selector, status, metricSelector)
|
||||
if err != nil {
|
||||
return 0, "", time.Time{}, fmt.Errorf("failed to get object metric value: %v", err)
|
||||
return 0, "", time.Time{}, condition, fmt.Errorf("failed to get object metric value: %v", err)
|
||||
}
|
||||
case autoscalingv2.PodsMetricSourceType:
|
||||
metricSelector, err := metav1.LabelSelectorAsSelector(spec.Pods.Metric.Selector)
|
||||
if err != nil {
|
||||
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetPodsMetric", err.Error())
|
||||
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetPodsMetric", "the HPA was unable to compute the replica count: %v", err)
|
||||
return 0, "", time.Time{}, fmt.Errorf("failed to get pods metric value: %v", err)
|
||||
condition := a.getUnableComputeReplicaCountCondition(hpa, "FailedGetPodsMetric", err)
|
||||
return 0, "", time.Time{}, condition, fmt.Errorf("failed to get pods metric value: %v", err)
|
||||
}
|
||||
replicaCountProposal, timestampProposal, metricNameProposal, err = a.computeStatusForPodsMetric(specReplicas, spec, hpa, selector, status, metricSelector)
|
||||
replicaCountProposal, timestampProposal, metricNameProposal, condition, err = a.computeStatusForPodsMetric(specReplicas, spec, hpa, selector, status, metricSelector)
|
||||
if err != nil {
|
||||
return 0, "", time.Time{}, fmt.Errorf("failed to get object metric value: %v", err)
|
||||
return 0, "", time.Time{}, condition, fmt.Errorf("failed to get pods metric value: %v", err)
|
||||
}
|
||||
case autoscalingv2.ResourceMetricSourceType:
|
||||
replicaCountProposal, timestampProposal, metricNameProposal, err = a.computeStatusForResourceMetric(specReplicas, spec, hpa, selector, status)
|
||||
replicaCountProposal, timestampProposal, metricNameProposal, condition, err = a.computeStatusForResourceMetric(specReplicas, spec, hpa, selector, status)
|
||||
if err != nil {
|
||||
return 0, "", time.Time{}, err
|
||||
return 0, "", time.Time{}, condition, err
|
||||
}
|
||||
case autoscalingv2.ExternalMetricSourceType:
|
||||
replicaCountProposal, timestampProposal, metricNameProposal, err = a.computeStatusForExternalMetric(specReplicas, statusReplicas, spec, hpa, selector, status)
|
||||
replicaCountProposal, timestampProposal, metricNameProposal, condition, err = a.computeStatusForExternalMetric(specReplicas, statusReplicas, spec, hpa, selector, status)
|
||||
if err != nil {
|
||||
return 0, "", time.Time{}, err
|
||||
return 0, "", time.Time{}, condition, err
|
||||
}
|
||||
default:
|
||||
errMsg := fmt.Sprintf("unknown metric source type %q", string(spec.Type))
|
||||
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "InvalidMetricSourceType", errMsg)
|
||||
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidMetricSourceType", "the HPA was unable to compute the replica count: %v", fmt.Errorf(errMsg))
|
||||
return 0, "", time.Time{}, fmt.Errorf(errMsg)
|
||||
err = fmt.Errorf(errMsg)
|
||||
condition := a.getUnableComputeReplicaCountCondition(hpa, "InvalidMetricSourceType", err)
|
||||
return 0, "", time.Time{}, condition, err
|
||||
}
|
||||
return replicaCountProposal, metricNameProposal, timestampProposal, nil
|
||||
return replicaCountProposal, metricNameProposal, timestampProposal, autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
|
||||
}
|
||||
|
||||
func (a *HorizontalController) reconcileKey(key string) (deleted bool, err error) {
|
||||
@ -347,13 +347,12 @@ func (a *HorizontalController) reconcileKey(key string) (deleted bool, err error
|
||||
}
|
||||
|
||||
// computeStatusForObjectMetric computes the desired number of replicas for the specified metric of type ObjectMetricSourceType.
|
||||
func (a *HorizontalController) computeStatusForObjectMetric(specReplicas, statusReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler, selector labels.Selector, status *autoscalingv2.MetricStatus, metricSelector labels.Selector) (int32, time.Time, string, error) {
|
||||
func (a *HorizontalController) computeStatusForObjectMetric(specReplicas, statusReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler, selector labels.Selector, status *autoscalingv2.MetricStatus, metricSelector labels.Selector) (replicas int32, timestamp time.Time, metricName string, condition autoscalingv2.HorizontalPodAutoscalerCondition, err error) {
|
||||
if metricSpec.Object.Target.Type == autoscalingv2.ValueMetricType {
|
||||
replicaCountProposal, utilizationProposal, timestampProposal, err := a.replicaCalc.GetObjectMetricReplicas(specReplicas, metricSpec.Object.Target.Value.MilliValue(), metricSpec.Object.Metric.Name, hpa.Namespace, &metricSpec.Object.DescribedObject, selector, metricSelector)
|
||||
if err != nil {
|
||||
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetObjectMetric", err.Error())
|
||||
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetObjectMetric", "the HPA was unable to compute the replica count: %v", err)
|
||||
return 0, timestampProposal, "", err
|
||||
condition := a.getUnableComputeReplicaCountCondition(hpa, "FailedGetObjectMetric", err)
|
||||
return 0, timestampProposal, "", condition, err
|
||||
}
|
||||
*status = autoscalingv2.MetricStatus{
|
||||
Type: autoscalingv2.ObjectMetricSourceType,
|
||||
@ -368,13 +367,12 @@ func (a *HorizontalController) computeStatusForObjectMetric(specReplicas, status
|
||||
},
|
||||
},
|
||||
}
|
||||
return replicaCountProposal, timestampProposal, fmt.Sprintf("%s metric %s", metricSpec.Object.DescribedObject.Kind, metricSpec.Object.Metric.Name), nil
|
||||
return replicaCountProposal, timestampProposal, fmt.Sprintf("%s metric %s", metricSpec.Object.DescribedObject.Kind, metricSpec.Object.Metric.Name), autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
|
||||
} else if metricSpec.Object.Target.Type == autoscalingv2.AverageValueMetricType {
|
||||
replicaCountProposal, utilizationProposal, timestampProposal, err := a.replicaCalc.GetObjectPerPodMetricReplicas(statusReplicas, metricSpec.Object.Target.AverageValue.MilliValue(), metricSpec.Object.Metric.Name, hpa.Namespace, &metricSpec.Object.DescribedObject, metricSelector)
|
||||
if err != nil {
|
||||
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetObjectMetric", err.Error())
|
||||
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetObjectMetric", "the HPA was unable to compute the replica count: %v", err)
|
||||
return 0, time.Time{}, "", fmt.Errorf("failed to get %s object metric: %v", metricSpec.Object.Metric.Name, err)
|
||||
condition := a.getUnableComputeReplicaCountCondition(hpa, "FailedGetObjectMetric", err)
|
||||
return 0, time.Time{}, "", condition, fmt.Errorf("failed to get %s object metric: %v", metricSpec.Object.Metric.Name, err)
|
||||
}
|
||||
*status = autoscalingv2.MetricStatus{
|
||||
Type: autoscalingv2.ObjectMetricSourceType,
|
||||
@ -388,21 +386,20 @@ func (a *HorizontalController) computeStatusForObjectMetric(specReplicas, status
|
||||
},
|
||||
},
|
||||
}
|
||||
return replicaCountProposal, timestampProposal, fmt.Sprintf("external metric %s(%+v)", metricSpec.Object.Metric.Name, metricSpec.Object.Metric.Selector), nil
|
||||
return replicaCountProposal, timestampProposal, fmt.Sprintf("external metric %s(%+v)", metricSpec.Object.Metric.Name, metricSpec.Object.Metric.Selector), autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
|
||||
}
|
||||
errMsg := "invalid object metric source: neither a value target nor an average value target was set"
|
||||
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetObjectMetric", errMsg)
|
||||
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetObjectMetric", "the HPA was unable to compute the replica count: %s", errMsg)
|
||||
return 0, time.Time{}, "", fmt.Errorf(errMsg)
|
||||
err = fmt.Errorf(errMsg)
|
||||
condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetObjectMetric", err)
|
||||
return 0, time.Time{}, "", condition, err
|
||||
}
|
||||
|
||||
// computeStatusForPodsMetric computes the desired number of replicas for the specified metric of type PodsMetricSourceType.
|
||||
func (a *HorizontalController) computeStatusForPodsMetric(currentReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler, selector labels.Selector, status *autoscalingv2.MetricStatus, metricSelector labels.Selector) (int32, time.Time, string, error) {
|
||||
func (a *HorizontalController) computeStatusForPodsMetric(currentReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler, selector labels.Selector, status *autoscalingv2.MetricStatus, metricSelector labels.Selector) (replicaCountProposal int32, timestampProposal time.Time, metricNameProposal string, condition autoscalingv2.HorizontalPodAutoscalerCondition, err error) {
|
||||
replicaCountProposal, utilizationProposal, timestampProposal, err := a.replicaCalc.GetMetricReplicas(currentReplicas, metricSpec.Pods.Target.AverageValue.MilliValue(), metricSpec.Pods.Metric.Name, hpa.Namespace, selector, metricSelector)
|
||||
if err != nil {
|
||||
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetPodsMetric", err.Error())
|
||||
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetPodsMetric", "the HPA was unable to compute the replica count: %v", err)
|
||||
return 0, timestampProposal, "", err
|
||||
condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetPodsMetric", err)
|
||||
return 0, timestampProposal, "", condition, err
|
||||
}
|
||||
*status = autoscalingv2.MetricStatus{
|
||||
Type: autoscalingv2.PodsMetricSourceType,
|
||||
@ -417,20 +414,19 @@ func (a *HorizontalController) computeStatusForPodsMetric(currentReplicas int32,
|
||||
},
|
||||
}
|
||||
|
||||
return replicaCountProposal, timestampProposal, fmt.Sprintf("pods metric %s", metricSpec.Pods.Metric.Name), nil
|
||||
return replicaCountProposal, timestampProposal, fmt.Sprintf("pods metric %s", metricSpec.Pods.Metric.Name), autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
|
||||
}
|
||||
|
||||
// computeStatusForResourceMetric computes the desired number of replicas for the specified metric of type ResourceMetricSourceType.
|
||||
func (a *HorizontalController) computeStatusForResourceMetric(currentReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler, selector labels.Selector, status *autoscalingv2.MetricStatus) (int32, time.Time, string, error) {
|
||||
func (a *HorizontalController) computeStatusForResourceMetric(currentReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler, selector labels.Selector, status *autoscalingv2.MetricStatus) (replicaCountProposal int32, timestampProposal time.Time, metricNameProposal string, condition autoscalingv2.HorizontalPodAutoscalerCondition, err error) {
|
||||
if metricSpec.Resource.Target.AverageValue != nil {
|
||||
var rawProposal int64
|
||||
replicaCountProposal, rawProposal, timestampProposal, err := a.replicaCalc.GetRawResourceReplicas(currentReplicas, metricSpec.Resource.Target.AverageValue.MilliValue(), metricSpec.Resource.Name, hpa.Namespace, selector)
|
||||
if err != nil {
|
||||
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetResourceMetric", err.Error())
|
||||
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetResourceMetric", "the HPA was unable to compute the replica count: %v", err)
|
||||
return 0, time.Time{}, "", fmt.Errorf("failed to get %s utilization: %v", metricSpec.Resource.Name, err)
|
||||
condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetResourceMetric", err)
|
||||
return 0, time.Time{}, "", condition, fmt.Errorf("failed to get %s utilization: %v", metricSpec.Resource.Name, err)
|
||||
}
|
||||
metricNameProposal := fmt.Sprintf("%s resource", metricSpec.Resource.Name)
|
||||
metricNameProposal = fmt.Sprintf("%s resource", metricSpec.Resource.Name)
|
||||
*status = autoscalingv2.MetricStatus{
|
||||
Type: autoscalingv2.ResourceMetricSourceType,
|
||||
Resource: &autoscalingv2.ResourceMetricStatus{
|
||||
@ -440,24 +436,23 @@ func (a *HorizontalController) computeStatusForResourceMetric(currentReplicas in
|
||||
},
|
||||
},
|
||||
}
|
||||
return replicaCountProposal, timestampProposal, metricNameProposal, nil
|
||||
return replicaCountProposal, timestampProposal, metricNameProposal, autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
|
||||
} else {
|
||||
if metricSpec.Resource.Target.AverageUtilization == nil {
|
||||
errMsg := "invalid resource metric source: neither a utilization target nor a value target was set"
|
||||
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetResourceMetric", errMsg)
|
||||
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetResourceMetric", "the HPA was unable to compute the replica count: %s", errMsg)
|
||||
return 0, time.Time{}, "", fmt.Errorf(errMsg)
|
||||
err = fmt.Errorf(errMsg)
|
||||
condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetResourceMetric", err)
|
||||
return 0, time.Time{}, "", condition, fmt.Errorf(errMsg)
|
||||
}
|
||||
targetUtilization := *metricSpec.Resource.Target.AverageUtilization
|
||||
var percentageProposal int32
|
||||
var rawProposal int64
|
||||
replicaCountProposal, percentageProposal, rawProposal, timestampProposal, err := a.replicaCalc.GetResourceReplicas(currentReplicas, targetUtilization, metricSpec.Resource.Name, hpa.Namespace, selector)
|
||||
if err != nil {
|
||||
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetResourceMetric", err.Error())
|
||||
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetResourceMetric", "the HPA was unable to compute the replica count: %v", err)
|
||||
return 0, time.Time{}, "", fmt.Errorf("failed to get %s utilization: %v", metricSpec.Resource.Name, err)
|
||||
condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetResourceMetric", err)
|
||||
return 0, time.Time{}, "", condition, fmt.Errorf("failed to get %s utilization: %v", metricSpec.Resource.Name, err)
|
||||
}
|
||||
metricNameProposal := fmt.Sprintf("%s resource utilization (percentage of request)", metricSpec.Resource.Name)
|
||||
metricNameProposal = fmt.Sprintf("%s resource utilization (percentage of request)", metricSpec.Resource.Name)
|
||||
*status = autoscalingv2.MetricStatus{
|
||||
Type: autoscalingv2.ResourceMetricSourceType,
|
||||
Resource: &autoscalingv2.ResourceMetricStatus{
|
||||
@ -468,18 +463,17 @@ func (a *HorizontalController) computeStatusForResourceMetric(currentReplicas in
|
||||
},
|
||||
},
|
||||
}
|
||||
return replicaCountProposal, timestampProposal, metricNameProposal, nil
|
||||
return replicaCountProposal, timestampProposal, metricNameProposal, autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
|
||||
}
|
||||
}
|
||||
|
||||
// computeStatusForExternalMetric computes the desired number of replicas for the specified metric of type ExternalMetricSourceType.
|
||||
func (a *HorizontalController) computeStatusForExternalMetric(specReplicas, statusReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler, selector labels.Selector, status *autoscalingv2.MetricStatus) (int32, time.Time, string, error) {
|
||||
func (a *HorizontalController) computeStatusForExternalMetric(specReplicas, statusReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler, selector labels.Selector, status *autoscalingv2.MetricStatus) (replicaCountProposal int32, timestampProposal time.Time, metricNameProposal string, condition autoscalingv2.HorizontalPodAutoscalerCondition, err error) {
|
||||
if metricSpec.External.Target.AverageValue != nil {
|
||||
replicaCountProposal, utilizationProposal, timestampProposal, err := a.replicaCalc.GetExternalPerPodMetricReplicas(statusReplicas, metricSpec.External.Target.AverageValue.MilliValue(), metricSpec.External.Metric.Name, hpa.Namespace, metricSpec.External.Metric.Selector)
|
||||
if err != nil {
|
||||
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetExternalMetric", err.Error())
|
||||
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetExternalMetric", "the HPA was unable to compute the replica count: %v", err)
|
||||
return 0, time.Time{}, "", fmt.Errorf("failed to get %s external metric: %v", metricSpec.External.Metric.Name, err)
|
||||
condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetExternalMetric", err)
|
||||
return 0, time.Time{}, "", condition, fmt.Errorf("failed to get %s external metric: %v", metricSpec.External.Metric.Name, err)
|
||||
}
|
||||
*status = autoscalingv2.MetricStatus{
|
||||
Type: autoscalingv2.ExternalMetricSourceType,
|
||||
@ -493,14 +487,13 @@ func (a *HorizontalController) computeStatusForExternalMetric(specReplicas, stat
|
||||
},
|
||||
},
|
||||
}
|
||||
return replicaCountProposal, timestampProposal, fmt.Sprintf("external metric %s(%+v)", metricSpec.External.Metric.Name, metricSpec.External.Metric.Selector), nil
|
||||
return replicaCountProposal, timestampProposal, fmt.Sprintf("external metric %s(%+v)", metricSpec.External.Metric.Name, metricSpec.External.Metric.Selector), autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
|
||||
}
|
||||
if metricSpec.External.Target.Value != nil {
|
||||
replicaCountProposal, utilizationProposal, timestampProposal, err := a.replicaCalc.GetExternalMetricReplicas(specReplicas, metricSpec.External.Target.Value.MilliValue(), metricSpec.External.Metric.Name, hpa.Namespace, metricSpec.External.Metric.Selector, selector)
|
||||
if err != nil {
|
||||
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetExternalMetric", err.Error())
|
||||
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetExternalMetric", "the HPA was unable to compute the replica count: %v", err)
|
||||
return 0, time.Time{}, "", fmt.Errorf("failed to get external metric %s: %v", metricSpec.External.Metric.Name, err)
|
||||
condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetExternalMetric", err)
|
||||
return 0, time.Time{}, "", condition, fmt.Errorf("failed to get external metric %s: %v", metricSpec.External.Metric.Name, err)
|
||||
}
|
||||
*status = autoscalingv2.MetricStatus{
|
||||
Type: autoscalingv2.ExternalMetricSourceType,
|
||||
@ -514,12 +507,12 @@ func (a *HorizontalController) computeStatusForExternalMetric(specReplicas, stat
|
||||
},
|
||||
},
|
||||
}
|
||||
return replicaCountProposal, timestampProposal, fmt.Sprintf("external metric %s(%+v)", metricSpec.External.Metric.Name, metricSpec.External.Metric.Selector), nil
|
||||
return replicaCountProposal, timestampProposal, fmt.Sprintf("external metric %s(%+v)", metricSpec.External.Metric.Name, metricSpec.External.Metric.Selector), autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
|
||||
}
|
||||
errMsg := "invalid external metric source: neither a value target nor an average value target was set"
|
||||
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetExternalMetric", errMsg)
|
||||
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetExternalMetric", "the HPA was unable to compute the replica count: %s", errMsg)
|
||||
return 0, time.Time{}, "", fmt.Errorf(errMsg)
|
||||
err = fmt.Errorf(errMsg)
|
||||
condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetExternalMetric", err)
|
||||
return 0, time.Time{}, "", condition, fmt.Errorf(errMsg)
|
||||
}
|
||||
|
||||
func (a *HorizontalController) recordInitialRecommendation(currentReplicas int32, key string) {
|
||||
@ -583,9 +576,18 @@ func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.Ho
|
||||
desiredReplicas := int32(0)
|
||||
rescaleReason := ""
|
||||
|
||||
var minReplicas int32
|
||||
|
||||
if hpa.Spec.MinReplicas != nil {
|
||||
minReplicas = *hpa.Spec.MinReplicas
|
||||
} else {
|
||||
// Default value
|
||||
minReplicas = 1
|
||||
}
|
||||
|
||||
rescale := true
|
||||
|
||||
if scale.Spec.Replicas == 0 {
|
||||
if scale.Spec.Replicas == 0 && minReplicas != 0 {
|
||||
// Autoscaling is disabled for this resource
|
||||
desiredReplicas = 0
|
||||
rescale = false
|
||||
@ -593,12 +595,9 @@ func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.Ho
|
||||
} else if currentReplicas > hpa.Spec.MaxReplicas {
|
||||
rescaleReason = "Current number of replicas above Spec.MaxReplicas"
|
||||
desiredReplicas = hpa.Spec.MaxReplicas
|
||||
} else if hpa.Spec.MinReplicas != nil && currentReplicas < *hpa.Spec.MinReplicas {
|
||||
} else if currentReplicas < minReplicas {
|
||||
rescaleReason = "Current number of replicas below Spec.MinReplicas"
|
||||
desiredReplicas = *hpa.Spec.MinReplicas
|
||||
} else if currentReplicas == 0 {
|
||||
rescaleReason = "Current number of replicas must be greater than 0"
|
||||
desiredReplicas = 1
|
||||
desiredReplicas = minReplicas
|
||||
} else {
|
||||
var metricTimestamp time.Time
|
||||
metricDesiredReplicas, metricName, metricStatuses, metricTimestamp, err = a.computeReplicasForMetrics(hpa, scale, hpa.Spec.Metrics)
|
||||
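A condensed sketch (hypothetical helper, not the controller code) of the branch logic after this change: a target sitting at zero replicas only disables autoscaling when minReplicas is non-zero, so an HPA with minReplicas=0 keeps evaluating metrics and can scale the workload back up.

// desiredWithoutMetrics decides the cases that bypass metric evaluation entirely.
func desiredWithoutMetrics(current, minReplicas, maxReplicas int32) (desired int32, evaluateMetrics bool) {
	switch {
	case current == 0 && minReplicas != 0:
		return 0, false // user scaled to zero explicitly: autoscaling is paused
	case current > maxReplicas:
		return maxReplicas, false
	case current < minReplicas:
		return minReplicas, false
	default:
		return 0, true // in range (including 0 with minReplicas=0): compute from metrics
	}
}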
@ -624,7 +623,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.Ho
|
||||
if desiredReplicas < currentReplicas {
|
||||
rescaleReason = "All metrics below target"
|
||||
}
|
||||
desiredReplicas = a.normalizeDesiredReplicas(hpa, key, currentReplicas, desiredReplicas)
|
||||
desiredReplicas = a.normalizeDesiredReplicas(hpa, key, currentReplicas, desiredReplicas, minReplicas)
|
||||
rescale = desiredReplicas != currentReplicas
|
||||
}
|
||||
|
||||
@ -679,19 +678,13 @@ func (a *HorizontalController) stabilizeRecommendation(key string, prenormalized
|
||||
|
||||
// normalizeDesiredReplicas takes the metrics desired replicas value and normalizes it based on the appropriate conditions (i.e. < maxReplicas, >
|
||||
// minReplicas, etc...)
|
||||
func (a *HorizontalController) normalizeDesiredReplicas(hpa *autoscalingv2.HorizontalPodAutoscaler, key string, currentReplicas int32, prenormalizedDesiredReplicas int32) int32 {
|
||||
func (a *HorizontalController) normalizeDesiredReplicas(hpa *autoscalingv2.HorizontalPodAutoscaler, key string, currentReplicas int32, prenormalizedDesiredReplicas int32, minReplicas int32) int32 {
|
||||
stabilizedRecommendation := a.stabilizeRecommendation(key, prenormalizedDesiredReplicas)
|
||||
if stabilizedRecommendation != prenormalizedDesiredReplicas {
|
||||
setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "ScaleDownStabilized", "recent recommendations were higher than current one, applying the highest recent recommendation")
|
||||
} else {
|
||||
setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "ReadyForNewScale", "recommended size matches current size")
|
||||
}
|
||||
var minReplicas int32
|
||||
if hpa.Spec.MinReplicas != nil {
|
||||
minReplicas = *hpa.Spec.MinReplicas
|
||||
} else {
|
||||
minReplicas = 0
|
||||
}
|
||||
|
||||
desiredReplicas, condition, reason := convertDesiredReplicasWithRules(currentReplicas, stabilizedRecommendation, minReplicas, hpa.Spec.MaxReplicas)
|
||||
|
||||
@ -704,6 +697,16 @@ func (a *HorizontalController) normalizeDesiredReplicas(hpa *autoscalingv2.Horiz
|
||||
return desiredReplicas
|
||||
}
|
||||
|
||||
func (a *HorizontalController) getUnableComputeReplicaCountCondition(hpa *autoscalingv2.HorizontalPodAutoscaler, reason string, err error) (condition autoscalingv2.HorizontalPodAutoscalerCondition) {
|
||||
a.eventRecorder.Event(hpa, v1.EventTypeWarning, reason, err.Error())
|
||||
return autoscalingv2.HorizontalPodAutoscalerCondition{
|
||||
Type: autoscalingv2.ScalingActive,
|
||||
Status: v1.ConditionFalse,
|
||||
Reason: reason,
|
||||
Message: fmt.Sprintf("the HPA was unable to compute the replica count: %v", err),
|
||||
}
|
||||
}
|
||||
|
||||
// convertDesiredReplicas performs the actual normalization, without depending on `HorizontalController` or `HorizontalPodAutoscaler`
|
||||
func convertDesiredReplicasWithRules(currentReplicas, desiredReplicas, hpaMinReplicas, hpaMaxReplicas int32) (int32, string, string) {
|
||||
|
||||
@ -713,13 +716,8 @@ func convertDesiredReplicasWithRules(currentReplicas, desiredReplicas, hpaMinRep
|
||||
var possibleLimitingCondition string
|
||||
var possibleLimitingReason string
|
||||
|
||||
if hpaMinReplicas == 0 {
|
||||
minimumAllowedReplicas = 1
|
||||
possibleLimitingReason = "the desired replica count is zero"
|
||||
} else {
|
||||
minimumAllowedReplicas = hpaMinReplicas
|
||||
possibleLimitingReason = "the desired replica count is less than the minimum replica count"
|
||||
}
|
||||
minimumAllowedReplicas = hpaMinReplicas
|
||||
possibleLimitingReason = "the desired replica count is less than the minimum replica count"
|
||||
|
||||
// Do not upscale too much to prevent incorrect rapid increase of the number of master replicas caused by
|
||||
// bogus CPU usage report from heapster/kubelet (like in issue #32304).
|
||||
|
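With the special case above removed, the minimum allowed replica count is simply hpaMinReplicas, so a stabilized recommendation of 0 now falls inside the allowed range when minReplicas is 0 (see the updated TestConvertDesiredReplicasWithRules case later in this diff). A rough sketch of the clamping, leaving out the scale-up rate limit the surrounding comment refers to; "TooFewReplicas" and "DesiredWithinRange" appear in the tests in this diff, while "TooManyReplicas" is assumed here:

// clampDesired is illustrative only.
func clampDesired(desired, hpaMinReplicas, hpaMaxReplicas int32) (int32, string) {
	if desired < hpaMinReplicas {
		return hpaMinReplicas, "TooFewReplicas"
	}
	if desired > hpaMaxReplicas {
		return hpaMaxReplicas, "TooManyReplicas"
	}
	return desired, "DesiredWithinRange"
}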
@ -1066,6 +1066,68 @@ func TestScaleUpCMObject(t *testing.T) {
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestScaleUpFromZeroCMObject(t *testing.T) {
|
||||
targetValue := resource.MustParse("15.0")
|
||||
tc := testCase{
|
||||
minReplicas: 0,
|
||||
maxReplicas: 6,
|
||||
initialReplicas: 0,
|
||||
expectedDesiredReplicas: 2,
|
||||
CPUTarget: 0,
|
||||
metricsTarget: []autoscalingv2.MetricSpec{
|
||||
{
|
||||
Type: autoscalingv2.ObjectMetricSourceType,
|
||||
Object: &autoscalingv2.ObjectMetricSource{
|
||||
DescribedObject: autoscalingv2.CrossVersionObjectReference{
|
||||
APIVersion: "apps/v1",
|
||||
Kind: "Deployment",
|
||||
Name: "some-deployment",
|
||||
},
|
||||
Metric: autoscalingv2.MetricIdentifier{
|
||||
Name: "qps",
|
||||
},
|
||||
Target: autoscalingv2.MetricTarget{
|
||||
Value: &targetValue,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
reportedLevels: []uint64{20000},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestScaleUpFromZeroIgnoresToleranceCMObject(t *testing.T) {
|
||||
targetValue := resource.MustParse("1.0")
|
||||
tc := testCase{
|
||||
minReplicas: 0,
|
||||
maxReplicas: 6,
|
||||
initialReplicas: 0,
|
||||
expectedDesiredReplicas: 1,
|
||||
CPUTarget: 0,
|
||||
metricsTarget: []autoscalingv2.MetricSpec{
|
||||
{
|
||||
Type: autoscalingv2.ObjectMetricSourceType,
|
||||
Object: &autoscalingv2.ObjectMetricSource{
|
||||
DescribedObject: autoscalingv2.CrossVersionObjectReference{
|
||||
APIVersion: "apps/v1",
|
||||
Kind: "Deployment",
|
||||
Name: "some-deployment",
|
||||
},
|
||||
Metric: autoscalingv2.MetricIdentifier{
|
||||
Name: "qps",
|
||||
},
|
||||
Target: autoscalingv2.MetricTarget{
|
||||
Value: &targetValue,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
reportedLevels: []uint64{1000},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestScaleUpPerPodCMObject(t *testing.T) {
|
||||
targetAverageValue := resource.MustParse("10.0")
|
||||
tc := testCase{
|
||||
@ -1163,6 +1225,67 @@ func TestScaleDown(t *testing.T) {
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestScaleUpOneMetricInvalid(t *testing.T) {
|
||||
tc := testCase{
|
||||
minReplicas: 2,
|
||||
maxReplicas: 6,
|
||||
initialReplicas: 3,
|
||||
expectedDesiredReplicas: 4,
|
||||
CPUTarget: 30,
|
||||
verifyCPUCurrent: true,
|
||||
metricsTarget: []autoscalingv2.MetricSpec{
|
||||
{
|
||||
Type: "CheddarCheese",
|
||||
},
|
||||
},
|
||||
reportedLevels: []uint64{300, 400, 500},
|
||||
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestScaleUpFromZeroOneMetricInvalid(t *testing.T) {
|
||||
tc := testCase{
|
||||
minReplicas: 0,
|
||||
maxReplicas: 6,
|
||||
initialReplicas: 0,
|
||||
expectedDesiredReplicas: 4,
|
||||
CPUTarget: 30,
|
||||
verifyCPUCurrent: true,
|
||||
metricsTarget: []autoscalingv2.MetricSpec{
|
||||
{
|
||||
Type: "CheddarCheese",
|
||||
},
|
||||
},
|
||||
reportedLevels: []uint64{300, 400, 500},
|
||||
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
recommendations: []timestampedRecommendation{},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestScaleUpBothMetricsEmpty(t *testing.T) { // Switch to missing
|
||||
tc := testCase{
|
||||
minReplicas: 2,
|
||||
maxReplicas: 6,
|
||||
initialReplicas: 3,
|
||||
expectedDesiredReplicas: 3,
|
||||
CPUTarget: 0,
|
||||
metricsTarget: []autoscalingv2.MetricSpec{
|
||||
{
|
||||
Type: "CheddarCheese",
|
||||
},
|
||||
},
|
||||
reportedLevels: []uint64{},
|
||||
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
|
||||
{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededGetScale"},
|
||||
{Type: autoscalingv1.ScalingActive, Status: v1.ConditionFalse, Reason: "InvalidMetricSourceType"},
|
||||
},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestScaleDownStabilizeInitialSize(t *testing.T) {
|
||||
tc := testCase{
|
||||
minReplicas: 2,
|
||||
@ -1249,6 +1372,39 @@ func TestScaleDownCMObject(t *testing.T) {
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestScaleDownToZeroCMObject(t *testing.T) {
|
||||
targetValue := resource.MustParse("20.0")
|
||||
tc := testCase{
|
||||
minReplicas: 0,
|
||||
maxReplicas: 6,
|
||||
initialReplicas: 5,
|
||||
expectedDesiredReplicas: 0,
|
||||
CPUTarget: 0,
|
||||
metricsTarget: []autoscalingv2.MetricSpec{
|
||||
{
|
||||
Type: autoscalingv2.ObjectMetricSourceType,
|
||||
Object: &autoscalingv2.ObjectMetricSource{
|
||||
DescribedObject: autoscalingv2.CrossVersionObjectReference{
|
||||
APIVersion: "apps/v1",
|
||||
Kind: "Deployment",
|
||||
Name: "some-deployment",
|
||||
},
|
||||
Metric: autoscalingv2.MetricIdentifier{
|
||||
Name: "qps",
|
||||
},
|
||||
Target: autoscalingv2.MetricTarget{
|
||||
Value: &targetValue,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
reportedLevels: []uint64{0},
|
||||
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
recommendations: []timestampedRecommendation{},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestScaleDownPerPodCMObject(t *testing.T) {
|
||||
targetAverageValue := resource.MustParse("20.0")
|
||||
tc := testCase{
|
||||
@ -1308,6 +1464,32 @@ func TestScaleDownCMExternal(t *testing.T) {
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestScaleDownToZeroCMExternal(t *testing.T) {
|
||||
tc := testCase{
|
||||
minReplicas: 0,
|
||||
maxReplicas: 6,
|
||||
initialReplicas: 5,
|
||||
expectedDesiredReplicas: 0,
|
||||
metricsTarget: []autoscalingv2.MetricSpec{
|
||||
{
|
||||
Type: autoscalingv2.ExternalMetricSourceType,
|
||||
External: &autoscalingv2.ExternalMetricSource{
|
||||
Metric: autoscalingv2.MetricIdentifier{
|
||||
Name: "qps",
|
||||
Selector: &metav1.LabelSelector{},
|
||||
},
|
||||
Target: autoscalingv2.MetricTarget{
|
||||
Value: resource.NewMilliQuantity(14400, resource.DecimalSI),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
reportedLevels: []uint64{0},
|
||||
recommendations: []timestampedRecommendation{},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestScaleDownPerPodCMExternal(t *testing.T) {
|
||||
tc := testCase{
|
||||
minReplicas: 2,
|
||||
@ -1352,6 +1534,62 @@ func TestScaleDownIncludeUnreadyPods(t *testing.T) {
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestScaleDownOneMetricInvalid(t *testing.T) {
|
||||
tc := testCase{
|
||||
minReplicas: 2,
|
||||
maxReplicas: 6,
|
||||
initialReplicas: 5,
|
||||
expectedDesiredReplicas: 3,
|
||||
CPUTarget: 50,
|
||||
metricsTarget: []autoscalingv2.MetricSpec{
|
||||
{
|
||||
Type: "CheddarCheese",
|
||||
},
|
||||
},
|
||||
reportedLevels: []uint64{100, 300, 500, 250, 250},
|
||||
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
useMetricsAPI: true,
|
||||
recommendations: []timestampedRecommendation{},
|
||||
}
|
||||
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestScaleDownOneMetricEmpty(t *testing.T) {
|
||||
tc := testCase{
|
||||
minReplicas: 2,
|
||||
maxReplicas: 6,
|
||||
initialReplicas: 5,
|
||||
expectedDesiredReplicas: 3,
|
||||
CPUTarget: 50,
|
||||
metricsTarget: []autoscalingv2.MetricSpec{
|
||||
{
|
||||
Type: autoscalingv2.ExternalMetricSourceType,
|
||||
External: &autoscalingv2.ExternalMetricSource{
|
||||
Metric: autoscalingv2.MetricIdentifier{
|
||||
Name: "qps",
|
||||
Selector: &metav1.LabelSelector{},
|
||||
},
|
||||
Target: autoscalingv2.MetricTarget{
|
||||
Type: autoscalingv2.AverageValueMetricType,
|
||||
AverageValue: resource.NewMilliQuantity(1000, resource.DecimalSI),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
reportedLevels: []uint64{100, 300, 500, 250, 250},
|
||||
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
useMetricsAPI: true,
|
||||
recommendations: []timestampedRecommendation{},
|
||||
}
|
||||
_, _, _, testEMClient, _ := tc.prepareTestClient(t)
|
||||
testEMClient.PrependReactor("list", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
||||
return true, &emapi.ExternalMetricValueList{}, fmt.Errorf("something went wrong")
|
||||
})
|
||||
tc.testEMClient = testEMClient
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestScaleDownIgnoreHotCpuPods(t *testing.T) {
|
||||
tc := testCase{
|
||||
minReplicas: 2,
|
||||
@ -1610,6 +1848,26 @@ func TestMinReplicas(t *testing.T) {
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestZeroMinReplicasDesiredZero(t *testing.T) {
|
||||
tc := testCase{
|
||||
minReplicas: 0,
|
||||
maxReplicas: 5,
|
||||
initialReplicas: 3,
|
||||
expectedDesiredReplicas: 0,
|
||||
CPUTarget: 90,
|
||||
reportedLevels: []uint64{0, 0, 0},
|
||||
reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
|
||||
useMetricsAPI: true,
|
||||
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
|
||||
Type: autoscalingv2.ScalingLimited,
|
||||
Status: v1.ConditionFalse,
|
||||
Reason: "DesiredWithinRange",
|
||||
}),
|
||||
recommendations: []timestampedRecommendation{},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestMinReplicasDesiredZero(t *testing.T) {
|
||||
tc := testCase{
|
||||
minReplicas: 2,
|
||||
@ -2553,9 +2811,9 @@ func TestConvertDesiredReplicasWithRules(t *testing.T) {
|
||||
expectedDesiredReplicas: 0,
|
||||
hpaMinReplicas: 0,
|
||||
hpaMaxReplicas: 10,
|
||||
expectedConvertedDesiredReplicas: 1,
|
||||
expectedCondition: "TooFewReplicas",
|
||||
annotation: "1 is minLimit because hpaMinReplicas < 1",
|
||||
expectedConvertedDesiredReplicas: 0,
|
||||
expectedCondition: "DesiredWithinRange",
|
||||
annotation: "prenormalized desired replicas within range",
|
||||
},
|
||||
{
|
||||
currentReplicas: 20,
|
||||
@ -2717,8 +2975,9 @@ func TestNoScaleDownOneMetricInvalid(t *testing.T) {
|
||||
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
useMetricsAPI: true,
|
||||
expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
|
||||
{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededGetScale"},
|
||||
{Type: autoscalingv1.ScalingActive, Status: v1.ConditionFalse, Reason: "InvalidMetricSourceType"},
|
||||
{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "ScaleDownStabilized"},
|
||||
{Type: autoscalingv1.ScalingActive, Status: v1.ConditionTrue, Reason: "ValidMetricFound"},
|
||||
{Type: autoscalingv1.ScalingLimited, Status: v1.ConditionFalse, Reason: "DesiredWithinRange"},
|
||||
},
|
||||
}
|
||||
|
||||
@ -2750,8 +3009,9 @@ func TestNoScaleDownOneMetricEmpty(t *testing.T) {
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
useMetricsAPI: true,
expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededGetScale"},
{Type: autoscalingv1.ScalingActive, Status: v1.ConditionFalse, Reason: "FailedGetExternalMetric"},
{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "ScaleDownStabilized"},
{Type: autoscalingv1.ScalingActive, Status: v1.ConditionTrue, Reason: "ValidMetricFound"},
{Type: autoscalingv1.ScalingLimited, Status: v1.ConditionFalse, Reason: "DesiredWithinRange"},
},
}
_, _, _, testEMClient, _ := tc.prepareTestClient(t)
@ -241,20 +241,30 @@ func (c *ReplicaCalculator) GetObjectMetricReplicas(currentReplicas int32, targe
}

usageRatio := float64(utilization) / float64(targetUtilization)
if math.Abs(1.0-usageRatio) <= c.tolerance {
// return the current replicas if the change would be too small
return currentReplicas, utilization, timestamp, nil
replicaCount, timestamp, err = c.getUsageRatioReplicaCount(currentReplicas, usageRatio, namespace, selector)
return replicaCount, utilization, timestamp, err
}

// getUsageRatioReplicaCount calculates the desired replica count based on usageRatio and ready pods count.
// For currentReplicas=0 it does not take the ready pods count and tolerance into account, in order to support scaling to zero pods.
func (c *ReplicaCalculator) getUsageRatioReplicaCount(currentReplicas int32, usageRatio float64, namespace string, selector labels.Selector) (replicaCount int32, timestamp time.Time, err error) {
if currentReplicas != 0 {
if math.Abs(1.0-usageRatio) <= c.tolerance {
// return the current replicas if the change would be too small
return currentReplicas, timestamp, nil
}
readyPodCount := int64(0)
readyPodCount, err = c.getReadyPodsCount(namespace, selector)
if err != nil {
return 0, time.Time{}, fmt.Errorf("unable to calculate ready pods: %s", err)
}
replicaCount = int32(math.Ceil(usageRatio * float64(readyPodCount)))
} else {
// Scale to zero or n pods depending on usageRatio
replicaCount = int32(math.Ceil(usageRatio))
}

readyPodCount, err := c.getReadyPodsCount(namespace, selector)

if err != nil {
return 0, 0, time.Time{}, fmt.Errorf("unable to calculate ready pods: %s", err)
}

replicaCount = int32(math.Ceil(usageRatio * float64(readyPodCount)))

return replicaCount, utilization, timestamp, nil
return replicaCount, timestamp, err
}
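For readers skimming the diff, the following standalone sketch condenses the branch structure of getUsageRatioReplicaCount; desiredReplicas, tolerance and readyPods are illustrative names, and the real method also resolves the ready-pod count itself and propagates errors as shown above:

package main

import (
	"fmt"
	"math"
)

// desiredReplicas mirrors the logic above: when scaling from zero,
// tolerance and the ready-pod count are ignored and the usage ratio
// alone decides how many pods to create.
func desiredReplicas(currentReplicas int32, usageRatio, tolerance float64, readyPods int64) int32 {
	if currentReplicas != 0 {
		if math.Abs(1.0-usageRatio) <= tolerance {
			return currentReplicas // change too small, keep the current count
		}
		return int32(math.Ceil(usageRatio * float64(readyPods)))
	}
	return int32(math.Ceil(usageRatio))
}

func main() {
	fmt.Println(desiredReplicas(4, 0.5, 0.1, 4)) // 2: scale down proportionally
	fmt.Println(desiredReplicas(4, 0.0, 0.1, 4)) // 0: no demand, calculator proposes zero
	fmt.Println(desiredReplicas(0, 2.5, 0.1, 0)) // 3: scale up from zero, tolerance ignored
}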
// GetObjectPerPodMetricReplicas calculates the desired replica count based on a target metric utilization (as a milli-value)
@ -316,19 +326,9 @@ func (c *ReplicaCalculator) GetExternalMetricReplicas(currentReplicas int32, tar
utilization = utilization + val
}

readyPodCount, err := c.getReadyPodsCount(namespace, podSelector)

if err != nil {
return 0, 0, time.Time{}, fmt.Errorf("unable to calculate ready pods: %s", err)
}

usageRatio := float64(utilization) / float64(targetUtilization)
if math.Abs(1.0-usageRatio) <= c.tolerance {
// return the current replicas if the change would be too small
return currentReplicas, utilization, timestamp, nil
}

return int32(math.Ceil(usageRatio * float64(readyPodCount))), utilization, timestamp, nil
replicaCount, timestamp, err = c.getUsageRatioReplicaCount(currentReplicas, usageRatio, namespace, podSelector)
return replicaCount, utilization, timestamp, err
}

// GetExternalPerPodMetricReplicas calculates the desired replica count based on a
@ -54,6 +54,12 @@ const (
// Enables support for Device Plugins
DevicePlugins featuregate.Feature = "DevicePlugins"

// owner: @dxist
// alpha: v1.16
//
// Enables support of HPA scaling to zero pods when an object or custom metric is configured.
HPAScaleToZero featuregate.Feature = "HPAScaleToZero"

// owner: @Huang-Wei
// beta: v1.13
//
@ -549,4 +555,5 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
// features that enable backwards compatibility but are scheduled to be removed
// ...
HPAScaleToZero: {Default: false, PreRelease: featuregate.Alpha},
}
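As a rough illustration of how a consumer of this gate can derive the lower bound that the validation uses for minReplicas, here is a sketch only; minReplicasLowerBound as a standalone helper is illustrative and not the exact shape of the validation code:

package main

import (
	"fmt"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"
)

// minReplicasLowerBound is an illustrative helper: with HPAScaleToZero
// enabled, a minReplicas of 0 becomes acceptable; otherwise the historical
// lower bound of 1 still applies.
func minReplicasLowerBound() int32 {
	if utilfeature.DefaultFeatureGate.Enabled(features.HPAScaleToZero) {
		return 0
	}
	return 1
}

func main() {
	fmt.Println("minReplicas lower bound:", minReplicasLowerBound())
}

On a running cluster the gate itself would typically be switched on via the --feature-gates=HPAScaleToZero=true flag on the relevant control-plane components.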
@ -17,7 +17,7 @@ limitations under the License.
package v1

import (
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@ -38,7 +38,11 @@ type HorizontalPodAutoscalerSpec struct {
// reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption
// and will set the desired number of pods by using its Scale subresource.
ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"`
// lower limit for the number of pods that can be set by the autoscaler, default 1.
// minReplicas is the lower limit for the number of replicas to which the autoscaler
// can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the
// alpha feature gate HPAScaleToZero is enabled and at least one Object or External
// metric is configured. Scaling is active as long as at least one metric value is
// available.
// +optional
MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"`
// upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.
@ -17,7 +17,7 @@ limitations under the License.
package v2beta1

import (
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@ -38,8 +38,11 @@ type HorizontalPodAutoscalerSpec struct {
// scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics
// should be collected, as well as to actually change the replica count.
ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"`
// minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down.
// It defaults to 1 pod.
// minReplicas is the lower limit for the number of replicas to which the autoscaler
// can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the
// alpha feature gate HPAScaleToZero is enabled and at least one Object or External
// metric is configured. Scaling is active as long as at least one metric value is
// available.
// +optional
MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"`
// maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up.
@ -19,7 +19,7 @@ limitations under the License.
package v2beta2

import (
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@ -52,8 +52,11 @@ type HorizontalPodAutoscalerSpec struct {
// scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics
// should be collected, as well as to actually change the replica count.
ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"`
// minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down.
// It defaults to 1 pod.
// minReplicas is the lower limit for the number of replicas to which the autoscaler
// can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the
// alpha feature gate HPAScaleToZero is enabled and at least one Object or External
// metric is configured. Scaling is active as long as at least one metric value is
// available.
// +optional
MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"`
// maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up.
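To tie the documented behaviour together, here is a short example of an HPA object that would rely on the new semantics. It is a sketch under stated assumptions: a metrics adapter exposing an external metric named queue_messages_ready, a Deployment called worker, and a cluster with the HPAScaleToZero gate enabled; all of those concrete names are illustrative rather than part of this change:

package main

import (
	"fmt"

	autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	minReplicas := int32(0) // only valid when the HPAScaleToZero feature gate is enabled

	hpa := autoscalingv2beta2.HorizontalPodAutoscaler{
		ObjectMeta: metav1.ObjectMeta{Name: "worker", Namespace: "default"},
		Spec: autoscalingv2beta2.HorizontalPodAutoscalerSpec{
			ScaleTargetRef: autoscalingv2beta2.CrossVersionObjectReference{
				APIVersion: "apps/v1",
				Kind:       "Deployment",
				Name:       "worker",
			},
			MinReplicas: &minReplicas,
			MaxReplicas: 10,
			Metrics: []autoscalingv2beta2.MetricSpec{{
				// At least one Object or External metric must be configured
				// for minReplicas: 0 to pass validation.
				Type: autoscalingv2beta2.ExternalMetricSourceType,
				External: &autoscalingv2beta2.ExternalMetricSource{
					Metric: autoscalingv2beta2.MetricIdentifier{Name: "queue_messages_ready"},
					Target: autoscalingv2beta2.MetricTarget{
						Type:         autoscalingv2beta2.AverageValueMetricType,
						AverageValue: resource.NewQuantity(30, resource.DecimalSI),
					},
				},
			}},
		},
	}
	fmt.Println(hpa.Name)
}

Applied through client-go or as the equivalent YAML, such an object would let the controller drop the workload to zero replicas while the external metric reports no pending work, and scale back up from zero as soon as it does.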