diff --git a/cmd/kube-controller-manager/app/autoscaling.go b/cmd/kube-controller-manager/app/autoscaling.go
index 1b47a276470..a4dfa5cf2a1 100644
--- a/cmd/kube-controller-manager/app/autoscaling.go
+++ b/cmd/kube-controller-manager/app/autoscaling.go
@@ -21,13 +21,16 @@ package app
 import (
 	"context"
 
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/dynamic"
 	"k8s.io/client-go/scale"
 	"k8s.io/controller-manager/controller"
 	"k8s.io/klog/v2"
 
 	"k8s.io/kubernetes/pkg/controller/podautoscaler"
 	"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
+	"k8s.io/kubernetes/pkg/features"
 
 	resourceclient "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1"
 	"k8s.io/metrics/pkg/client/custom_metrics"
@@ -92,6 +95,7 @@ func startHPAControllerWithMetricsClient(ctx context.Context, controllerContext
 		controllerContext.ComponentConfig.HPAController.HorizontalPodAutoscalerTolerance,
 		controllerContext.ComponentConfig.HPAController.HorizontalPodAutoscalerCPUInitializationPeriod.Duration,
 		controllerContext.ComponentConfig.HPAController.HorizontalPodAutoscalerInitialReadinessDelay.Duration,
+		feature.DefaultFeatureGate.Enabled(features.HPAContainerMetrics),
 	).Run(ctx, int(controllerContext.ComponentConfig.HPAController.ConcurrentHorizontalPodAutoscalerSyncs))
 	return nil, true, nil
 }
diff --git a/pkg/controller/podautoscaler/horizontal.go b/pkg/controller/podautoscaler/horizontal.go
index 1d8c4c266c2..b01196c1d3a 100644
--- a/pkg/controller/podautoscaler/horizontal.go
+++ b/pkg/controller/podautoscaler/horizontal.go
@@ -108,6 +108,9 @@ type HorizontalController struct {
 	// Storage of HPAs and their selectors.
 	hpaSelectors    *selectors.BiMultimap
 	hpaSelectorsMux sync.Mutex
+
+	// feature gates
+	containerResourceMetricsEnabled bool
 }
 
 // NewHorizontalController creates a new HorizontalController.
@@ -124,7 +127,7 @@ func NewHorizontalController(
 	tolerance float64,
 	cpuInitializationPeriod,
 	delayOfInitialReadinessStatus time.Duration,
-
+	containerResourceMetricsEnabled bool,
 ) *HorizontalController {
 	broadcaster := record.NewBroadcaster()
 	broadcaster.StartStructuredLogging(0)
@@ -132,19 +135,20 @@ func NewHorizontalController(
 	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "horizontal-pod-autoscaler"})
 
 	hpaController := &HorizontalController{
-		eventRecorder:                recorder,
-		scaleNamespacer:              scaleNamespacer,
-		hpaNamespacer:                hpaNamespacer,
-		downscaleStabilisationWindow: downscaleStabilisationWindow,
-		queue:                        workqueue.NewNamedRateLimitingQueue(NewDefaultHPARateLimiter(resyncPeriod), "horizontalpodautoscaler"),
-		mapper:                       mapper,
-		recommendations:              map[string][]timestampedRecommendation{},
-		recommendationsLock:          sync.Mutex{},
-		scaleUpEvents:                map[string][]timestampedScaleEvent{},
-		scaleUpEventsLock:            sync.RWMutex{},
-		scaleDownEvents:              map[string][]timestampedScaleEvent{},
-		scaleDownEventsLock:          sync.RWMutex{},
-		hpaSelectors:                 selectors.NewBiMultimap(),
+		eventRecorder:                   recorder,
+		scaleNamespacer:                 scaleNamespacer,
+		hpaNamespacer:                   hpaNamespacer,
+		downscaleStabilisationWindow:    downscaleStabilisationWindow,
+		queue:                           workqueue.NewNamedRateLimitingQueue(NewDefaultHPARateLimiter(resyncPeriod), "horizontalpodautoscaler"),
+		mapper:                          mapper,
+		recommendations:                 map[string][]timestampedRecommendation{},
+		recommendationsLock:             sync.Mutex{},
+		scaleUpEvents:                   map[string][]timestampedScaleEvent{},
+		scaleUpEventsLock:               sync.RWMutex{},
+		scaleDownEvents:                 map[string][]timestampedScaleEvent{},
+		scaleDownEventsLock:             sync.RWMutex{},
+		hpaSelectors:                    selectors.NewBiMultimap(),
+		containerResourceMetricsEnabled: containerResourceMetricsEnabled,
 	}
 
 	hpaInformer.Informer().AddEventHandlerWithResyncPeriod(
@@ -425,6 +429,13 @@ func (a *HorizontalController) computeReplicasForMetric(ctx context.Context, hpa
 			return 0, "", time.Time{}, condition, fmt.Errorf("failed to get %s resource metric value: %v", spec.Resource.Name, err)
 		}
 	case autoscalingv2.ContainerResourceMetricSourceType:
+		if !a.containerResourceMetricsEnabled {
+			// If the container resource metrics feature is disabled but the HPA object still has a container resource metric source,
+			// the user most likely enabled the feature, created HPAs with container resource metrics, and then disabled it again.
+			// We cannot return an error in this case, because that would leave every HPA with a container resource metric source unable to scale down.
+			// Instead, we silently ignore the metric and return the current replica count so that it does not affect the autoscaling decision.
+			return specReplicas, "", time.Time{}, condition, nil
+		}
 		replicaCountProposal, timestampProposal, metricNameProposal, condition, err = a.computeStatusForContainerResourceMetric(ctx, specReplicas, spec, hpa, selector, status)
 		if err != nil {
 			return 0, "", time.Time{}, condition, fmt.Errorf("failed to get %s container metric value: %v", spec.ContainerResource.Container, err)
diff --git a/pkg/controller/podautoscaler/horizontal_test.go b/pkg/controller/podautoscaler/horizontal_test.go
index b3af55c5a9f..23019952577 100644
--- a/pkg/controller/podautoscaler/horizontal_test.go
+++ b/pkg/controller/podautoscaler/horizontal_test.go
@@ -148,6 +148,8 @@ type testCase struct {
 
 	recommendations []timestampedRecommendation
 	hpaSelectors    *selectors.BiMultimap
+
+	containerResourceMetricsEnabled bool
 }
 
 // Needs to be called under a lock.
@@ -738,6 +740,7 @@ func (tc *testCase) setupController(t *testing.T) (*HorizontalController, inform
 		defaultTestingTolerance,
 		defaultTestingCPUInitializationPeriod,
 		defaultTestingDelayOfInitialReadinessStatus,
+		tc.containerResourceMetricsEnabled,
 	)
 	hpaController.hpaListerSynced = alwaysReady
 	if tc.recommendations != nil {
@@ -826,9 +829,41 @@ func TestScaleUpContainer(t *testing.T) {
 				Container: "container1",
 			},
 		}},
+		reportedLevels:                  []uint64{300, 500, 700},
+		reportedCPURequests:             []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
+		useMetricsAPI:                   true,
+		containerResourceMetricsEnabled: true,
+	}
+	tc.runTest(t)
+}
+
+func TestIgnoreContainerMetric(t *testing.T) {
+	// When the container resource metric feature gate is disabled, the metric is ignored and the HPA keeps the current replica count.
+	tc := testCase{
+		minReplicas:             2,
+		maxReplicas:             6,
+		specReplicas:            3,
+		statusReplicas:          3,
+		expectedDesiredReplicas: 3,
+		metricsTarget: []autoscalingv2.MetricSpec{{
+			Type: autoscalingv2.ContainerResourceMetricSourceType,
+			ContainerResource: &autoscalingv2.ContainerResourceMetricSource{
+				Name: v1.ResourceCPU,
+				Target: autoscalingv2.MetricTarget{
+					Type:               autoscalingv2.UtilizationMetricType,
+					AverageUtilization: pointer.Int32(30),
+				},
+				Container: "container1",
+			},
+		}},
 		reportedLevels:      []uint64{300, 500, 700},
 		reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
 		useMetricsAPI:       true,
+		expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
+			Type:   autoscalingv2.AbleToScale,
+			Status: v1.ConditionTrue,
+			Reason: "ReadyForNewScale",
+		}),
 	}
 	tc.runTest(t)
 }
@@ -1359,8 +1394,9 @@ func TestScaleDownContainerResource(t *testing.T) {
 			},
 		},
 	}},
-		useMetricsAPI:   true,
-		recommendations: []timestampedRecommendation{},
+		useMetricsAPI:                   true,
+		recommendations:                 []timestampedRecommendation{},
+		containerResourceMetricsEnabled: true,
 	}
 	tc.runTest(t)
 }
@@ -4536,6 +4572,7 @@ func TestMultipleHPAs(t *testing.T) {
 		defaultTestingTolerance,
 		defaultTestingCPUInitializationPeriod,
 		defaultTestingDelayOfInitialReadinessStatus,
+		false,
 	)
 	hpaController.scaleUpEvents = scaleUpEventsMap
 	hpaController.scaleDownEvents = scaleDownEventsMap
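Note (not part of the patch): the branch added to computeReplicasForMetric degrades gracefully instead of erroring when the gate is off. A minimal standalone sketch of that behaviour follows, assuming the hypothetical helper name proposeReplicasForMetric and using only the public autoscaling/v2 types; the real controller threads the gate value through HorizontalController.containerResourceMetricsEnabled as shown above.

package main

import (
	"fmt"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
)

// proposeReplicasForMetric is a simplified, hypothetical stand-in for the controller's
// per-metric computation. When a ContainerResource source is evaluated while the
// HPAContainerMetrics gate is off, it proposes the current replica count and reports
// no error, mirroring the "silently ignore" branch added by the patch.
func proposeReplicasForMetric(spec autoscalingv2.MetricSpec, gateEnabled bool, currentReplicas, computedProposal int32) (int32, error) {
	if spec.Type == autoscalingv2.ContainerResourceMetricSourceType && !gateEnabled {
		// Ignored, not an error: the HPA keeps scaling on its other metric sources.
		return currentReplicas, nil
	}
	return computedProposal, nil
}

func main() {
	spec := autoscalingv2.MetricSpec{Type: autoscalingv2.ContainerResourceMetricSourceType}
	replicas, err := proposeReplicasForMetric(spec, false, 3, 6)
	fmt.Println(replicas, err) // 3 <nil>: the disabled metric does not change the decision
}

Returning the current replica count for the ignored metric means an HPA created while the gate was enabled keeps working on its remaining metric sources after the gate is turned off, rather than getting stuck in an error condition.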