Merge pull request #108501 from zroubalik/hpa

add `--concurrent-horizontal-pod-autoscaler-syncs` flag to kube-controller-manager
commit 85643c0f93
Kubernetes Prow Robot, 2022-10-17 14:13:18 -07:00 (committed by GitHub)
11 changed files with 348 additions and 9 deletions


@@ -517,6 +517,7 @@ API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,G
API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,GarbageCollectorControllerConfiguration,GCIgnoredResources
API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,GroupResource,Group
API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,GroupResource,Resource
API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,HPAControllerConfiguration,ConcurrentHorizontalPodAutoscalerSyncs
API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,HPAControllerConfiguration,HorizontalPodAutoscalerCPUInitializationPeriod
API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,HPAControllerConfiguration,HorizontalPodAutoscalerDownscaleForbiddenWindow
API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,HPAControllerConfiguration,HorizontalPodAutoscalerDownscaleStabilizationWindow


@@ -87,6 +87,6 @@ func startHPAControllerWithMetricsClient(ctx context.Context, controllerContext
controllerContext.ComponentConfig.HPAController.HorizontalPodAutoscalerTolerance,
controllerContext.ComponentConfig.HPAController.HorizontalPodAutoscalerCPUInitializationPeriod.Duration,
controllerContext.ComponentConfig.HPAController.HorizontalPodAutoscalerInitialReadinessDelay.Duration,
- ).Run(ctx)
+ ).Run(ctx, int(controllerContext.ComponentConfig.HPAController.ConcurrentHorizontalPodAutoscalerSyncs))
return nil, true, nil
}


@@ -17,6 +17,8 @@ limitations under the License.
package options
import (
"fmt"
"github.com/spf13/pflag"
poautosclerconfig "k8s.io/kubernetes/pkg/controller/podautoscaler/config"
@@ -33,6 +35,7 @@ func (o *HPAControllerOptions) AddFlags(fs *pflag.FlagSet) {
return
}
fs.Int32Var(&o.ConcurrentHorizontalPodAutoscalerSyncs, "concurrent-horizontal-pod-autoscaler-syncs", o.ConcurrentHorizontalPodAutoscalerSyncs, "The number of horizontal pod autoscaler objects that are allowed to sync concurrently. Larger number = more responsive horizontal pod autoscaler objects processing, but more CPU (and network) load.")
fs.DurationVar(&o.HorizontalPodAutoscalerSyncPeriod.Duration, "horizontal-pod-autoscaler-sync-period", o.HorizontalPodAutoscalerSyncPeriod.Duration, "The period for syncing the number of pods in horizontal pod autoscaler.")
fs.DurationVar(&o.HorizontalPodAutoscalerUpscaleForbiddenWindow.Duration, "horizontal-pod-autoscaler-upscale-delay", o.HorizontalPodAutoscalerUpscaleForbiddenWindow.Duration, "The period since last upscale, before another upscale can be performed in horizontal pod autoscaler.")
fs.MarkDeprecated("horizontal-pod-autoscaler-upscale-delay", "This flag is currently no-op and will be deleted.")
@@ -50,6 +53,7 @@ func (o *HPAControllerOptions) ApplyTo(cfg *poautosclerconfig.HPAControllerConfi
return nil
}
cfg.ConcurrentHorizontalPodAutoscalerSyncs = o.ConcurrentHorizontalPodAutoscalerSyncs
cfg.HorizontalPodAutoscalerSyncPeriod = o.HorizontalPodAutoscalerSyncPeriod
cfg.HorizontalPodAutoscalerDownscaleStabilizationWindow = o.HorizontalPodAutoscalerDownscaleStabilizationWindow
cfg.HorizontalPodAutoscalerTolerance = o.HorizontalPodAutoscalerTolerance
@@ -68,5 +72,8 @@ func (o *HPAControllerOptions) Validate() []error {
}
errs := []error{}
if o.ConcurrentHorizontalPodAutoscalerSyncs < 1 {
errs = append(errs, fmt.Errorf("concurrent-horizontal-pod-autoscaler-syncs must be greater than 0, but got %d", o.ConcurrentHorizontalPodAutoscalerSyncs))
}
return errs
}
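The three hunks above follow the usual kube-controller-manager options pattern: AddFlags registers the flag with the current field value as its default, ApplyTo copies the option into the component config, and Validate rejects non-positive values. A minimal, self-contained sketch of that register-then-validate flow, using only the spf13/pflag API the file already imports (the flag-set name and output here are illustrative):

package main

import (
	"fmt"
	"os"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ExitOnError)

	// Mirror of fs.Int32Var(&o.ConcurrentHorizontalPodAutoscalerSyncs, ...):
	// the current value doubles as the default shown in --help.
	var concurrentSyncs int32 = 5
	fs.Int32Var(&concurrentSyncs, "concurrent-horizontal-pod-autoscaler-syncs",
		concurrentSyncs, "The number of horizontal pod autoscaler objects that are allowed to sync concurrently.")
	fs.Parse(os.Args[1:])

	// Mirror of Validate(): anything below 1 would start zero workers.
	if concurrentSyncs < 1 {
		fmt.Fprintf(os.Stderr, "concurrent-horizontal-pod-autoscaler-syncs must be greater than 0, but got %d\n", concurrentSyncs)
		os.Exit(1)
	}
	fmt.Println("concurrent HPA syncs:", concurrentSyncs)
}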


@@ -84,6 +84,7 @@ var args = []string{
"--cluster-signing-legacy-unknown-cert-file=/cluster-signing-legacy-unknown/cert-file",
"--cluster-signing-legacy-unknown-key-file=/cluster-signing-legacy-unknown/key-file",
"--concurrent-deployment-syncs=10",
"--concurrent-horizontal-pod-autoscaler-syncs=10",
"--concurrent-statefulset-syncs=15",
"--concurrent-endpoint-syncs=10",
"--concurrent-ephemeralvolume-syncs=10",
@@ -304,6 +305,7 @@ func TestAddFlags(t *testing.T) {
},
HPAController: &HPAControllerOptions{
&poautosclerconfig.HPAControllerConfiguration{
ConcurrentHorizontalPodAutoscalerSyncs: 10,
HorizontalPodAutoscalerSyncPeriod: metav1.Duration{Duration: 45 * time.Second},
HorizontalPodAutoscalerUpscaleForbiddenWindow: metav1.Duration{Duration: 1 * time.Minute},
HorizontalPodAutoscalerDownscaleForbiddenWindow: metav1.Duration{Duration: 2 * time.Minute},
@@ -558,6 +560,7 @@ func TestApplyTo(t *testing.T) {
EnableGarbageCollector: false,
},
HPAController: poautosclerconfig.HPAControllerConfiguration{
ConcurrentHorizontalPodAutoscalerSyncs: 10,
HorizontalPodAutoscalerSyncPeriod: metav1.Duration{Duration: 45 * time.Second},
HorizontalPodAutoscalerUpscaleForbiddenWindow: metav1.Duration{Duration: 1 * time.Minute},
HorizontalPodAutoscalerDownscaleForbiddenWindow: metav1.Duration{Duration: 2 * time.Minute},


@@ -22,6 +22,9 @@ import (
// HPAControllerConfiguration contains elements describing HPAController.
type HPAControllerConfiguration struct {
// ConcurrentHorizontalPodAutoscalerSyncs is the number of HPA objects that are allowed to sync concurrently.
// Larger number = more responsive HPA processing, but more CPU (and network) load.
ConcurrentHorizontalPodAutoscalerSyncs int32
// horizontalPodAutoscalerSyncPeriod is the period for syncing the number of
// pods in horizontal pod autoscaler.
HorizontalPodAutoscalerSyncPeriod metav1.Duration


@@ -34,6 +34,9 @@ import (
// run it in your wrapper struct of this type in its `SetDefaults_` method.
func RecommendedDefaultHPAControllerConfiguration(obj *kubectrlmgrconfigv1alpha1.HPAControllerConfiguration) {
zero := metav1.Duration{}
if obj.ConcurrentHorizontalPodAutoscalerSyncs == 0 {
obj.ConcurrentHorizontalPodAutoscalerSyncs = 5
}
if obj.HorizontalPodAutoscalerSyncPeriod == zero {
obj.HorizontalPodAutoscalerSyncPeriod = metav1.Duration{Duration: 15 * time.Second}
}
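RecommendedDefaultHPAControllerConfiguration only fills fields that still hold their zero value, so the new field defaults to 5 workers while explicit settings survive. A stripped-down sketch of that zero-value defaulting pattern, with an illustrative struct standing in for the real v1alpha1 type; note the inherent caveat that an explicit 0 is indistinguishable from unset for a plain int32, which is why Validate separately rejects values below 1:

package main

import "fmt"

// hpaConfig is a simplified stand-in for v1alpha1.HPAControllerConfiguration.
type hpaConfig struct {
	ConcurrentHorizontalPodAutoscalerSyncs int32
}

// recommendedDefaults mirrors the defaulting above: touch only zero values.
func recommendedDefaults(obj *hpaConfig) {
	if obj.ConcurrentHorizontalPodAutoscalerSyncs == 0 {
		obj.ConcurrentHorizontalPodAutoscalerSyncs = 5
	}
}

func main() {
	var unset hpaConfig
	set := hpaConfig{ConcurrentHorizontalPodAutoscalerSyncs: 2}
	recommendedDefaults(&unset)
	recommendedDefaults(&set)
	fmt.Println(unset.ConcurrentHorizontalPodAutoscalerSyncs, set.ConcurrentHorizontalPodAutoscalerSyncs) // 5 2
}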


@@ -82,6 +82,7 @@ func Convert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, ou
}
func autoConvert_v1alpha1_HPAControllerConfiguration_To_config_HPAControllerConfiguration(in *v1alpha1.HPAControllerConfiguration, out *config.HPAControllerConfiguration, s conversion.Scope) error {
out.ConcurrentHorizontalPodAutoscalerSyncs = in.ConcurrentHorizontalPodAutoscalerSyncs
out.HorizontalPodAutoscalerSyncPeriod = in.HorizontalPodAutoscalerSyncPeriod
out.HorizontalPodAutoscalerUpscaleForbiddenWindow = in.HorizontalPodAutoscalerUpscaleForbiddenWindow
out.HorizontalPodAutoscalerDownscaleStabilizationWindow = in.HorizontalPodAutoscalerDownscaleStabilizationWindow
@@ -93,6 +94,7 @@ func autoConvert_v1alpha1_HPAControllerConfiguration_To_config_HPAControllerConf
}
func autoConvert_config_HPAControllerConfiguration_To_v1alpha1_HPAControllerConfiguration(in *config.HPAControllerConfiguration, out *v1alpha1.HPAControllerConfiguration, s conversion.Scope) error {
out.ConcurrentHorizontalPodAutoscalerSyncs = in.ConcurrentHorizontalPodAutoscalerSyncs
out.HorizontalPodAutoscalerSyncPeriod = in.HorizontalPodAutoscalerSyncPeriod
out.HorizontalPodAutoscalerUpscaleForbiddenWindow = in.HorizontalPodAutoscalerUpscaleForbiddenWindow
out.HorizontalPodAutoscalerDownscaleForbiddenWindow = in.HorizontalPodAutoscalerDownscaleForbiddenWindow


@@ -20,6 +20,7 @@ import (
"context"
"fmt"
"math"
"sync"
"time"
autoscalingv1 "k8s.io/api/autoscaling/v1"
@@ -94,11 +95,14 @@ type HorizontalController struct {
queue workqueue.RateLimitingInterface
// Latest unstabilized recommendations for each autoscaler.
recommendations map[string][]timestampedRecommendation
recommendationsLock sync.Mutex
// Latest autoscaler events
scaleUpEvents map[string][]timestampedScaleEvent
scaleUpEventsLock sync.RWMutex
scaleDownEvents map[string][]timestampedScaleEvent
scaleDownEventsLock sync.RWMutex
}
// NewHorizontalController creates a new HorizontalController.
@@ -130,8 +134,11 @@ func NewHorizontalController(
queue: workqueue.NewNamedRateLimitingQueue(NewDefaultHPARateLimiter(resyncPeriod), "horizontalpodautoscaler"),
mapper: mapper,
recommendations: map[string][]timestampedRecommendation{},
recommendationsLock: sync.Mutex{},
scaleUpEvents: map[string][]timestampedScaleEvent{},
scaleUpEventsLock: sync.RWMutex{},
scaleDownEvents: map[string][]timestampedScaleEvent{},
scaleDownEventsLock: sync.RWMutex{},
}
hpaInformer.Informer().AddEventHandlerWithResyncPeriod(
@@ -161,7 +168,7 @@ func NewHorizontalController(
}
// Run begins watching and syncing.
- func (a *HorizontalController) Run(ctx context.Context) {
+ func (a *HorizontalController) Run(ctx context.Context, workers int) {
defer utilruntime.HandleCrash()
defer a.queue.ShutDown()
@@ -172,8 +179,9 @@ func (a *HorizontalController) Run(ctx context.Context) {
return
}
- // start a single worker (we may wish to start more in the future)
- go wait.UntilWithContext(ctx, a.worker, time.Second)
+ for i := 0; i < workers; i++ {
+ go wait.UntilWithContext(ctx, a.worker, time.Second)
+ }
<-ctx.Done()
}
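The only behavioral change in Run is the fan-out: N identical worker goroutines now drain the same rate-limited queue before Run blocks on ctx.Done(). No extra coordination is needed at the queue level, since the workqueue never hands the same key to two workers at once; the shared maps each sync touches are what the new locks further down protect. A runnable sketch of the fan-out, assuming only wait.UntilWithContext from apimachinery (the printing worker body is a stand-in for a.worker):

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// A short-lived context stands in for the controller's shutdown signal.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	workers := 5 // what --concurrent-horizontal-pod-autoscaler-syncs controls
	for i := 0; i < workers; i++ {
		i := i // capture the loop variable for the goroutine
		go wait.UntilWithContext(ctx, func(ctx context.Context) {
			fmt.Printf("worker %d: one sync pass\n", i)
		}, time.Second)
	}

	<-ctx.Done() // Run blocks here until shutdown, exactly like the controller
}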
@@ -357,9 +365,19 @@ func (a *HorizontalController) reconcileKey(ctx context.Context, key string) (de
hpa, err := a.hpaLister.HorizontalPodAutoscalers(namespace).Get(name)
if errors.IsNotFound(err) {
klog.Infof("Horizontal Pod Autoscaler %s has been deleted in %s", name, namespace)
a.recommendationsLock.Lock()
delete(a.recommendations, key)
a.recommendationsLock.Unlock()
a.scaleUpEventsLock.Lock()
delete(a.scaleUpEvents, key)
a.scaleUpEventsLock.Unlock()
a.scaleDownEventsLock.Lock()
delete(a.scaleDownEvents, key)
a.scaleDownEventsLock.Unlock()
return true, nil
}
if err != nil {
@@ -564,6 +582,8 @@ func (a *HorizontalController) computeStatusForExternalMetric(specReplicas, stat
}
func (a *HorizontalController) recordInitialRecommendation(currentReplicas int32, key string) {
a.recommendationsLock.Lock()
defer a.recommendationsLock.Unlock()
if a.recommendations[key] == nil {
a.recommendations[key] = []timestampedRecommendation{{currentReplicas, time.Now()}}
}
@@ -712,6 +732,9 @@ func (a *HorizontalController) stabilizeRecommendation(key string, prenormalized
foundOldSample := false
oldSampleIndex := 0
cutoff := time.Now().Add(-a.downscaleStabilisationWindow)
a.recommendationsLock.Lock()
defer a.recommendationsLock.Unlock()
for i, rec := range a.recommendations[key] {
if rec.timestamp.Before(cutoff) {
foundOldSample = true
@@ -836,6 +859,9 @@ func (a *HorizontalController) storeScaleEvent(behavior *autoscalingv2.Horizonta
foundOldSample := false
if newReplicas > prevReplicas {
longestPolicyPeriod = getLongestPolicyPeriod(behavior.ScaleUp)
a.scaleUpEventsLock.Lock()
defer a.scaleUpEventsLock.Unlock()
markScaleEventsOutdated(a.scaleUpEvents[key], longestPolicyPeriod)
replicaChange := newReplicas - prevReplicas
for i, event := range a.scaleUpEvents[key] {
@@ -852,6 +878,9 @@ func (a *HorizontalController) storeScaleEvent(behavior *autoscalingv2.Horizonta
}
} else {
longestPolicyPeriod = getLongestPolicyPeriod(behavior.ScaleDown)
a.scaleDownEventsLock.Lock()
defer a.scaleDownEventsLock.Unlock()
markScaleEventsOutdated(a.scaleDownEvents[key], longestPolicyPeriod)
replicaChange := prevReplicas - newReplicas
for i, event := range a.scaleDownEvents[key] {
@@ -887,6 +916,8 @@ func (a *HorizontalController) stabilizeRecommendationWithBehaviors(args Normali
downCutoff := now.Add(-time.Second * time.Duration(downDelaySeconds))
// Calculate the upper and lower stabilization limits.
a.recommendationsLock.Lock()
defer a.recommendationsLock.Unlock()
for i, rec := range a.recommendations[args.Key] {
if rec.timestamp.After(upCutoff) {
upRecommendation = min(rec.recommendation, upRecommendation)
@@ -934,7 +965,12 @@ func (a *HorizontalController) convertDesiredReplicasWithBehaviorRate(args Norma
var possibleLimitingReason, possibleLimitingMessage string
if args.DesiredReplicas > args.CurrentReplicas {
a.scaleUpEventsLock.RLock()
defer a.scaleUpEventsLock.RUnlock()
a.scaleDownEventsLock.RLock()
defer a.scaleDownEventsLock.RUnlock()
scaleUpLimit := calculateScaleUpLimitWithScalingRules(args.CurrentReplicas, a.scaleUpEvents[args.Key], a.scaleDownEvents[args.Key], args.ScaleUpBehavior)
if scaleUpLimit < args.CurrentReplicas {
// We shouldn't scale up further until the scaleUpEvents will be cleaned up
scaleUpLimit = args.CurrentReplicas
@@ -952,7 +988,12 @@ func (a *HorizontalController) convertDesiredReplicasWithBehaviorRate(args Norma
return maximumAllowedReplicas, possibleLimitingReason, possibleLimitingMessage
}
} else if args.DesiredReplicas < args.CurrentReplicas {
a.scaleUpEventsLock.RLock()
defer a.scaleUpEventsLock.RUnlock()
a.scaleDownEventsLock.RLock()
defer a.scaleDownEventsLock.RUnlock()
scaleDownLimit := calculateScaleDownLimitWithBehaviors(args.CurrentReplicas, a.scaleUpEvents[args.Key], a.scaleDownEvents[args.Key], args.ScaleDownBehavior)
if scaleDownLimit > args.CurrentReplicas {
// We shouldn't scale down further until the scaleDownEvents will be cleaned up
scaleDownLimit = args.CurrentReplicas
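Every hunk above exists because recommendations, scaleUpEvents, and scaleDownEvents used to be touched by a single worker and are now shared state. Read-modify-write paths take the full Mutex (or RWMutex write lock), while the two read-only lookups in convertDesiredReplicasWithBehaviorRate take RLock so concurrent readers do not serialize. A condensed, runnable sketch of that guard pattern with simplified stand-in types:

package main

import (
	"fmt"
	"sync"
)

// controller is a stand-in for HorizontalController with simplified maps.
type controller struct {
	recommendationsLock sync.Mutex
	recommendations     map[string][]int32

	scaleUpEventsLock sync.RWMutex
	scaleUpEvents     map[string][]int32
}

// recordInitialRecommendation mirrors the write path: full lock around a
// check-then-insert, which would race without it.
func (c *controller) recordInitialRecommendation(replicas int32, key string) {
	c.recommendationsLock.Lock()
	defer c.recommendationsLock.Unlock()
	if c.recommendations[key] == nil {
		c.recommendations[key] = []int32{replicas}
	}
}

// scaleUpEventCount mirrors the read-only path: RLock lets readers overlap.
func (c *controller) scaleUpEventCount(key string) int {
	c.scaleUpEventsLock.RLock()
	defer c.scaleUpEventsLock.RUnlock()
	return len(c.scaleUpEvents[key])
}

func main() {
	c := &controller{
		recommendations: map[string][]int32{},
		scaleUpEvents:   map[string][]int32{},
	}
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ { // five workers hitting the same key
		wg.Add(1)
		go func() {
			defer wg.Done()
			c.recordInitialRecommendation(1, "dummy-namespace/dummy-hpa-0")
			_ = c.scaleUpEventCount("dummy-namespace/dummy-hpa-0")
		}()
	}
	wg.Wait()
	fmt.Println(c.recommendations["dummy-namespace/dummy-hpa-0"]) // [1]
}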


@@ -20,6 +20,7 @@ import (
"context"
"fmt"
"math"
"strings"
"sync"
"testing"
"time"
@@ -756,7 +757,7 @@ func (tc *testCase) runTestWithController(t *testing.T, hpaController *Horizonta
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
informerFactory.Start(ctx.Done())
- go hpaController.Run(ctx)
+ go hpaController.Run(ctx, 5)
tc.Lock()
shouldWait := tc.verifyEvents
@@ -4180,3 +4181,270 @@ func TestNoScaleDownOneMetricEmpty(t *testing.T) {
tc.testEMClient = testEMClient
tc.runTest(t)
}
func TestMultipleHPAs(t *testing.T) {
const hpaCount = 1000
const testNamespace = "dummy-namespace"
processed := make(chan string, hpaCount)
testClient := &fake.Clientset{}
testScaleClient := &scalefake.FakeScaleClient{}
testMetricsClient := &metricsfake.Clientset{}
hpaList := [hpaCount]autoscalingv2.HorizontalPodAutoscaler{}
scaleUpEventsMap := map[string][]timestampedScaleEvent{}
scaleDownEventsMap := map[string][]timestampedScaleEvent{}
scaleList := map[string]*autoscalingv1.Scale{}
podList := map[string]*v1.Pod{}
var minReplicas int32 = 1
var cpuTarget int32 = 10
// generate resources (HPAs, Scales, Pods...)
for i := 0; i < hpaCount; i++ {
hpaName := fmt.Sprintf("dummy-hpa-%v", i)
deploymentName := fmt.Sprintf("dummy-target-%v", i)
labelSet := map[string]string{"name": deploymentName}
selector := labels.SelectorFromSet(labelSet).String()
// generate HPAs
h := autoscalingv2.HorizontalPodAutoscaler{
ObjectMeta: metav1.ObjectMeta{
Name: hpaName,
Namespace: testNamespace,
},
Spec: autoscalingv2.HorizontalPodAutoscalerSpec{
ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{
APIVersion: "apps/v1",
Kind: "Deployment",
Name: deploymentName,
},
MinReplicas: &minReplicas,
MaxReplicas: 10,
Behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{
ScaleUp: generateScalingRules(100, 60, 0, 0, 0),
ScaleDown: generateScalingRules(2, 60, 1, 60, 300),
},
Metrics: []autoscalingv2.MetricSpec{
{
Type: autoscalingv2.ResourceMetricSourceType,
Resource: &autoscalingv2.ResourceMetricSource{
Name: v1.ResourceCPU,
Target: autoscalingv2.MetricTarget{
Type: autoscalingv2.UtilizationMetricType,
AverageUtilization: &cpuTarget,
},
},
},
},
},
Status: autoscalingv2.HorizontalPodAutoscalerStatus{
CurrentReplicas: 1,
DesiredReplicas: 5,
LastScaleTime: &metav1.Time{Time: time.Now()},
},
}
hpaList[i] = h
// generate Scale
scaleList[deploymentName] = &autoscalingv1.Scale{
ObjectMeta: metav1.ObjectMeta{
Name: deploymentName,
Namespace: testNamespace,
},
Spec: autoscalingv1.ScaleSpec{
Replicas: 1,
},
Status: autoscalingv1.ScaleStatus{
Replicas: 1,
Selector: selector,
},
}
// generate Pods
cpuRequest := resource.MustParse("1.0")
pod := v1.Pod{
Status: v1.PodStatus{
Phase: v1.PodRunning,
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
Status: v1.ConditionTrue,
},
},
StartTime: &metav1.Time{Time: time.Now().Add(-10 * time.Minute)},
},
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-0", deploymentName),
Namespace: testNamespace,
Labels: labelSet,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "container1",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(cpuRequest.MilliValue()/2, resource.DecimalSI),
},
},
},
{
Name: "container2",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(cpuRequest.MilliValue()/2, resource.DecimalSI),
},
},
},
},
},
}
podList[deploymentName] = &pod
scaleUpEventsMap[fmt.Sprintf("%s/%s", testNamespace, hpaName)] = generateEventsUniformDistribution([]int{8, 12, 9, 11}, 120)
scaleDownEventsMap[fmt.Sprintf("%s/%s", testNamespace, hpaName)] = generateEventsUniformDistribution([]int{10, 10, 10}, 120)
}
testMetricsClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
podNamePrefix := ""
labelSet := map[string]string{}
// selector should be in form: "name=dummy-target-X" where X is the number of resource
selector := action.(core.ListAction).GetListRestrictions().Labels
parsedSelector := strings.Split(selector.String(), "=")
if len(parsedSelector) > 1 {
labelSet[parsedSelector[0]] = parsedSelector[1]
podNamePrefix = parsedSelector[1]
}
podMetric := metricsapi.PodMetrics{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-0", podNamePrefix),
Namespace: testNamespace,
Labels: labelSet,
},
Timestamp: metav1.Time{Time: time.Now()},
Window: metav1.Duration{Duration: time.Minute},
Containers: []metricsapi.ContainerMetrics{
{
Name: "container1",
Usage: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(
int64(200),
resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(
int64(1024*1024/2),
resource.BinarySI),
},
},
{
Name: "container2",
Usage: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(
int64(300),
resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(
int64(1024*1024/2),
resource.BinarySI),
},
},
},
}
metrics := &metricsapi.PodMetricsList{}
metrics.Items = append(metrics.Items, podMetric)
return true, metrics, nil
})
metricsClient := metrics.NewRESTMetricsClient(
testMetricsClient.MetricsV1beta1(),
&cmfake.FakeCustomMetricsClient{},
&emfake.FakeExternalMetricsClient{},
)
testScaleClient.AddReactor("get", "deployments", func(action core.Action) (handled bool, ret runtime.Object, err error) {
deploymentName := action.(core.GetAction).GetName()
obj := scaleList[deploymentName]
return true, obj, nil
})
testClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
obj := &v1.PodList{}
// selector should be in form: "name=dummy-target-X" where X is the number of resource
selector := action.(core.ListAction).GetListRestrictions().Labels
parsedSelector := strings.Split(selector.String(), "=")
// list with filter
if len(parsedSelector) > 1 {
obj.Items = append(obj.Items, *podList[parsedSelector[1]])
} else {
// no filter - return all pods
for _, p := range podList {
obj.Items = append(obj.Items, *p)
}
}
return true, obj, nil
})
testClient.AddReactor("list", "horizontalpodautoscalers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
obj := &autoscalingv2.HorizontalPodAutoscalerList{
Items: hpaList[:],
}
return true, obj, nil
})
testClient.AddReactor("update", "horizontalpodautoscalers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
handled, obj, err := func() (handled bool, ret *autoscalingv2.HorizontalPodAutoscaler, err error) {
obj := action.(core.UpdateAction).GetObject().(*autoscalingv2.HorizontalPodAutoscaler)
assert.Equal(t, testNamespace, obj.Namespace, "the HPA namespace should be as expected")
return true, obj, nil
}()
processed <- obj.Name
return handled, obj, err
})
informerFactory := informers.NewSharedInformerFactory(testClient, controller.NoResyncPeriodFunc())
hpaController := NewHorizontalController(
testClient.CoreV1(),
testScaleClient,
testClient.AutoscalingV2(),
testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme),
metricsClient,
informerFactory.Autoscaling().V2().HorizontalPodAutoscalers(),
informerFactory.Core().V1().Pods(),
100*time.Millisecond,
5*time.Minute,
defaultTestingTolerance,
defaultTestingCPUInitializationPeriod,
defaultTestingDelayOfInitialReadinessStatus,
)
hpaController.scaleUpEvents = scaleUpEventsMap
hpaController.scaleDownEvents = scaleDownEventsMap
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
informerFactory.Start(ctx.Done())
go hpaController.Run(ctx, 5)
timeoutTime := time.After(15 * time.Second)
timeout := false
processedHPA := make(map[string]bool)
for timeout == false && len(processedHPA) < hpaCount {
select {
case hpaName := <-processed:
processedHPA[hpaName] = true
case <-timeoutTime:
timeout = true
}
}
assert.Equal(t, hpaCount, len(processedHPA), "Expected to process all HPAs")
}
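The completion check at the end of TestMultipleHPAs reduces to a common pattern: drain a channel of finished keys into a set until every distinct key has been seen or a deadline fires, so repeated updates of the same HPA count once. A stripped-down, runnable version of just that loop (the worker goroutines here are illustrative):

package main

import (
	"fmt"
	"time"
)

func main() {
	const hpaCount = 3
	processed := make(chan string, hpaCount)
	for i := 0; i < hpaCount; i++ {
		go func(name string) { processed <- name }(fmt.Sprintf("dummy-hpa-%d", i))
	}

	timeoutTime := time.After(15 * time.Second)
	processedHPA := make(map[string]bool) // a set: duplicates collapse
	timeout := false
	for !timeout && len(processedHPA) < hpaCount {
		select {
		case name := <-processed:
			processedHPA[name] = true
		case <-timeoutTime:
			timeout = true
		}
	}
	fmt.Println("processed all HPAs:", len(processedHPA) == hpaCount)
}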


@@ -50167,6 +50167,14 @@ func schema_k8sio_kube_controller_manager_config_v1alpha1_HPAControllerConfigura
Description: "HPAControllerConfiguration contains elements describing HPAController.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"ConcurrentHorizontalPodAutoscalerSyncs": {
SchemaProps: spec.SchemaProps{
Description: "ConcurrentHorizontalPodAutoscalerSyncs is the number of HPA objects that are allowed to sync concurrently. Larger number = more responsive HPA processing, but more CPU (and network) load.",
Default: 0,
Type: []string{"integer"},
Format: "int32",
},
},
"HorizontalPodAutoscalerSyncPeriod": {
SchemaProps: spec.SchemaProps{
Description: "HorizontalPodAutoscalerSyncPeriod is the period for syncing the number of pods in horizontal pod autoscaler.",
@@ -50218,7 +50226,7 @@ func schema_k8sio_kube_controller_manager_config_v1alpha1_HPAControllerConfigura
},
},
},
Required: []string{"HorizontalPodAutoscalerSyncPeriod", "HorizontalPodAutoscalerUpscaleForbiddenWindow", "HorizontalPodAutoscalerDownscaleStabilizationWindow", "HorizontalPodAutoscalerDownscaleForbiddenWindow", "HorizontalPodAutoscalerTolerance", "HorizontalPodAutoscalerCPUInitializationPeriod", "HorizontalPodAutoscalerInitialReadinessDelay"},
Required: []string{"ConcurrentHorizontalPodAutoscalerSyncs", "HorizontalPodAutoscalerSyncPeriod", "HorizontalPodAutoscalerUpscaleForbiddenWindow", "HorizontalPodAutoscalerDownscaleStabilizationWindow", "HorizontalPodAutoscalerDownscaleForbiddenWindow", "HorizontalPodAutoscalerTolerance", "HorizontalPodAutoscalerCPUInitializationPeriod", "HorizontalPodAutoscalerInitialReadinessDelay"},
},
},
Dependencies: []string{


@@ -315,6 +315,9 @@ type GarbageCollectorControllerConfiguration struct {
// HPAControllerConfiguration contains elements describing HPAController.
type HPAControllerConfiguration struct {
// ConcurrentHorizontalPodAutoscalerSyncs is the number of HPA objects that are allowed to sync concurrently.
// Larger number = more responsive HPA processing, but more CPU (and network) load.
ConcurrentHorizontalPodAutoscalerSyncs int32
// HorizontalPodAutoscalerSyncPeriod is the period for syncing the number of
// pods in horizontal pod autoscaler.
HorizontalPodAutoscalerSyncPeriod metav1.Duration