Merge pull request #87895 from alexzimmer96/68026-lint-pkg-controller-autoscaler

Fix Golint errors in pkg/controller/podautoscaler
Kubernetes Prow Robot 2020-03-17 16:19:53 -07:00 committed by GitHub
commit 1827fe444e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
8 changed files with 55 additions and 55 deletions
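The golint findings cleared by this PR fall into a few recurring patterns: a redundant else after a return, an explicit zero-value initialization, missing doc comments on exported identifiers, and the Cpu/CPU initialism. Here is a minimal, self-contained Go sketch of those patterns (hypothetical names, not code from this PR):

// Package lintdemo is a hypothetical sketch of the lint patterns fixed in this PR;
// none of these identifiers come from the Kubernetes tree.
package lintdemo

// MaxCPU returns the larger of two CPU milli-values.
// golint: exported identifiers need a comment starting with their name, and
// initialisms must be fully capitalized (MaxCpu would be flagged).
func MaxCPU(a, b int32) int32 {
	if a >= b {
		return a
	}
	return b // golint: outdent this instead of wrapping it in a redundant else
}

// SumCPU adds up CPU milli-values.
func SumCPU(values []int32) int32 {
	var total int32 // golint: drop "= 0"; it is already the zero value
	for _, v := range values {
		total += v
	}
	return total
}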

View File

@@ -70,7 +70,6 @@ pkg/controller/namespace
pkg/controller/namespace/config/v1alpha1
pkg/controller/nodeipam/config/v1alpha1
pkg/controller/nodelifecycle/config/v1alpha1
-pkg/controller/podautoscaler
pkg/controller/podautoscaler/config/v1alpha1
pkg/controller/podautoscaler/metrics
pkg/controller/podgc

View File

@@ -452,34 +452,31 @@ func (a *HorizontalController) computeStatusForResourceMetric(currentReplicas in
},
}
return replicaCountProposal, timestampProposal, metricNameProposal, autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
-} else {
-if metricSpec.Resource.Target.AverageUtilization == nil {
-errMsg := "invalid resource metric source: neither a utilization target nor a value target was set"
-err = fmt.Errorf(errMsg)
-condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetResourceMetric", err)
-return 0, time.Time{}, "", condition, fmt.Errorf(errMsg)
-}
-targetUtilization := *metricSpec.Resource.Target.AverageUtilization
-var percentageProposal int32
-var rawProposal int64
-replicaCountProposal, percentageProposal, rawProposal, timestampProposal, err := a.replicaCalc.GetResourceReplicas(currentReplicas, targetUtilization, metricSpec.Resource.Name, hpa.Namespace, selector)
-if err != nil {
-condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetResourceMetric", err)
-return 0, time.Time{}, "", condition, fmt.Errorf("failed to get %s utilization: %v", metricSpec.Resource.Name, err)
-}
-metricNameProposal = fmt.Sprintf("%s resource utilization (percentage of request)", metricSpec.Resource.Name)
-*status = autoscalingv2.MetricStatus{
-Type: autoscalingv2.ResourceMetricSourceType,
-Resource: &autoscalingv2.ResourceMetricStatus{
-Name: metricSpec.Resource.Name,
-Current: autoscalingv2.MetricValueStatus{
-AverageUtilization: &percentageProposal,
-AverageValue: resource.NewMilliQuantity(rawProposal, resource.DecimalSI),
-},
-},
-}
-return replicaCountProposal, timestampProposal, metricNameProposal, autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
-}
+}
+if metricSpec.Resource.Target.AverageUtilization == nil {
+errMsg := "invalid resource metric source: neither a utilization target nor a value target was set"
+err = fmt.Errorf(errMsg)
+condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetResourceMetric", err)
+return 0, time.Time{}, "", condition, fmt.Errorf(errMsg)
+}
+targetUtilization := *metricSpec.Resource.Target.AverageUtilization
+replicaCountProposal, percentageProposal, rawProposal, timestampProposal, err := a.replicaCalc.GetResourceReplicas(currentReplicas, targetUtilization, metricSpec.Resource.Name, hpa.Namespace, selector)
+if err != nil {
+condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetResourceMetric", err)
+return 0, time.Time{}, "", condition, fmt.Errorf("failed to get %s utilization: %v", metricSpec.Resource.Name, err)
+}
+metricNameProposal = fmt.Sprintf("%s resource utilization (percentage of request)", metricSpec.Resource.Name)
+*status = autoscalingv2.MetricStatus{
+Type: autoscalingv2.ResourceMetricSourceType,
+Resource: &autoscalingv2.ResourceMetricStatus{
+Name: metricSpec.Resource.Name,
+Current: autoscalingv2.MetricValueStatus{
+AverageUtilization: &percentageProposal,
+AverageValue: resource.NewMilliQuantity(rawProposal, resource.DecimalSI),
+},
+},
+}
+return replicaCountProposal, timestampProposal, metricNameProposal, autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
}
// computeStatusForExternalMetric computes the desired number of replicas for the specified metric of type ExternalMetricSourceType.
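As background for the hunk above: GetResourceReplicas implements, at its core, the proportional rule documented for the HPA, desiredReplicas = ceil(currentReplicas * currentUtilization / targetUtilization). A standalone sketch of just that arithmetic (the real ReplicaCalculator additionally handles pod readiness, missing metrics, and a tolerance band):

package main

import (
	"fmt"
	"math"
)

// proportionalReplicas is a toy version of the HPA scaling rule; it ignores the
// readiness, missing-metric, and tolerance handling done by ReplicaCalculator.
func proportionalReplicas(currentReplicas, currentUtilization, targetUtilization int32) int32 {
	usageRatio := float64(currentUtilization) / float64(targetUtilization)
	return int32(math.Ceil(usageRatio * float64(currentReplicas)))
}

func main() {
	// Three pods averaging 60% CPU utilization against a 30% target suggest six pods.
	fmt.Println(proportionalReplicas(3, 60, 30)) // prints 6
}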
@@ -992,7 +989,7 @@ func getLongestPolicyPeriod(scalingRules *autoscalingv2.HPAScalingRules) int32 {
// calculateScaleUpLimitWithScalingRules returns the maximum number of pods that could be added for the given HPAScalingRules
func calculateScaleUpLimitWithScalingRules(currentReplicas int32, scaleEvents []timestampedScaleEvent, scalingRules *autoscalingv2.HPAScalingRules) int32 {
-var result int32 = 0
+var result int32
var proposed int32
var selectPolicyFn func(int32, int32) int32
if *scalingRules.SelectPolicy == autoscalingv2.DisabledPolicySelect {
@@ -1180,15 +1177,13 @@ func setConditionInList(inputList []autoscalingv2.HorizontalPodAutoscalerConditi
func max(a, b int32) int32 {
if a >= b {
return a
-} else {
-return b
}
+return b
}
func min(a, b int32) int32 {
if a <= b {
return a
-} else {
-return b
}
+return b
}

View File

@@ -704,7 +704,7 @@ func (tc *testCase) setupController(t *testing.T) (*HorizontalController, inform
100*time.Millisecond, // we need non-zero resync period to avoid race conditions
defaultDownscaleStabilisationWindow,
defaultTestingTolerance,
-defaultTestingCpuInitializationPeriod,
+defaultTestingCPUInitializationPeriod,
defaultTestingDelayOfInitialReadinessStatus,
)
hpaController.hpaListerSynced = alwaysReady
@@ -715,11 +715,11 @@ func (tc *testCase) setupController(t *testing.T) (*HorizontalController, inform
return hpaController, informerFactory
}
-func hotCpuCreationTime() metav1.Time {
+func hotCPUCreationTime() metav1.Time {
return metav1.Time{Time: time.Now()}
}
-func coolCpuCreationTime() metav1.Time {
+func coolCPUCreationTime() metav1.Time {
return metav1.Time{Time: time.Now().Add(-3 * time.Minute)}
}
@@ -803,7 +803,7 @@ func TestScaleUpHotCpuLessScale(t *testing.T) {
verifyCPUCurrent: true,
reportedLevels: []uint64{300, 500, 700},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
-reportedPodStartTime: []metav1.Time{hotCpuCreationTime(), coolCpuCreationTime(), coolCpuCreationTime()},
+reportedPodStartTime: []metav1.Time{hotCPUCreationTime(), coolCPUCreationTime(), coolCPUCreationTime()},
useMetricsAPI: true,
}
tc.runTest(t)
@@ -845,7 +845,7 @@ func TestScaleUpHotCpuNoScale(t *testing.T) {
reportedLevels: []uint64{400, 500, 700},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
-reportedPodStartTime: []metav1.Time{coolCpuCreationTime(), hotCpuCreationTime(), hotCpuCreationTime()},
+reportedPodStartTime: []metav1.Time{coolCPUCreationTime(), hotCPUCreationTime(), hotCPUCreationTime()},
useMetricsAPI: true,
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
Type: autoscalingv2.AbleToScale,
@@ -989,7 +989,7 @@ func TestScaleUpCMUnreadyAndHotCpuNoLessScale(t *testing.T) {
},
reportedLevels: []uint64{50000, 10000, 30000},
reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse},
-reportedPodStartTime: []metav1.Time{coolCpuCreationTime(), coolCpuCreationTime(), hotCpuCreationTime()},
+reportedPodStartTime: []metav1.Time{coolCPUCreationTime(), coolCPUCreationTime(), hotCPUCreationTime()},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
}
tc.runTest(t)
@@ -1019,7 +1019,7 @@ func TestScaleUpCMUnreadyandCpuHot(t *testing.T) {
},
reportedLevels: []uint64{50000, 15000, 30000},
reportedPodReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionFalse},
-reportedPodStartTime: []metav1.Time{hotCpuCreationTime(), coolCpuCreationTime(), hotCpuCreationTime()},
+reportedPodStartTime: []metav1.Time{hotCPUCreationTime(), coolCPUCreationTime(), hotCPUCreationTime()},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
Type: autoscalingv2.AbleToScale,
@@ -1058,7 +1058,7 @@ func TestScaleUpHotCpuNoScaleWouldScaleDown(t *testing.T) {
},
reportedLevels: []uint64{50000, 15000, 30000},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
-reportedPodStartTime: []metav1.Time{hotCpuCreationTime(), coolCpuCreationTime(), hotCpuCreationTime()},
+reportedPodStartTime: []metav1.Time{hotCPUCreationTime(), coolCPUCreationTime(), hotCPUCreationTime()},
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
Type: autoscalingv2.AbleToScale,
Status: v1.ConditionTrue,
@@ -1679,7 +1679,7 @@ func TestScaleDownIgnoreHotCpuPods(t *testing.T) {
reportedLevels: []uint64{100, 300, 500, 250, 250},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
useMetricsAPI: true,
-reportedPodStartTime: []metav1.Time{coolCpuCreationTime(), coolCpuCreationTime(), coolCpuCreationTime(), hotCpuCreationTime(), hotCpuCreationTime()},
+reportedPodStartTime: []metav1.Time{coolCPUCreationTime(), coolCPUCreationTime(), coolCPUCreationTime(), hotCPUCreationTime(), hotCPUCreationTime()},
recommendations: []timestampedRecommendation{},
}
tc.runTest(t)
@@ -2822,7 +2822,7 @@ func TestAvoidUncessaryUpdates(t *testing.T) {
verifyCPUCurrent: true,
reportedLevels: []uint64{400, 500, 700},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
-reportedPodStartTime: []metav1.Time{coolCpuCreationTime(), hotCpuCreationTime(), hotCpuCreationTime()},
+reportedPodStartTime: []metav1.Time{coolCPUCreationTime(), hotCPUCreationTime(), hotCPUCreationTime()},
useMetricsAPI: true,
lastScaleTime: &now,
recommendations: []timestampedRecommendation{},

View File

@@ -512,7 +512,7 @@ func (tc *legacyTestCase) runTest(t *testing.T) {
controller.NoResyncPeriodFunc(),
defaultDownscaleStabilisationWindow,
defaultTestingTolerance,
-defaultTestingCpuInitializationPeriod,
+defaultTestingCPUInitializationPeriod,
defaultTestingDelayOfInitialReadinessStatus,
)
hpaController.hpaListerSynced = alwaysReady

View File

@@ -192,7 +192,7 @@ func (tc *legacyReplicaCalcTestCase) runTest(t *testing.T) {
informerFactory := informers.NewSharedInformerFactory(testClient, controller.NoResyncPeriodFunc())
informer := informerFactory.Core().V1().Pods()
-replicaCalc := NewReplicaCalculator(metricsClient, informer.Lister(), defaultTestingTolerance, defaultTestingCpuInitializationPeriod, defaultTestingDelayOfInitialReadinessStatus)
+replicaCalc := NewReplicaCalculator(metricsClient, informer.Lister(), defaultTestingTolerance, defaultTestingCPUInitializationPeriod, defaultTestingDelayOfInitialReadinessStatus)
stop := make(chan struct{})
defer close(stop)

View File

@@ -29,20 +29,24 @@ type FixedItemIntervalRateLimiter struct {
var _ workqueue.RateLimiter = &FixedItemIntervalRateLimiter{}
+// NewFixedItemIntervalRateLimiter creates a new instance of a RateLimiter using a fixed interval
func NewFixedItemIntervalRateLimiter(interval time.Duration) workqueue.RateLimiter {
return &FixedItemIntervalRateLimiter{
interval: interval,
}
}
+// When returns the interval of the rate limiter
func (r *FixedItemIntervalRateLimiter) When(item interface{}) time.Duration {
return r.interval
}
+// NumRequeues returns back how many failures the item has had
func (r *FixedItemIntervalRateLimiter) NumRequeues(item interface{}) int {
return 1
}
+// Forget indicates that an item is finished being retried.
func (r *FixedItemIntervalRateLimiter) Forget(item interface{}) {
}
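The comments added above document the fixed-interval behaviour: every item waits the same interval and per-item failure counts are not escalated. A self-contained sketch of how such a limiter is typically wired into a client-go workqueue (the local type mirrors FixedItemIntervalRateLimiter; the queue name, interval, and item key are invented):

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

// fixedItemIntervalRateLimiter mirrors the type in the diff: it always waits the
// same interval and never escalates per-item backoff.
type fixedItemIntervalRateLimiter struct {
	interval time.Duration
}

func (r *fixedItemIntervalRateLimiter) When(item interface{}) time.Duration { return r.interval }

func (r *fixedItemIntervalRateLimiter) NumRequeues(item interface{}) int { return 1 }

func (r *fixedItemIntervalRateLimiter) Forget(item interface{}) {}

func main() {
	limiter := &fixedItemIntervalRateLimiter{interval: 15 * time.Second}
	queue := workqueue.NewNamedRateLimitingQueue(limiter, "demo-hpa")
	defer queue.ShutDown()

	// The item becomes available for processing only after the fixed 15s interval.
	queue.AddRateLimited("default/my-hpa")
	fmt.Println("items ready immediately:", queue.Len()) // 0: the add is delayed
}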

View File

@@ -35,10 +35,11 @@ const (
// defaultTestingTolerance is default value for calculating when to
// scale up/scale down.
defaultTestingTolerance = 0.1
-defaultTestingCpuInitializationPeriod = 2 * time.Minute
+defaultTestingCPUInitializationPeriod = 2 * time.Minute
defaultTestingDelayOfInitialReadinessStatus = 10 * time.Second
)
+// ReplicaCalculator bundles all needed information to calculate the target amount of replicas
type ReplicaCalculator struct {
metricsClient metricsclient.MetricsClient
podLister corelisters.PodLister
@@ -47,6 +48,7 @@ type ReplicaCalculator struct {
delayOfInitialReadinessStatus time.Duration
}
+// NewReplicaCalculator creates a new ReplicaCalculator and passes all necessary information to the new instance
func NewReplicaCalculator(metricsClient metricsclient.MetricsClient, podLister corelisters.PodLister, tolerance float64, cpuInitializationPeriod, delayOfInitialReadinessStatus time.Duration) *ReplicaCalculator {
return &ReplicaCalculator{
metricsClient: metricsClient,
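Stepping back from this file's diff: defaultTestingTolerance (0.1) mirrors the HPA tolerance band, inside which the controller skips scaling because the usage ratio is close enough to 1.0 (in production the value comes from the kube-controller-manager flag --horizontal-pod-autoscaler-tolerance, which also defaults to 0.1). A tiny sketch of that check with an invented helper name:

package main

import (
	"fmt"
	"math"
)

// withinTolerance reports whether the usage ratio is close enough to 1.0 that
// the HPA would skip scaling; 0.1 matches defaultTestingTolerance above.
func withinTolerance(usageRatio, tolerance float64) bool {
	return math.Abs(usageRatio-1.0) <= tolerance
}

func main() {
	fmt.Println(withinTolerance(52.0/50.0, 0.1)) // true: 4% over target, no scaling
	fmt.Println(withinTolerance(75.0/50.0, 0.1)) // false: 50% over target, scale up
}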

View File

@@ -345,7 +345,7 @@ func (tc *replicaCalcTestCase) runTest(t *testing.T) {
informerFactory := informers.NewSharedInformerFactory(testClient, controller.NoResyncPeriodFunc())
informer := informerFactory.Core().V1().Pods()
-replicaCalc := NewReplicaCalculator(metricsClient, informer.Lister(), defaultTestingTolerance, defaultTestingCpuInitializationPeriod, defaultTestingDelayOfInitialReadinessStatus)
+replicaCalc := NewReplicaCalculator(metricsClient, informer.Lister(), defaultTestingTolerance, defaultTestingCPUInitializationPeriod, defaultTestingDelayOfInitialReadinessStatus)
stop := make(chan struct{})
defer close(stop)
@@ -480,7 +480,7 @@ func TestReplicaCalcScaleUpHotCpuLessScale(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 3,
expectedReplicas: 4,
-podStartTime: []metav1.Time{hotCpuCreationTime(), coolCpuCreationTime(), coolCpuCreationTime()},
+podStartTime: []metav1.Time{hotCPUCreationTime(), coolCPUCreationTime(), coolCPUCreationTime()},
resource: &resourceInfo{
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
@@ -517,7 +517,7 @@ func TestReplicaCalcScaleHotCpuNoScale(t *testing.T) {
currentReplicas: 3,
expectedReplicas: 3,
podReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
-podStartTime: []metav1.Time{coolCpuCreationTime(), hotCpuCreationTime(), hotCpuCreationTime()},
+podStartTime: []metav1.Time{coolCPUCreationTime(), hotCPUCreationTime(), hotCPUCreationTime()},
resource: &resourceInfo{
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
@@ -590,7 +590,7 @@ func TestReplicaCalcScaleUpCMUnreadyHotCpuNoLessScale(t *testing.T) {
currentReplicas: 3,
expectedReplicas: 6,
podReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse},
-podStartTime: []metav1.Time{coolCpuCreationTime(), coolCpuCreationTime(), hotCpuCreationTime()},
+podStartTime: []metav1.Time{coolCPUCreationTime(), coolCPUCreationTime(), hotCPUCreationTime()},
metric: &metricInfo{
name: "qps",
levels: []int64{50000, 10000, 30000},
@@ -607,7 +607,7 @@ func TestReplicaCalcScaleUpCMUnreadyHotCpuScaleWouldScaleDown(t *testing.T) {
currentReplicas: 3,
expectedReplicas: 7,
podReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionFalse},
-podStartTime: []metav1.Time{hotCpuCreationTime(), coolCpuCreationTime(), hotCpuCreationTime()},
+podStartTime: []metav1.Time{hotCPUCreationTime(), coolCPUCreationTime(), hotCPUCreationTime()},
metric: &metricInfo{
name: "qps",
levels: []int64{50000, 15000, 30000},
@@ -886,7 +886,7 @@ func TestReplicaCalcScaleDownIgnoreHotCpuPods(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 5,
expectedReplicas: 2,
-podStartTime: []metav1.Time{coolCpuCreationTime(), coolCpuCreationTime(), coolCpuCreationTime(), hotCpuCreationTime(), hotCpuCreationTime()},
+podStartTime: []metav1.Time{coolCPUCreationTime(), coolCPUCreationTime(), coolCPUCreationTime(), hotCPUCreationTime(), hotCPUCreationTime()},
resource: &resourceInfo{
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
@@ -1178,7 +1178,7 @@ func TestReplicaCalcMissingMetricsHotCpuNoChange(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 3,
expectedReplicas: 3,
-podStartTime: []metav1.Time{hotCpuCreationTime(), coolCpuCreationTime(), coolCpuCreationTime()},
+podStartTime: []metav1.Time{hotCPUCreationTime(), coolCPUCreationTime(), coolCPUCreationTime()},
resource: &resourceInfo{
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
@@ -1215,7 +1215,7 @@ func TestReplicaCalcMissingMetricsHotCpuScaleUp(t *testing.T) {
currentReplicas: 3,
expectedReplicas: 4,
podReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue},
-podStartTime: []metav1.Time{hotCpuCreationTime(), coolCpuCreationTime(), coolCpuCreationTime()},
+podStartTime: []metav1.Time{hotCPUCreationTime(), coolCPUCreationTime(), coolCPUCreationTime()},
resource: &resourceInfo{
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
@@ -1657,7 +1657,7 @@ func TestGroupPods(t *testing.T) {
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
-readyPodCount, ignoredPods, missingPods := groupPods(tc.pods, tc.metrics, tc.resource, defaultTestingCpuInitializationPeriod, defaultTestingDelayOfInitialReadinessStatus)
+readyPodCount, ignoredPods, missingPods := groupPods(tc.pods, tc.metrics, tc.resource, defaultTestingCPUInitializationPeriod, defaultTestingDelayOfInitialReadinessStatus)
if readyPodCount != tc.expectReadyPodCount {
t.Errorf("%s got readyPodCount %d, expected %d", tc.name, readyPodCount, tc.expectReadyPodCount)
}