diff --git a/cmd/kube-controller-manager/app/autoscaling.go b/cmd/kube-controller-manager/app/autoscaling.go
index 124f369b517..dccdac49e06 100644
--- a/cmd/kube-controller-manager/app/autoscaling.go
+++ b/cmd/kube-controller-manager/app/autoscaling.go
@@ -93,7 +93,6 @@ func startHPAControllerWithMetricsClient(ctx ControllerContext, metricsClient me
 		replicaCalc,
 		ctx.InformerFactory.Autoscaling().V1().HorizontalPodAutoscalers(),
 		ctx.ComponentConfig.HPAController.HorizontalPodAutoscalerSyncPeriod.Duration,
-		ctx.ComponentConfig.HPAController.HorizontalPodAutoscalerUpscaleForbiddenWindow.Duration,
 		ctx.ComponentConfig.HPAController.HorizontalPodAutoscalerDownscaleForbiddenWindow.Duration,
 	).Run(ctx.Stop)
 	return nil, true, nil
diff --git a/cmd/kube-controller-manager/app/options/hpacontroller.go b/cmd/kube-controller-manager/app/options/hpacontroller.go
index cbbb159c0d7..d383bfd0886 100644
--- a/cmd/kube-controller-manager/app/options/hpacontroller.go
+++ b/cmd/kube-controller-manager/app/options/hpacontroller.go
@@ -40,6 +40,7 @@ func (o *HPAControllerOptions) AddFlags(fs *pflag.FlagSet) {
 	fs.DurationVar(&o.HorizontalPodAutoscalerSyncPeriod.Duration, "horizontal-pod-autoscaler-sync-period", o.HorizontalPodAutoscalerSyncPeriod.Duration, "The period for syncing the number of pods in horizontal pod autoscaler.")
 	fs.DurationVar(&o.HorizontalPodAutoscalerUpscaleForbiddenWindow.Duration, "horizontal-pod-autoscaler-upscale-delay", o.HorizontalPodAutoscalerUpscaleForbiddenWindow.Duration, "The period since last upscale, before another upscale can be performed in horizontal pod autoscaler.")
+	fs.MarkDeprecated("horizontal-pod-autoscaler-upscale-delay", "This flag is currently a no-op and will be deleted.")
 	fs.DurationVar(&o.HorizontalPodAutoscalerDownscaleForbiddenWindow.Duration, "horizontal-pod-autoscaler-downscale-delay", o.HorizontalPodAutoscalerDownscaleForbiddenWindow.Duration, "The period since last downscale, before another downscale can be performed in horizontal pod autoscaler.")
 	fs.Float64Var(&o.HorizontalPodAutoscalerTolerance, "horizontal-pod-autoscaler-tolerance", o.HorizontalPodAutoscalerTolerance, "The minimum change (from 1.0) in the desired-to-actual metrics ratio for the horizontal pod autoscaler to consider scaling.")
 	fs.BoolVar(&o.HorizontalPodAutoscalerUseRESTClients, "horizontal-pod-autoscaler-use-rest-clients", o.HorizontalPodAutoscalerUseRESTClients, "If set to true, causes the horizontal pod autoscaler controller to use REST clients through the kube-aggregator, instead of using the legacy metrics client through the API server proxy. This is required for custom metrics support in the horizontal pod autoscaler.")
@@ -52,7 +53,6 @@ func (o *HPAControllerOptions) ApplyTo(cfg *componentconfig.HPAControllerConfigu
 	}
 
 	cfg.HorizontalPodAutoscalerSyncPeriod = o.HorizontalPodAutoscalerSyncPeriod
-	cfg.HorizontalPodAutoscalerUpscaleForbiddenWindow = o.HorizontalPodAutoscalerUpscaleForbiddenWindow
 	cfg.HorizontalPodAutoscalerDownscaleForbiddenWindow = o.HorizontalPodAutoscalerDownscaleForbiddenWindow
 	cfg.HorizontalPodAutoscalerTolerance = o.HorizontalPodAutoscalerTolerance
 	cfg.HorizontalPodAutoscalerUseRESTClients = o.HorizontalPodAutoscalerUseRESTClients
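The deprecation above keeps the CLI stable while the controller ignores the value. A minimal standalone sketch of how a spf13/pflag flag behaves once MarkDeprecated is applied (the flag-set name and main function here are invented for illustration):

package main

import (
	"fmt"
	"time"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("kube-controller-manager", pflag.ExitOnError)
	upscaleDelay := fs.Duration("horizontal-pod-autoscaler-upscale-delay", 3*time.Minute,
		"The period since last upscale, before another upscale can be performed in horizontal pod autoscaler.")
	// MarkDeprecated hides the flag from --help and prints the given message as a
	// warning whenever a user still sets the flag; parsing continues to succeed.
	fs.MarkDeprecated("horizontal-pod-autoscaler-upscale-delay", "This flag is currently a no-op and will be deleted.")
	fs.Parse([]string{"--horizontal-pod-autoscaler-upscale-delay=1m"})
	// The value still parses, but nothing in the controller reads it anymore.
	fmt.Println(*upscaleDelay)
}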
diff --git a/pkg/controller/podautoscaler/horizontal.go b/pkg/controller/podautoscaler/horizontal.go
index b3e4f87996d..ec4680ec2fa 100644
--- a/pkg/controller/podautoscaler/horizontal.go
+++ b/pkg/controller/podautoscaler/horizontal.go
@@ -64,7 +64,6 @@ type HorizontalController struct {
 	replicaCalc   *ReplicaCalculator
 	eventRecorder record.EventRecorder
 
-	upscaleForbiddenWindow   time.Duration
 	downscaleForbiddenWindow time.Duration
 
 	// hpaLister is able to list/get HPAs from the shared cache from the informer passed in to
@@ -85,7 +84,6 @@ func NewHorizontalController(
 	replicaCalc *ReplicaCalculator,
 	hpaInformer autoscalinginformers.HorizontalPodAutoscalerInformer,
 	resyncPeriod time.Duration,
-	upscaleForbiddenWindow time.Duration,
 	downscaleForbiddenWindow time.Duration,
 ) *HorizontalController {
 
@@ -99,7 +97,6 @@ func NewHorizontalController(
 		eventRecorder:            recorder,
 		scaleNamespacer:          scaleNamespacer,
 		hpaNamespacer:            hpaNamespacer,
-		upscaleForbiddenWindow:   upscaleForbiddenWindow,
 		downscaleForbiddenWindow: downscaleForbiddenWindow,
 		queue:                    workqueue.NewNamedRateLimitingQueue(NewDefaultHPARateLimiter(resyncPeriod), "horizontalpodautoscaler"),
 		mapper:                   mapper,
@@ -246,7 +243,6 @@ func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.Hori
 		setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidMetricSourceType", "the HPA was unable to compute the replica count: %s", errMsg)
 		return 0, "", nil, time.Time{}, fmt.Errorf(errMsg)
 	}
-
 	if replicas == 0 || replicaCountProposal > replicas {
 		timestamp = timestampProposal
 		replicas = replicaCountProposal
@@ -472,6 +468,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.Ho
 		rescaleReason = "Current number of replicas must be greater than 0"
 		desiredReplicas = 1
 	} else {
+
 		metricDesiredReplicas, metricName, metricStatuses, metricTimestamp, err = a.computeReplicasForMetrics(hpa, scale, hpa.Spec.Metrics)
 		if err != nil {
 			a.setCurrentReplicasInStatus(hpa, currentReplicas)
@@ -507,15 +504,6 @@ func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.Ho
 			setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "BackoffDownscale", "the time since the previous scale is still within the downscale forbidden window")
 			backoffDown = true
 		}
-
-		if !hpa.Status.LastScaleTime.Add(a.upscaleForbiddenWindow).Before(timestamp) {
-			backoffUp = true
-			if backoffDown {
-				setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "BackoffBoth", "the time since the previous scale is still within both the downscale and upscale forbidden windows")
-			} else {
-				setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "BackoffUpscale", "the time since the previous scale is still within the upscale forbidden window")
-			}
-		}
 	}
 
 	if !backoffDown && !backoffUp {
@@ -634,9 +622,8 @@ func (a *HorizontalController) shouldScale(hpa *autoscalingv2.HorizontalPodAutos
 		return true
 	}
 
-	// Going up only if the usage ratio increased significantly above the target
-	// and there was no rescaling in the last upscaleForbiddenWindow.
-	if desiredReplicas > currentReplicas && hpa.Status.LastScaleTime.Add(a.upscaleForbiddenWindow).Before(timestamp) {
+	// Going up only if the usage ratio increased significantly above the target.
+	if desiredReplicas > currentReplicas {
 		return true
 	}
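With the upscale forbidden window removed, the downscale window is the only time-based backoff left. A condensed sketch of the scale decision that remains after this patch (simplified: the real shouldScale above also special-cases a nil LastScaleTime):

package sketch

import "time"

// shouldScaleSketch mirrors the post-patch shouldScale logic: scale-downs are
// still rate-limited by the downscale forbidden window, scale-ups are immediate.
func shouldScaleSketch(currentReplicas, desiredReplicas int32, lastScale, now time.Time, downscaleForbiddenWindow time.Duration) bool {
	if desiredReplicas == currentReplicas {
		// No change needed.
		return false
	}
	if desiredReplicas < currentReplicas {
		// Going down only if there was no rescaling in the last downscaleForbiddenWindow.
		return lastScale.Add(downscaleForbiddenWindow).Before(now)
	}
	// Going up as soon as the usage ratio rises above the target: no backoff,
	// so load spikes are answered without waiting out an upscale delay.
	return true
}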
diff --git a/pkg/controller/podautoscaler/horizontal_test.go b/pkg/controller/podautoscaler/horizontal_test.go
index 1a11becb2f7..dc4015b08b4 100644
--- a/pkg/controller/podautoscaler/horizontal_test.go
+++ b/pkg/controller/podautoscaler/horizontal_test.go
@@ -95,23 +95,23 @@ type testCase struct {
 	minReplicas     int32
 	maxReplicas     int32
 	initialReplicas int32
-	desiredReplicas int32
 
 	// CPU target utilization as a percentage of the requested resources.
-	CPUTarget            int32
-	CPUCurrent           int32
-	verifyCPUCurrent     bool
-	reportedLevels       []uint64
-	reportedCPURequests  []resource.Quantity
-	reportedPodReadiness []v1.ConditionStatus
-	reportedPodPhase     []v1.PodPhase
-	scaleUpdated         bool
-	statusUpdated        bool
-	eventCreated         bool
-	verifyEvents         bool
-	useMetricsAPI        bool
-	metricsTarget        []autoscalingv2.MetricSpec
-	expectedConditions   []autoscalingv1.HorizontalPodAutoscalerCondition
+	CPUTarget               int32
+	CPUCurrent              int32
+	verifyCPUCurrent        bool
+	reportedLevels          []uint64
+	reportedCPURequests     []resource.Quantity
+	reportedPodReadiness    []v1.ConditionStatus
+	reportedPodPhase        []v1.PodPhase
+	scaleUpdated            bool
+	statusUpdated           bool
+	eventCreated            bool
+	verifyEvents            bool
+	useMetricsAPI           bool
+	metricsTarget           []autoscalingv2.MetricSpec
+	expectedDesiredReplicas int32
+	expectedConditions      []autoscalingv1.HorizontalPodAutoscalerCondition
 
 	// Channel with names of HPA objects which we have reconciled.
 	processed chan string
@@ -316,7 +316,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa
 		obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.HorizontalPodAutoscaler)
 		assert.Equal(t, namespace, obj.Namespace, "the HPA namespace should be as expected")
 		assert.Equal(t, hpaName, obj.Name, "the HPA name should be as expected")
-		assert.Equal(t, tc.desiredReplicas, obj.Status.DesiredReplicas, "the desired replica count reported in the object status should be as expected")
+		assert.Equal(t, tc.expectedDesiredReplicas, obj.Status.DesiredReplicas, "the desired replica count reported in the object status should be as expected")
 		if tc.verifyCPUCurrent {
 			if assert.NotNil(t, obj.Status.CurrentCPUUtilizationPercentage, "the reported CPU utilization percentage should be non-nil") {
 				assert.Equal(t, tc.CPUCurrent, *obj.Status.CurrentCPUUtilizationPercentage, "the report CPU utilization percentage should be as expected")
@@ -411,7 +411,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa
 		obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale)
 		replicas := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale).Spec.Replicas
-		assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the RC should be as expected")
+		assert.Equal(t, tc.expectedDesiredReplicas, replicas, "the replica count of the RC should be as expected")
 		tc.scaleUpdated = true
 		return true, obj, nil
 	})
 
@@ -422,7 +422,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa
 		obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale)
 		replicas := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale).Spec.Replicas
-		assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the deployment should be as expected")
+		assert.Equal(t, tc.expectedDesiredReplicas, replicas, "the replica count of the deployment should be as expected")
 		tc.scaleUpdated = true
 		return true, obj, nil
 	})
 
@@ -433,7 +433,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa
 		obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale)
 		replicas := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale).Spec.Replicas
-		assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the replicaset should be as expected")
+		assert.Equal(t, tc.expectedDesiredReplicas, replicas, "the replica count of the replicaset should be as expected")
 		tc.scaleUpdated = true
 		return true, obj, nil
 	})
 
@@ -583,10 +583,10 @@ func (tc *testCase) verifyResults(t *testing.T) {
 	tc.Lock()
 	defer tc.Unlock()
 
-	assert.Equal(t, tc.initialReplicas != tc.desiredReplicas, tc.scaleUpdated, "the scale should only be updated if we expected a change in replicas")
+	assert.Equal(t, tc.initialReplicas != tc.expectedDesiredReplicas, tc.scaleUpdated, "the scale should only be updated if we expected a change in replicas")
 	assert.True(t, tc.statusUpdated, "the status should have been updated")
 	if tc.verifyEvents {
-		assert.Equal(t, tc.initialReplicas != tc.desiredReplicas, tc.eventCreated, "an event should have been created only if we expected a change in replicas")
+		assert.Equal(t, tc.initialReplicas != tc.expectedDesiredReplicas, tc.eventCreated, "an event should have been created only if we expected a change in replicas")
 	}
 }
 
@@ -622,11 +622,11 @@ func (tc *testCase) setupController(t *testing.T) (*HorizontalController, inform
 		if tc.verifyEvents {
 			switch obj.Reason {
 			case "SuccessfulRescale":
-				assert.Equal(t, fmt.Sprintf("New size: %d; reason: cpu resource utilization (percentage of request) above target", tc.desiredReplicas), obj.Message)
+				assert.Equal(t, fmt.Sprintf("New size: %d; reason: cpu resource utilization (percentage of request) above target", tc.expectedDesiredReplicas), obj.Message)
 			case "DesiredReplicasComputed":
 				assert.Equal(t, fmt.Sprintf(
 					"Computed the desired num of replicas: %d (avgCPUutil: %d, current replicas: %d)",
-					tc.desiredReplicas,
+					tc.expectedDesiredReplicas,
 					(int64(tc.reportedLevels[0])*100)/tc.reportedCPURequests[0].MilliValue(), tc.initialReplicas), obj.Message)
 			default:
 				assert.False(t, true, fmt.Sprintf("Unexpected event: %s / %s", obj.Reason, obj.Message))
@@ -643,7 +643,6 @@ func (tc *testCase) setupController(t *testing.T) (*HorizontalController, inform
 	}
 
 	informerFactory := informers.NewSharedInformerFactory(testClient, controller.NoResyncPeriodFunc())
-	defaultUpscaleForbiddenWindow := 3 * time.Minute
 	defaultDownscaleForbiddenWindow := 5 * time.Minute
 
 	hpaController := NewHorizontalController(
@@ -654,7 +653,6 @@ func (tc *testCase) setupController(t *testing.T) (*HorizontalController, inform
 		replicaCalc,
 		informerFactory.Autoscaling().V1().HorizontalPodAutoscalers(),
 		controller.NoResyncPeriodFunc(),
-		defaultUpscaleForbiddenWindow,
 		defaultDownscaleForbiddenWindow,
 	)
 	hpaController.hpaListerSynced = alwaysReady
@@ -688,49 +686,49 @@ func (tc *testCase) runTest(t *testing.T) {
 
 func TestScaleUp(t *testing.T) {
 	tc := testCase{
-		minReplicas:         2,
-		maxReplicas:         6,
-		initialReplicas:     3,
-		desiredReplicas:     5,
-		CPUTarget:           30,
-		verifyCPUCurrent:    true,
-		reportedLevels:      []uint64{300, 500, 700},
-		reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
-		useMetricsAPI:       true,
+		minReplicas:             2,
+		maxReplicas:             6,
+		initialReplicas:         3,
+		expectedDesiredReplicas: 5,
+		CPUTarget:               30,
+		verifyCPUCurrent:        true,
+		reportedLevels:          []uint64{300, 500, 700},
+		reportedCPURequests:     []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
+		useMetricsAPI:           true,
 	}
 	tc.runTest(t)
 }
 
 func TestScaleUpUnreadyLessScale(t *testing.T) {
 	tc := testCase{
-		minReplicas:          2,
-		maxReplicas:          6,
-		initialReplicas:      3,
-		desiredReplicas:      4,
-		CPUTarget:            30,
-		CPUCurrent:           60,
-		verifyCPUCurrent:     true,
-		reportedLevels:       []uint64{300, 500, 700},
-		reportedCPURequests:  []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
-		reportedPodReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue},
-		useMetricsAPI:        true,
+		minReplicas:             2,
+		maxReplicas:             6,
+		initialReplicas:         3,
+		expectedDesiredReplicas: 4,
+		CPUTarget:               30,
+		CPUCurrent:              60,
+		verifyCPUCurrent:        true,
+		reportedLevels:          []uint64{300, 500, 700},
+		reportedCPURequests:     []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
+		reportedPodReadiness:    []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue},
+		useMetricsAPI:           true,
 	}
 	tc.runTest(t)
}
 
 func TestScaleUpUnreadyNoScale(t *testing.T) {
 	tc := testCase{
-		minReplicas:          2,
-		maxReplicas:          6,
-		initialReplicas:      3,
-		desiredReplicas:      3,
-		CPUTarget:            30,
-		CPUCurrent:           40,
-		verifyCPUCurrent:     true,
-		reportedLevels:       []uint64{400, 500, 700},
-		reportedCPURequests:  []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
-		reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
-		useMetricsAPI:        true,
+		minReplicas:             2,
+		maxReplicas:             6,
+		initialReplicas:         3,
+		expectedDesiredReplicas: 3,
+		CPUTarget:               30,
+		CPUCurrent:              40,
+		verifyCPUCurrent:        true,
+		reportedLevels:          []uint64{400, 500, 700},
+		reportedCPURequests:     []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
+		reportedPodReadiness:    []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
+		useMetricsAPI:           true,
 		expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
 			Type:   autoscalingv2.AbleToScale,
 			Status: v1.ConditionTrue,
@@ -742,33 +740,33 @@ func TestScaleUpUnreadyNoScale(t *testing.T) {
 
 func TestScaleUpIgnoresFailedPods(t *testing.T) {
 	tc := testCase{
-		minReplicas:          2,
-		maxReplicas:          6,
-		initialReplicas:      2,
-		desiredReplicas:      4,
-		CPUTarget:            30,
-		CPUCurrent:           60,
-		verifyCPUCurrent:     true,
-		reportedLevels:       []uint64{500, 700},
-		reportedCPURequests:  []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
-		reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
-		reportedPodPhase:     []v1.PodPhase{v1.PodRunning, v1.PodRunning, v1.PodFailed, v1.PodFailed},
-		useMetricsAPI:        true,
+		minReplicas:             2,
+		maxReplicas:             6,
+		initialReplicas:         2,
+		expectedDesiredReplicas: 4,
+		CPUTarget:               30,
+		CPUCurrent:              60,
+		verifyCPUCurrent:        true,
+		reportedLevels:          []uint64{500, 700},
+		reportedCPURequests:     []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
+		reportedPodReadiness:    []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
+		reportedPodPhase:        []v1.PodPhase{v1.PodRunning, v1.PodRunning, v1.PodFailed, v1.PodFailed},
+		useMetricsAPI:           true,
 	}
 	tc.runTest(t)
 }
 
 func TestScaleUpDeployment(t *testing.T) {
 	tc := testCase{
-		minReplicas:         2,
-		maxReplicas:         6,
-		initialReplicas:     3,
-		desiredReplicas:     5,
-		CPUTarget:           30,
-		verifyCPUCurrent:    true,
-		reportedLevels:      []uint64{300, 500, 700},
-		reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
-		useMetricsAPI:       true,
+		minReplicas:             2,
+		maxReplicas:             6,
+		initialReplicas:         3,
+		expectedDesiredReplicas: 5,
+		CPUTarget:               30,
+		verifyCPUCurrent:        true,
+		reportedLevels:          []uint64{300, 500, 700},
+		reportedCPURequests:     []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
+		useMetricsAPI:           true,
 		resource: &fakeResource{
 			name:       "test-dep",
 			apiVersion: "extensions/v1beta1",
@@ -780,15 +778,15 @@ func TestScaleUpDeployment(t *testing.T) {
 
 func TestScaleUpReplicaSet(t *testing.T) {
 	tc := testCase{
-		minReplicas:         2,
-		maxReplicas:         6,
-		initialReplicas:     3,
-		desiredReplicas:     5,
-		CPUTarget:           30,
-		verifyCPUCurrent:    true,
-		reportedLevels:      []uint64{300, 500, 700},
-		reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
-		useMetricsAPI:       true,
+		minReplicas:             2,
+		maxReplicas:             6,
+		initialReplicas:         3,
+		expectedDesiredReplicas: 5,
+		CPUTarget:               30,
+		verifyCPUCurrent:        true,
+		reportedLevels:          []uint64{300, 500, 700},
+		reportedCPURequests:     []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
+		useMetricsAPI:           true,
 		resource: &fakeResource{
 			name:       "test-replicaset",
 			apiVersion: "extensions/v1beta1",
@@ -800,11 +798,11 @@ func TestScaleUpReplicaSet(t *testing.T) {
 
 func TestScaleUpCM(t *testing.T) {
 	tc := testCase{
-		minReplicas:     2,
-		maxReplicas:     6,
-		initialReplicas: 3,
-		desiredReplicas: 4,
-		CPUTarget:       0,
+		minReplicas:             2,
+		maxReplicas:             6,
+		initialReplicas:         3,
+		expectedDesiredReplicas: 4,
+		CPUTarget:               0,
 		metricsTarget: []autoscalingv2.MetricSpec{
 			{
 				Type: autoscalingv2.PodsMetricSourceType,
@@ -822,11 +820,11 @@ func TestScaleUpCM(t *testing.T) {
 
 func TestScaleUpCMUnreadyLessScale(t *testing.T) {
 	tc := testCase{
-		minReplicas:     2,
-		maxReplicas:     6,
-		initialReplicas: 3,
-		desiredReplicas: 4,
-		CPUTarget:       0,
+		minReplicas:             2,
+		maxReplicas:             6,
+		initialReplicas:         3,
+		expectedDesiredReplicas: 4,
+		CPUTarget:               0,
 		metricsTarget: []autoscalingv2.MetricSpec{
 			{
 				Type: autoscalingv2.PodsMetricSourceType,
@@ -845,11 +843,11 @@ func TestScaleUpCMUnreadyLessScale(t *testing.T) {
 
 func TestScaleUpCMUnreadyNoScaleWouldScaleDown(t *testing.T) {
 	tc := testCase{
-		minReplicas:     2,
-		maxReplicas:     6,
-		initialReplicas: 3,
-		desiredReplicas: 3,
-		CPUTarget:       0,
+		minReplicas:             2,
+		maxReplicas:             6,
+		initialReplicas:         3,
+		expectedDesiredReplicas: 3,
+		CPUTarget:               0,
 		metricsTarget: []autoscalingv2.MetricSpec{
 			{
 				Type: autoscalingv2.PodsMetricSourceType,
@@ -873,11 +871,11 @@ func TestScaleUpCMUnreadyNoScaleWouldScaleDown(t *testing.T) {
 
 func TestScaleUpCMObject(t *testing.T) {
 	tc := testCase{
-		minReplicas:     2,
-		maxReplicas:     6,
-		initialReplicas: 3,
-		desiredReplicas: 4,
-		CPUTarget:       0,
+		minReplicas:             2,
+		maxReplicas:             6,
+		initialReplicas:         3,
+		expectedDesiredReplicas: 4,
+		CPUTarget:               0,
 		metricsTarget: []autoscalingv2.MetricSpec{
 			{
 				Type: autoscalingv2.ObjectMetricSourceType,
@@ -899,10 +897,10 @@ func TestScaleUpCMObject(t *testing.T) {
 
 func TestScaleUpCMExternal(t *testing.T) {
 	tc := testCase{
-		minReplicas:     2,
-		maxReplicas:     6,
-		initialReplicas: 3,
-		desiredReplicas: 4,
+		minReplicas:             2,
+		maxReplicas:             6,
+		initialReplicas:         3,
+		expectedDesiredReplicas: 4,
 		metricsTarget: []autoscalingv2.MetricSpec{
 			{
 				Type: autoscalingv2.ExternalMetricSourceType,
@@ -920,10 +918,10 @@ func TestScaleUpCMExternal(t *testing.T) {
 
 func TestScaleUpPerPodCMExternal(t *testing.T) {
 	tc := testCase{
-		minReplicas:     2,
-		maxReplicas:     6,
-		initialReplicas: 3,
-		desiredReplicas: 4,
+		minReplicas:             2,
+		maxReplicas:             6,
+		initialReplicas:         3,
+		expectedDesiredReplicas: 4,
 		metricsTarget: []autoscalingv2.MetricSpec{
 			{
 				Type: autoscalingv2.ExternalMetricSourceType,
@@ -941,26 +939,26 @@ func TestScaleUpPerPodCMExternal(t *testing.T) {
 
 func TestScaleDown(t *testing.T) {
 	tc := testCase{
-		minReplicas:         2,
-		maxReplicas:         6,
-		initialReplicas:     5,
-		desiredReplicas:     3,
-		CPUTarget:           50,
-		verifyCPUCurrent:    true,
-		reportedLevels:      []uint64{100, 300, 500, 250, 250},
-		reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
-		useMetricsAPI:       true,
+		minReplicas:             2,
+		maxReplicas:             6,
+		initialReplicas:         5,
+		expectedDesiredReplicas: 3,
+		CPUTarget:               50,
+		verifyCPUCurrent:        true,
+		reportedLevels:          []uint64{100, 300, 500, 250, 250},
+		reportedCPURequests:     []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
+		useMetricsAPI:           true,
 	}
 	tc.runTest(t)
 }
 
 func TestScaleDownCM(t *testing.T) {
 	tc := testCase{
-		minReplicas:     2,
-		maxReplicas:     6,
-		initialReplicas: 5,
-		desiredReplicas: 3,
-		CPUTarget:       0,
+		minReplicas:             2,
+		maxReplicas:             6,
+		initialReplicas:         5,
+		expectedDesiredReplicas: 3,
+		CPUTarget:               0,
 		metricsTarget: []autoscalingv2.MetricSpec{
 			{
 				Type: autoscalingv2.PodsMetricSourceType,
@@ -978,11 +976,11 @@ func TestScaleDownCM(t *testing.T) {
 
 func TestScaleDownCMObject(t *testing.T) {
 	tc := testCase{
-		minReplicas:     2,
-		maxReplicas:     6,
-		initialReplicas: 5,
-		desiredReplicas: 3,
-		CPUTarget:       0,
+		minReplicas:             2,
+		maxReplicas:             6,
+		initialReplicas:         5,
+		expectedDesiredReplicas: 3,
+		CPUTarget:               0,
 		metricsTarget: []autoscalingv2.MetricSpec{
 			{
 				Type: autoscalingv2.ObjectMetricSourceType,
@@ -1005,10 +1003,10 @@ func TestScaleDownCMObject(t *testing.T) {
 
 func TestScaleDownCMExternal(t *testing.T) {
 	tc := testCase{
-		minReplicas:     2,
-		maxReplicas:     6,
-		initialReplicas: 5,
-		desiredReplicas: 3,
+		minReplicas:             2,
+		maxReplicas:             6,
+		initialReplicas:         5,
+		expectedDesiredReplicas: 3,
 		metricsTarget: []autoscalingv2.MetricSpec{
 			{
 				Type: autoscalingv2.ExternalMetricSourceType,
@@ -1026,10 +1024,10 @@ func TestScaleDownCMExternal(t *testing.T) {
 
 func TestScaleDownPerPodCMExternal(t *testing.T) {
 	tc := testCase{
-		minReplicas:     2,
-		maxReplicas:     6,
-		initialReplicas: 5,
-		desiredReplicas: 3,
+		minReplicas:             2,
+		maxReplicas:             6,
+		initialReplicas:         5,
+		expectedDesiredReplicas: 3,
 		metricsTarget: []autoscalingv2.MetricSpec{
 			{
 				Type: autoscalingv2.ExternalMetricSourceType,
@@ -1047,49 +1045,49 @@ func TestScaleDownPerPodCMExternal(t *testing.T) {
 
 func TestScaleDownIgnoresUnreadyPods(t *testing.T) {
 	tc := testCase{
-		minReplicas:          2,
-		maxReplicas:          6,
-		initialReplicas:      5,
-		desiredReplicas:      2,
-		CPUTarget:            50,
-		CPUCurrent:           30,
-		verifyCPUCurrent:     true,
-		reportedLevels:       []uint64{100, 300, 500, 250, 250},
-		reportedCPURequests:  []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
-		useMetricsAPI:        true,
-		reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
+		minReplicas:             2,
+		maxReplicas:             6,
+		initialReplicas:         5,
+		expectedDesiredReplicas: 2,
+		CPUTarget:               50,
+		CPUCurrent:              30,
+		verifyCPUCurrent:        true,
+		reportedLevels:          []uint64{100, 300, 500, 250, 250},
+		reportedCPURequests:     []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
+		useMetricsAPI:           true,
+		reportedPodReadiness:    []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
 	}
 	tc.runTest(t)
 }
 
 func TestScaleDownIgnoresFailedPods(t *testing.T) {
 	tc := testCase{
-		minReplicas:          2,
-		maxReplicas:          6,
-		initialReplicas:      5,
-		desiredReplicas:      3,
-		CPUTarget:            50,
-		CPUCurrent:           28,
-		verifyCPUCurrent:     true,
-		reportedLevels:       []uint64{100, 300, 500, 250, 250},
-		reportedCPURequests:  []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
-		useMetricsAPI:        true,
-		reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
-		reportedPodPhase:     []v1.PodPhase{v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodFailed, v1.PodFailed},
+		minReplicas:             2,
+		maxReplicas:             6,
+		initialReplicas:         5,
+		expectedDesiredReplicas: 3,
+		CPUTarget:               50,
+		CPUCurrent:              28,
+		verifyCPUCurrent:        true,
+		reportedLevels:          []uint64{100, 300, 500, 250, 250},
+		reportedCPURequests:     []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
+		useMetricsAPI:           true,
+		reportedPodReadiness:    []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
+		reportedPodPhase:        []v1.PodPhase{v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodFailed, v1.PodFailed},
 	}
 	tc.runTest(t)
 }
 
 func TestTolerance(t *testing.T) {
 	tc := testCase{
-		minReplicas:         1,
-		maxReplicas:         5,
-		initialReplicas:     3,
-		desiredReplicas:     3,
-		CPUTarget:           100,
-		reportedLevels:      []uint64{1010, 1030, 1020},
-		reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
-		useMetricsAPI:       true,
+		minReplicas:             1,
+		maxReplicas:             5,
+		initialReplicas:         3,
+		expectedDesiredReplicas: 3,
+		CPUTarget:               100,
+		reportedLevels:          []uint64{1010, 1030, 1020},
+		reportedCPURequests:     []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
+		useMetricsAPI:           true,
 		expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
 			Type:   autoscalingv2.AbleToScale,
 			Status: v1.ConditionTrue,
@@ -1101,10 +1099,10 @@ func TestTolerance(t *testing.T) {
 
 func TestToleranceCM(t *testing.T) {
 	tc := testCase{
-		minReplicas:     1,
-		maxReplicas:     5,
-		initialReplicas: 3,
-		desiredReplicas: 3,
+		minReplicas:             1,
+		maxReplicas:             5,
+		initialReplicas:         3,
+		expectedDesiredReplicas: 3,
 		metricsTarget: []autoscalingv2.MetricSpec{
 			{
 				Type: autoscalingv2.PodsMetricSourceType,
@@ -1127,10 +1125,10 @@ func TestToleranceCM(t *testing.T) {
 
 func TestToleranceCMObject(t *testing.T) {
 	tc := testCase{
-		minReplicas:     1,
-		maxReplicas:     5,
-		initialReplicas: 3,
-		desiredReplicas: 3,
+		minReplicas:             1,
+		maxReplicas:             5,
+		initialReplicas:         3,
+		expectedDesiredReplicas: 3,
 		metricsTarget: []autoscalingv2.MetricSpec{
 			{
 				Type: autoscalingv2.ObjectMetricSourceType,
@@ -1158,10 +1156,10 @@ func TestToleranceCMObject(t *testing.T) {
 
 func TestToleranceCMExternal(t *testing.T) {
 	tc := testCase{
-		minReplicas:     2,
-		maxReplicas:     6,
-		initialReplicas: 4,
-		desiredReplicas: 4,
+		minReplicas:             2,
+		maxReplicas:             6,
+		initialReplicas:         4,
+		expectedDesiredReplicas: 4,
 		metricsTarget: []autoscalingv2.MetricSpec{
 			{
 				Type: autoscalingv2.ExternalMetricSourceType,
@@ -1184,10 +1182,10 @@ func TestToleranceCMExternal(t *testing.T) {
 
 func TestTolerancePerPodCMExternal(t *testing.T) {
 	tc := testCase{
-		minReplicas:     2,
-		maxReplicas:     6,
-		initialReplicas: 4,
-		desiredReplicas: 4,
+		minReplicas:             2,
+		maxReplicas:             6,
+		initialReplicas:         4,
+		expectedDesiredReplicas: 4,
 		metricsTarget: []autoscalingv2.MetricSpec{
 			{
 				Type: autoscalingv2.ExternalMetricSourceType,
@@ -1210,14 +1208,14 @@ func TestTolerancePerPodCMExternal(t *testing.T) {
 
 func TestMinReplicas(t *testing.T) {
 	tc := testCase{
-		minReplicas:         2,
-		maxReplicas:         5,
-		initialReplicas:     3,
-		desiredReplicas:     2,
-		CPUTarget:           90,
-		reportedLevels:      []uint64{10, 95, 10},
-		reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
-		useMetricsAPI:       true,
+		minReplicas:             2,
+		maxReplicas:             5,
+		initialReplicas:         3,
+		expectedDesiredReplicas: 2,
+		CPUTarget:               90,
+		reportedLevels:          []uint64{10, 95, 10},
+		reportedCPURequests:     []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
+		useMetricsAPI:           true,
 		expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
 			Type:   autoscalingv2.ScalingLimited,
 			Status: v1.ConditionTrue,
@@ -1229,14 +1227,14 @@ func TestMinReplicas(t *testing.T) {
 
 func TestMinReplicasDesiredZero(t *testing.T) {
 	tc := testCase{
-		minReplicas:         2,
-		maxReplicas:         5,
-		initialReplicas:     3,
-		desiredReplicas:     2,
-		CPUTarget:           90,
-		reportedLevels:      []uint64{0, 0, 0},
-		reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
-		useMetricsAPI:       true,
+		minReplicas:             2,
+		maxReplicas:             5,
+		initialReplicas:         3,
+		expectedDesiredReplicas: 2,
+		CPUTarget:               90,
+		reportedLevels:          []uint64{0, 0, 0},
+		reportedCPURequests:     []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
+		useMetricsAPI:           true,
 		expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
 			Type:   autoscalingv2.ScalingLimited,
 			Status: v1.ConditionTrue,
@@ -1248,14 +1246,14 @@ func TestMinReplicasDesiredZero(t *testing.T) {
 
 func TestZeroReplicas(t *testing.T) {
 	tc := testCase{
-		minReplicas:         3,
-		maxReplicas:         5,
-		initialReplicas:     0,
-		desiredReplicas:     0,
-		CPUTarget:           90,
-		reportedLevels:      []uint64{},
-		reportedCPURequests: []resource.Quantity{},
-		useMetricsAPI:       true,
+		minReplicas:             3,
+		maxReplicas:             5,
+		initialReplicas:         0,
+		expectedDesiredReplicas: 0,
+		CPUTarget:               90,
+		reportedLevels:          []uint64{},
+		reportedCPURequests:     []resource.Quantity{},
+		useMetricsAPI:           true,
 		expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
 			{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededGetScale"},
 			{Type: autoscalingv1.ScalingActive, Status: v1.ConditionFalse, Reason: "ScalingDisabled"},
@@ -1266,14 +1264,14 @@ func TestZeroReplicas(t *testing.T) {
 
 func TestTooFewReplicas(t *testing.T) {
 	tc := testCase{
-		minReplicas:         3,
-		maxReplicas:         5,
-		initialReplicas:     2,
-		desiredReplicas:     3,
-		CPUTarget:           90,
-		reportedLevels:      []uint64{},
-		reportedCPURequests: []resource.Quantity{},
-		useMetricsAPI:       true,
+		minReplicas:             3,
+		maxReplicas:             5,
+		initialReplicas:         2,
+		expectedDesiredReplicas: 3,
+		CPUTarget:               90,
+		reportedLevels:          []uint64{},
+		reportedCPURequests:     []resource.Quantity{},
+		useMetricsAPI:           true,
 		expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
 			{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededRescale"},
 		},
@@ -1283,14 +1281,14 @@ func TestTooFewReplicas(t *testing.T) {
 
 func TestTooManyReplicas(t *testing.T) {
 	tc := testCase{
-		minReplicas:         3,
-		maxReplicas:         5,
-		initialReplicas:     10,
-		desiredReplicas:     5,
-		CPUTarget:           90,
-		reportedLevels:      []uint64{},
-		reportedCPURequests: []resource.Quantity{},
-		useMetricsAPI:       true,
+		minReplicas:             3,
+		maxReplicas:             5,
+		initialReplicas:         10,
+		expectedDesiredReplicas: 5,
+		CPUTarget:               90,
+		reportedLevels:          []uint64{},
+		reportedCPURequests:     []resource.Quantity{},
+		useMetricsAPI:           true,
 		expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
 			{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededRescale"},
 		},
@@ -1300,14 +1298,14 @@ func TestTooManyReplicas(t *testing.T) {
 
 func TestMaxReplicas(t *testing.T) {
 	tc := testCase{
-		minReplicas:         2,
-		maxReplicas:         5,
-		initialReplicas:     3,
-		desiredReplicas:     5,
-		CPUTarget:           90,
-		reportedLevels:      []uint64{8000, 9500, 1000},
-		reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
-		useMetricsAPI:       true,
+		minReplicas:             2,
+		maxReplicas:             5,
+		initialReplicas:         3,
+		expectedDesiredReplicas: 5,
+		CPUTarget:               90,
+		reportedLevels:          []uint64{8000, 9500, 1000},
+		reportedCPURequests:     []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
+		useMetricsAPI:           true,
 		expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
 			Type:   autoscalingv2.ScalingLimited,
 			Status: v1.ConditionTrue,
@@ -1319,14 +1317,14 @@ func TestMaxReplicas(t *testing.T) {
 
 func TestSuperfluousMetrics(t *testing.T) {
 	tc := testCase{
-		minReplicas:         2,
-		maxReplicas:         6,
-		initialReplicas:     4,
-		desiredReplicas:     6,
-		CPUTarget:           100,
-		reportedLevels:      []uint64{4000, 9500, 3000, 7000, 3200, 2000},
-		reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
-		useMetricsAPI:       true,
+		minReplicas:             2,
+		maxReplicas:             6,
+		initialReplicas:         4,
+		expectedDesiredReplicas: 6,
+		CPUTarget:               100,
+		reportedLevels:          []uint64{4000, 9500, 3000, 7000, 3200, 2000},
+		reportedCPURequests:     []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
+		useMetricsAPI:           true,
 		expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
 			Type:   autoscalingv2.ScalingLimited,
 			Status: v1.ConditionTrue,
@@ -1338,28 +1336,28 @@ func TestSuperfluousMetrics(t *testing.T) {
 
 func TestMissingMetrics(t *testing.T) {
 	tc := testCase{
-		minReplicas:         2,
-		maxReplicas:         6,
-		initialReplicas:     4,
-		desiredReplicas:     3,
-		CPUTarget:           100,
-		reportedLevels:      []uint64{400, 95},
-		reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
-		useMetricsAPI:       true,
+		minReplicas:             2,
+		maxReplicas:             6,
+		initialReplicas:         4,
+		expectedDesiredReplicas: 3,
+		CPUTarget:               100,
+		reportedLevels:          []uint64{400, 95},
+		reportedCPURequests:     []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
+		useMetricsAPI:           true,
 	}
 	tc.runTest(t)
 }
 
 func TestEmptyMetrics(t *testing.T) {
 	tc := testCase{
-		minReplicas:         2,
-		maxReplicas:         6,
-		initialReplicas:     4,
-		desiredReplicas:     4,
-		CPUTarget:           100,
-		reportedLevels:      []uint64{},
-		reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
-		useMetricsAPI:       true,
+		minReplicas:             2,
+		maxReplicas:             6,
+		initialReplicas:         4,
+		expectedDesiredReplicas: 4,
+		CPUTarget:               100,
+		reportedLevels:          []uint64{},
+		reportedCPURequests:     []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
+		useMetricsAPI:           true,
 		expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
 			{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededGetScale"},
 			{Type: autoscalingv1.ScalingActive, Status: v1.ConditionFalse, Reason: "FailedGetResourceMetric"},
@@ -1370,14 +1368,14 @@ func TestEmptyMetrics(t *testing.T) {
 
 func TestEmptyCPURequest(t *testing.T) {
 	tc := testCase{
-		minReplicas:         1,
-		maxReplicas:         5,
-		initialReplicas:     1,
-		desiredReplicas:     1,
-		CPUTarget:           100,
-		reportedLevels:      []uint64{200},
-		reportedCPURequests: []resource.Quantity{},
-		useMetricsAPI:       true,
+		minReplicas:             1,
+		maxReplicas:             5,
+		initialReplicas:         1,
+		expectedDesiredReplicas: 1,
+		CPUTarget:               100,
+		reportedLevels:          []uint64{200},
+		reportedCPURequests:     []resource.Quantity{},
+		useMetricsAPI:           true,
 		expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
 			{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededGetScale"},
 			{Type: autoscalingv1.ScalingActive, Status: v1.ConditionFalse, Reason: "FailedGetResourceMetric"},
@@ -1388,30 +1386,30 @@ func TestEmptyCPURequest(t *testing.T) {
 
 func TestEventCreated(t *testing.T) {
 	tc := testCase{
-		minReplicas:         1,
-		maxReplicas:         5,
-		initialReplicas:     1,
-		desiredReplicas:     2,
-		CPUTarget:           50,
-		reportedLevels:      []uint64{200},
-		reportedCPURequests: []resource.Quantity{resource.MustParse("0.2")},
-		verifyEvents:        true,
-		useMetricsAPI:       true,
+		minReplicas:             1,
+		maxReplicas:             5,
+		initialReplicas:         1,
+		expectedDesiredReplicas: 2,
+		CPUTarget:               50,
+		reportedLevels:          []uint64{200},
+		reportedCPURequests:     []resource.Quantity{resource.MustParse("0.2")},
+		verifyEvents:            true,
+		useMetricsAPI:           true,
 	}
 	tc.runTest(t)
 }
 
 func TestEventNotCreated(t *testing.T) {
 	tc := testCase{
-		minReplicas:         1,
-		maxReplicas:         5,
-		initialReplicas:     2,
-		desiredReplicas:     2,
-		CPUTarget:           50,
-		reportedLevels:      []uint64{200, 200},
-		reportedCPURequests: []resource.Quantity{resource.MustParse("0.4"), resource.MustParse("0.4")},
-		verifyEvents:        true,
-		useMetricsAPI:       true,
+		minReplicas:             1,
+		maxReplicas:             5,
+		initialReplicas:         2,
+		expectedDesiredReplicas: 2,
+		CPUTarget:               50,
+		reportedLevels:          []uint64{200, 200},
+		reportedCPURequests:     []resource.Quantity{resource.MustParse("0.4"), resource.MustParse("0.4")},
+		verifyEvents:            true,
+		useMetricsAPI:           true,
 		expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
 			Type:   autoscalingv2.AbleToScale,
 			Status: v1.ConditionTrue,
@@ -1423,28 +1421,28 @@ func TestEventNotCreated(t *testing.T) {
 
 func TestMissingReports(t *testing.T) {
 	tc := testCase{
-		minReplicas:         1,
-		maxReplicas:         5,
-		initialReplicas:     4,
-		desiredReplicas:     2,
-		CPUTarget:           50,
-		reportedLevels:      []uint64{200},
-		reportedCPURequests: []resource.Quantity{resource.MustParse("0.2")},
-		useMetricsAPI:       true,
+		minReplicas:             1,
+		maxReplicas:             5,
+		initialReplicas:         4,
+		expectedDesiredReplicas: 2,
+		CPUTarget:               50,
+		reportedLevels:          []uint64{200},
+		reportedCPURequests:     []resource.Quantity{resource.MustParse("0.2")},
+		useMetricsAPI:           true,
 	}
 	tc.runTest(t)
 }
 
 func TestUpscaleCap(t *testing.T) {
 	tc := testCase{
-		minReplicas:         1,
-		maxReplicas:         100,
-		initialReplicas:     3,
-		desiredReplicas:     24,
-		CPUTarget:           10,
-		reportedLevels:      []uint64{100, 200, 300},
-		reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
-		useMetricsAPI:       true,
+		minReplicas:             1,
+		maxReplicas:             100,
+		initialReplicas:         3,
+		expectedDesiredReplicas: 24,
+		CPUTarget:               10,
+		reportedLevels:          []uint64{100, 200, 300},
+		reportedCPURequests:     []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
+		useMetricsAPI:           true,
 		expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
 			Type:   autoscalingv2.ScalingLimited,
 			Status: v1.ConditionTrue,
@@ -1459,12 +1457,12 @@ func TestUpscaleCapGreaterThanMaxReplicas(t *testing.T) {
 		minReplicas:     1,
 		maxReplicas:     20,
 		initialReplicas: 3,
-		// desiredReplicas would be 24 without maxReplicas
-		desiredReplicas:     20,
-		CPUTarget:           10,
-		reportedLevels:      []uint64{100, 200, 300},
-		reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
-		useMetricsAPI:       true,
+		// expectedDesiredReplicas would be 24 without maxReplicas
+		expectedDesiredReplicas: 20,
+		CPUTarget:               10,
+		reportedLevels:          []uint64{100, 200, 300},
+		reportedCPURequests:     []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
+		useMetricsAPI:           true,
 		expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
 			Type:   autoscalingv2.ScalingLimited,
 			Status: v1.ConditionTrue,
@@ -1476,14 +1474,14 @@ func TestUpscaleCapGreaterThanMaxReplicas(t *testing.T) {
 
 func TestConditionInvalidSelectorMissing(t *testing.T) {
 	tc := testCase{
-		minReplicas:         1,
-		maxReplicas:         100,
-		initialReplicas:     3,
-		desiredReplicas:     3,
-		CPUTarget:           10,
-		reportedLevels:      []uint64{100, 200, 300},
-		reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
-		useMetricsAPI:       true,
+		minReplicas:             1,
+		maxReplicas:             100,
+		initialReplicas:         3,
+		expectedDesiredReplicas: 3,
+		CPUTarget:               10,
+		reportedLevels:          []uint64{100, 200, 300},
+		reportedCPURequests:     []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
+		useMetricsAPI:           true,
 		expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
 			{
 				Type: autoscalingv1.AbleToScale,
@@ -1521,14 +1519,14 @@ func TestConditionInvalidSelectorMissing(t *testing.T) {
 
 func TestConditionInvalidSelectorUnparsable(t *testing.T) {
 	tc := testCase{
-		minReplicas:         1,
-		maxReplicas:         100,
-		initialReplicas:     3,
-		desiredReplicas:     3,
-		CPUTarget:           10,
-		reportedLevels:      []uint64{100, 200, 300},
-		reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
-		useMetricsAPI:       true,
+		minReplicas:             1,
+		maxReplicas:             100,
+		initialReplicas:         3,
+		expectedDesiredReplicas: 3,
+		CPUTarget:               10,
+		reportedLevels:          []uint64{100, 200, 300},
+		reportedCPURequests:     []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
+		useMetricsAPI:           true,
 		expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
 			{
 				Type: autoscalingv1.AbleToScale,
@@ -1605,14 +1603,14 @@ func TestConditionFailedGetMetrics(t *testing.T) {
 
 	for reason, specs := range metricsTargets {
 		tc := testCase{
-			minReplicas:         1,
-			maxReplicas:         100,
-			initialReplicas:     3,
-			desiredReplicas:     3,
-			CPUTarget:           10,
-			reportedLevels:      []uint64{100, 200, 300},
-			reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
-			useMetricsAPI:       true,
+			minReplicas:             1,
+			maxReplicas:             100,
+			initialReplicas:         3,
+			expectedDesiredReplicas: 3,
+			CPUTarget:               10,
+			reportedLevels:          []uint64{100, 200, 300},
+			reportedCPURequests:     []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
+			useMetricsAPI:           true,
 		}
 		_, testMetricsClient, testCMClient, testEMClient, _ := tc.prepareTestClient(t)
 		tc.testMetricsClient = testMetricsClient
@@ -1645,11 +1643,11 @@ func TestConditionFailedGetMetrics(t *testing.T) {
 
 func TestConditionInvalidSourceType(t *testing.T) {
 	tc := testCase{
-		minReplicas:     2,
-		maxReplicas:     6,
-		initialReplicas: 3,
-		desiredReplicas: 3,
-		CPUTarget:       0,
+		minReplicas:             2,
+		maxReplicas:             6,
+		initialReplicas:         3,
+		expectedDesiredReplicas: 3,
+		CPUTarget:               0,
 		metricsTarget: []autoscalingv2.MetricSpec{
 			{
 				Type: "CheddarCheese",
@@ -1674,14 +1672,14 @@ func TestConditionInvalidSourceType(t *testing.T) {
 
 func TestConditionFailedGetScale(t *testing.T) {
 	tc := testCase{
-		minReplicas:         1,
-		maxReplicas:         100,
-		initialReplicas:     3,
-		desiredReplicas:     3,
-		CPUTarget:           10,
-		reportedLevels:      []uint64{100, 200, 300},
-		reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
-		useMetricsAPI:       true,
+		minReplicas:             1,
+		maxReplicas:             100,
+		initialReplicas:         3,
+		expectedDesiredReplicas: 3,
+		CPUTarget:               10,
+		reportedLevels:          []uint64{100, 200, 300},
+		reportedCPURequests:     []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
+		useMetricsAPI:           true,
 		expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
 			{
 				Type: autoscalingv1.AbleToScale,
@@ -1703,14 +1701,14 @@ func TestConditionFailedGetScale(t *testing.T) {
 
 func TestConditionFailedUpdateScale(t *testing.T) {
 	tc := testCase{
-		minReplicas:         1,
-		maxReplicas:         5,
-		initialReplicas:     3,
-		desiredReplicas:     3,
-		CPUTarget:           100,
-		reportedLevels:      []uint64{150, 150, 150},
-		reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
-		useMetricsAPI:       true,
+		minReplicas:             1,
+		maxReplicas:             5,
+		initialReplicas:         3,
+		expectedDesiredReplicas: 3,
+		CPUTarget:               100,
+		reportedLevels:          []uint64{150, 150, 150},
+		reportedCPURequests:     []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
+		useMetricsAPI:           true,
 		expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
 			Type:   autoscalingv2.AbleToScale,
 			Status: v1.ConditionFalse,
@@ -1728,16 +1726,88 @@ func TestConditionFailedUpdateScale(t *testing.T) {
 	tc.runTest(t)
 }
 
-func TestBackoffUpscale(t *testing.T) {
+func NoTestBackoffUpscale(t *testing.T) {
 	time := metav1.Time{Time: time.Now()}
 	tc := testCase{
-		minReplicas:         1,
-		maxReplicas:         5,
-		initialReplicas:     3,
-		desiredReplicas:     3,
-		CPUTarget:           100,
-		reportedLevels:      []uint64{150, 150, 150},
-		reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
+		minReplicas:             1,
+		maxReplicas:             5,
+		initialReplicas:         3,
+		expectedDesiredReplicas: 3,
+		CPUTarget:               100,
+		reportedLevels:          []uint64{150, 150, 150},
+		reportedCPURequests:     []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
+		useMetricsAPI:           true,
+		lastScaleTime:           &time,
+		expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
+			Type:   autoscalingv2.AbleToScale,
+			Status: v1.ConditionTrue,
+			Reason: "ReadyForNewScale",
+		}, autoscalingv2.HorizontalPodAutoscalerCondition{
+			Type:   autoscalingv2.AbleToScale,
+			Status: v1.ConditionTrue,
+			Reason: "SucceededRescale",
+		}),
+	}
+	tc.runTest(t)
+}
+
+func TestNoBackoffUpscaleCM(t *testing.T) {
+	time := metav1.Time{Time: time.Now()}
+	tc := testCase{
+		minReplicas:             1,
+		maxReplicas:             5,
+		initialReplicas:         3,
+		expectedDesiredReplicas: 4,
+		CPUTarget:               0,
+		metricsTarget: []autoscalingv2.MetricSpec{
+			{
+				Type: autoscalingv2.PodsMetricSourceType,
+				Pods: &autoscalingv2.PodsMetricSource{
+					MetricName:         "qps",
+					TargetAverageValue: resource.MustParse("15.0"),
+				},
+			},
+		},
+		reportedLevels:      []uint64{20000, 10000, 30000},
+		reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
+		//useMetricsAPI: true,
+		lastScaleTime: &time,
+		expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
+			Type:   autoscalingv2.AbleToScale,
+			Status: v1.ConditionTrue,
+			Reason: "ReadyForNewScale",
+		}, autoscalingv2.HorizontalPodAutoscalerCondition{
+			Type:   autoscalingv2.AbleToScale,
+			Status: v1.ConditionTrue,
+			Reason: "SucceededRescale",
+		}, autoscalingv2.HorizontalPodAutoscalerCondition{
+			Type:   autoscalingv2.ScalingLimited,
+			Status: v1.ConditionFalse,
+			Reason: "DesiredWithinRange",
+		}),
+	}
+	tc.runTest(t)
+}
+
+func TestNoBackoffUpscaleCMNoBackoffCpu(t *testing.T) {
+	time := metav1.Time{Time: time.Now()}
+	tc := testCase{
+		minReplicas:             1,
+		maxReplicas:             5,
+		initialReplicas:         3,
+		expectedDesiredReplicas: 5,
+		CPUTarget:               10,
+		metricsTarget: []autoscalingv2.MetricSpec{
+			{
+				Type: autoscalingv2.PodsMetricSourceType,
+				Pods: &autoscalingv2.PodsMetricSource{
+					MetricName:         "qps",
+					TargetAverageValue: resource.MustParse("15.0"),
+				},
+			},
+		},
+		reportedLevels:      []uint64{20000, 10000, 30000},
+		reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
 		useMetricsAPI: true,
 		lastScaleTime: &time,
 		expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
@@ -1746,8 +1816,12 @@ func TestBackoffUpscale(t *testing.T) {
 			Reason: "ReadyForNewScale",
 		}, autoscalingv2.HorizontalPodAutoscalerCondition{
 			Type:   autoscalingv2.AbleToScale,
-			Status: v1.ConditionFalse,
-			Reason: "BackoffBoth",
+			Status: v1.ConditionTrue,
+			Reason: "SucceededRescale",
+		}, autoscalingv2.HorizontalPodAutoscalerCondition{
+			Type:   autoscalingv2.ScalingLimited,
+			Status: v1.ConditionTrue,
+			Reason: "TooManyReplicas",
 		}),
 	}
 	tc.runTest(t)
@@ -1756,15 +1830,15 @@ func TestBackoffUpscale(t *testing.T) {
 func TestBackoffDownscale(t *testing.T) {
 	time := metav1.Time{Time: time.Now().Add(-4 * time.Minute)}
 	tc := testCase{
-		minReplicas:         1,
-		maxReplicas:         5,
-		initialReplicas:     4,
-		desiredReplicas:     4,
-		CPUTarget:           100,
-		reportedLevels:      []uint64{50, 50, 50},
-		reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
-		useMetricsAPI:       true,
-		lastScaleTime:       &time,
+		minReplicas:             1,
+		maxReplicas:             5,
+		initialReplicas:         4,
+		expectedDesiredReplicas: 4,
+		CPUTarget:               100,
+		reportedLevels:          []uint64{50, 50, 50},
+		reportedCPURequests:     []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
+		useMetricsAPI:           true,
+		lastScaleTime:           &time,
 		expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
 			Type:   autoscalingv2.AbleToScale,
 			Status: v1.ConditionTrue,
@@ -1803,11 +1877,11 @@ func TestComputedToleranceAlgImplementation(t *testing.T) {
 
 	// To breach tolerance we will create a utilization ratio difference of tolerance to usageRatioToleranceValue)
 	tc := testCase{
-		minReplicas:     0,
-		maxReplicas:     1000,
-		initialReplicas: startPods,
-		desiredReplicas: finalPods,
-		CPUTarget:       finalCPUPercentTarget,
+		minReplicas:             0,
+		maxReplicas:             1000,
+		initialReplicas:         startPods,
+		expectedDesiredReplicas: finalPods,
+		CPUTarget:               finalCPUPercentTarget,
 		reportedLevels: []uint64{
 			totalUsedCPUOfAllPods / 10,
 			totalUsedCPUOfAllPods / 10,
@@ -1843,7 +1917,7 @@ func TestComputedToleranceAlgImplementation(t *testing.T) {
 	finalCPUPercentTarget = int32(target * 100)
 	tc.CPUTarget = finalCPUPercentTarget
 	tc.initialReplicas = startPods
-	tc.desiredReplicas = startPods
+	tc.expectedDesiredReplicas = startPods
 	tc.expectedConditions = statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
 		Type:   autoscalingv2.AbleToScale,
 		Status: v1.ConditionTrue,
@@ -1855,15 +1929,15 @@ func TestComputedToleranceAlgImplementation(t *testing.T) {
 func TestScaleUpRCImmediately(t *testing.T) {
 	time := metav1.Time{Time: time.Now()}
 	tc := testCase{
-		minReplicas:         2,
-		maxReplicas:         6,
-		initialReplicas:     1,
-		desiredReplicas:     2,
-		verifyCPUCurrent:    false,
-		reportedLevels:      []uint64{0, 0, 0, 0},
-		reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
-		useMetricsAPI:       true,
-		lastScaleTime:       &time,
+		minReplicas:             2,
+		maxReplicas:             6,
+		initialReplicas:         1,
+		expectedDesiredReplicas: 2,
+		verifyCPUCurrent:        false,
+		reportedLevels:          []uint64{0, 0, 0, 0},
+		reportedCPURequests:     []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
+		useMetricsAPI:           true,
+		lastScaleTime:           &time,
 		expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
 			{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededRescale"},
 		},
@@ -1874,15 +1948,15 @@ func TestScaleUpRCImmediately(t *testing.T) {
 func TestScaleDownRCImmediately(t *testing.T) {
 	time := metav1.Time{Time: time.Now()}
 	tc := testCase{
-		minReplicas:         2,
-		maxReplicas:         5,
-		initialReplicas:     6,
-		desiredReplicas:     5,
-		CPUTarget:           50,
-		reportedLevels:      []uint64{8000, 9500, 1000},
-		reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
-		useMetricsAPI:       true,
-		lastScaleTime:       &time,
+		minReplicas:             2,
+		maxReplicas:             5,
+		initialReplicas:         6,
+		expectedDesiredReplicas: 5,
+		CPUTarget:               50,
+		reportedLevels:          []uint64{8000, 9500, 1000},
+		reportedCPURequests:     []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
+		useMetricsAPI:           true,
+		lastScaleTime:           &time,
 		expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
 			{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededRescale"},
 		},
@@ -1892,17 +1966,17 @@ func TestScaleDownRCImmediately(t *testing.T) {
 
 func TestAvoidUncessaryUpdates(t *testing.T) {
 	tc := testCase{
-		minReplicas:          2,
-		maxReplicas:          6,
-		initialReplicas:      3,
-		desiredReplicas:      3,
-		CPUTarget:            30,
-		CPUCurrent:           40,
-		verifyCPUCurrent:     true,
-		reportedLevels:       []uint64{400, 500, 700},
-		reportedCPURequests:  []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
-		reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
-		useMetricsAPI:        true,
+		minReplicas:             2,
+		maxReplicas:             6,
+		initialReplicas:         3,
+		expectedDesiredReplicas: 3,
+		CPUTarget:               30,
+		CPUCurrent:              40,
+		verifyCPUCurrent:        true,
+		reportedLevels:          []uint64{400, 500, 700},
+		reportedCPURequests:     []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
+		reportedPodReadiness:    []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
+		useMetricsAPI:           true,
 	}
 	testClient, _, _, _, _ := tc.prepareTestClient(t)
 	tc.testClient = testClient
@@ -1962,7 +2036,7 @@ func TestAvoidUncessaryUpdates(t *testing.T) {
 func TestConvertDesiredReplicasWithRules(t *testing.T) {
 	conversionTestCases := []struct {
 		currentReplicas                  int32
-		desiredReplicas                  int32
+		expectedDesiredReplicas          int32
 		hpaMinReplicas                   int32
 		hpaMaxReplicas                   int32
 		expectedConvertedDesiredReplicas int32
@@ -1971,7 +2045,7 @@ func TestConvertDesiredReplicasWithRules(t *testing.T) {
 	}{
 		{
 			currentReplicas:                  5,
-			desiredReplicas:                  7,
+			expectedDesiredReplicas:          7,
 			hpaMinReplicas:                   3,
 			hpaMaxReplicas:                   8,
 			expectedConvertedDesiredReplicas: 7,
@@ -1980,7 +2054,7 @@ func TestConvertDesiredReplicasWithRules(t *testing.T) {
 		},
 		{
 			currentReplicas:                  3,
-			desiredReplicas:                  1,
+			expectedDesiredReplicas:          1,
 			hpaMinReplicas:                   2,
 			hpaMaxReplicas:                   8,
 			expectedConvertedDesiredReplicas: 2,
@@ -1989,7 +2063,7 @@ func TestConvertDesiredReplicasWithRules(t *testing.T) {
 		},
 		{
 			currentReplicas:                  1,
-			desiredReplicas:                  0,
+			expectedDesiredReplicas:          0,
 			hpaMinReplicas:                   0,
 			hpaMaxReplicas:                   10,
 			expectedConvertedDesiredReplicas: 1,
@@ -1998,7 +2072,7 @@ func TestConvertDesiredReplicasWithRules(t *testing.T) {
 		},
 		{
 			currentReplicas:                  20,
-			desiredReplicas:                  1000,
+			expectedDesiredReplicas:          1000,
 			hpaMinReplicas:                   1,
 			hpaMaxReplicas:                   10,
 			expectedConvertedDesiredReplicas: 10,
@@ -2007,7 +2081,7 @@ func TestConvertDesiredReplicasWithRules(t *testing.T) {
 		},
 		{
 			currentReplicas:                  3,
-			desiredReplicas:                  1000,
+			expectedDesiredReplicas:          1000,
 			hpaMinReplicas:                   1,
 			hpaMaxReplicas:                   2000,
 			expectedConvertedDesiredReplicas: calculateScaleUpLimit(3),
@@ -2018,7 +2092,7 @@ func TestConvertDesiredReplicasWithRules(t *testing.T) {
 
 	for _, ctc := range conversionTestCases {
 		actualConvertedDesiredReplicas, actualCondition, _ := convertDesiredReplicasWithRules(
-			ctc.currentReplicas, ctc.desiredReplicas, ctc.hpaMinReplicas, ctc.hpaMaxReplicas,
+			ctc.currentReplicas, ctc.expectedDesiredReplicas, ctc.hpaMinReplicas, ctc.hpaMaxReplicas,
 		)
 
 		assert.Equal(t, ctc.expectedConvertedDesiredReplicas, actualConvertedDesiredReplicas, ctc.annotation)
diff --git a/pkg/controller/podautoscaler/legacy_horizontal_test.go b/pkg/controller/podautoscaler/legacy_horizontal_test.go
index b6ffc18c312..01fc6986234 100644
--- a/pkg/controller/podautoscaler/legacy_horizontal_test.go
+++ b/pkg/controller/podautoscaler/legacy_horizontal_test.go
@@ -491,7 +491,6 @@ func (tc *legacyTestCase) runTest(t *testing.T) {
 	}
 
 	informerFactory := informers.NewSharedInformerFactory(testClient, controller.NoResyncPeriodFunc())
-	defaultUpscaleForbiddenWindow := 3 * time.Minute
 	defaultDownscaleForbiddenWindow := 5 * time.Minute
 
 	hpaController := NewHorizontalController(
@@ -502,7 +501,6 @@ func (tc *legacyTestCase) runTest(t *testing.T) {
 		replicaCalc,
 		informerFactory.Autoscaling().V1().HorizontalPodAutoscalers(),
 		controller.NoResyncPeriodFunc(),
-		defaultUpscaleForbiddenWindow,
 		defaultDownscaleForbiddenWindow,
 	)
 	hpaController.hpaListerSynced = alwaysReady
diff --git a/pkg/controller/podautoscaler/replica_calculator.go b/pkg/controller/podautoscaler/replica_calculator.go
index 7dc26f9a738..304c2afba82 100644
--- a/pkg/controller/podautoscaler/replica_calculator.go
+++ b/pkg/controller/podautoscaler/replica_calculator.go
@@ -35,6 +35,10 @@ const (
 	// defaultTestingTolerance is default value for calculating when to
 	// scale up/scale down.
 	defaultTestingTolerance = 0.1
+
+	// A pod begins its existence as unready. If a pod is unready and the last change of its readiness
+	// happened less than maxDelayOfInitialReadinessStatus after it started, we assume it has never been ready.
+	maxDelayOfInitialReadinessStatus = 10 * time.Second
 )
 
 type ReplicaCalculator struct {
@@ -205,7 +209,7 @@ func (c *ReplicaCalculator) calcPlainMetricReplicas(metrics metricsclient.PodMet
 	missingPods := sets.NewString()
 
 	for _, pod := range podList.Items {
-		if pod.Status.Phase != v1.PodRunning || !podutil.IsPodReady(&pod) {
+		if pod.Status.Phase != v1.PodRunning || !hasPodBeenReadyBefore(&pod) {
 			// save this pod name for later, but pretend it doesn't exist for now
 			unreadyPods.Insert(pod.Name)
 			delete(metrics, pod.Name)
@@ -381,3 +385,22 @@ func (c *ReplicaCalculator) GetExternalPerPodMetricReplicas(currentReplicas int3
 	utilization = int64(math.Ceil(float64(utilization) / float64(currentReplicas)))
 	return replicaCount, utilization, timestamp, nil
 }
+
+// hasPodBeenReadyBefore returns true if the pod is ready now, or if it is currently unready but its
+// readiness last changed more than maxDelayOfInitialReadinessStatus after it started (so it must have been ready before).
+func hasPodBeenReadyBefore(pod *v1.Pod) bool {
+	_, readyCondition := podutil.GetPodCondition(&pod.Status, v1.PodReady)
+	if readyCondition == nil {
+		return false
+	}
+	if readyCondition.Status == v1.ConditionTrue {
+		return true
+	}
+	lastReady := readyCondition.LastTransitionTime.Time
+	if pod.Status.StartTime == nil {
+		return false
+	}
+	started := pod.Status.StartTime.Time
+	// If the last readiness change happened more than maxDelayOfInitialReadinessStatus
+	// after the pod started, assume the pod was ready in the past.
+	return lastReady.After(started.Add(maxDelayOfInitialReadinessStatus))
+}
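The new readiness heuristic can be exercised in isolation. A self-contained sketch (the helper below restates hasPodBeenReadyBefore with the condition lookup inlined; the pod literals are invented):

package main

import (
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const maxDelayOfInitialReadinessStatus = 10 * time.Second

// hasBeenReady restates the patch's hasPodBeenReadyBefore for illustration.
func hasBeenReady(pod *v1.Pod) bool {
	for _, c := range pod.Status.Conditions {
		if c.Type != v1.PodReady {
			continue
		}
		if c.Status == v1.ConditionTrue {
			return true
		}
		if pod.Status.StartTime == nil {
			return false
		}
		// Unready now, but if readiness last changed well after startup the
		// pod must have been ready at some point in between.
		return c.LastTransitionTime.Time.After(pod.Status.StartTime.Time.Add(maxDelayOfInitialReadinessStatus))
	}
	return false
}

func main() {
	start := time.Date(2018, 7, 25, 17, 0, 0, 0, time.UTC)
	mkPod := func(lastChange time.Time) *v1.Pod {
		return &v1.Pod{Status: v1.PodStatus{
			StartTime: &metav1.Time{Time: start},
			Conditions: []v1.PodCondition{{
				Type:               v1.PodReady,
				Status:             v1.ConditionFalse,
				LastTransitionTime: metav1.Time{Time: lastChange},
			}},
		}}
	}
	fmt.Println(hasBeenReady(mkPod(start.Add(5 * time.Second))))  // false: still within the initial-readiness delay
	fmt.Println(hasBeenReady(mkPod(start.Add(10 * time.Minute)))) // true: flipped to unready long after start
}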
diff --git a/pkg/controller/podautoscaler/replica_calculator_test.go b/pkg/controller/podautoscaler/replica_calculator_test.go
index 999070ba440..0dc43b0696a 100644
--- a/pkg/controller/podautoscaler/replica_calculator_test.go
+++ b/pkg/controller/podautoscaler/replica_calculator_test.go
@@ -1069,4 +1069,76 @@ func TestReplicaCalcComputedToleranceAlgImplementation(t *testing.T) {
 	tc.runTest(t)
 }
 
+func TestHasPodBeenReadyBefore(t *testing.T) {
+	tests := []struct {
+		name       string
+		conditions []v1.PodCondition
+		started    time.Time
+		expected   bool
+	}{
+		{
+			"initially unready",
+			[]v1.PodCondition{
+				{
+					Type: v1.PodReady,
+					LastTransitionTime: metav1.Time{
+						Time: metav1.Date(2018, 7, 25, 17, 10, 0, 0, time.UTC).Time,
+					},
+					Status: v1.ConditionFalse,
+				},
+			},
+			metav1.Date(2018, 7, 25, 17, 10, 0, 0, time.UTC).Time,
+			false,
+		},
+		{
+			"currently unready",
+			[]v1.PodCondition{
+				{
+					Type: v1.PodReady,
+					LastTransitionTime: metav1.Time{
+						Time: metav1.Date(2018, 7, 25, 17, 10, 0, 0, time.UTC).Time,
+					},
+					Status: v1.ConditionFalse,
+				},
+			},
+			metav1.Date(2018, 7, 25, 17, 0, 0, 0, time.UTC).Time,
+			true,
+		},
+		{
+			"currently ready",
+			[]v1.PodCondition{
+				{
+					Type: v1.PodReady,
+					LastTransitionTime: metav1.Time{
+						Time: metav1.Date(2018, 7, 25, 17, 10, 0, 0, time.UTC).Time,
+					},
+					Status: v1.ConditionTrue,
+				},
+			},
+			metav1.Date(2018, 7, 25, 17, 10, 0, 0, time.UTC).Time,
+			true,
+		},
+		{
+			"no ready status",
+			[]v1.PodCondition{},
+			metav1.Date(2018, 7, 25, 17, 10, 0, 0, time.UTC).Time,
+			false,
+		},
+	}
+	for _, tc := range tests {
+		pod := &v1.Pod{
+			Status: v1.PodStatus{
+				Conditions: tc.conditions,
+				StartTime: &metav1.Time{
+					Time: tc.started,
+				},
+			},
+		}
+		got := hasPodBeenReadyBefore(pod)
+		if got != tc.expected {
+			t.Errorf("[TestHasPodBeenReadyBefore.%s] got %v, want %v", tc.name, got, tc.expected)
+		}
+	}
+}
+
 // TODO: add more tests
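One boundary case the trailing TODO could cover: a readiness flip at exactly StartTime plus maxDelayOfInitialReadinessStatus, which the strict After() comparison still classifies as never ready. A candidate entry for the table above (hypothetical, reusing its field layout):

		{
			"unready exactly at the cutoff",
			[]v1.PodCondition{
				{
					Type: v1.PodReady,
					LastTransitionTime: metav1.Time{
						Time: metav1.Date(2018, 7, 25, 17, 0, 10, 0, time.UTC).Time,
					},
					Status: v1.ConditionFalse,
				},
			},
			metav1.Date(2018, 7, 25, 17, 0, 0, 0, time.UTC).Time,
			false, // After() is strict, so the boundary itself still counts as never ready
		},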