From 61681433869a92072b6175c478be2c3b056aa7bc Mon Sep 17 00:00:00 2001
From: Adrian Moisey
Date: Tue, 31 Dec 2024 15:09:57 +0200
Subject: [PATCH 1/2] Remove use of deprecated functions

Removes use of deprecated functions in k8s.io/utils/pointer and
k8s.io/apimachinery/pkg/util/sets
---
 .../podautoscaler/horizontal_test.go          | 14 +--
 .../podautoscaler/replica_calculator.go       | 10 +-
 .../podautoscaler/replica_calculator_test.go  | 90 +++++++++----------
 3 files changed, 57 insertions(+), 57 deletions(-)

diff --git a/pkg/controller/podautoscaler/horizontal_test.go b/pkg/controller/podautoscaler/horizontal_test.go
index 65184b95bdc..8f7b534365a 100644
--- a/pkg/controller/podautoscaler/horizontal_test.go
+++ b/pkg/controller/podautoscaler/horizontal_test.go
@@ -54,7 +54,7 @@ import (
 	metricsfake "k8s.io/metrics/pkg/client/clientset/versioned/fake"
 	cmfake "k8s.io/metrics/pkg/client/custom_metrics/fake"
 	emfake "k8s.io/metrics/pkg/client/external_metrics/fake"
-	"k8s.io/utils/pointer"
+	"k8s.io/utils/ptr"

 	"github.com/stretchr/testify/assert"

@@ -876,7 +876,7 @@ func (m *mockMonitor) ObserveMetricComputationResult(action monitor.ActionLabel,

 // waitUntilRecorded waits for the HPA controller to reconcile at least once.
 func (m *mockMonitor) waitUntilRecorded(t *testing.T) {
-	if err := wait.Poll(20*time.Millisecond, 100*time.Millisecond, func() (done bool, err error) {
+	if err := wait.PollUntilContextTimeout(context.Background(), 20*time.Millisecond, 100*time.Millisecond, true, func(ctx context.Context) (done bool, err error) {
 		m.RWMutex.RLock()
 		defer m.RWMutex.RUnlock()
 		if len(m.reconciliationActionLabels) == 0 || len(m.reconciliationErrorLabels) == 0 {
@@ -925,7 +925,7 @@ func TestScaleUpContainer(t *testing.T) {
 				Name: v1.ResourceCPU,
 				Target: autoscalingv2.MetricTarget{
 					Type:               autoscalingv2.UtilizationMetricType,
-					AverageUtilization: pointer.Int32(30),
+					AverageUtilization: ptr.To(int32(30)),
 				},
 				Container: "container1",
 			},
@@ -1619,7 +1619,7 @@ func TestScaleDownContainerResource(t *testing.T) {
 			Name: v1.ResourceCPU,
 			Target: autoscalingv2.MetricTarget{
 				Type:               autoscalingv2.UtilizationMetricType,
-				AverageUtilization: pointer.Int32(50),
+				AverageUtilization: ptr.To(int32(50)),
 			},
 		},
 	}},
@@ -3848,7 +3848,7 @@ func TestCalculateScaleUpLimitWithScalingRules(t *testing.T) {
 	policy := autoscalingv2.MinChangePolicySelect

 	calculated := calculateScaleUpLimitWithScalingRules(1, []timestampedScaleEvent{}, []timestampedScaleEvent{}, &autoscalingv2.HPAScalingRules{
-		StabilizationWindowSeconds: pointer.Int32(300),
+		StabilizationWindowSeconds: ptr.To(int32(300)),
 		SelectPolicy:               &policy,
 		Policies: []autoscalingv2.HPAScalingPolicy{
 			{
@@ -3870,7 +3870,7 @@ func TestCalculateScaleDownLimitWithBehaviors(t *testing.T) {
 	policy := autoscalingv2.MinChangePolicySelect

 	calculated := calculateScaleDownLimitWithBehaviors(5, []timestampedScaleEvent{}, []timestampedScaleEvent{}, &autoscalingv2.HPAScalingRules{
-		StabilizationWindowSeconds: pointer.Int32(300),
+		StabilizationWindowSeconds: ptr.To(int32(300)),
 		SelectPolicy:               &policy,
 		Policies: []autoscalingv2.HPAScalingPolicy{
 			{
@@ -3891,7 +3891,7 @@ func TestCalculateScaleDownLimitWithBehaviors(t *testing.T) {
 func generateScalingRules(pods, podsPeriod, percent, percentPeriod, stabilizationWindow int32) *autoscalingv2.HPAScalingRules {
 	policy := autoscalingv2.MaxChangePolicySelect
 	directionBehavior := autoscalingv2.HPAScalingRules{
-		StabilizationWindowSeconds: pointer.Int32(stabilizationWindow),
+		StabilizationWindowSeconds: ptr.To(int32(stabilizationWindow)),
 		SelectPolicy:               &policy,
 	}
 	if pods != 0 {
diff --git a/pkg/controller/podautoscaler/replica_calculator.go b/pkg/controller/podautoscaler/replica_calculator.go
index 1acd9c439e8..f4f89b544a6 100644
--- a/pkg/controller/podautoscaler/replica_calculator.go
+++ b/pkg/controller/podautoscaler/replica_calculator.go
@@ -375,10 +375,10 @@ func (c *ReplicaCalculator) GetExternalPerPodMetricReplicas(statusReplicas int32
 	return replicaCount, usage, timestamp, nil
 }

-func groupPods(pods []*v1.Pod, metrics metricsclient.PodMetricsInfo, resource v1.ResourceName, cpuInitializationPeriod, delayOfInitialReadinessStatus time.Duration) (readyPodCount int, unreadyPods, missingPods, ignoredPods sets.String) {
-	missingPods = sets.NewString()
-	unreadyPods = sets.NewString()
-	ignoredPods = sets.NewString()
+func groupPods(pods []*v1.Pod, metrics metricsclient.PodMetricsInfo, resource v1.ResourceName, cpuInitializationPeriod, delayOfInitialReadinessStatus time.Duration) (readyPodCount int, unreadyPods, missingPods, ignoredPods sets.Set[string]) {
+	missingPods = sets.New[string]()
+	unreadyPods = sets.New[string]()
+	ignoredPods = sets.New[string]()
 	for _, pod := range pods {
 		if pod.DeletionTimestamp != nil || pod.Status.Phase == v1.PodFailed {
 			ignoredPods.Insert(pod.Name)
@@ -446,7 +446,7 @@ func calculatePodRequests(pods []*v1.Pod, container string, resource v1.Resource
 	return requests, nil
 }

-func removeMetricsForPods(metrics metricsclient.PodMetricsInfo, pods sets.String) {
+func removeMetricsForPods(metrics metricsclient.PodMetricsInfo, pods sets.Set[string]) {
 	for _, pod := range pods.UnsortedList() {
 		delete(metrics, pod)
 	}
diff --git a/pkg/controller/podautoscaler/replica_calculator_test.go b/pkg/controller/podautoscaler/replica_calculator_test.go
index c90871bd4e8..2ca484d96e7 100644
--- a/pkg/controller/podautoscaler/replica_calculator_test.go
+++ b/pkg/controller/podautoscaler/replica_calculator_test.go
@@ -1614,9 +1614,9 @@ func TestGroupPods(t *testing.T) {
 		metrics             metricsclient.PodMetricsInfo
 		resource            v1.ResourceName
 		expectReadyPodCount int
-		expectUnreadyPods   sets.String
-		expectMissingPods   sets.String
-		expectIgnoredPods   sets.String
+		expectUnreadyPods   sets.Set[string]
+		expectMissingPods   sets.Set[string]
+		expectIgnoredPods   sets.Set[string]
 	}{
 		{
 			name:                "void",
 			pods:                []*v1.Pod{},
 			metrics:             metricsclient.PodMetricsInfo{},
 			resource:            v1.ResourceCPU,
 			expectReadyPodCount: 0,
-			expectUnreadyPods:   sets.NewString(),
-			expectMissingPods:   sets.NewString(),
-			expectIgnoredPods:   sets.NewString(),
+			expectUnreadyPods:   sets.New[string](),
+			expectMissingPods:   sets.New[string](),
+			expectIgnoredPods:   sets.New[string](),
 		}, {
 			name: "count in a ready pod - memory",
 			pods: []*v1.Pod{
 			},
 			metrics: metricsclient.PodMetricsInfo{
 			},
 			resource:            v1.ResourceMemory,
 			expectReadyPodCount: 1,
-			expectUnreadyPods:   sets.NewString(),
-			expectMissingPods:   sets.NewString(),
-			expectIgnoredPods:   sets.NewString(),
+			expectUnreadyPods:   sets.New[string](),
+			expectMissingPods:   sets.New[string](),
+			expectIgnoredPods:   sets.New[string](),
 		}, {
 			name: "unready a pod without ready condition - CPU",
 			pods: []*v1.Pod{
 			},
 			metrics: metricsclient.PodMetricsInfo{
 			},
 			resource:            v1.ResourceCPU,
 			expectReadyPodCount: 0,
-			expectUnreadyPods:   sets.NewString("lucretius"),
-			expectMissingPods:   sets.NewString(),
-			expectIgnoredPods:   sets.NewString(),
+			expectUnreadyPods:   sets.New[string]("lucretius"),
+			expectMissingPods:   sets.New[string](),
+			expectIgnoredPods:   sets.New[string](),
 		}, {
 			name: "count in a ready pod with fresh metrics during initialization period - CPU",
 			pods: []*v1.Pod{
@@ -1697,9 +1697,9 @@ func TestGroupPods(t *testing.T) {
 			},
 			resource:            v1.ResourceCPU,
 			expectReadyPodCount: 1,
-			expectUnreadyPods:   sets.NewString(),
-			expectMissingPods:   sets.NewString(),
-			expectIgnoredPods:   sets.NewString(),
+			expectUnreadyPods:   sets.New[string](),
+			expectMissingPods:   sets.New[string](),
+			expectIgnoredPods:   sets.New[string](),
 		}, {
 			name: "unready a ready pod without fresh metrics during initialization period - CPU",
 			pods: []*v1.Pod{
@@ -1727,9 +1727,9 @@ func TestGroupPods(t *testing.T) {
 			},
 			resource:            v1.ResourceCPU,
 			expectReadyPodCount: 0,
-			expectUnreadyPods:   sets.NewString("bentham"),
-			expectMissingPods:   sets.NewString(),
-			expectIgnoredPods:   sets.NewString(),
+			expectUnreadyPods:   sets.New[string]("bentham"),
+			expectMissingPods:   sets.New[string](),
+			expectIgnoredPods:   sets.New[string](),
 		}, {
 			name: "unready an unready pod during initialization period - CPU",
 			pods: []*v1.Pod{
@@ -1757,9 +1757,9 @@ func TestGroupPods(t *testing.T) {
 			},
 			resource:            v1.ResourceCPU,
 			expectReadyPodCount: 0,
-			expectUnreadyPods:   sets.NewString("lucretius"),
-			expectMissingPods:   sets.NewString(),
-			expectIgnoredPods:   sets.NewString(),
+			expectUnreadyPods:   sets.New[string]("lucretius"),
+			expectMissingPods:   sets.New[string](),
+			expectIgnoredPods:   sets.New[string](),
 		}, {
 			name: "count in a ready pod without fresh metrics after initialization period - CPU",
 			pods: []*v1.Pod{
@@ -1787,9 +1787,9 @@ func TestGroupPods(t *testing.T) {
 			},
 			resource:            v1.ResourceCPU,
 			expectReadyPodCount: 1,
-			expectUnreadyPods:   sets.NewString(),
-			expectMissingPods:   sets.NewString(),
-			expectIgnoredPods:   sets.NewString(),
+			expectUnreadyPods:   sets.New[string](),
+			expectMissingPods:   sets.New[string](),
+			expectIgnoredPods:   sets.New[string](),
 		}, {
 			name: "count in an unready pod that was ready after initialization period - CPU",
 			pods: []*v1.Pod{
@@ -1817,9 +1817,9 @@ func TestGroupPods(t *testing.T) {
 			},
 			resource:            v1.ResourceCPU,
 			expectReadyPodCount: 1,
-			expectUnreadyPods:   sets.NewString(),
-			expectMissingPods:   sets.NewString(),
-			expectIgnoredPods:   sets.NewString(),
+			expectUnreadyPods:   sets.New[string](),
+			expectMissingPods:   sets.New[string](),
+			expectIgnoredPods:   sets.New[string](),
 		}, {
 			name: "unready pod that has never been ready after initialization period - CPU",
 			pods: []*v1.Pod{
@@ -1847,9 +1847,9 @@ func TestGroupPods(t *testing.T) {
 			},
 			resource:            v1.ResourceCPU,
 			expectReadyPodCount: 1,
-			expectUnreadyPods:   sets.NewString(),
-			expectMissingPods:   sets.NewString(),
-			expectIgnoredPods:   sets.NewString(),
+			expectUnreadyPods:   sets.New[string](),
+			expectMissingPods:   sets.New[string](),
+			expectIgnoredPods:   sets.New[string](),
 		}, {
 			name: "a missing pod",
 			pods: []*v1.Pod{
@@ -1868,9 +1868,9 @@ func TestGroupPods(t *testing.T) {
 			},
 			metrics:             metricsclient.PodMetricsInfo{},
 			resource:            v1.ResourceCPU,
 			expectReadyPodCount: 0,
-			expectUnreadyPods:   sets.NewString(),
-			expectMissingPods:   sets.NewString("epicurus"),
-			expectIgnoredPods:   sets.NewString(),
+			expectUnreadyPods:   sets.New[string](),
+			expectMissingPods:   sets.New[string]("epicurus"),
+			expectIgnoredPods:   sets.New[string](),
 		}, {
 			name: "several pods",
 			pods: []*v1.Pod{
@@ -1921,9 +1921,9 @@ func TestGroupPods(t *testing.T) {
 			},
 			resource:            v1.ResourceCPU,
 			expectReadyPodCount: 1,
-			expectUnreadyPods:   sets.NewString("lucretius"),
-			expectMissingPods:   sets.NewString("epicurus"),
-			expectIgnoredPods:   sets.NewString(),
+			expectUnreadyPods:   sets.New[string]("lucretius"),
+			expectMissingPods:   sets.New[string]("epicurus"),
sets.New[string]("epicurus"), + expectIgnoredPods: sets.New[string](), }, { name: "pending pods are unreadied", pods: []*v1.Pod{ @@ -1939,9 +1939,9 @@ func TestGroupPods(t *testing.T) { metrics: metricsclient.PodMetricsInfo{}, resource: v1.ResourceCPU, expectReadyPodCount: 0, - expectUnreadyPods: sets.NewString("unscheduled"), - expectMissingPods: sets.NewString(), - expectIgnoredPods: sets.NewString(), + expectUnreadyPods: sets.New[string]("unscheduled"), + expectMissingPods: sets.New[string](), + expectIgnoredPods: sets.New[string](), }, { name: "ignore pods with deletion timestamps", pods: []*v1.Pod{ @@ -1960,9 +1960,9 @@ func TestGroupPods(t *testing.T) { }, resource: v1.ResourceCPU, expectReadyPodCount: 0, - expectUnreadyPods: sets.NewString(), - expectMissingPods: sets.NewString(), - expectIgnoredPods: sets.NewString("deleted"), + expectUnreadyPods: sets.New[string](), + expectMissingPods: sets.New[string](), + expectIgnoredPods: sets.New[string]("deleted"), }, { name: "ignore pods in a failed state", pods: []*v1.Pod{ @@ -1980,9 +1980,9 @@ func TestGroupPods(t *testing.T) { }, resource: v1.ResourceCPU, expectReadyPodCount: 0, - expectUnreadyPods: sets.NewString(), - expectMissingPods: sets.NewString(), - expectIgnoredPods: sets.NewString("failed"), + expectUnreadyPods: sets.New[string](), + expectMissingPods: sets.New[string](), + expectIgnoredPods: sets.New[string]("failed"), }, } for _, tc := range tests { From 42a5e5f4255236d441b523a176ceace6c488ffdb Mon Sep 17 00:00:00 2001 From: Adrian Moisey Date: Tue, 7 Jan 2025 16:25:55 +0200 Subject: [PATCH 2/2] Pass context down to wait.PollUntilContextTimeout --- pkg/controller/podautoscaler/horizontal_test.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/pkg/controller/podautoscaler/horizontal_test.go b/pkg/controller/podautoscaler/horizontal_test.go index 8f7b534365a..0a959bfecba 100644 --- a/pkg/controller/podautoscaler/horizontal_test.go +++ b/pkg/controller/podautoscaler/horizontal_test.go @@ -675,7 +675,7 @@ func findCpuUtilization(metricStatus []autoscalingv2.MetricStatus) (utilization return nil } -func (tc *testCase) verifyResults(t *testing.T, m *mockMonitor) { +func (tc *testCase) verifyResults(ctx context.Context, t *testing.T, m *mockMonitor) { tc.Lock() defer tc.Unlock() @@ -685,12 +685,12 @@ func (tc *testCase) verifyResults(t *testing.T, m *mockMonitor) { assert.Equal(t, tc.specReplicas != tc.expectedDesiredReplicas, tc.eventCreated, "an event should have been created only if we expected a change in replicas") } - tc.verifyRecordedMetric(t, m) + tc.verifyRecordedMetric(ctx, t, m) } -func (tc *testCase) verifyRecordedMetric(t *testing.T, m *mockMonitor) { +func (tc *testCase) verifyRecordedMetric(ctx context.Context, t *testing.T, m *mockMonitor) { // First, wait for the reconciliation completed at least once. 
From 42a5e5f4255236d441b523a176ceace6c488ffdb Mon Sep 17 00:00:00 2001
From: Adrian Moisey
Date: Tue, 7 Jan 2025 16:25:55 +0200
Subject: [PATCH 2/2] Pass context down to wait.PollUntilContextTimeout

---
 pkg/controller/podautoscaler/horizontal_test.go | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/pkg/controller/podautoscaler/horizontal_test.go b/pkg/controller/podautoscaler/horizontal_test.go
index 8f7b534365a..0a959bfecba 100644
--- a/pkg/controller/podautoscaler/horizontal_test.go
+++ b/pkg/controller/podautoscaler/horizontal_test.go
@@ -675,7 +675,7 @@ func findCpuUtilization(metricStatus []autoscalingv2.MetricStatus) (utilization
 	return nil
 }

-func (tc *testCase) verifyResults(t *testing.T, m *mockMonitor) {
+func (tc *testCase) verifyResults(ctx context.Context, t *testing.T, m *mockMonitor) {
 	tc.Lock()
 	defer tc.Unlock()

@@ -685,12 +685,12 @@ func (tc *testCase) verifyResults(t *testing.T, m *mockMonitor) {
 		assert.Equal(t, tc.specReplicas != tc.expectedDesiredReplicas, tc.eventCreated, "an event should have been created only if we expected a change in replicas")
 	}

-	tc.verifyRecordedMetric(t, m)
+	tc.verifyRecordedMetric(ctx, t, m)
 }

-func (tc *testCase) verifyRecordedMetric(t *testing.T, m *mockMonitor) {
+func (tc *testCase) verifyRecordedMetric(ctx context.Context, t *testing.T, m *mockMonitor) {
 	// First, wait for the reconciliation completed at least once.
-	m.waitUntilRecorded(t)
+	m.waitUntilRecorded(ctx, t)

 	assert.Equal(t, tc.expectedReportedReconciliationActionLabel, m.reconciliationActionLabels[0], "the reconciliation action should be recorded in monitor expectedly")
 	assert.Equal(t, tc.expectedReportedReconciliationErrorLabel, m.reconciliationErrorLabels[0], "the reconciliation error should be recorded in monitor expectedly")
@@ -833,7 +833,7 @@ func (tc *testCase) runTestWithController(t *testing.T, hpaController *Horizonta
 	if !ok {
 		t.Fatalf("test HPA controller should have mockMonitor, but actually not")
 	}
-	tc.verifyResults(t, m)
+	tc.verifyResults(ctx, t, m)
 }

 func (tc *testCase) runTest(t *testing.T) {
@@ -875,8 +875,8 @@ func (m *mockMonitor) ObserveMetricComputationResult(action monitor.ActionLabel,
 }

 // waitUntilRecorded waits for the HPA controller to reconcile at least once.
-func (m *mockMonitor) waitUntilRecorded(t *testing.T) {
-	if err := wait.PollUntilContextTimeout(context.Background(), 20*time.Millisecond, 100*time.Millisecond, true, func(ctx context.Context) (done bool, err error) {
+func (m *mockMonitor) waitUntilRecorded(ctx context.Context, t *testing.T) {
+	if err := wait.PollUntilContextTimeout(ctx, 20*time.Millisecond, 100*time.Millisecond, true, func(ctx context.Context) (done bool, err error) {
 		m.RWMutex.RLock()
 		defer m.RWMutex.RUnlock()
 		if len(m.reconciliationActionLabels) == 0 || len(m.reconciliationErrorLabels) == 0 {
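Note on PATCH 2/2: wait.Poll was deprecated in favor of context-aware polling, and the interim context.Background() introduced in PATCH 1/2 can never be cancelled; this follow-up threads the test's own context down so cancelling it aborts the poll early. A rough standalone sketch of the resulting pattern (illustrative condition, not code from this patch):

    package main

    import (
    	"context"
    	"fmt"
    	"time"

    	"k8s.io/apimachinery/pkg/util/wait"
    )

    func main() {
    	// Unlike context.Background(), a cancellable context lets the caller
    	// abort the poll before the timeout fires.
    	ctx, cancel := context.WithCancel(context.Background())
    	defer cancel()

    	attempts := 0
    	// Poll every 20ms with a 100ms timeout; `true` means the condition
    	// runs immediately instead of after the first interval.
    	err := wait.PollUntilContextTimeout(ctx, 20*time.Millisecond, 100*time.Millisecond, true,
    		func(ctx context.Context) (done bool, err error) {
    			attempts++
    			return attempts >= 3, nil // succeed on the third attempt
    		})
    	fmt.Println(attempts, err) // typically "3 <nil>"
    }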