diff --git a/test/integration/disruption/disruption_test.go b/test/integration/disruption/disruption_test.go index 6c6bda5eb11..97626970b5d 100644 --- a/test/integration/disruption/disruption_test.go +++ b/test/integration/disruption/disruption_test.go @@ -59,7 +59,7 @@ import ( "k8s.io/kubernetes/test/integration/util" "k8s.io/kubernetes/test/utils/ktesting" "k8s.io/utils/clock" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) const stalePodDisruptionTimeout = 3 * time.Second @@ -585,7 +585,7 @@ func TestPatchCompatibility(t *testing.T) { version: "v1", patchType: types.ApplyPatchType, patch: `{"apiVersion":"policy/v1","kind":"PodDisruptionBudget","spec":{"selector":{"matchLabels":{"patchmatch":"true"},"matchExpressions":[{"key":"patchexpression","operator":"In","values":["true"]}]}}}`, - force: pointer.Bool(true), + force: ptr.To(true), fieldManager: "test", // entire selector is replaced (because structType=atomic) expectSelector: &metav1.LabelSelector{ diff --git a/test/integration/garbagecollector/garbage_collector_test.go b/test/integration/garbagecollector/garbage_collector_test.go index cfe01ba5739..7cf72a2b0a9 100644 --- a/test/integration/garbagecollector/garbage_collector_test.go +++ b/test/integration/garbagecollector/garbage_collector_test.go @@ -52,7 +52,7 @@ import ( "k8s.io/kubernetes/pkg/controller/garbagecollector" "k8s.io/kubernetes/test/integration" "k8s.io/kubernetes/test/integration/framework" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) func getForegroundOptions() metav1.DeleteOptions { @@ -355,7 +355,7 @@ func testCrossNamespaceReferences(t *testing.T, watchCache bool) { } for i := 0; i < validChildrenCount; i++ { _, err := clientSet.CoreV1().Secrets(namespaceB).Create(context.TODO(), &v1.Secret{ObjectMeta: metav1.ObjectMeta{GenerateName: "child-", OwnerReferences: []metav1.OwnerReference{ - {Name: "parent", Kind: "ConfigMap", APIVersion: "v1", UID: parent.UID, Controller: pointer.BoolPtr(false)}, + {Name: "parent", Kind: "ConfigMap", APIVersion: "v1", UID: parent.UID, Controller: ptr.To(false)}, }}}, metav1.CreateOptions{}) if err != nil { t.Fatal(err) @@ -369,7 +369,7 @@ func testCrossNamespaceReferences(t *testing.T, watchCache bool) { for i := 0; i < 25; i++ { invalidOwnerReferences = append(invalidOwnerReferences, metav1.OwnerReference{Name: "invalid", UID: types.UID(fmt.Sprintf("invalid-%d", i)), APIVersion: "test/v1", Kind: fmt.Sprintf("invalid%d", i)}) } - invalidOwnerReferences = append(invalidOwnerReferences, metav1.OwnerReference{Name: "invalid", UID: parent.UID, APIVersion: "v1", Kind: "Pod", Controller: pointer.BoolPtr(false)}) + invalidOwnerReferences = append(invalidOwnerReferences, metav1.OwnerReference{Name: "invalid", UID: parent.UID, APIVersion: "v1", Kind: "Pod", Controller: ptr.To(false)}) for i := 0; i < workers; i++ { _, err := clientSet.CoreV1().ConfigMaps(namespaceA).Create(context.TODO(), &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{GenerateName: "invalid-child-", OwnerReferences: invalidOwnerReferences}}, metav1.CreateOptions{}) @@ -384,7 +384,7 @@ func testCrossNamespaceReferences(t *testing.T, watchCache bool) { ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"single-bad-reference": "true"}, GenerateName: "invalid-child-b-", - OwnerReferences: []metav1.OwnerReference{{Name: "invalid", UID: parent.UID, APIVersion: "v1", Kind: "Pod", Controller: pointer.BoolPtr(false)}}, + OwnerReferences: []metav1.OwnerReference{{Name: "invalid", UID: parent.UID, APIVersion: "v1", Kind: "Pod", Controller: ptr.To(false)}}, }, }, 
metav1.CreateOptions{}) if err != nil { @@ -434,7 +434,7 @@ func testCrossNamespaceReferences(t *testing.T, watchCache bool) { invalidChild, err := clientSet.CoreV1().Secrets(namespaceA).Create(context.TODO(), &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "invalid-child-c-", - OwnerReferences: []metav1.OwnerReference{{Name: "invalid", UID: parent.UID, APIVersion: "v1", Kind: "Pod", Controller: pointer.BoolPtr(false)}}, + OwnerReferences: []metav1.OwnerReference{{Name: "invalid", UID: parent.UID, APIVersion: "v1", Kind: "Pod", Controller: ptr.To(false)}}, }, }, metav1.CreateOptions{}) if err != nil { diff --git a/test/integration/job/job_test.go b/test/integration/job/job_test.go index 8222f0f5d1f..135ae3c7ced 100644 --- a/test/integration/job/job_test.go +++ b/test/integration/job/job_test.go @@ -57,7 +57,6 @@ import ( "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/test/integration/framework" "k8s.io/kubernetes/test/integration/util" - "k8s.io/utils/pointer" "k8s.io/utils/ptr" ) @@ -87,8 +86,8 @@ func TestMetricsOnSuccesses(t *testing.T) { "non-indexed job": { job: &batchv1.Job{ Spec: batchv1.JobSpec{ - Completions: pointer.Int32(2), - Parallelism: pointer.Int32(2), + Completions: ptr.To[int32](2), + Parallelism: ptr.To[int32](2), CompletionMode: &nonIndexedCompletion, }, }, @@ -104,8 +103,8 @@ func TestMetricsOnSuccesses(t *testing.T) { "indexed job": { job: &batchv1.Job{ Spec: batchv1.JobSpec{ - Completions: pointer.Int32(2), - Parallelism: pointer.Int32(2), + Completions: ptr.To[int32](2), + Parallelism: ptr.To[int32](2), CompletionMode: &indexedCompletion, }, }, @@ -133,7 +132,7 @@ func TestMetricsOnSuccesses(t *testing.T) { } validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{ Active: int(*jobObj.Spec.Parallelism), - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) if err, _ := setJobPodsPhase(ctx, clientSet, jobObj, v1.PodSucceeded, int(*jobObj.Spec.Parallelism)); err != nil { t.Fatalf("Failed setting phase %s on Job Pod: %v", v1.PodSucceeded, err) @@ -165,9 +164,9 @@ func TestJobFinishedNumReasonMetric(t *testing.T) { enableJobPodFailurePolicy: true, job: batchv1.Job{ Spec: batchv1.JobSpec{ - Completions: pointer.Int32(1), - Parallelism: pointer.Int32(1), - BackoffLimit: pointer.Int32(1), + Completions: ptr.To[int32](1), + Parallelism: ptr.To[int32](1), + BackoffLimit: ptr.To[int32](1), PodFailurePolicy: &batchv1.PodFailurePolicy{ Rules: []batchv1.PodFailurePolicyRule{ { @@ -202,9 +201,9 @@ func TestJobFinishedNumReasonMetric(t *testing.T) { enableJobPodFailurePolicy: true, job: batchv1.Job{ Spec: batchv1.JobSpec{ - Completions: pointer.Int32(1), - Parallelism: pointer.Int32(1), - BackoffLimit: pointer.Int32(0), + Completions: ptr.To[int32](1), + Parallelism: ptr.To[int32](1), + BackoffLimit: ptr.To[int32](0), PodFailurePolicy: &batchv1.PodFailurePolicy{ Rules: []batchv1.PodFailurePolicyRule{ { @@ -238,9 +237,9 @@ func TestJobFinishedNumReasonMetric(t *testing.T) { "non-indexed job; failed": { job: batchv1.Job{ Spec: batchv1.JobSpec{ - Completions: pointer.Int32(1), - Parallelism: pointer.Int32(1), - BackoffLimit: pointer.Int32(0), + Completions: ptr.To[int32](1), + Parallelism: ptr.To[int32](1), + BackoffLimit: ptr.To[int32](0), }, }, podStatus: v1.PodStatus{ @@ -267,7 +266,7 @@ func TestJobFinishedNumReasonMetric(t *testing.T) { } validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{ Active: int(*jobObj.Spec.Parallelism), - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) op := func(p *v1.Pod) bool { @@ -344,8 +343,8 @@ func 
TestJobPodFailurePolicyWithFailedPodDeletedDuringControllerRestart(t *testi }, }, }, - Parallelism: pointer.Int32(int32(count)), - Completions: pointer.Int32(int32(count)), + Parallelism: ptr.To(int32(count)), + Completions: ptr.To(int32(count)), PodFailurePolicy: &batchv1.PodFailurePolicy{ Rules: []batchv1.PodFailurePolicyRule{ { @@ -394,7 +393,7 @@ func TestJobPodFailurePolicyWithFailedPodDeletedDuringControllerRestart(t *testi } validateJobPodsStatus(ctx, t, cs, jobObj, podsByStatus{ Active: count, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) jobPods, err := getJobPods(ctx, t, cs, jobObj, func(s v1.PodStatus) bool { @@ -447,7 +446,7 @@ func TestJobPodFailurePolicyWithFailedPodDeletedDuringControllerRestart(t *testi // Delete the failed pod to make sure it is not used by the second instance of the controller ctx, cancel = context.WithCancel(context.Background()) - err = cs.CoreV1().Pods(failedPod.Namespace).Delete(ctx, failedPod.Name, metav1.DeleteOptions{GracePeriodSeconds: pointer.Int64(0)}) + err = cs.CoreV1().Pods(failedPod.Namespace).Delete(ctx, failedPod.Name, metav1.DeleteOptions{GracePeriodSeconds: ptr.To[int64](0)}) if err != nil { t.Fatalf("Error: '%v' while deleting pod: '%v'", err, klog.KObj(failedPod)) } @@ -646,7 +645,7 @@ func TestJobPodFailurePolicy(t *testing.T) { } validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{ Active: 1, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) op := func(p *v1.Pod) bool { @@ -666,7 +665,7 @@ func TestJobPodFailurePolicy(t *testing.T) { validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{ Active: test.wantActive, Failed: test.wantFailed, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) if test.wantJobConditionType == batchv1.JobComplete { @@ -699,9 +698,9 @@ func TestBackoffLimitPerIndex_DelayedPodDeletion(t *testing.T) { jobObj, err := createJobWithDefaults(ctx, clientSet, ns.Name, &batchv1.Job{ Spec: batchv1.JobSpec{ - Parallelism: pointer.Int32(1), - Completions: pointer.Int32(1), - BackoffLimitPerIndex: pointer.Int32(1), + Parallelism: ptr.To[int32](1), + Completions: ptr.To[int32](1), + BackoffLimitPerIndex: ptr.To[int32](1), CompletionMode: completionModePtr(batchv1.IndexedCompletion), }, }) @@ -710,9 +709,9 @@ func TestBackoffLimitPerIndex_DelayedPodDeletion(t *testing.T) { } validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{ Active: 1, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) - validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(0), "", pointer.String("")) + validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(0), "", ptr.To("")) // First pod from index 0 failed. if err := setJobPhaseForIndex(ctx, clientSet, jobObj, v1.PodFailed, 0); err != nil { @@ -730,9 +729,9 @@ func TestBackoffLimitPerIndex_DelayedPodDeletion(t *testing.T) { validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{ Active: 1, Failed: 1, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) - validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(0), "", pointer.String("")) + validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(0), "", ptr.To("")) // Verify the replacement pod is created and has the index-failure-count // annotation bumped. 
@@ -754,7 +753,7 @@ func TestBackoffLimitPerIndex_DelayedPodDeletion(t *testing.T) { Active: 0, Succeeded: 1, Failed: 1, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) validateJobSucceeded(ctx, t, clientSet, jobObj) } @@ -773,9 +772,9 @@ func TestBackoffLimitPerIndex_Reenabling(t *testing.T) { jobObj, err := createJobWithDefaults(ctx, clientSet, ns.Name, &batchv1.Job{ Spec: batchv1.JobSpec{ - Parallelism: pointer.Int32(3), - Completions: pointer.Int32(3), - BackoffLimitPerIndex: pointer.Int32(0), + Parallelism: ptr.To[int32](3), + Completions: ptr.To[int32](3), + BackoffLimitPerIndex: ptr.To[int32](0), CompletionMode: completionModePtr(batchv1.IndexedCompletion), }, }) @@ -784,9 +783,9 @@ func TestBackoffLimitPerIndex_Reenabling(t *testing.T) { } validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{ Active: 3, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) - validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(0, 1, 2), "", pointer.String("")) + validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(0, 1, 2), "", ptr.To("")) // First pod from index 0 failed if err := setJobPhaseForIndex(ctx, clientSet, jobObj, v1.PodFailed, 0); err != nil { @@ -795,9 +794,9 @@ func TestBackoffLimitPerIndex_Reenabling(t *testing.T) { validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{ Active: 2, Failed: 1, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) - validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(1, 2), "", pointer.String("0")) + validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(1, 2), "", ptr.To("0")) // Disable the feature defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobBackoffLimitPerIndex, false)() @@ -809,7 +808,7 @@ func TestBackoffLimitPerIndex_Reenabling(t *testing.T) { validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{ Active: 3, Failed: 2, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(0, 1, 2), "", nil) @@ -826,9 +825,9 @@ func TestBackoffLimitPerIndex_Reenabling(t *testing.T) { validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{ Active: 2, Failed: 3, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) - validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(0, 1), "", pointer.String("2")) + validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(0, 1), "", ptr.To("2")) // mark remaining pods are Succeeded and verify Job status if err, _ := setJobPodsPhase(ctx, clientSet, jobObj, v1.PodSucceeded, 2); err != nil { @@ -858,9 +857,9 @@ func TestBackoffLimitPerIndex_JobPodsCreatedWithExponentialBackoff(t *testing.T) jobObj, err := createJobWithDefaults(ctx, clientSet, ns.Name, &batchv1.Job{ Spec: batchv1.JobSpec{ - Completions: pointer.Int32(2), - Parallelism: pointer.Int32(2), - BackoffLimitPerIndex: pointer.Int32(2), + Completions: ptr.To[int32](2), + Parallelism: ptr.To[int32](2), + BackoffLimitPerIndex: ptr.To[int32](2), CompletionMode: completionModePtr(batchv1.IndexedCompletion), }, }) @@ -869,9 +868,9 @@ func TestBackoffLimitPerIndex_JobPodsCreatedWithExponentialBackoff(t *testing.T) } validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{ Active: 2, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) - validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(0, 1), "", pointer.String("")) + validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(0, 1), "", ptr.To("")) // Fail the first pod for index 0 if err := setJobPhaseForIndex(ctx, 
clientSet, jobObj, v1.PodFailed, 0); err != nil { @@ -880,9 +879,9 @@ func TestBackoffLimitPerIndex_JobPodsCreatedWithExponentialBackoff(t *testing.T) validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{ Active: 2, Failed: 1, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) - validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(0, 1), "", pointer.String("")) + validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(0, 1), "", ptr.To("")) // Fail the second pod for index 0 if err := setJobPhaseForIndex(ctx, clientSet, jobObj, v1.PodFailed, 0); err != nil { @@ -891,9 +890,9 @@ func TestBackoffLimitPerIndex_JobPodsCreatedWithExponentialBackoff(t *testing.T) validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{ Active: 2, Failed: 2, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) - validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(0, 1), "", pointer.String("")) + validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(0, 1), "", ptr.To("")) // Fail the first pod for index 1 if err := setJobPhaseForIndex(ctx, clientSet, jobObj, v1.PodFailed, 1); err != nil { @@ -902,9 +901,9 @@ func TestBackoffLimitPerIndex_JobPodsCreatedWithExponentialBackoff(t *testing.T) validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{ Active: 2, Failed: 3, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) - validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(0, 1), "", pointer.String("")) + validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(0, 1), "", ptr.To("")) // Succeed the third pod for index 0 if err := setJobPhaseForIndex(ctx, clientSet, jobObj, v1.PodSucceeded, 0); err != nil { @@ -914,9 +913,9 @@ func TestBackoffLimitPerIndex_JobPodsCreatedWithExponentialBackoff(t *testing.T) Active: 1, Failed: 3, Succeeded: 1, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) - validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(1), "0", pointer.String("")) + validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(1), "0", ptr.To("")) // Fail the second pod for index 1 if err := setJobPhaseForIndex(ctx, clientSet, jobObj, v1.PodFailed, 1); err != nil { @@ -926,9 +925,9 @@ func TestBackoffLimitPerIndex_JobPodsCreatedWithExponentialBackoff(t *testing.T) Active: 1, Failed: 4, Succeeded: 1, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) - validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(1), "0", pointer.String("")) + validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(1), "0", ptr.To("")) // Succeed the third pod for index 1 if err := setJobPhaseForIndex(ctx, clientSet, jobObj, v1.PodSucceeded, 1); err != nil { @@ -938,9 +937,9 @@ func TestBackoffLimitPerIndex_JobPodsCreatedWithExponentialBackoff(t *testing.T) Active: 0, Failed: 4, Succeeded: 2, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) - validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New[int](), "0,1", pointer.String("")) + validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New[int](), "0,1", ptr.To("")) validateJobSucceeded(ctx, t, clientSet, jobObj) for index := 0; index < int(*jobObj.Spec.Completions); index++ { @@ -989,10 +988,10 @@ func TestBackoffLimitPerIndex(t *testing.T) { "job succeeded": { job: batchv1.Job{ Spec: batchv1.JobSpec{ - Parallelism: pointer.Int32(2), - Completions: pointer.Int32(2), + Parallelism: ptr.To[int32](2), + Completions: ptr.To[int32](2), CompletionMode: completionModePtr(batchv1.IndexedCompletion), - BackoffLimitPerIndex: pointer.Int32(1), + BackoffLimitPerIndex: ptr.To[int32](1), Template: 
podTemplateSpec, }, }, @@ -1004,8 +1003,8 @@ func TestBackoffLimitPerIndex(t *testing.T) { wantActive: 2, wantFailed: 1, wantActiveIndexes: sets.New(0, 1), - wantFailedIndexes: pointer.String(""), - wantReplacementPodFailureCount: pointer.Int(1), + wantFailedIndexes: ptr.To(""), + wantReplacementPodFailureCount: ptr.To(1), }, }, wantJobConditionType: batchv1.JobComplete, @@ -1013,10 +1012,10 @@ func TestBackoffLimitPerIndex(t *testing.T) { "job index fails due to exceeding backoff limit per index": { job: batchv1.Job{ Spec: batchv1.JobSpec{ - Parallelism: pointer.Int32(2), - Completions: pointer.Int32(2), + Parallelism: ptr.To[int32](2), + Completions: ptr.To[int32](2), CompletionMode: completionModePtr(batchv1.IndexedCompletion), - BackoffLimitPerIndex: pointer.Int32(2), + BackoffLimitPerIndex: ptr.To[int32](2), Template: podTemplateSpec, }, }, @@ -1028,8 +1027,8 @@ func TestBackoffLimitPerIndex(t *testing.T) { wantActive: 2, wantFailed: 1, wantActiveIndexes: sets.New(0, 1), - wantFailedIndexes: pointer.String(""), - wantReplacementPodFailureCount: pointer.Int(1), + wantFailedIndexes: ptr.To(""), + wantReplacementPodFailureCount: ptr.To(1), }, { status: v1.PodStatus{ @@ -1038,8 +1037,8 @@ func TestBackoffLimitPerIndex(t *testing.T) { wantActive: 2, wantFailed: 2, wantActiveIndexes: sets.New(0, 1), - wantFailedIndexes: pointer.String(""), - wantReplacementPodFailureCount: pointer.Int(2), + wantFailedIndexes: ptr.To(""), + wantReplacementPodFailureCount: ptr.To(2), }, { status: v1.PodStatus{ @@ -1048,7 +1047,7 @@ func TestBackoffLimitPerIndex(t *testing.T) { wantActive: 1, wantFailed: 3, wantActiveIndexes: sets.New(1), - wantFailedIndexes: pointer.String("0"), + wantFailedIndexes: ptr.To("0"), }, }, wantJobConditionType: batchv1.JobFailed, @@ -1056,11 +1055,11 @@ func TestBackoffLimitPerIndex(t *testing.T) { "job index fails due to exceeding the global backoff limit first": { job: batchv1.Job{ Spec: batchv1.JobSpec{ - Parallelism: pointer.Int32(3), - Completions: pointer.Int32(3), + Parallelism: ptr.To[int32](3), + Completions: ptr.To[int32](3), CompletionMode: completionModePtr(batchv1.IndexedCompletion), - BackoffLimitPerIndex: pointer.Int32(1), - BackoffLimit: pointer.Int32(2), + BackoffLimitPerIndex: ptr.To[int32](1), + BackoffLimit: ptr.To[int32](2), Template: podTemplateSpec, }, }, @@ -1073,7 +1072,7 @@ func TestBackoffLimitPerIndex(t *testing.T) { wantActive: 3, wantFailed: 1, wantActiveIndexes: sets.New(0, 1, 2), - wantFailedIndexes: pointer.String(""), + wantFailedIndexes: ptr.To(""), }, { index: 1, @@ -1083,7 +1082,7 @@ func TestBackoffLimitPerIndex(t *testing.T) { wantActive: 3, wantFailed: 2, wantActiveIndexes: sets.New(0, 1, 2), - wantFailedIndexes: pointer.String(""), + wantFailedIndexes: ptr.To(""), }, { index: 2, @@ -1091,7 +1090,7 @@ func TestBackoffLimitPerIndex(t *testing.T) { Phase: v1.PodFailed, }, wantFailed: 5, - wantFailedIndexes: pointer.String(""), + wantFailedIndexes: ptr.To(""), }, }, wantJobConditionType: batchv1.JobFailed, @@ -1099,10 +1098,10 @@ func TestBackoffLimitPerIndex(t *testing.T) { "job continues execution after a failed index, the job is marked Failed due to the failed index": { job: batchv1.Job{ Spec: batchv1.JobSpec{ - Parallelism: pointer.Int32(2), - Completions: pointer.Int32(2), + Parallelism: ptr.To[int32](2), + Completions: ptr.To[int32](2), CompletionMode: completionModePtr(batchv1.IndexedCompletion), - BackoffLimitPerIndex: pointer.Int32(0), + BackoffLimitPerIndex: ptr.To[int32](0), Template: podTemplateSpec, }, }, @@ -1115,7 +1114,7 @@ func 
TestBackoffLimitPerIndex(t *testing.T) { wantActive: 1, wantFailed: 1, wantActiveIndexes: sets.New(1), - wantFailedIndexes: pointer.String("0"), + wantFailedIndexes: ptr.To("0"), }, { index: 1, @@ -1124,7 +1123,7 @@ func TestBackoffLimitPerIndex(t *testing.T) { }, wantFailed: 1, wantSucceeded: 1, - wantFailedIndexes: pointer.String("0"), + wantFailedIndexes: ptr.To("0"), wantCompletedIndexes: "1", }, }, @@ -1133,11 +1132,11 @@ func TestBackoffLimitPerIndex(t *testing.T) { "job execution terminated early due to exceeding max failed indexes": { job: batchv1.Job{ Spec: batchv1.JobSpec{ - Parallelism: pointer.Int32(3), - Completions: pointer.Int32(3), + Parallelism: ptr.To[int32](3), + Completions: ptr.To[int32](3), CompletionMode: completionModePtr(batchv1.IndexedCompletion), - BackoffLimitPerIndex: pointer.Int32(0), - MaxFailedIndexes: pointer.Int32(1), + BackoffLimitPerIndex: ptr.To[int32](0), + MaxFailedIndexes: ptr.To[int32](1), Template: podTemplateSpec, }, }, @@ -1150,7 +1149,7 @@ func TestBackoffLimitPerIndex(t *testing.T) { wantActive: 2, wantFailed: 1, wantActiveIndexes: sets.New(1, 2), - wantFailedIndexes: pointer.String("0"), + wantFailedIndexes: ptr.To("0"), }, { index: 1, @@ -1159,7 +1158,7 @@ func TestBackoffLimitPerIndex(t *testing.T) { }, wantActive: 0, wantFailed: 3, - wantFailedIndexes: pointer.String("0,1"), + wantFailedIndexes: ptr.To("0,1"), }, }, wantJobConditionType: batchv1.JobFailed, @@ -1167,10 +1166,10 @@ func TestBackoffLimitPerIndex(t *testing.T) { "pod failure matching pod failure policy rule with FailIndex action": { job: batchv1.Job{ Spec: batchv1.JobSpec{ - Parallelism: pointer.Int32(2), - Completions: pointer.Int32(2), + Parallelism: ptr.To[int32](2), + Completions: ptr.To[int32](2), CompletionMode: completionModePtr(batchv1.IndexedCompletion), - BackoffLimitPerIndex: pointer.Int32(1), + BackoffLimitPerIndex: ptr.To[int32](1), Template: podTemplateSpec, PodFailurePolicy: &batchv1.PodFailurePolicy{ Rules: []batchv1.PodFailurePolicyRule{ @@ -1212,7 +1211,7 @@ func TestBackoffLimitPerIndex(t *testing.T) { wantActive: 1, wantFailed: 1, wantActiveIndexes: sets.New(1), - wantFailedIndexes: pointer.String("0"), + wantFailedIndexes: ptr.To("0"), }, { index: 1, @@ -1226,7 +1225,7 @@ func TestBackoffLimitPerIndex(t *testing.T) { }, }, wantFailed: 2, - wantFailedIndexes: pointer.String("0,1"), + wantFailedIndexes: ptr.To("0,1"), }, }, wantJobConditionType: batchv1.JobFailed, @@ -1250,7 +1249,7 @@ func TestBackoffLimitPerIndex(t *testing.T) { } validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{ Active: int(*test.job.Spec.Parallelism), - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) for _, podTermination := range test.podTerminations { pod, err := getActivePodForIndex(ctx, clientSet, jobObj, podTermination.index) @@ -1265,7 +1264,7 @@ func TestBackoffLimitPerIndex(t *testing.T) { Active: podTermination.wantActive, Succeeded: podTermination.wantSucceeded, Failed: podTermination.wantFailed, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) validateIndexedJobPods(ctx, t, clientSet, jobObj, podTermination.wantActiveIndexes, podTermination.wantCompletedIndexes, podTermination.wantFailedIndexes) if podTermination.wantReplacementPodFailureCount != nil { @@ -1328,7 +1327,7 @@ func TestNonParallelJob(t *testing.T) { } validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{ Active: 1, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) // Restarting controller. 
@@ -1342,7 +1341,7 @@ func TestNonParallelJob(t *testing.T) { validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{ Active: 1, Failed: 1, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) // Restarting controller. @@ -1357,7 +1356,7 @@ func TestNonParallelJob(t *testing.T) { validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{ Failed: 1, Succeeded: 1, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) validateFinishedPodsNoFinalizer(ctx, t, clientSet, jobObj) } @@ -1384,7 +1383,7 @@ func TestParallelJob(t *testing.T) { jobObj, err := createJobWithDefaults(ctx, clientSet, ns.Name, &batchv1.Job{ Spec: batchv1.JobSpec{ - Parallelism: pointer.Int32(5), + Parallelism: ptr.To[int32](5), }, }) if err != nil { @@ -1392,7 +1391,7 @@ func TestParallelJob(t *testing.T) { } want := podsByStatus{Active: 5} if tc.enableReadyPods { - want.Ready = pointer.Int32(0) + want.Ready = ptr.To[int32](0) } validateJobPodsStatus(ctx, t, clientSet, jobObj, want) @@ -1414,7 +1413,7 @@ func TestParallelJob(t *testing.T) { Failed: 2, } if tc.enableReadyPods { - want.Ready = pointer.Int32(0) + want.Ready = ptr.To[int32](0) } validateJobPodsStatus(ctx, t, clientSet, jobObj, want) // Once one Pod succeeds, no more Pods are created, even if some fail. @@ -1427,7 +1426,7 @@ func TestParallelJob(t *testing.T) { Active: 4, } if tc.enableReadyPods { - want.Ready = pointer.Int32(0) + want.Ready = ptr.To[int32](0) } validateJobPodsStatus(ctx, t, clientSet, jobObj, want) if err, _ := setJobPodsPhase(ctx, clientSet, jobObj, v1.PodFailed, 2); err != nil { @@ -1439,7 +1438,7 @@ func TestParallelJob(t *testing.T) { Active: 2, } if tc.enableReadyPods { - want.Ready = pointer.Int32(0) + want.Ready = ptr.To[int32](0) } validateJobPodsStatus(ctx, t, clientSet, jobObj, want) // No more Pods are created after remaining Pods succeed. @@ -1452,7 +1451,7 @@ func TestParallelJob(t *testing.T) { Succeeded: 3, } if tc.enableReadyPods { - want.Ready = pointer.Int32(0) + want.Ready = ptr.To[int32](0) } validateJobPodsStatus(ctx, t, clientSet, jobObj, want) validateFinishedPodsNoFinalizer(ctx, t, clientSet, jobObj) @@ -1469,8 +1468,8 @@ func TestParallelJobParallelism(t *testing.T) { jobObj, err := createJobWithDefaults(ctx, clientSet, ns.Name, &batchv1.Job{ Spec: batchv1.JobSpec{ - BackoffLimit: pointer.Int32(2), - Parallelism: pointer.Int32(5), + BackoffLimit: ptr.To[int32](2), + Parallelism: ptr.To[int32](5), }, }) if err != nil { @@ -1478,7 +1477,7 @@ func TestParallelJobParallelism(t *testing.T) { } validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{ Active: 5, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) // Reduce parallelism by a number greater than backoffLimit. @@ -1489,7 +1488,7 @@ func TestParallelJobParallelism(t *testing.T) { } validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{ Active: 2, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) // Increase parallelism again. 
@@ -1500,7 +1499,7 @@ func TestParallelJobParallelism(t *testing.T) { } validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{ Active: 4, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) // Succeed Job @@ -1510,7 +1509,7 @@ func TestParallelJobParallelism(t *testing.T) { validateJobSucceeded(ctx, t, clientSet, jobObj) validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{ Succeeded: 4, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) validateFinishedPodsNoFinalizer(ctx, t, clientSet, jobObj) } @@ -1539,8 +1538,8 @@ func TestParallelJobWithCompletions(t *testing.T) { jobObj, err := createJobWithDefaults(ctx, clientSet, ns.Name, &batchv1.Job{ Spec: batchv1.JobSpec{ - Parallelism: pointer.Int32(54), - Completions: pointer.Int32(56), + Parallelism: ptr.To[int32](54), + Completions: ptr.To[int32](56), }, }) if err != nil { @@ -1548,7 +1547,7 @@ func TestParallelJobWithCompletions(t *testing.T) { } want := podsByStatus{Active: 54} if tc.enableReadyPods { - want.Ready = pointer.Int32(0) + want.Ready = ptr.To[int32](0) } validateJobPodsStatus(ctx, t, clientSet, jobObj, want) @@ -1557,7 +1556,7 @@ func TestParallelJobWithCompletions(t *testing.T) { t.Fatalf("Failed Marking Pods as ready: %v", err) } if tc.enableReadyPods { - want.Ready = pointer.Int32(52) + want.Ready = ptr.To[int32](52) } validateJobPodsStatus(ctx, t, clientSet, jobObj, want) @@ -1570,7 +1569,7 @@ func TestParallelJobWithCompletions(t *testing.T) { Failed: 2, } if tc.enableReadyPods { - want.Ready = pointer.Int32(50) + want.Ready = ptr.To[int32](50) } validateJobPodsStatus(ctx, t, clientSet, jobObj, want) // Pods are created until the number of succeeded Pods equals completions. @@ -1583,7 +1582,7 @@ func TestParallelJobWithCompletions(t *testing.T) { Active: 3, } if tc.enableReadyPods { - want.Ready = pointer.Int32(0) + want.Ready = ptr.To[int32](0) } validateJobPodsStatus(ctx, t, clientSet, jobObj, want) // No more Pods are created after the Job completes. 
@@ -1596,7 +1595,7 @@ func TestParallelJobWithCompletions(t *testing.T) { Succeeded: 56, } if tc.enableReadyPods { - want.Ready = pointer.Int32(0) + want.Ready = ptr.To[int32](0) } validateJobPodsStatus(ctx, t, clientSet, jobObj, want) validateFinishedPodsNoFinalizer(ctx, t, clientSet, jobObj) @@ -1615,8 +1614,8 @@ func TestIndexedJob(t *testing.T) { mode := batchv1.IndexedCompletion jobObj, err := createJobWithDefaults(ctx, clientSet, ns.Name, &batchv1.Job{ Spec: batchv1.JobSpec{ - Parallelism: pointer.Int32(3), - Completions: pointer.Int32(4), + Parallelism: ptr.To[int32](3), + Completions: ptr.To[int32](4), CompletionMode: &mode, }, }) @@ -1625,7 +1624,7 @@ func TestIndexedJob(t *testing.T) { } validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{ Active: 3, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(0, 1, 2), "", nil) @@ -1636,7 +1635,7 @@ func TestIndexedJob(t *testing.T) { validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{ Active: 3, Succeeded: 1, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(0, 2, 3), "1", nil) @@ -1648,7 +1647,7 @@ func TestIndexedJob(t *testing.T) { Active: 3, Failed: 1, Succeeded: 1, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(0, 2, 3), "1", nil) @@ -1660,7 +1659,7 @@ func TestIndexedJob(t *testing.T) { Active: 0, Failed: 1, Succeeded: 4, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) validateIndexedJobPods(ctx, t, clientSet, jobObj, nil, "0-3", nil) validateJobSucceeded(ctx, t, clientSet, jobObj) @@ -1676,8 +1675,8 @@ func TestJobPodReplacementPolicy(t *testing.T) { return &obj } jobSpecIndexedDefault := &batchv1.JobSpec{ - Parallelism: pointer.Int32Ptr(podCount), - Completions: pointer.Int32Ptr(podCount), + Parallelism: ptr.To(podCount), + Completions: ptr.To(podCount), CompletionMode: &indexedCompletion, } cases := map[string]struct { @@ -1699,49 +1698,49 @@ func TestJobPodReplacementPolicy(t *testing.T) { podReplacementPolicyEnabled: true, deletePods: true, jobSpec: jobSpecIndexedDefault, - wantTerminating: pointer.Int32(podCount), + wantTerminating: ptr.To(podCount), wantFailed: int(podCount), }, "feature flag true, delete pods, verify terminating status and recreate upon terminating": { podReplacementPolicyEnabled: true, deletePods: true, jobSpec: &batchv1.JobSpec{ - Parallelism: pointer.Int32Ptr(podCount), - Completions: pointer.Int32Ptr(podCount), + Parallelism: ptr.To(podCount), + Completions: ptr.To(podCount), CompletionMode: &indexedCompletion, PodReplacementPolicy: podReplacementPolicy(batchv1.TerminatingOrFailed), }, - wantTerminating: pointer.Int32(podCount), + wantTerminating: ptr.To(podCount), wantFailed: int(podCount), }, "feature flag true, delete pods, verify terminating status and recreate once failed": { podReplacementPolicyEnabled: true, deletePods: true, jobSpec: &batchv1.JobSpec{ - Parallelism: pointer.Int32Ptr(podCount), - Completions: pointer.Int32Ptr(podCount), + Parallelism: ptr.To(podCount), + Completions: ptr.To(podCount), CompletionMode: &indexedCompletion, PodReplacementPolicy: podReplacementPolicy(batchv1.Failed), }, - wantTerminating: pointer.Int32(podCount), + wantTerminating: ptr.To(podCount), }, "feature flag true with NonIndexedJob, delete pods, verify terminating status and recreate once failed": { podReplacementPolicyEnabled: true,
deletePods: true, jobSpec: &batchv1.JobSpec{ - Parallelism: pointer.Int32Ptr(podCount), - Completions: pointer.Int32Ptr(podCount), + Parallelism: ptr.To(podCount), + Completions: ptr.To(podCount), CompletionMode: &nonIndexedCompletion, PodReplacementPolicy: podReplacementPolicy(batchv1.Failed), }, - wantTerminating: pointer.Int32(podCount), + wantTerminating: ptr.To(podCount), }, "feature flag false, podFailurePolicy enabled, delete pods, verify terminating status and recreate once failed": { podReplacementPolicyEnabled: false, deletePods: true, jobSpec: &batchv1.JobSpec{ - Parallelism: pointer.Int32Ptr(podCount), - Completions: pointer.Int32Ptr(podCount), + Parallelism: ptr.To(podCount), + Completions: ptr.To(podCount), CompletionMode: &nonIndexedCompletion, PodReplacementPolicy: podReplacementPolicy(batchv1.Failed), PodFailurePolicy: &batchv1.PodFailurePolicy{ @@ -1840,7 +1839,7 @@ func TestJobPodReplacementPolicy(t *testing.T) { Terminating: tc.wantTerminating, Failed: tc.wantFailed, Active: tc.wantActive, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) }) } @@ -1865,7 +1864,7 @@ func TestElasticIndexedJob(t *testing.T) { "feature flag off, mutation not allowed": { jobUpdates: []jobUpdate{ { - completions: pointer.Int32Ptr(4), + completions: ptr.To[int32](4), }, }, wantErr: apierrors.NewInvalid( @@ -1879,7 +1878,7 @@ func TestElasticIndexedJob(t *testing.T) { jobUpdates: []jobUpdate{ { // Scale up completions 3->4 then succeed indexes 0-3 - completions: pointer.Int32Ptr(4), + completions: ptr.To[int32](4), succeedIndexes: []int{0, 1, 2, 3}, wantSucceededIndexes: "0-3", }, @@ -1900,7 +1899,7 @@ func TestElasticIndexedJob(t *testing.T) { // Scale down completions 3->1, verify prev failure out of range still counts // but succeeded out of range does not. { - completions: pointer.Int32Ptr(1), + completions: ptr.To[int32](1), succeedIndexes: []int{0}, wantSucceededIndexes: "0", wantFailed: 1, @@ -1919,13 +1918,13 @@ func TestElasticIndexedJob(t *testing.T) { }, // Scale completions down 3->2 to exclude previously succeeded index. { - completions: pointer.Int32Ptr(2), + completions: ptr.To[int32](2), wantRemainingIndexes: sets.New(0, 1), wantActivePods: 2, }, // Scale completions back up to include previously succeeded index that was temporarily out of range. 
{ - completions: pointer.Int32Ptr(3), + completions: ptr.To[int32](3), succeedIndexes: []int{0, 1, 2}, wantSucceededIndexes: "0-2", }, @@ -1935,7 +1934,7 @@ func TestElasticIndexedJob(t *testing.T) { featureGate: true, jobUpdates: []jobUpdate{ { - completions: pointer.Int32Ptr(0), + completions: ptr.To[int32](0), }, }, }, @@ -1955,8 +1954,8 @@ func TestElasticIndexedJob(t *testing.T) { mode := batchv1.IndexedCompletion jobObj, err := createJobWithDefaults(ctx, clientSet, ns.Name, &batchv1.Job{ Spec: batchv1.JobSpec{ - Parallelism: pointer.Int32Ptr(initialCompletions), - Completions: pointer.Int32Ptr(initialCompletions), + Parallelism: ptr.To(initialCompletions), + Completions: ptr.To(initialCompletions), CompletionMode: &mode, }, }) @@ -1971,7 +1970,7 @@ func TestElasticIndexedJob(t *testing.T) { if err != nil { return false, err } - if job.Status.Active == int32(initialCompletions) { + if job.Status.Active == initialCompletions { return true, nil } return false, nil @@ -2012,7 +2011,7 @@ func TestElasticIndexedJob(t *testing.T) { Active: update.wantActivePods, Succeeded: len(update.succeedIndexes), Failed: update.wantFailed, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) validateIndexedJobPods(ctx, t, clientSet, jobObj, update.wantRemainingIndexes, update.wantSucceededIndexes, nil) } @@ -2048,8 +2047,8 @@ func BenchmarkLargeIndexedJob(b *testing.B) { Name: fmt.Sprintf("npods-%d-%d", nPods, n), }, Spec: batchv1.JobSpec{ - Parallelism: pointer.Int32(nPods), - Completions: pointer.Int32(nPods), + Parallelism: ptr.To(nPods), + Completions: ptr.To(nPods), CompletionMode: &mode, }, }) @@ -2121,7 +2120,7 @@ func TestOrphanPodsFinalizersClearedWithGC(t *testing.T) { jobObj, err := createJobWithDefaults(ctx, clientSet, ns.Name, &batchv1.Job{ Spec: batchv1.JobSpec{ - Parallelism: pointer.Int32(2), + Parallelism: ptr.To[int32](2), }, }) if err != nil { @@ -2129,7 +2128,7 @@ func TestOrphanPodsFinalizersClearedWithGC(t *testing.T) { } validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{ Active: 2, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) // Delete Job. The GC should delete the pods in cascade. 
@@ -2162,9 +2161,9 @@ func TestFinalizersClearedWhenBackoffLimitExceeded(t *testing.T) { jobObj, err := createJobWithDefaults(ctx, clientSet, ns.Name, &batchv1.Job{ Spec: batchv1.JobSpec{ CompletionMode: &mode, - Completions: pointer.Int32(100), - Parallelism: pointer.Int32(100), - BackoffLimit: pointer.Int32(0), + Completions: ptr.To[int32](100), + Parallelism: ptr.To[int32](100), + BackoffLimit: ptr.To[int32](0), }, }) if err != nil { @@ -2200,7 +2199,7 @@ func TestJobPodsCreatedWithExponentialBackoff(t *testing.T) { } validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{ Active: 1, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) // Fail the first pod @@ -2209,7 +2208,7 @@ func TestJobPodsCreatedWithExponentialBackoff(t *testing.T) { } validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{ Active: 1, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), Failed: 1, }) @@ -2219,7 +2218,7 @@ func TestJobPodsCreatedWithExponentialBackoff(t *testing.T) { } validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{ Active: 1, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), Failed: 2, }) @@ -2283,9 +2282,9 @@ func TestJobFailedWithInterrupts(t *testing.T) { }() jobObj, err := createJobWithDefaults(ctx, clientSet, ns.Name, &batchv1.Job{ Spec: batchv1.JobSpec{ - Completions: pointer.Int32(10), - Parallelism: pointer.Int32(10), - BackoffLimit: pointer.Int32(0), + Completions: ptr.To[int32](10), + Parallelism: ptr.To[int32](10), + BackoffLimit: ptr.To[int32](0), Template: v1.PodTemplateSpec{ Spec: v1.PodSpec{ NodeName: "foo", // Scheduled pods are not deleted immediately. @@ -2298,7 +2297,7 @@ func TestJobFailedWithInterrupts(t *testing.T) { } validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{ Active: 10, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) t.Log("Finishing pods") if err, _ := setJobPodsPhase(ctx, clientSet, jobObj, v1.PodFailed, 1); err != nil { @@ -2355,7 +2354,7 @@ func TestOrphanPodsFinalizersClearedOnRestart(t *testing.T) { jobObj, err := createJobWithDefaults(ctx, clientSet, ns.Name, &batchv1.Job{ Spec: batchv1.JobSpec{ - Parallelism: pointer.Int32(1), + Parallelism: ptr.To[int32](1), }, }) if err != nil { @@ -2363,7 +2362,7 @@ func TestOrphanPodsFinalizersClearedOnRestart(t *testing.T) { } validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{ Active: 1, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) // Step 2: Delete the Job while the controller is stopped. 
@@ -2419,9 +2418,9 @@ func TestSuspendJob(t *testing.T) { parallelism := int32(2) job, err := createJobWithDefaults(ctx, clientSet, ns.Name, &batchv1.Job{ Spec: batchv1.JobSpec{ - Parallelism: pointer.Int32(parallelism), - Completions: pointer.Int32(4), - Suspend: pointer.BoolPtr(tc.create.flag), + Parallelism: ptr.To(parallelism), + Completions: ptr.To[int32](4), + Suspend: ptr.To(tc.create.flag), }, }) if err != nil { @@ -2431,7 +2430,7 @@ func TestSuspendJob(t *testing.T) { validate := func(s string, active int, status v1.ConditionStatus, reason string) { validateJobPodsStatus(ctx, t, clientSet, job, podsByStatus{ Active: active, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) job, err = clientSet.BatchV1().Jobs(ns.Name).Get(ctx, job.Name, metav1.GetOptions{}) if err != nil { @@ -2446,7 +2445,7 @@ func TestSuspendJob(t *testing.T) { } validate("create", tc.create.wantActive, tc.create.wantStatus, tc.create.wantReason) - job.Spec.Suspend = pointer.BoolPtr(tc.update.flag) + job.Spec.Suspend = ptr.To(tc.update.flag) job, err = clientSet.BatchV1().Jobs(ns.Name).Update(ctx, job, metav1.UpdateOptions{}) if err != nil { t.Fatalf("Failed to update Job: %v", err) @@ -2464,9 +2463,9 @@ func TestSuspendJobControllerRestart(t *testing.T) { job, err := createJobWithDefaults(ctx, clientSet, ns.Name, &batchv1.Job{ Spec: batchv1.JobSpec{ - Parallelism: pointer.Int32(2), - Completions: pointer.Int32(4), - Suspend: pointer.BoolPtr(true), + Parallelism: ptr.To[int32](2), + Completions: ptr.To[int32](4), + Suspend: ptr.To(true), }, }) if err != nil { @@ -2474,7 +2473,7 @@ func TestSuspendJobControllerRestart(t *testing.T) { } validateJobPodsStatus(ctx, t, clientSet, job, podsByStatus{ Active: 0, - Ready: pointer.Int32(0), + Ready: ptr.To[int32](0), }) } @@ -2485,8 +2484,8 @@ func TestNodeSelectorUpdate(t *testing.T) { defer cancel() job, err := createJobWithDefaults(ctx, clientSet, ns.Name, &batchv1.Job{Spec: batchv1.JobSpec{ - Parallelism: pointer.Int32(1), - Suspend: pointer.BoolPtr(true), + Parallelism: ptr.To[int32](1), + Suspend: ptr.To(true), }}) if err != nil { t.Fatalf("Failed to create Job: %v", err) @@ -2499,7 +2498,7 @@ func TestNodeSelectorUpdate(t *testing.T) { nodeSelector := map[string]string{"foo": "bar"} if _, err := updateJob(ctx, jobClient, jobName, func(j *batchv1.Job) { j.Spec.Template.Spec.NodeSelector = nodeSelector - j.Spec.Suspend = pointer.BoolPtr(false) + j.Spec.Suspend = ptr.To(false) }); err != nil { t.Errorf("Unexpected error: %v", err) } diff --git a/test/integration/podgc/podgc_test.go b/test/integration/podgc/podgc_test.go index 236e350762d..0f30bacf1be 100644 --- a/test/integration/podgc/podgc_test.go +++ b/test/integration/podgc/podgc_test.go @@ -34,7 +34,7 @@ import ( "k8s.io/kubernetes/pkg/controller/podgc" "k8s.io/kubernetes/pkg/features" testutils "k8s.io/kubernetes/test/integration/util" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) // TestPodGcOrphanedPodsWithFinalizer tests deletion of orphaned pods @@ -256,7 +256,7 @@ func TestTerminatingOnOutOfServiceNode(t *testing.T) { } // trigger termination of the pod, but with long grace period so that it is not removed immediately - err = cs.CoreV1().Pods(testCtx.NS.Name).Delete(testCtx.Ctx, pod.Name, metav1.DeleteOptions{GracePeriodSeconds: pointer.Int64(300)}) + err = cs.CoreV1().Pods(testCtx.NS.Name).Delete(testCtx.Ctx, pod.Name, metav1.DeleteOptions{GracePeriodSeconds: ptr.To[int64](300)}) if err != nil { t.Fatalf("Error: '%v' while deleting pod: '%v'", err, klog.KObj(pod)) } diff --git 
a/test/integration/replicaset/replicaset_test.go b/test/integration/replicaset/replicaset_test.go index 2c2d4510e18..1c70cbd1aaa 100644 --- a/test/integration/replicaset/replicaset_test.go +++ b/test/integration/replicaset/replicaset_test.go @@ -48,7 +48,7 @@ import ( "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/test/integration/framework" testutil "k8s.io/kubernetes/test/utils" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) const ( @@ -681,7 +681,7 @@ func TestPodDeletionCost(t *testing.T) { // Change RS's number of replics to 1 rsClient := c.AppsV1().ReplicaSets(ns.Name) updateRS(t, rsClient, rs.Name, func(rs *apps.ReplicaSet) { - rs.Spec.Replicas = pointer.Int32(1) + rs.Spec.Replicas = ptr.To[int32](1) }) // Poll until ReplicaSet is downscaled to 1. diff --git a/test/integration/util/util.go b/test/integration/util/util.go index e8d860b8ada..fbd8e902abe 100644 --- a/test/integration/util/util.go +++ b/test/integration/util/util.go @@ -67,7 +67,7 @@ import ( "k8s.io/kubernetes/test/integration/framework" imageutils "k8s.io/kubernetes/test/utils/image" "k8s.io/kubernetes/test/utils/ktesting" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) // ShutdownFunc represents the function handle to be called, typically in a defer handler, to shutdown a running module @@ -652,7 +652,7 @@ func InitTestSchedulerWithNS(t *testing.T, nsPrefix string, opts ...scheduler.Op func InitTestDisablePreemption(t *testing.T, nsPrefix string) *TestContext { cfg := configtesting.V1ToInternalWithDefaults(t, kubeschedulerconfigv1.KubeSchedulerConfiguration{ Profiles: []kubeschedulerconfigv1.KubeSchedulerProfile{{ - SchedulerName: pointer.String(v1.DefaultSchedulerName), + SchedulerName: ptr.To(v1.DefaultSchedulerName), Plugins: &kubeschedulerconfigv1.Plugins{ PostFilter: kubeschedulerconfigv1.PluginSet{ Disabled: []kubeschedulerconfigv1.Plugin{
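The change set above is a mechanical migration from the deprecated typed helpers in k8s.io/utils/pointer to the generic helper in k8s.io/utils/ptr. As a minimal, self-contained sketch (illustrative only, not part of the patched files), the before/after usage looks like this; variable names here are made up for the example, while ptr.To is the real generic API:

package main

import (
	"fmt"

	"k8s.io/utils/ptr"
)

func main() {
	// Deprecated style removed by this patch:
	//   pointer.Int32(2), pointer.Int32Ptr(n), pointer.Bool(true), pointer.String("x")
	// Generic replacement: ptr.To(v) returns *T for any value v of type T.
	completions := ptr.To[int32](2) // explicit type parameter: an untyped literal would otherwise infer int
	suspend := ptr.To(true)         // *bool, inferred from the argument
	var podCount int32 = 2
	parallelism := ptr.To(podCount) // *int32, inferred from the typed variable

	fmt.Println(*completions, *suspend, *parallelism)
}

This is why the diff writes ptr.To[int32](0) where the old code passed literals to pointer.Int32, but plain ptr.To(podCount) where the argument is already an int32-typed value.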