Replace a function closure

Replace more closures with pointer conversion

Replace deprecated Int32Ptr with Int32
Yuan Chen 2023-02-22 18:45:16 -08:00
parent 53b8170b97
commit a24aef6510
13 changed files with 66 additions and 65 deletions
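The pattern is the same across all 13 files: an inline closure (or the deprecated pointer.Int32Ptr helper) that copies an int32 and returns its address is replaced by pointer.Int32 from k8s.io/utils/pointer. A minimal sketch of the before/after, assuming only that package is on the module path:

package main

import (
	"fmt"

	"k8s.io/utils/pointer"
)

func main() {
	replicas := int32(3)

	// Before: take the address of a local copy via an inline closure.
	before := func(i int32) *int32 { return &i }(replicas)

	// After: pointer.Int32 does the same copy-and-take-address in one call
	// (pointer.Int32Ptr is a deprecated alias for it).
	after := pointer.Int32(replicas)

	fmt.Println(*before, *after) // 3 3
}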

View File

@ -50,6 +50,7 @@ import (
"k8s.io/kubernetes/test/utils/crd"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
"k8s.io/utils/pointer"
"github.com/onsi/ginkgo/v2"
)
@ -1992,7 +1993,7 @@ func newTestReplicationControllerForQuota(name, image string, replicas int32) *v
Name: name,
},
Spec: v1.ReplicationControllerSpec{
Replicas: func(i int32) *int32 { return &i }(replicas),
Replicas: pointer.Int32(replicas),
Selector: map[string]string{
"name": name,
},

View File

@ -372,17 +372,17 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
policyIgnore := admissionregistrationv1.Ignore
ginkgo.By("Setting timeout (1s) shorter than webhook latency (5s)")
slowWebhookCleanup := registerSlowWebhook(ctx, f, markersNamespaceName, f.UniqueName, certCtx, &policyFail, pointer.Int32Ptr(1), servicePort)
slowWebhookCleanup := registerSlowWebhook(ctx, f, markersNamespaceName, f.UniqueName, certCtx, &policyFail, pointer.Int32(1), servicePort)
testSlowWebhookTimeoutFailEarly(ctx, f)
slowWebhookCleanup(ctx)
ginkgo.By("Having no error when timeout is shorter than webhook latency and failure policy is ignore")
slowWebhookCleanup = registerSlowWebhook(ctx, f, markersNamespaceName, f.UniqueName, certCtx, &policyIgnore, pointer.Int32Ptr(1), servicePort)
slowWebhookCleanup = registerSlowWebhook(ctx, f, markersNamespaceName, f.UniqueName, certCtx, &policyIgnore, pointer.Int32(1), servicePort)
testSlowWebhookTimeoutNoError(ctx, f)
slowWebhookCleanup(ctx)
ginkgo.By("Having no error when timeout is longer than webhook latency")
slowWebhookCleanup = registerSlowWebhook(ctx, f, markersNamespaceName, f.UniqueName, certCtx, &policyFail, pointer.Int32Ptr(10), servicePort)
slowWebhookCleanup = registerSlowWebhook(ctx, f, markersNamespaceName, f.UniqueName, certCtx, &policyFail, pointer.Int32(10), servicePort)
testSlowWebhookTimeoutNoError(ctx, f)
slowWebhookCleanup(ctx)
@ -907,7 +907,7 @@ func registerWebhookForAttachingPod(ctx context.Context, f *framework.Framework,
Namespace: namespace,
Name: serviceName,
Path: strPtr("/pods/attach"),
Port: pointer.Int32Ptr(servicePort),
Port: pointer.Int32(servicePort),
},
CABundle: certCtx.signingCert,
},
@ -997,7 +997,7 @@ func registerMutatingWebhookForPod(ctx context.Context, f *framework.Framework,
Namespace: namespace,
Name: serviceName,
Path: strPtr("/mutating-pods"),
Port: pointer.Int32Ptr(servicePort),
Port: pointer.Int32(servicePort),
},
CABundle: certCtx.signingCert,
},
@ -1186,7 +1186,7 @@ func failingWebhook(namespace, name string, servicePort int32) admissionregistra
Namespace: namespace,
Name: serviceName,
Path: strPtr("/configmaps"),
Port: pointer.Int32Ptr(servicePort),
Port: pointer.Int32(servicePort),
},
// Without CA bundle, the call to webhook always fails
CABundle: nil,
@ -1293,7 +1293,7 @@ func registerValidatingWebhookForWebhookConfigurations(ctx context.Context, f *f
Namespace: namespace,
Name: serviceName,
Path: strPtr("/always-deny"),
Port: pointer.Int32Ptr(servicePort),
Port: pointer.Int32(servicePort),
},
CABundle: certCtx.signingCert,
},
@ -1351,7 +1351,7 @@ func registerMutatingWebhookForWebhookConfigurations(ctx context.Context, f *fra
Namespace: namespace,
Name: serviceName,
Path: strPtr("/add-label"),
Port: pointer.Int32Ptr(servicePort),
Port: pointer.Int32(servicePort),
},
CABundle: certCtx.signingCert,
},
@ -1411,7 +1411,7 @@ func testWebhooksForWebhookConfigurations(ctx context.Context, f *framework.Fram
// but because the failure policy is ignore, it will
// have no effect on admission requests.
Path: strPtr(""),
Port: pointer.Int32Ptr(servicePort),
Port: pointer.Int32(servicePort),
},
CABundle: nil,
},
@ -1467,7 +1467,7 @@ func testWebhooksForWebhookConfigurations(ctx context.Context, f *framework.Fram
// but because the failure policy is ignore, it will
// have no effect on admission requests.
Path: strPtr(""),
Port: pointer.Int32Ptr(servicePort),
Port: pointer.Int32(servicePort),
},
CABundle: nil,
},
@ -1663,7 +1663,7 @@ func registerWebhookForCustomResource(ctx context.Context, f *framework.Framewor
Namespace: namespace,
Name: serviceName,
Path: strPtr("/custom-resource"),
Port: pointer.Int32Ptr(servicePort),
Port: pointer.Int32(servicePort),
},
CABundle: certCtx.signingCert,
},
@ -1712,7 +1712,7 @@ func registerMutatingWebhookForCustomResource(ctx context.Context, f *framework.
Namespace: namespace,
Name: serviceName,
Path: strPtr("/mutating-custom-resource"),
Port: pointer.Int32Ptr(servicePort),
Port: pointer.Int32(servicePort),
},
CABundle: certCtx.signingCert,
},
@ -1738,7 +1738,7 @@ func registerMutatingWebhookForCustomResource(ctx context.Context, f *framework.
Namespace: namespace,
Name: serviceName,
Path: strPtr("/mutating-custom-resource"),
Port: pointer.Int32Ptr(servicePort),
Port: pointer.Int32(servicePort),
},
CABundle: certCtx.signingCert,
},
@ -1969,7 +1969,7 @@ func registerValidatingWebhookForCRD(ctx context.Context, f *framework.Framework
Namespace: namespace,
Name: serviceName,
Path: strPtr("/crd"),
Port: pointer.Int32Ptr(servicePort),
Port: pointer.Int32(servicePort),
},
CABundle: certCtx.signingCert,
},
@ -2094,7 +2094,7 @@ func registerSlowWebhook(ctx context.Context, f *framework.Framework, markersNam
Namespace: namespace,
Name: serviceName,
Path: strPtr("/always-allow-delay-5s"),
Port: pointer.Int32Ptr(servicePort),
Port: pointer.Int32(servicePort),
},
CABundle: certCtx.signingCert,
},
@ -2243,7 +2243,7 @@ func newDenyPodWebhookFixture(f *framework.Framework, certCtx *certContext, serv
Namespace: f.Namespace.Name,
Name: serviceName,
Path: strPtr("/pods"),
Port: pointer.Int32Ptr(servicePort),
Port: pointer.Int32(servicePort),
},
CABundle: certCtx.signingCert,
},
@ -2284,7 +2284,7 @@ func newDenyConfigMapWebhookFixture(f *framework.Framework, certCtx *certContext
Namespace: f.Namespace.Name,
Name: serviceName,
Path: strPtr("/configmaps"),
Port: pointer.Int32Ptr(servicePort),
Port: pointer.Int32(servicePort),
},
CABundle: certCtx.signingCert,
},
@ -2310,7 +2310,7 @@ func newMutateConfigMapWebhookFixture(f *framework.Framework, certCtx *certConte
Namespace: f.Namespace.Name,
Name: serviceName,
Path: strPtr("/mutating-configmaps"),
Port: pointer.Int32Ptr(servicePort),
Port: pointer.Int32(servicePort),
},
CABundle: certCtx.signingCert,
},
@ -2384,7 +2384,7 @@ func newValidatingIsReadyWebhookFixture(f *framework.Framework, certCtx *certCon
Namespace: f.Namespace.Name,
Name: serviceName,
Path: strPtr("/always-deny"),
Port: pointer.Int32Ptr(servicePort),
Port: pointer.Int32(servicePort),
},
CABundle: certCtx.signingCert,
},
@ -2423,7 +2423,7 @@ func newMutatingIsReadyWebhookFixture(f *framework.Framework, certCtx *certConte
Namespace: f.Namespace.Name,
Name: serviceName,
Path: strPtr("/always-deny"),
Port: pointer.Int32Ptr(servicePort),
Port: pointer.Int32(servicePort),
},
CABundle: certCtx.signingCert,
},

View File

@ -832,7 +832,7 @@ func testDeploymentCleanUpPolicy(ctx context.Context, f *framework.Framework) {
}
rsName := "test-cleanup-controller"
replicas := int32(1)
revisionHistoryLimit := utilpointer.Int32Ptr(0)
revisionHistoryLimit := utilpointer.Int32(0)
_, err := c.AppsV1().ReplicaSets(ns).Create(ctx, newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil), metav1.CreateOptions{})
framework.ExpectNoError(err)

View File

@ -46,6 +46,7 @@ import (
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
"github.com/onsi/gomega/format"
"k8s.io/utils/pointer"
)
var _ = SIGDescribe("ReplicationController", func() {
@ -458,7 +459,7 @@ func newRC(rsName string, replicas int32, rcPodLabels map[string]string, imageNa
Name: rsName,
},
Spec: v1.ReplicationControllerSpec{
Replicas: func(i int32) *int32 { return &i }(replicas),
Replicas: pointer.Int32(replicas),
Template: &v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: rcPodLabels,

View File

@ -54,6 +54,7 @@ import (
e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
"k8s.io/utils/pointer"
)
const (
@ -322,10 +323,8 @@ var _ = SIGDescribe("StatefulSet", func() {
Type: appsv1.RollingUpdateStatefulSetStrategyType,
RollingUpdate: func() *appsv1.RollingUpdateStatefulSetStrategy {
return &appsv1.RollingUpdateStatefulSetStrategy{
Partition: func() *int32 {
i := int32(3)
return &i
}()}
Partition: pointer.Int32(3),
}
}(),
}
ss, err := c.AppsV1().StatefulSets(ns).Create(ctx, ss, metav1.CreateOptions{})
@ -377,10 +376,8 @@ var _ = SIGDescribe("StatefulSet", func() {
Type: appsv1.RollingUpdateStatefulSetStrategyType,
RollingUpdate: func() *appsv1.RollingUpdateStatefulSetStrategy {
return &appsv1.RollingUpdateStatefulSetStrategy{
Partition: func() *int32 {
i := int32(2)
return &i
}()}
Partition: pointer.Int32(2),
}
}(),
}
ss, err = updateStatefulSetWithRetries(ctx, c, ns, ss.Name, func(update *appsv1.StatefulSet) {
@ -388,10 +385,8 @@ var _ = SIGDescribe("StatefulSet", func() {
Type: appsv1.RollingUpdateStatefulSetStrategyType,
RollingUpdate: func() *appsv1.RollingUpdateStatefulSetStrategy {
return &appsv1.RollingUpdateStatefulSetStrategy{
Partition: func() *int32 {
i := int32(2)
return &i
}()}
Partition: pointer.Int32(2),
}
}(),
}
})

View File

@ -31,6 +31,7 @@ import (
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2eresource "k8s.io/kubernetes/test/e2e/framework/resource"
testutils "k8s.io/kubernetes/test/utils"
"k8s.io/utils/pointer"
)
// ByNameContainer returns a ReplicationController with specified name and container
@ -53,7 +54,7 @@ func ByNameContainer(name string, replicas int32, labels map[string]string, c v1
Name: name,
},
Spec: v1.ReplicationControllerSpec{
Replicas: func(i int32) *int32 { return &i }(replicas),
Replicas: pointer.Int32(replicas),
Selector: map[string]string{
"name": name,
},

View File

@ -33,6 +33,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
imageutils "k8s.io/kubernetes/test/utils/image"
"k8s.io/utils/pointer"
)
// NewStatefulSet creates a new Webserver StatefulSet for testing. The StatefulSet is named name, is in namespace ns,
@ -70,7 +71,7 @@ func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulP
Selector: &metav1.LabelSelector{
MatchLabels: labels,
},
Replicas: func(i int32) *int32 { return &i }(replicas),
Replicas: pointer.Int32(replicas),
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,

View File

@ -46,6 +46,7 @@ import (
corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/utils/pointer"
"k8s.io/component-base/version"
kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
@ -141,7 +142,7 @@ func TestAtomicPut(t *testing.T) {
},
},
Spec: v1.ReplicationControllerSpec{
Replicas: func(i int32) *int32 { return &i }(0),
Replicas: pointer.Int32(0),
Selector: map[string]string{
"foo": "bar",
},

View File

@ -271,7 +271,7 @@ func newTestLeaseWithDeprecatedLabels(acquireTime time.Time, namespace string) *
},
Spec: coordinationv1.LeaseSpec{
HolderIdentity: pointer.StringPtr(testLeaseName),
LeaseDurationSeconds: pointer.Int32Ptr(3600),
LeaseDurationSeconds: pointer.Int32(3600),
AcquireTime: &metav1.MicroTime{Time: acquireTime},
RenewTime: &metav1.MicroTime{Time: acquireTime},
},

View File

@ -875,7 +875,7 @@ func TestSpecReplicasChange(t *testing.T) {
var oldGeneration int64
tester.deployment, err = tester.updateDeployment(func(update *apps.Deployment) {
oldGeneration = update.Generation
update.Spec.RevisionHistoryLimit = pointer.Int32Ptr(4)
update.Spec.RevisionHistoryLimit = pointer.Int32(4)
})
if err != nil {
t.Fatalf("failed updating deployment %q: %v", tester.deployment.Name, err)
@ -904,7 +904,7 @@ func TestDeploymentAvailableCondition(t *testing.T) {
// Assign a high value to the deployment's minReadySeconds
tester.deployment.Spec.MinReadySeconds = 3600
// progressDeadlineSeconds must be greater than minReadySeconds
tester.deployment.Spec.ProgressDeadlineSeconds = pointer.Int32Ptr(7200)
tester.deployment.Spec.ProgressDeadlineSeconds = pointer.Int32(7200)
var err error
tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(context.TODO(), tester.deployment, metav1.CreateOptions{})
if err != nil {

View File

@ -747,7 +747,7 @@ func TestParallelJob(t *testing.T) {
jobObj, err := createJobWithDefaults(ctx, clientSet, ns.Name, &batchv1.Job{
Spec: batchv1.JobSpec{
Parallelism: pointer.Int32Ptr(5),
Parallelism: pointer.Int32(5),
},
})
if err != nil {
@ -755,7 +755,7 @@ func TestParallelJob(t *testing.T) {
}
want := podsByStatus{Active: 5}
if tc.enableReadyPods {
want.Ready = pointer.Int32Ptr(0)
want.Ready = pointer.Int32(0)
}
validateJobPodsStatus(ctx, t, clientSet, jobObj, want)
@ -833,7 +833,7 @@ func TestParallelJobParallelism(t *testing.T) {
jobObj, err := createJobWithDefaults(ctx, clientSet, ns.Name, &batchv1.Job{
Spec: batchv1.JobSpec{
BackoffLimit: pointer.Int32(2),
Parallelism: pointer.Int32Ptr(5),
Parallelism: pointer.Int32(5),
},
})
if err != nil {
@ -901,8 +901,8 @@ func TestParallelJobWithCompletions(t *testing.T) {
jobObj, err := createJobWithDefaults(ctx, clientSet, ns.Name, &batchv1.Job{
Spec: batchv1.JobSpec{
Parallelism: pointer.Int32Ptr(54),
Completions: pointer.Int32Ptr(56),
Parallelism: pointer.Int32(54),
Completions: pointer.Int32(56),
},
})
if err != nil {
@ -910,7 +910,7 @@ func TestParallelJobWithCompletions(t *testing.T) {
}
want := podsByStatus{Active: 54}
if tc.enableReadyPods {
want.Ready = pointer.Int32Ptr(0)
want.Ready = pointer.Int32(0)
}
validateJobPodsStatus(ctx, t, clientSet, jobObj, want)
@ -976,8 +976,8 @@ func TestIndexedJob(t *testing.T) {
mode := batchv1.IndexedCompletion
jobObj, err := createJobWithDefaults(ctx, clientSet, ns.Name, &batchv1.Job{
Spec: batchv1.JobSpec{
Parallelism: pointer.Int32Ptr(3),
Completions: pointer.Int32Ptr(4),
Parallelism: pointer.Int32(3),
Completions: pointer.Int32(4),
CompletionMode: &mode,
},
})
@ -1231,8 +1231,8 @@ func BenchmarkLargeIndexedJob(b *testing.B) {
Name: fmt.Sprintf("npods-%d-%d", nPods, n),
},
Spec: batchv1.JobSpec{
Parallelism: pointer.Int32Ptr(nPods),
Completions: pointer.Int32Ptr(nPods),
Parallelism: pointer.Int32(nPods),
Completions: pointer.Int32(nPods),
CompletionMode: &mode,
},
})
@ -1304,7 +1304,7 @@ func TestOrphanPodsFinalizersClearedWithGC(t *testing.T) {
jobObj, err := createJobWithDefaults(ctx, clientSet, ns.Name, &batchv1.Job{
Spec: batchv1.JobSpec{
Parallelism: pointer.Int32Ptr(2),
Parallelism: pointer.Int32(2),
},
})
if err != nil {
@ -1450,7 +1450,7 @@ func TestOrphanPodsFinalizersClearedOnRestart(t *testing.T) {
jobObj, err := createJobWithDefaults(ctx, clientSet, ns.Name, &batchv1.Job{
Spec: batchv1.JobSpec{
Parallelism: pointer.Int32Ptr(1),
Parallelism: pointer.Int32(1),
},
})
if err != nil {
@ -1514,8 +1514,8 @@ func TestSuspendJob(t *testing.T) {
parallelism := int32(2)
job, err := createJobWithDefaults(ctx, clientSet, ns.Name, &batchv1.Job{
Spec: batchv1.JobSpec{
Parallelism: pointer.Int32Ptr(parallelism),
Completions: pointer.Int32Ptr(4),
Parallelism: pointer.Int32(parallelism),
Completions: pointer.Int32(4),
Suspend: pointer.BoolPtr(tc.create.flag),
},
})
@ -1559,8 +1559,8 @@ func TestSuspendJobControllerRestart(t *testing.T) {
job, err := createJobWithDefaults(ctx, clientSet, ns.Name, &batchv1.Job{
Spec: batchv1.JobSpec{
Parallelism: pointer.Int32Ptr(2),
Completions: pointer.Int32Ptr(4),
Parallelism: pointer.Int32(2),
Completions: pointer.Int32(4),
Suspend: pointer.BoolPtr(true),
},
})
@ -1587,7 +1587,7 @@ func TestNodeSelectorUpdate(t *testing.T) {
defer cancel()
job, err := createJobWithDefaults(ctx, clientSet, ns.Name, &batchv1.Job{Spec: batchv1.JobSpec{
Parallelism: pointer.Int32Ptr(1),
Parallelism: pointer.Int32(1),
Suspend: pointer.BoolPtr(true),
}})
if err != nil {

View File

@ -678,7 +678,7 @@ func TestPodDeletionCost(t *testing.T) {
// Change RS's number of replicas to 1
rsClient := c.AppsV1().ReplicaSets(ns.Name)
updateRS(t, rsClient, rs.Name, func(rs *apps.ReplicaSet) {
rs.Spec.Replicas = pointer.Int32Ptr(1)
rs.Spec.Replicas = pointer.Int32(1)
})
// Poll until ReplicaSet is downscaled to 1.

View File

@ -49,6 +49,7 @@ import (
batchinternal "k8s.io/kubernetes/pkg/apis/batch"
api "k8s.io/kubernetes/pkg/apis/core"
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/utils/pointer"
"k8s.io/klog/v2"
)
@ -318,7 +319,7 @@ func (config *DeploymentConfig) create() error {
Name: config.Name,
},
Spec: apps.DeploymentSpec{
Replicas: func(i int) *int32 { x := int32(i); return &x }(config.Replicas),
Replicas: pointer.Int32(int32(config.Replicas)),
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"name": config.Name,
@ -404,7 +405,7 @@ func (config *ReplicaSetConfig) create() error {
Name: config.Name,
},
Spec: apps.ReplicaSetSpec{
Replicas: func(i int) *int32 { x := int32(i); return &x }(config.Replicas),
Replicas: pointer.Int32(int32(config.Replicas)),
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"name": config.Name,
@ -486,8 +487,8 @@ func (config *JobConfig) create() error {
Name: config.Name,
},
Spec: batch.JobSpec{
Parallelism: func(i int) *int32 { x := int32(i); return &x }(config.Replicas),
Completions: func(i int) *int32 { x := int32(i); return &x }(config.Replicas),
Parallelism: pointer.Int32(int32(config.Replicas)),
Completions: pointer.Int32(int32(config.Replicas)),
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"name": config.Name},
@ -598,7 +599,7 @@ func (config *RCConfig) create() error {
Name: config.Name,
},
Spec: v1.ReplicationControllerSpec{
Replicas: func(i int) *int32 { x := int32(i); return &x }(config.Replicas),
Replicas: pointer.Int32(int32(config.Replicas)),
Selector: map[string]string{
"name": config.Name,
},
@ -1458,7 +1459,7 @@ func createController(client clientset.Interface, controllerName, namespace stri
Name: controllerName,
},
Spec: v1.ReplicationControllerSpec{
Replicas: func(i int) *int32 { x := int32(i); return &x }(podCount),
Replicas: pointer.Int32(int32(podCount)),
Selector: map[string]string{"name": controllerName},
Template: &v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{