From 99903071469e7de882f8e7baef6237edc8f39d84 Mon Sep 17 00:00:00 2001
From: Stephen Kitt
Date: Mon, 11 Sep 2023 16:40:18 +0200
Subject: [PATCH] kube-scheduler: drop deprecated pointer package

This replaces deprecated k8s.io/utils/pointer functions with their ptr
equivalents.

Signed-off-by: Stephen Kitt
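For reference, the shape of the migration (an illustrative sketch, not part
of the diff below; the variable names are invented for the example):

    import "k8s.io/utils/ptr"

    // Before: one constructor per type in the deprecated pointer package.
    //   weight := pointer.Int32(3)            // *int32
    //   name := pointer.String("scheduler")   // *string

    // After: a single generic constructor, func To[T any](v T) *T.
    weight := ptr.To[int32](3)  // explicit type parameter: the untyped
                                // constant 3 would otherwise infer int
    name := ptr.To("scheduler") // *string, type inferred from the argument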
---
 .../app/options/options_test.go               |   4 +-
 .../apis/config/scheme/scheme_test.go         |   8 +-
 .../apis/config/v1/default_plugins.go         |  16 +-
 .../apis/config/v1/default_plugins_test.go    |  72 +++----
 pkg/scheduler/apis/config/v1/defaults.go      |  24 +--
 pkg/scheduler/apis/config/v1/defaults_test.go | 184 +++++++++---------
 .../apis/config/validation/validation_test.go |  14 +-
 .../plugins/nodevolumelimits/csi_test.go      |   4 +-
 .../plugins/nodevolumelimits/non_csi_test.go  |   6 +-
 .../podtopologyspread/filtering_test.go       |  10 +-
 .../plugins/podtopologyspread/scoring_test.go |  46 ++---
 .../plugins/volumebinding/test_utils.go       |   4 +-
 .../framework/runtime/framework_test.go       |   6 +-
 .../metrics/resources/resources_test.go       |   7 +-
 pkg/scheduler/schedule_one_test.go            |  14 +-
 pkg/scheduler/scheduler_test.go               |   4 +-
 pkg/scheduler/testing/wrappers.go             |  10 +-
 17 files changed, 215 insertions(+), 218 deletions(-)

diff --git a/cmd/kube-scheduler/app/options/options_test.go b/cmd/kube-scheduler/app/options/options_test.go
index d46d04053e0..cb721e39b83 100644
--- a/cmd/kube-scheduler/app/options/options_test.go
+++ b/cmd/kube-scheduler/app/options/options_test.go
@@ -41,7 +41,7 @@ import (
 	configtesting "k8s.io/kubernetes/pkg/scheduler/apis/config/testing"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config/testing/defaults"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
-	"k8s.io/utils/pointer"
+	"k8s.io/utils/ptr"
 )

 func TestSchedulerOptions(t *testing.T) {
@@ -279,7 +279,7 @@ profiles:

 	defaultPodInitialBackoffSeconds := int64(1)
 	defaultPodMaxBackoffSeconds := int64(10)
-	defaultPercentageOfNodesToScore := pointer.Int32(0)
+	defaultPercentageOfNodesToScore := ptr.To[int32](0)

 	testcases := []struct {
 		name string
diff --git a/pkg/scheduler/apis/config/scheme/scheme_test.go b/pkg/scheduler/apis/config/scheme/scheme_test.go
index f1278c4e3bf..05d684ca668 100644
--- a/pkg/scheduler/apis/config/scheme/scheme_test.go
+++ b/pkg/scheduler/apis/config/scheme/scheme_test.go
@@ -27,7 +27,7 @@ import (
 	v1 "k8s.io/kube-scheduler/config/v1"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config/testing/defaults"
-	"k8s.io/utils/pointer"
+	"k8s.io/utils/ptr"
 	"sigs.k8s.io/yaml"
 )
@@ -196,7 +196,7 @@ profiles:
 			wantProfiles: []config.KubeSchedulerProfile{
 				{
 					SchedulerName: "default-scheduler",
-					PercentageOfNodesToScore: pointer.Int32(20),
+					PercentageOfNodesToScore: ptr.To[int32](20),
 					Plugins: defaults.PluginsV1,
 					PluginConfig: defaults.PluginConfigsV1,
 				},
@@ -525,7 +525,7 @@ func TestCodecsEncodePluginConfig(t *testing.T) {
 					Name: "InterPodAffinity",
 					Args: runtime.RawExtension{
 						Object: &v1.InterPodAffinityArgs{
-							HardPodAffinityWeight: pointer.Int32(5),
+							HardPodAffinityWeight: ptr.To[int32](5),
 						},
 					},
 				},
@@ -533,7 +533,7 @@ func TestCodecsEncodePluginConfig(t *testing.T) {
 					Name: "VolumeBinding",
 					Args: runtime.RawExtension{
 						Object: &v1.VolumeBindingArgs{
-							BindTimeoutSeconds: pointer.Int64(300),
+							BindTimeoutSeconds: ptr.To[int64](300),
 							Shape: []v1.UtilizationShapePoint{
 								{
 									Utilization: 0,
diff --git a/pkg/scheduler/apis/config/v1/default_plugins.go b/pkg/scheduler/apis/config/v1/default_plugins.go
index a7d5a602619..509cb57a274 100644
--- a/pkg/scheduler/apis/config/v1/default_plugins.go
+++ b/pkg/scheduler/apis/config/v1/default_plugins.go
@@ -23,7 +23,7 @@ import (
 	v1 "k8s.io/kube-scheduler/config/v1"
 	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
-	"k8s.io/utils/pointer"
+	"k8s.io/utils/ptr"
 )

 // getDefaultPlugins returns the default set of plugins.
@@ -34,10 +34,10 @@ func getDefaultPlugins() *v1.Plugins {
 				{Name: names.PrioritySort},
 				{Name: names.NodeUnschedulable},
 				{Name: names.NodeName},
-				{Name: names.TaintToleration, Weight: pointer.Int32(3)},
-				{Name: names.NodeAffinity, Weight: pointer.Int32(2)},
+				{Name: names.TaintToleration, Weight: ptr.To[int32](3)},
+				{Name: names.NodeAffinity, Weight: ptr.To[int32](2)},
 				{Name: names.NodePorts},
-				{Name: names.NodeResourcesFit, Weight: pointer.Int32(1)},
+				{Name: names.NodeResourcesFit, Weight: ptr.To[int32](1)},
 				{Name: names.VolumeRestrictions},
 				{Name: names.EBSLimits},
 				{Name: names.GCEPDLimits},
@@ -45,11 +45,11 @@ func getDefaultPlugins() *v1.Plugins {
 				{Name: names.AzureDiskLimits},
 				{Name: names.VolumeBinding},
 				{Name: names.VolumeZone},
-				{Name: names.PodTopologySpread, Weight: pointer.Int32(2)},
-				{Name: names.InterPodAffinity, Weight: pointer.Int32(2)},
+				{Name: names.PodTopologySpread, Weight: ptr.To[int32](2)},
+				{Name: names.InterPodAffinity, Weight: ptr.To[int32](2)},
 				{Name: names.DefaultPreemption},
-				{Name: names.NodeResourcesBalancedAllocation, Weight: pointer.Int32(1)},
-				{Name: names.ImageLocality, Weight: pointer.Int32(1)},
+				{Name: names.NodeResourcesBalancedAllocation, Weight: ptr.To[int32](1)},
+				{Name: names.ImageLocality, Weight: ptr.To[int32](1)},
 				{Name: names.DefaultBinder},
 			},
 		},
diff --git a/pkg/scheduler/apis/config/v1/default_plugins_test.go b/pkg/scheduler/apis/config/v1/default_plugins_test.go
index a56a4306ea4..f05d5243400 100644
--- a/pkg/scheduler/apis/config/v1/default_plugins_test.go
+++ b/pkg/scheduler/apis/config/v1/default_plugins_test.go
@@ -27,7 +27,7 @@ import (
 	v1 "k8s.io/kube-scheduler/config/v1"
 	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
-	"k8s.io/utils/pointer"
+	"k8s.io/utils/ptr"
 )

 func TestApplyFeatureGates(t *testing.T) {
@@ -47,10 +47,10 @@ func TestApplyFeatureGates(t *testing.T) {
 						{Name: names.PrioritySort},
 						{Name: names.NodeUnschedulable},
 						{Name: names.NodeName},
-						{Name: names.TaintToleration, Weight: pointer.Int32(3)},
-						{Name: names.NodeAffinity, Weight: pointer.Int32(2)},
+						{Name: names.TaintToleration, Weight: ptr.To[int32](3)},
+						{Name: names.NodeAffinity, Weight: ptr.To[int32](2)},
 						{Name: names.NodePorts},
-						{Name: names.NodeResourcesFit, Weight: pointer.Int32(1)},
+						{Name: names.NodeResourcesFit, Weight: ptr.To[int32](1)},
 						{Name: names.VolumeRestrictions},
 						{Name: names.EBSLimits},
 						{Name: names.GCEPDLimits},
@@ -58,11 +58,11 @@ func TestApplyFeatureGates(t *testing.T) {
 						{Name: names.AzureDiskLimits},
 						{Name: names.VolumeBinding},
 						{Name: names.VolumeZone},
-						{Name: names.PodTopologySpread, Weight: pointer.Int32(2)},
-						{Name: names.InterPodAffinity, Weight: pointer.Int32(2)},
+						{Name: names.PodTopologySpread, Weight: ptr.To[int32](2)},
+						{Name: names.InterPodAffinity, Weight: ptr.To[int32](2)},
 						{Name: names.DefaultPreemption},
-						{Name: names.NodeResourcesBalancedAllocation, Weight: pointer.Int32(1)},
-						{Name: names.ImageLocality, Weight: pointer.Int32(1)},
+						{Name: names.NodeResourcesBalancedAllocation, Weight: ptr.To[int32](1)},
+						{Name: names.ImageLocality, Weight: ptr.To[int32](1)},
 						{Name: names.DefaultBinder},
 					},
 				},
@@ -79,10 +79,10 @@ func TestApplyFeatureGates(t *testing.T) {
 						{Name: names.PrioritySort},
 						{Name: names.NodeUnschedulable},
 						{Name: names.NodeName},
-						{Name: names.TaintToleration, Weight: pointer.Int32(3)},
-						{Name: names.NodeAffinity, Weight: pointer.Int32(2)},
+						{Name: names.TaintToleration, Weight: ptr.To[int32](3)},
+						{Name: names.NodeAffinity, Weight: ptr.To[int32](2)},
 						{Name: names.NodePorts},
-						{Name: names.NodeResourcesFit, Weight: pointer.Int32(1)},
+						{Name: names.NodeResourcesFit, Weight: ptr.To[int32](1)},
 						{Name: names.VolumeRestrictions},
 						{Name: names.EBSLimits},
 						{Name: names.GCEPDLimits},
@@ -90,11 +90,11 @@ func TestApplyFeatureGates(t *testing.T) {
 						{Name: names.AzureDiskLimits},
 						{Name: names.VolumeBinding},
 						{Name: names.VolumeZone},
-						{Name: names.PodTopologySpread, Weight: pointer.Int32(2)},
-						{Name: names.InterPodAffinity, Weight: pointer.Int32(2)},
+						{Name: names.PodTopologySpread, Weight: ptr.To[int32](2)},
+						{Name: names.InterPodAffinity, Weight: ptr.To[int32](2)},
 						{Name: names.DefaultPreemption},
-						{Name: names.NodeResourcesBalancedAllocation, Weight: pointer.Int32(1)},
-						{Name: names.ImageLocality, Weight: pointer.Int32(1)},
+						{Name: names.NodeResourcesBalancedAllocation, Weight: ptr.To[int32](1)},
+						{Name: names.ImageLocality, Weight: ptr.To[int32](1)},
 						{Name: names.DefaultBinder},
 						{Name: names.SchedulingGates},
 					},
@@ -112,10 +112,10 @@ func TestApplyFeatureGates(t *testing.T) {
 						{Name: names.PrioritySort},
 						{Name: names.NodeUnschedulable},
 						{Name: names.NodeName},
-						{Name: names.TaintToleration, Weight: pointer.Int32(3)},
-						{Name: names.NodeAffinity, Weight: pointer.Int32(2)},
+						{Name: names.TaintToleration, Weight: ptr.To[int32](3)},
+						{Name: names.NodeAffinity, Weight: ptr.To[int32](2)},
 						{Name: names.NodePorts},
-						{Name: names.NodeResourcesFit, Weight: pointer.Int32(1)},
+						{Name: names.NodeResourcesFit, Weight: ptr.To[int32](1)},
 						{Name: names.VolumeRestrictions},
 						{Name: names.EBSLimits},
 						{Name: names.GCEPDLimits},
@@ -123,12 +123,12 @@ func TestApplyFeatureGates(t *testing.T) {
 						{Name: names.AzureDiskLimits},
 						{Name: names.VolumeBinding},
 						{Name: names.VolumeZone},
-						{Name: names.PodTopologySpread, Weight: pointer.Int32(2)},
-						{Name: names.InterPodAffinity, Weight: pointer.Int32(2)},
+						{Name: names.PodTopologySpread, Weight: ptr.To[int32](2)},
+						{Name: names.InterPodAffinity, Weight: ptr.To[int32](2)},
 						{Name: names.DynamicResources},
 						{Name: names.DefaultPreemption},
-						{Name: names.NodeResourcesBalancedAllocation, Weight: pointer.Int32(1)},
-						{Name: names.ImageLocality, Weight: pointer.Int32(1)},
+						{Name: names.NodeResourcesBalancedAllocation, Weight: ptr.To[int32](1)},
+						{Name: names.ImageLocality, Weight: ptr.To[int32](1)},
 						{Name: names.DefaultBinder},
 						{Name: names.SchedulingGates},
 					},
@@ -312,8 +312,8 @@ func TestMergePlugins(t *testing.T) {
 			customPlugins: &v1.Plugins{
 				Filter: v1.PluginSet{
 					Enabled: []v1.Plugin{
-						{Name: "Plugin1", Weight: pointer.Int32(2)},
-						{Name: "Plugin3", Weight: pointer.Int32(3)},
+						{Name: "Plugin1", Weight: ptr.To[int32](2)},
+						{Name: "Plugin3", Weight: ptr.To[int32](3)},
 					},
 				},
 			},
@@ -329,9 +329,9 @@ func TestMergePlugins(t *testing.T) {
 			expectedPlugins: &v1.Plugins{
 				Filter: v1.PluginSet{
 					Enabled: []v1.Plugin{
-						{Name: "Plugin1", Weight: pointer.Int32(2)},
+						{Name: "Plugin1", Weight: ptr.To[int32](2)},
 						{Name: "Plugin2"},
-						{Name: "Plugin3", Weight: pointer.Int32(3)},
+						{Name: "Plugin3", Weight: ptr.To[int32](3)},
 					},
 				},
 			},
@@ -341,8 +341,8 @@ func TestMergePlugins(t *testing.T) {
 			customPlugins: &v1.Plugins{
 				Filter: v1.PluginSet{
 					Enabled: []v1.Plugin{
-						{Name: "Plugin2", Weight: pointer.Int32(2)},
-						{Name: "Plugin1", Weight: pointer.Int32(1)},
+						{Name: "Plugin2", Weight: ptr.To[int32](2)},
+						{Name: "Plugin1", Weight: ptr.To[int32](1)},
 					},
 				},
 			},
@@ -358,8 +358,8 @@ func TestMergePlugins(t *testing.T) {
 			expectedPlugins: &v1.Plugins{
 				Filter: v1.PluginSet{
 					Enabled: []v1.Plugin{
-						{Name: "Plugin1", Weight: pointer.Int32(1)},
-						{Name: "Plugin2", Weight: pointer.Int32(2)},
+						{Name: "Plugin1", Weight: ptr.To[int32](1)},
+						{Name: "Plugin2", Weight: ptr.To[int32](2)},
 						{Name: "Plugin3"},
 					},
 				},
@@ -371,9 +371,9 @@ func TestMergePlugins(t *testing.T) {
 				Filter: v1.PluginSet{
 					Enabled: []v1.Plugin{
 						{Name: "Plugin1"},
-						{Name: "Plugin2", Weight: pointer.Int32(2)},
+						{Name: "Plugin2", Weight: ptr.To[int32](2)},
 						{Name: "Plugin3"},
-						{Name: "Plugin2", Weight: pointer.Int32(4)},
+						{Name: "Plugin2", Weight: ptr.To[int32](4)},
 					},
 				},
 			},
@@ -390,9 +390,9 @@ func TestMergePlugins(t *testing.T) {
 				Filter: v1.PluginSet{
 					Enabled: []v1.Plugin{
 						{Name: "Plugin1"},
-						{Name: "Plugin2", Weight: pointer.Int32(4)},
+						{Name: "Plugin2", Weight: ptr.To[int32](4)},
 						{Name: "Plugin3"},
-						{Name: "Plugin2", Weight: pointer.Int32(2)},
+						{Name: "Plugin2", Weight: ptr.To[int32](2)},
 					},
 				},
 			},
@@ -473,7 +473,7 @@ func TestMergePlugins(t *testing.T) {
 			customPlugins: &v1.Plugins{
 				MultiPoint: v1.PluginSet{
 					Enabled: []v1.Plugin{
-						{Name: "DefaultPlugin", Weight: pointer.Int32(5)},
+						{Name: "DefaultPlugin", Weight: ptr.To[int32](5)},
 					},
 				},
 			},
@@ -487,7 +487,7 @@ func TestMergePlugins(t *testing.T) {
 			expectedPlugins: &v1.Plugins{
 				MultiPoint: v1.PluginSet{
 					Enabled: []v1.Plugin{
-						{Name: "DefaultPlugin", Weight: pointer.Int32(5)},
+						{Name: "DefaultPlugin", Weight: ptr.To[int32](5)},
 					},
 				},
 			},
diff --git a/pkg/scheduler/apis/config/v1/defaults.go b/pkg/scheduler/apis/config/v1/defaults.go
index 6746f23a962..c3775adcfe2 100644
--- a/pkg/scheduler/apis/config/v1/defaults.go
+++ b/pkg/scheduler/apis/config/v1/defaults.go
@@ -26,7 +26,7 @@ import (
 	configv1 "k8s.io/kube-scheduler/config/v1"
 	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config"
-	"k8s.io/utils/pointer"
+	"k8s.io/utils/ptr"
 )

 var defaultResourceSpec = []configv1.ResourceSpec{
@@ -105,7 +105,7 @@ func setDefaults_KubeSchedulerProfile(logger klog.Logger, prof *configv1.KubeSch
 func SetDefaults_KubeSchedulerConfiguration(obj *configv1.KubeSchedulerConfiguration) {
 	logger := klog.TODO() // called by generated code that doesn't pass a logger. See #115724
 	if obj.Parallelism == nil {
-		obj.Parallelism = pointer.Int32(16)
+		obj.Parallelism = ptr.To[int32](16)
 	}

 	if len(obj.Profiles) == 0 {
@@ -114,7 +114,7 @@ func SetDefaults_KubeSchedulerConfiguration(obj *configv1.KubeSchedulerConfigura
 	// Only apply a default scheduler name when there is a single profile.
 	// Validation will ensure that every profile has a non-empty unique name.
 	if len(obj.Profiles) == 1 && obj.Profiles[0].SchedulerName == nil {
-		obj.Profiles[0].SchedulerName = pointer.String(v1.DefaultSchedulerName)
+		obj.Profiles[0].SchedulerName = ptr.To(v1.DefaultSchedulerName)
 	}

 	// Add the default set of plugins and apply the configuration.
@@ -124,7 +124,7 @@ func SetDefaults_KubeSchedulerConfiguration(obj *configv1.KubeSchedulerConfigura
 	}

 	if obj.PercentageOfNodesToScore == nil {
-		obj.PercentageOfNodesToScore = pointer.Int32(config.DefaultPercentageOfNodesToScore)
+		obj.PercentageOfNodesToScore = ptr.To[int32](config.DefaultPercentageOfNodesToScore)
 	}

 	if len(obj.LeaderElection.ResourceLock) == 0 {
@@ -155,42 +155,42 @@ func SetDefaults_KubeSchedulerConfiguration(obj *configv1.KubeSchedulerConfigura
 	componentbaseconfigv1alpha1.RecommendedDefaultLeaderElectionConfiguration(&obj.LeaderElection)

 	if obj.PodInitialBackoffSeconds == nil {
-		obj.PodInitialBackoffSeconds = pointer.Int64(1)
+		obj.PodInitialBackoffSeconds = ptr.To[int64](1)
 	}

 	if obj.PodMaxBackoffSeconds == nil {
-		obj.PodMaxBackoffSeconds = pointer.Int64(10)
+		obj.PodMaxBackoffSeconds = ptr.To[int64](10)
 	}

 	// Enable profiling by default in the scheduler
 	if obj.EnableProfiling == nil {
-		obj.EnableProfiling = pointer.Bool(true)
+		obj.EnableProfiling = ptr.To(true)
 	}

 	// Enable contention profiling by default if profiling is enabled
 	if *obj.EnableProfiling && obj.EnableContentionProfiling == nil {
-		obj.EnableContentionProfiling = pointer.Bool(true)
+		obj.EnableContentionProfiling = ptr.To(true)
 	}
 }

 func SetDefaults_DefaultPreemptionArgs(obj *configv1.DefaultPreemptionArgs) {
 	if obj.MinCandidateNodesPercentage == nil {
-		obj.MinCandidateNodesPercentage = pointer.Int32(10)
+		obj.MinCandidateNodesPercentage = ptr.To[int32](10)
 	}
 	if obj.MinCandidateNodesAbsolute == nil {
-		obj.MinCandidateNodesAbsolute = pointer.Int32(100)
+		obj.MinCandidateNodesAbsolute = ptr.To[int32](100)
 	}
 }

 func SetDefaults_InterPodAffinityArgs(obj *configv1.InterPodAffinityArgs) {
 	if obj.HardPodAffinityWeight == nil {
-		obj.HardPodAffinityWeight = pointer.Int32(1)
+		obj.HardPodAffinityWeight = ptr.To[int32](1)
 	}
 }

 func SetDefaults_VolumeBindingArgs(obj *configv1.VolumeBindingArgs) {
 	if obj.BindTimeoutSeconds == nil {
-		obj.BindTimeoutSeconds = pointer.Int64(600)
+		obj.BindTimeoutSeconds = ptr.To[int64](600)
 	}
 	if len(obj.Shape) == 0 && feature.DefaultFeatureGate.Enabled(features.VolumeCapacityPriority) {
 		obj.Shape = []configv1.UtilizationShapePoint{
diff --git a/pkg/scheduler/apis/config/v1/defaults_test.go b/pkg/scheduler/apis/config/v1/defaults_test.go
index d2861e8f640..354737817c1 100644
--- a/pkg/scheduler/apis/config/v1/defaults_test.go
+++ b/pkg/scheduler/apis/config/v1/defaults_test.go
@@ -34,7 +34,7 @@ import (
 	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
-	"k8s.io/utils/pointer"
+	"k8s.io/utils/ptr"
 )

 var pluginConfigs = []configv1.PluginConfig{
@@ -46,8 +46,8 @@ var pluginConfigs = []configv1.PluginConfig{
 				Kind: "DefaultPreemptionArgs",
 				APIVersion: "kubescheduler.config.k8s.io/v1",
 			},
-			MinCandidateNodesPercentage: pointer.Int32(10),
-			MinCandidateNodesAbsolute: pointer.Int32(100),
+			MinCandidateNodesPercentage: ptr.To[int32](10),
+			MinCandidateNodesAbsolute: ptr.To[int32](100),
 		}},
 	},
 	{
@@ -58,7 +58,7 @@ var pluginConfigs = []configv1.PluginConfig{
 				Kind: "InterPodAffinityArgs",
 				APIVersion: "kubescheduler.config.k8s.io/v1",
 			},
-			HardPodAffinityWeight: pointer.Int32(1),
+			HardPodAffinityWeight: ptr.To[int32](1),
 		}},
 	},
 	{
@@ -110,7 +110,7 @@ var pluginConfigs = []configv1.PluginConfig{
 				Kind: "VolumeBindingArgs",
 				APIVersion: "kubescheduler.config.k8s.io/v1",
 			},
-			BindTimeoutSeconds: pointer.Int64(600),
+			BindTimeoutSeconds: ptr.To[int64](600),
 		}},
 	},
 }
@@ -126,13 +126,13 @@ func TestSchedulerDefaults(t *testing.T) {
 			name:   "empty config",
 			config: &configv1.KubeSchedulerConfiguration{},
 			expected: &configv1.KubeSchedulerConfiguration{
-				Parallelism: pointer.Int32(16),
+				Parallelism: ptr.To[int32](16),
 				DebuggingConfiguration: componentbaseconfig.DebuggingConfiguration{
 					EnableProfiling: &enable,
 					EnableContentionProfiling: &enable,
 				},
 				LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
-					LeaderElect: pointer.Bool(true),
+					LeaderElect: ptr.To(true),
 					LeaseDuration: metav1.Duration{Duration: 15 * time.Second},
 					RenewDeadline: metav1.Duration{Duration: 10 * time.Second},
 					RetryPeriod: metav1.Duration{Duration: 2 * time.Second},
@@ -145,14 +145,14 @@ func TestSchedulerDefaults(t *testing.T) {
 					Burst: 100,
 					ContentType: "application/vnd.kubernetes.protobuf",
 				},
-				PercentageOfNodesToScore: pointer.Int32(config.DefaultPercentageOfNodesToScore),
-				PodInitialBackoffSeconds: pointer.Int64(1),
-				PodMaxBackoffSeconds: pointer.Int64(10),
+				PercentageOfNodesToScore: ptr.To[int32](config.DefaultPercentageOfNodesToScore),
+				PodInitialBackoffSeconds: ptr.To[int64](1),
+				PodMaxBackoffSeconds: ptr.To[int64](10),
 				Profiles: []configv1.KubeSchedulerProfile{
 					{
 						Plugins: getDefaultPlugins(),
 						PluginConfig: pluginConfigs,
-						SchedulerName: pointer.String("default-scheduler"),
+						SchedulerName: ptr.To("default-scheduler"),
 					},
 				},
 			},
@@ -163,13 +163,13 @@ func TestSchedulerDefaults(t *testing.T) {
 				Profiles: []configv1.KubeSchedulerProfile{{}},
 			},
 			expected: &configv1.KubeSchedulerConfiguration{
-				Parallelism: pointer.Int32(16),
+				Parallelism: ptr.To[int32](16),
 				DebuggingConfiguration: componentbaseconfig.DebuggingConfiguration{
 					EnableProfiling: &enable,
 					EnableContentionProfiling: &enable,
 				},
 				LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
-					LeaderElect: pointer.Bool(true),
+					LeaderElect: ptr.To(true),
 					LeaseDuration: metav1.Duration{Duration: 15 * time.Second},
 					RenewDeadline: metav1.Duration{Duration: 10 * time.Second},
 					RetryPeriod: metav1.Duration{Duration: 2 * time.Second},
@@ -182,12 +182,12 @@ func TestSchedulerDefaults(t *testing.T) {
 					Burst: 100,
 					ContentType: "application/vnd.kubernetes.protobuf",
 				},
-				PercentageOfNodesToScore: pointer.Int32(config.DefaultPercentageOfNodesToScore),
-				PodInitialBackoffSeconds: pointer.Int64(1),
-				PodMaxBackoffSeconds: pointer.Int64(10),
+				PercentageOfNodesToScore: ptr.To[int32](config.DefaultPercentageOfNodesToScore),
+				PodInitialBackoffSeconds: ptr.To[int64](1),
+				PodMaxBackoffSeconds: ptr.To[int64](10),
 				Profiles: []configv1.KubeSchedulerProfile{
 					{
-						SchedulerName: pointer.String("default-scheduler"),
+						SchedulerName: ptr.To("default-scheduler"),
 						Plugins: getDefaultPlugins(),
 						PluginConfig: pluginConfigs},
 				},
@@ -196,7 +196,7 @@ func TestSchedulerDefaults(t *testing.T) {
 		{
 			name: "two profiles",
 			config: &configv1.KubeSchedulerConfiguration{
-				Parallelism: pointer.Int32(16),
+				Parallelism: ptr.To[int32](16),
 				Profiles: []configv1.KubeSchedulerProfile{
 					{
 						PluginConfig: []configv1.PluginConfig{
@@ -204,7 +204,7 @@ func TestSchedulerDefaults(t *testing.T) {
 						},
 					},
 					{
-						SchedulerName: pointer.String("custom-scheduler"),
+						SchedulerName: ptr.To("custom-scheduler"),
 						Plugins: &configv1.Plugins{
 							Bind: configv1.PluginSet{
 								Enabled: []configv1.Plugin{
@@ -219,13 +219,13 @@ func TestSchedulerDefaults(t *testing.T) {
 				},
 			},
 			expected: &configv1.KubeSchedulerConfiguration{
-				Parallelism: pointer.Int32(16),
+				Parallelism: ptr.To[int32](16),
 				DebuggingConfiguration: componentbaseconfig.DebuggingConfiguration{
 					EnableProfiling: &enable,
 					EnableContentionProfiling: &enable,
 				},
 				LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
-					LeaderElect: pointer.Bool(true),
+					LeaderElect: ptr.To(true),
 					LeaseDuration: metav1.Duration{Duration: 15 * time.Second},
 					RenewDeadline: metav1.Duration{Duration: 10 * time.Second},
 					RetryPeriod: metav1.Duration{Duration: 2 * time.Second},
@@ -238,9 +238,9 @@ func TestSchedulerDefaults(t *testing.T) {
 					Burst: 100,
 					ContentType: "application/vnd.kubernetes.protobuf",
 				},
-				PercentageOfNodesToScore: pointer.Int32(config.DefaultPercentageOfNodesToScore),
-				PodInitialBackoffSeconds: pointer.Int64(1),
-				PodMaxBackoffSeconds: pointer.Int64(10),
+				PercentageOfNodesToScore: ptr.To[int32](config.DefaultPercentageOfNodesToScore),
+				PodInitialBackoffSeconds: ptr.To[int64](1),
+				PodMaxBackoffSeconds: ptr.To[int64](10),
 				Profiles: []configv1.KubeSchedulerProfile{
 					{
 						Plugins: getDefaultPlugins(),
@@ -254,8 +254,8 @@ func TestSchedulerDefaults(t *testing.T) {
 								Kind: "DefaultPreemptionArgs",
 								APIVersion: "kubescheduler.config.k8s.io/v1",
 							},
-							MinCandidateNodesPercentage: pointer.Int32(10),
-							MinCandidateNodesAbsolute: pointer.Int32(100),
+							MinCandidateNodesPercentage: ptr.To[int32](10),
+							MinCandidateNodesAbsolute: ptr.To[int32](100),
 						}},
 					},
 					{
@@ -266,7 +266,7 @@ func TestSchedulerDefaults(t *testing.T) {
 								Kind: "InterPodAffinityArgs",
 								APIVersion: "kubescheduler.config.k8s.io/v1",
 							},
-							HardPodAffinityWeight: pointer.Int32(1),
+							HardPodAffinityWeight: ptr.To[int32](1),
 						}},
 					},
 					{
@@ -318,23 +318,23 @@ func TestSchedulerDefaults(t *testing.T) {
 								Kind: "VolumeBindingArgs",
 								APIVersion: "kubescheduler.config.k8s.io/v1",
 							},
-							BindTimeoutSeconds: pointer.Int64(600),
+							BindTimeoutSeconds: ptr.To[int64](600),
 						}},
 					},
 				},
 			},
 			{
-				SchedulerName: pointer.String("custom-scheduler"),
+				SchedulerName: ptr.To("custom-scheduler"),
 				Plugins: &configv1.Plugins{
 					MultiPoint: configv1.PluginSet{
 						Enabled: []configv1.Plugin{
 							{Name: names.PrioritySort},
 							{Name: names.NodeUnschedulable},
 							{Name: names.NodeName},
-							{Name: names.TaintToleration, Weight: pointer.Int32(3)},
-							{Name: names.NodeAffinity, Weight: pointer.Int32(2)},
+							{Name: names.TaintToleration, Weight: ptr.To[int32](3)},
+							{Name: names.NodeAffinity, Weight: ptr.To[int32](2)},
 							{Name: names.NodePorts},
-							{Name: names.NodeResourcesFit, Weight: pointer.Int32(1)},
+							{Name: names.NodeResourcesFit, Weight: ptr.To[int32](1)},
 							{Name: names.VolumeRestrictions},
 							{Name: names.EBSLimits},
 							{Name: names.GCEPDLimits},
@@ -342,11 +342,11 @@ func TestSchedulerDefaults(t *testing.T) {
 							{Name: names.AzureDiskLimits},
 							{Name: names.VolumeBinding},
 							{Name: names.VolumeZone},
-							{Name: names.PodTopologySpread, Weight: pointer.Int32(2)},
-							{Name: names.InterPodAffinity, Weight: pointer.Int32(2)},
+							{Name: names.PodTopologySpread, Weight: ptr.To[int32](2)},
+							{Name: names.InterPodAffinity, Weight: ptr.To[int32](2)},
 							{Name: names.DefaultPreemption},
-							{Name: names.NodeResourcesBalancedAllocation, Weight: pointer.Int32(1)},
-							{Name: names.ImageLocality, Weight: pointer.Int32(1)},
+							{Name: names.NodeResourcesBalancedAllocation, Weight: ptr.To[int32](1)},
+							{Name: names.ImageLocality, Weight: ptr.To[int32](1)},
 							{Name: names.DefaultBinder},
 							{Name: names.SchedulingGates},
 						},
@@ -368,16 +368,16 @@ func TestSchedulerDefaults(t *testing.T) {
 		{
 			name: "Prallelism with no port",
 			config: &configv1.KubeSchedulerConfiguration{
-				Parallelism: pointer.Int32(16),
+				Parallelism: ptr.To[int32](16),
 			},
 			expected: &configv1.KubeSchedulerConfiguration{
-				Parallelism: pointer.Int32(16),
+				Parallelism: ptr.To[int32](16),
 				DebuggingConfiguration: componentbaseconfig.DebuggingConfiguration{
 					EnableProfiling: &enable,
 					EnableContentionProfiling: &enable,
 				},
 				LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
-					LeaderElect: pointer.Bool(true),
+					LeaderElect: ptr.To(true),
 					LeaseDuration: metav1.Duration{Duration: 15 * time.Second},
 					RenewDeadline: metav1.Duration{Duration: 10 * time.Second},
 					RetryPeriod: metav1.Duration{Duration: 2 * time.Second},
@@ -390,14 +390,14 @@ func TestSchedulerDefaults(t *testing.T) {
 					Burst: 100,
 					ContentType: "application/vnd.kubernetes.protobuf",
 				},
-				PercentageOfNodesToScore: pointer.Int32(config.DefaultPercentageOfNodesToScore),
-				PodInitialBackoffSeconds: pointer.Int64(1),
-				PodMaxBackoffSeconds: pointer.Int64(10),
+				PercentageOfNodesToScore: ptr.To[int32](config.DefaultPercentageOfNodesToScore),
+				PodInitialBackoffSeconds: ptr.To[int64](1),
+				PodMaxBackoffSeconds: ptr.To[int64](10),
 				Profiles: []configv1.KubeSchedulerProfile{
 					{
 						Plugins: getDefaultPlugins(),
 						PluginConfig: pluginConfigs,
-						SchedulerName: pointer.String("default-scheduler"),
+						SchedulerName: ptr.To("default-scheduler"),
 					},
 				},
 			},
@@ -405,16 +405,16 @@ func TestSchedulerDefaults(t *testing.T) {
 		{
 			name: "set non default parallelism",
 			config: &configv1.KubeSchedulerConfiguration{
-				Parallelism: pointer.Int32(8),
+				Parallelism: ptr.To[int32](8),
 			},
 			expected: &configv1.KubeSchedulerConfiguration{
-				Parallelism: pointer.Int32(8),
+				Parallelism: ptr.To[int32](8),
 				DebuggingConfiguration: componentbaseconfig.DebuggingConfiguration{
 					EnableProfiling: &enable,
 					EnableContentionProfiling: &enable,
 				},
 				LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
-					LeaderElect: pointer.Bool(true),
+					LeaderElect: ptr.To(true),
 					LeaseDuration: metav1.Duration{Duration: 15 * time.Second},
 					RenewDeadline: metav1.Duration{Duration: 10 * time.Second},
 					RetryPeriod: metav1.Duration{Duration: 2 * time.Second},
@@ -427,14 +427,14 @@ func TestSchedulerDefaults(t *testing.T) {
 					Burst: 100,
 					ContentType: "application/vnd.kubernetes.protobuf",
 				},
-				PercentageOfNodesToScore: pointer.Int32(config.DefaultPercentageOfNodesToScore),
-				PodInitialBackoffSeconds: pointer.Int64(1),
-				PodMaxBackoffSeconds: pointer.Int64(10),
+				PercentageOfNodesToScore: ptr.To[int32](config.DefaultPercentageOfNodesToScore),
+				PodInitialBackoffSeconds: ptr.To[int64](1),
+				PodMaxBackoffSeconds: ptr.To[int64](10),
 				Profiles: []configv1.KubeSchedulerProfile{
 					{
 						Plugins: getDefaultPlugins(),
 						PluginConfig: pluginConfigs,
-						SchedulerName: pointer.String("default-scheduler"),
+						SchedulerName: ptr.To("default-scheduler"),
 					},
 				},
 			},
@@ -445,14 +445,14 @@ func TestSchedulerDefaults(t *testing.T) {
 				DelayCacheUntilActive: true,
 			},
 			expected: &configv1.KubeSchedulerConfiguration{
-				Parallelism: pointer.Int32(16),
+				Parallelism: ptr.To[int32](16),
 				DelayCacheUntilActive: true,
 				DebuggingConfiguration: componentbaseconfig.DebuggingConfiguration{
 					EnableProfiling: &enable,
 					EnableContentionProfiling: &enable,
 				},
 				LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
-					LeaderElect: pointer.Bool(true),
+					LeaderElect: ptr.To(true),
 					LeaseDuration: metav1.Duration{Duration: 15 * time.Second},
 					RenewDeadline: metav1.Duration{Duration: 10 * time.Second},
 					RetryPeriod: metav1.Duration{Duration: 2 * time.Second},
@@ -465,14 +465,14 @@ func TestSchedulerDefaults(t *testing.T) {
 					Burst: 100,
 					ContentType: "application/vnd.kubernetes.protobuf",
 				},
-				PercentageOfNodesToScore: pointer.Int32(config.DefaultPercentageOfNodesToScore),
-				PodInitialBackoffSeconds: pointer.Int64(1),
-				PodMaxBackoffSeconds: pointer.Int64(10),
+				PercentageOfNodesToScore: ptr.To[int32](config.DefaultPercentageOfNodesToScore),
+				PodInitialBackoffSeconds: ptr.To[int64](1),
+				PodMaxBackoffSeconds: ptr.To[int64](10),
 				Profiles: []configv1.KubeSchedulerProfile{
 					{
 						Plugins: getDefaultPlugins(),
 						PluginConfig: pluginConfigs,
-						SchedulerName: pointer.String("default-scheduler"),
+						SchedulerName: ptr.To("default-scheduler"),
 					},
 				},
 			},
@@ -480,16 +480,16 @@ func TestSchedulerDefaults(t *testing.T) {
 		{
 			name: "set non default global percentageOfNodesToScore",
 			config: &configv1.KubeSchedulerConfiguration{
-				PercentageOfNodesToScore: pointer.Int32(50),
+				PercentageOfNodesToScore: ptr.To[int32](50),
 			},
 			expected: &configv1.KubeSchedulerConfiguration{
-				Parallelism: pointer.Int32(16),
+				Parallelism: ptr.To[int32](16),
 				DebuggingConfiguration: componentbaseconfig.DebuggingConfiguration{
 					EnableProfiling: &enable,
 					EnableContentionProfiling: &enable,
 				},
 				LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
-					LeaderElect: pointer.Bool(true),
+					LeaderElect: ptr.To(true),
 					LeaseDuration: metav1.Duration{Duration: 15 * time.Second},
 					RenewDeadline: metav1.Duration{Duration: 10 * time.Second},
 					RetryPeriod: metav1.Duration{Duration: 2 * time.Second},
@@ -502,14 +502,14 @@ func TestSchedulerDefaults(t *testing.T) {
 					Burst: 100,
 					ContentType: "application/vnd.kubernetes.protobuf",
 				},
-				PercentageOfNodesToScore: pointer.Int32(50),
-				PodInitialBackoffSeconds: pointer.Int64(1),
-				PodMaxBackoffSeconds: pointer.Int64(10),
+				PercentageOfNodesToScore: ptr.To[int32](50),
+				PodInitialBackoffSeconds: ptr.To[int64](1),
+				PodMaxBackoffSeconds: ptr.To[int64](10),
 				Profiles: []configv1.KubeSchedulerProfile{
 					{
 						Plugins: getDefaultPlugins(),
 						PluginConfig: pluginConfigs,
-						SchedulerName: pointer.String("default-scheduler"),
+						SchedulerName: ptr.To("default-scheduler"),
 					},
 				},
 			},
@@ -519,18 +519,18 @@ func TestSchedulerDefaults(t *testing.T) {
 			config: &configv1.KubeSchedulerConfiguration{
 				Profiles: []configv1.KubeSchedulerProfile{
 					{
-						PercentageOfNodesToScore: pointer.Int32(50),
+						PercentageOfNodesToScore: ptr.To[int32](50),
 					},
 				},
 			},
 			expected: &configv1.KubeSchedulerConfiguration{
-				Parallelism: pointer.Int32(16),
+				Parallelism: ptr.To[int32](16),
 				DebuggingConfiguration: componentbaseconfig.DebuggingConfiguration{
 					EnableProfiling: &enable,
 					EnableContentionProfiling: &enable,
 				},
 				LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
-					LeaderElect: pointer.Bool(true),
+					LeaderElect: ptr.To(true),
 					LeaseDuration: metav1.Duration{Duration: 15 * time.Second},
 					RenewDeadline: metav1.Duration{Duration: 10 * time.Second},
 					RetryPeriod: metav1.Duration{Duration: 2 * time.Second},
@@ -543,15 +543,15 @@ func TestSchedulerDefaults(t *testing.T) {
 					Burst: 100,
 					ContentType: "application/vnd.kubernetes.protobuf",
 				},
-				PercentageOfNodesToScore: pointer.Int32(config.DefaultPercentageOfNodesToScore),
-				PodInitialBackoffSeconds: pointer.Int64(1),
-				PodMaxBackoffSeconds: pointer.Int64(10),
+				PercentageOfNodesToScore: ptr.To[int32](config.DefaultPercentageOfNodesToScore),
+				PodInitialBackoffSeconds: ptr.To[int64](1),
+				PodMaxBackoffSeconds: ptr.To[int64](10),
 				Profiles: []configv1.KubeSchedulerProfile{
 					{
 						Plugins: getDefaultPlugins(),
 						PluginConfig: pluginConfigs,
-						SchedulerName: pointer.String("default-scheduler"),
-						PercentageOfNodesToScore: pointer.Int32(50),
+						SchedulerName: ptr.To("default-scheduler"),
+						PercentageOfNodesToScore: ptr.To[int32](50),
 					},
 				},
 			},
@@ -559,21 +559,21 @@ func TestSchedulerDefaults(t *testing.T) {
 		{
 			name: "set non default global and profile percentageOfNodesToScore",
 			config: &configv1.KubeSchedulerConfiguration{
-				PercentageOfNodesToScore: pointer.Int32(10),
+				PercentageOfNodesToScore: ptr.To[int32](10),
 				Profiles: []configv1.KubeSchedulerProfile{
 					{
-						PercentageOfNodesToScore: pointer.Int32(50),
+						PercentageOfNodesToScore: ptr.To[int32](50),
 					},
 				},
 			},
 			expected: &configv1.KubeSchedulerConfiguration{
-				Parallelism: pointer.Int32(16),
+				Parallelism: ptr.To[int32](16),
 				DebuggingConfiguration: componentbaseconfig.DebuggingConfiguration{
 					EnableProfiling: &enable,
 					EnableContentionProfiling: &enable,
 				},
 				LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
-					LeaderElect: pointer.Bool(true),
+					LeaderElect: ptr.To(true),
 					LeaseDuration: metav1.Duration{Duration: 15 * time.Second},
 					RenewDeadline: metav1.Duration{Duration: 10 * time.Second},
 					RetryPeriod: metav1.Duration{Duration: 2 * time.Second},
@@ -586,15 +586,15 @@ func TestSchedulerDefaults(t *testing.T) {
 					Burst: 100,
 					ContentType: "application/vnd.kubernetes.protobuf",
 				},
-				PercentageOfNodesToScore: pointer.Int32(10),
-				PodInitialBackoffSeconds: pointer.Int64(1),
-				PodMaxBackoffSeconds: pointer.Int64(10),
+				PercentageOfNodesToScore: ptr.To[int32](10),
+				PodInitialBackoffSeconds: ptr.To[int64](1),
+				PodMaxBackoffSeconds: ptr.To[int64](10),
 				Profiles: []configv1.KubeSchedulerProfile{
 					{
 						Plugins: getDefaultPlugins(),
 						PluginConfig: pluginConfigs,
-						SchedulerName: pointer.String("default-scheduler"),
-						PercentageOfNodesToScore: pointer.Int32(50),
+						SchedulerName: ptr.To("default-scheduler"),
+						PercentageOfNodesToScore: ptr.To[int32](50),
 					},
 				},
 			},
@@ -621,43 +621,43 @@ func TestPluginArgsDefaults(t *testing.T) {
 			name: "DefaultPreemptionArgs empty",
 			in:   &configv1.DefaultPreemptionArgs{},
 			want: &configv1.DefaultPreemptionArgs{
-				MinCandidateNodesPercentage: pointer.Int32(10),
-				MinCandidateNodesAbsolute: pointer.Int32(100),
+				MinCandidateNodesPercentage: ptr.To[int32](10),
+				MinCandidateNodesAbsolute: ptr.To[int32](100),
 			},
 		},
 		{
 			name: "DefaultPreemptionArgs with value",
 			in: &configv1.DefaultPreemptionArgs{
-				MinCandidateNodesPercentage: pointer.Int32(50),
+				MinCandidateNodesPercentage: ptr.To[int32](50),
 			},
 			want: &configv1.DefaultPreemptionArgs{
-				MinCandidateNodesPercentage: pointer.Int32(50),
-				MinCandidateNodesAbsolute: pointer.Int32(100),
+				MinCandidateNodesPercentage: ptr.To[int32](50),
+				MinCandidateNodesAbsolute: ptr.To[int32](100),
 			},
 		},
 		{
 			name: "InterPodAffinityArgs empty",
 			in:   &configv1.InterPodAffinityArgs{},
 			want: &configv1.InterPodAffinityArgs{
-				HardPodAffinityWeight: pointer.Int32(1),
+				HardPodAffinityWeight: ptr.To[int32](1),
 			},
 		},
 		{
 			name: "InterPodAffinityArgs explicit 0",
 			in: &configv1.InterPodAffinityArgs{
-				HardPodAffinityWeight: pointer.Int32(0),
+				HardPodAffinityWeight: ptr.To[int32](0),
 			},
 			want: &configv1.InterPodAffinityArgs{
-				HardPodAffinityWeight: pointer.Int32(0),
+				HardPodAffinityWeight: ptr.To[int32](0),
 			},
 		},
 		{
 			name: "InterPodAffinityArgs with value",
 			in: &configv1.InterPodAffinityArgs{
-				HardPodAffinityWeight: pointer.Int32(5),
+				HardPodAffinityWeight: ptr.To[int32](5),
 			},
 			want: &configv1.InterPodAffinityArgs{
-				HardPodAffinityWeight: pointer.Int32(5),
+				HardPodAffinityWeight: ptr.To[int32](5),
 			},
 		},
 		{
@@ -774,7 +774,7 @@ func TestPluginArgsDefaults(t *testing.T) {
 			},
 			in: &configv1.VolumeBindingArgs{},
 			want: &configv1.VolumeBindingArgs{
-				BindTimeoutSeconds: pointer.Int64(600),
+				BindTimeoutSeconds: ptr.To[int64](600),
 			},
 		},
 		{
@@ -784,7 +784,7 @@ func TestPluginArgsDefaults(t *testing.T) {
 			},
 			in: &configv1.VolumeBindingArgs{},
 			want: &configv1.VolumeBindingArgs{
-				BindTimeoutSeconds: pointer.Int64(600),
+				BindTimeoutSeconds: ptr.To[int64](600),
 				Shape: []configv1.UtilizationShapePoint{
 					{Utilization: 0, Score: 0},
 					{Utilization: 100, Score: 10},
diff --git a/pkg/scheduler/apis/config/validation/validation_test.go b/pkg/scheduler/apis/config/validation/validation_test.go
index d2f53a668eb..f25039b369e 100644
--- a/pkg/scheduler/apis/config/validation/validation_test.go
+++ b/pkg/scheduler/apis/config/validation/validation_test.go
@@ -26,7 +26,7 @@ import (
 	componentbaseconfig "k8s.io/component-base/config"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config"
 	configv1 "k8s.io/kubernetes/pkg/scheduler/apis/config/v1"
-	"k8s.io/utils/pointer"
+	"k8s.io/utils/ptr"
 )

 func TestValidateKubeSchedulerConfigurationV1(t *testing.T) {
@@ -56,7 +56,7 @@ func TestValidateKubeSchedulerConfigurationV1(t *testing.T) {
 		PodMaxBackoffSeconds: podMaxBackoffSeconds,
 		Profiles: []config.KubeSchedulerProfile{{
 			SchedulerName: "me",
-			PercentageOfNodesToScore: pointer.Int32(35),
+			PercentageOfNodesToScore: ptr.To[int32](35),
 			Plugins: &config.Plugins{
 				QueueSort: config.PluginSet{
 					Enabled: []config.Plugin{{Name: "CustomSort"}},
@@ -71,7 +71,7 @@ func TestValidateKubeSchedulerConfigurationV1(t *testing.T) {
 			}},
 		}, {
 			SchedulerName: "other",
-			PercentageOfNodesToScore: pointer.Int32(35),
+			PercentageOfNodesToScore: ptr.To[int32](35),
 			Plugins: &config.Plugins{
 				QueueSort: config.PluginSet{
 					Enabled: []config.Plugin{{Name: "CustomSort"}},
@@ -110,10 +110,10 @@ func TestValidateKubeSchedulerConfigurationV1(t *testing.T) {
 	healthzBindAddrInvalid.HealthzBindAddress = "0.0.0.0:9090"

 	percentageOfNodesToScore101 := validConfig.DeepCopy()
-	percentageOfNodesToScore101.PercentageOfNodesToScore = pointer.Int32(101)
+	percentageOfNodesToScore101.PercentageOfNodesToScore = ptr.To[int32](101)

 	percentageOfNodesToScoreNegative := validConfig.DeepCopy()
-	percentageOfNodesToScoreNegative.PercentageOfNodesToScore = pointer.Int32(-1)
+	percentageOfNodesToScoreNegative.PercentageOfNodesToScore = ptr.To[int32](-1)

 	schedulerNameNotSet := validConfig.DeepCopy()
 	schedulerNameNotSet.Profiles[1].SchedulerName = ""
@@ -122,10 +122,10 @@ func TestValidateKubeSchedulerConfigurationV1(t *testing.T) {
 	repeatedSchedulerName.Profiles[0].SchedulerName = "other"

 	profilePercentageOfNodesToScore101 := validConfig.DeepCopy()
-	profilePercentageOfNodesToScore101.Profiles[1].PercentageOfNodesToScore = pointer.Int32(101)
+	profilePercentageOfNodesToScore101.Profiles[1].PercentageOfNodesToScore = ptr.To[int32](101)

 	profilePercentageOfNodesToScoreNegative := validConfig.DeepCopy()
-	profilePercentageOfNodesToScoreNegative.Profiles[1].PercentageOfNodesToScore = pointer.Int32(-1)
+	profilePercentageOfNodesToScoreNegative.Profiles[1].PercentageOfNodesToScore = ptr.To[int32](-1)

 	differentQueueSort := validConfig.DeepCopy()
 	differentQueueSort.Profiles[1].Plugins.QueueSort.Enabled[0].Name = "AnotherSort"
diff --git a/pkg/scheduler/framework/plugins/nodevolumelimits/csi_test.go b/pkg/scheduler/framework/plugins/nodevolumelimits/csi_test.go
index ae2ef473b2c..e74d781a321 100644
--- a/pkg/scheduler/framework/plugins/nodevolumelimits/csi_test.go
+++ b/pkg/scheduler/framework/plugins/nodevolumelimits/csi_test.go
@@ -37,7 +37,7 @@ import (
 	tf "k8s.io/kubernetes/pkg/scheduler/testing/framework"
 	volumeutil "k8s.io/kubernetes/pkg/volume/util"
 	"k8s.io/kubernetes/test/utils/ktesting"
-	"k8s.io/utils/pointer"
+	"k8s.io/utils/ptr"
 )

 const (
@@ -780,7 +780,7 @@ func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int
 		}
 		if addLimits {
 			driver.Allocatable = &storagev1.VolumeNodeResources{
-				Count: pointer.Int32(int32(limit)),
+				Count: ptr.To(int32(limit)),
 			}
 		}
 		csiNode.Spec.Drivers = append(csiNode.Spec.Drivers, driver)
diff --git a/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi_test.go b/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi_test.go
index a073f9a7e72..776849d4783 100644
--- a/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi_test.go
+++ b/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi_test.go
@@ -32,7 +32,7 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
 	st "k8s.io/kubernetes/pkg/scheduler/testing"
 	tf "k8s.io/kubernetes/pkg/scheduler/testing/framework"
-	"k8s.io/utils/pointer"
+	"k8s.io/utils/ptr"
 )

 var (
@@ -1022,14 +1022,14 @@ func getFakePVCLister(filterName string) tf.PersistentVolumeClaimLister {
 			ObjectMeta: metav1.ObjectMeta{Name: "unboundPVCWithDefaultSCPod"},
 			Spec: v1.PersistentVolumeClaimSpec{
 				VolumeName: "",
-				StorageClassName: pointer.String("standard-sc"),
+				StorageClassName: ptr.To("standard-sc"),
 			},
 		},
 		{
 			ObjectMeta: metav1.ObjectMeta{Name: "unboundPVCWithInvalidSCPod"},
 			Spec: v1.PersistentVolumeClaimSpec{
 				VolumeName: "",
-				StorageClassName: pointer.String("invalid-sc"),
+				StorageClassName: ptr.To("invalid-sc"),
 			},
 		},
 	}
diff --git a/pkg/scheduler/framework/plugins/podtopologyspread/filtering_test.go b/pkg/scheduler/framework/plugins/podtopologyspread/filtering_test.go
index b822b400b03..4643605182c 100644
--- a/pkg/scheduler/framework/plugins/podtopologyspread/filtering_test.go
+++ b/pkg/scheduler/framework/plugins/podtopologyspread/filtering_test.go
@@ -36,7 +36,7 @@ import (
 	frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
 	"k8s.io/kubernetes/pkg/scheduler/internal/cache"
 	st "k8s.io/kubernetes/pkg/scheduler/testing"
-	"k8s.io/utils/pointer"
+	"k8s.io/utils/ptr"
 )

 var cmpOpts = []cmp.Option{
@@ -2731,7 +2731,7 @@ func TestSingleConstraint(t *testing.T) {
 				"node",
 				v1.DoNotSchedule,
 				fooSelector,
-				pointer.Int32(4), // larger than the number of domains(3)
+				ptr.To[int32](4), // larger than the number of domains(3)
 				nil,
 				nil,
 				nil,
@@ -2762,7 +2762,7 @@ func TestSingleConstraint(t *testing.T) {
 				"node",
 				v1.DoNotSchedule,
 				fooSelector,
-				pointer.Int32(2), // smaller than the number of domains(3)
+				ptr.To[int32](2), // smaller than the number of domains(3)
 				nil,
 				nil,
 				nil,
@@ -2793,7 +2793,7 @@ func TestSingleConstraint(t *testing.T) {
 				"zone",
 				v1.DoNotSchedule,
 				fooSelector,
-				pointer.Int32(3), // larger than the number of domains(2)
+				ptr.To[int32](3), // larger than the number of domains(2)
 				nil,
 				nil,
 				nil,
@@ -2824,7 +2824,7 @@ func TestSingleConstraint(t *testing.T) {
 				"zone",
 				v1.DoNotSchedule,
 				fooSelector,
-				pointer.Int32(1), // smaller than the number of domains(2)
+				ptr.To[int32](1), // smaller than the number of domains(2)
 				nil,
 				nil,
 				nil,
diff --git a/pkg/scheduler/framework/plugins/podtopologyspread/scoring_test.go b/pkg/scheduler/framework/plugins/podtopologyspread/scoring_test.go
index af3c906e51b..fe24df8b45c 100644
--- a/pkg/scheduler/framework/plugins/podtopologyspread/scoring_test.go
+++ b/pkg/scheduler/framework/plugins/podtopologyspread/scoring_test.go
@@ -37,7 +37,7 @@ import (
 	frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
 	"k8s.io/kubernetes/pkg/scheduler/internal/cache"
 	st "k8s.io/kubernetes/pkg/scheduler/testing"
-	"k8s.io/utils/pointer"
+	"k8s.io/utils/ptr"
 )

 var podTopologySpreadFunc = frameworkruntime.FactoryAdapter(feature.Features{}, New)
@@ -155,8 +155,8 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
 				},
 				IgnoredNodes: sets.New[string](),
 				TopologyPairToPodCounts: map[topologyPair]*int64{
-					{key: "zone", value: "zone1"}: pointer.Int64(0),
-					{key: "zone", value: "zone2"}: pointer.Int64(0),
+					{key: "zone", value: "zone1"}: ptr.To[int64](0),
+					{key: "zone", value: "zone2"}: ptr.To[int64](0),
 				},
 				TopologyNormalizingWeight: []float64{topologyNormalizingWeight(2), topologyNormalizingWeight(3)},
 			},
@@ -187,8 +187,8 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
 				},
 				IgnoredNodes: sets.New[string](),
 				TopologyPairToPodCounts: map[topologyPair]*int64{
-					{key: "zone", value: "zone1"}: pointer.Int64(0),
-					{key: "zone", value: "zone2"}: pointer.Int64(0),
+					{key: "zone", value: "zone1"}: ptr.To[int64](0),
+					{key: "zone", value: "zone2"}: ptr.To[int64](0),
 				},
 				TopologyNormalizingWeight: []float64{topologyNormalizingWeight(2)},
 			},
@@ -228,7 +228,7 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
 				},
 				IgnoredNodes: sets.New("node-x"),
 				TopologyPairToPodCounts: map[topologyPair]*int64{
-					{key: "zone", value: "zone1"}: pointer.Int64(0),
+					{key: "zone", value: "zone1"}: ptr.To[int64](0),
 				},
 				TopologyNormalizingWeight: []float64{topologyNormalizingWeight(1), topologyNormalizingWeight(2)},
 			},
@@ -270,8 +270,8 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
 				},
 				IgnoredNodes: sets.New[string](),
 				TopologyPairToPodCounts: map[topologyPair]*int64{
-					{key: v1.LabelTopologyZone, value: "mars"}: pointer.Int64(0),
-					{key: v1.LabelTopologyZone, value: ""}: pointer.Int64(0),
+					{key: v1.LabelTopologyZone, value: "mars"}: ptr.To[int64](0),
+					{key: v1.LabelTopologyZone, value: ""}: ptr.To[int64](0),
 				},
 				TopologyNormalizingWeight: []float64{topologyNormalizingWeight(4), topologyNormalizingWeight(2)},
 			},
@@ -321,7 +321,7 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
 				},
 				IgnoredNodes: sets.New[string](),
 				TopologyPairToPodCounts: map[topologyPair]*int64{
-					{key: "planet", value: "mars"}: pointer.Int64(0),
+					{key: "planet", value: "mars"}: ptr.To[int64](0),
 				},
 				TopologyNormalizingWeight: []float64{topologyNormalizingWeight(1), topologyNormalizingWeight(1)},
 			},
@@ -362,7 +362,7 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
 				},
 				IgnoredNodes: sets.New[string](),
 				TopologyPairToPodCounts: map[topologyPair]*int64{
-					{"planet", "mars"}: pointer.Int64(0),
+					{"planet", "mars"}: ptr.To[int64](0),
 				},
 				TopologyNormalizingWeight: []float64{topologyNormalizingWeight(1)},
 			},
@@ -394,8 +394,8 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
 				},
 				IgnoredNodes: sets.New[string](),
 				TopologyPairToPodCounts: map[topologyPair]*int64{
-					{key: "zone", value: "zone1"}: pointer.Int64(0),
-					{key: "zone", value: "zone2"}: pointer.Int64(0),
+					{key: "zone", value: "zone1"}: ptr.To[int64](0),
+					{key: "zone", value: "zone2"}: ptr.To[int64](0),
 				},
 				TopologyNormalizingWeight: []float64{topologyNormalizingWeight(2)},
 			},
@@ -428,8 +428,8 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
 				},
 				IgnoredNodes: sets.New[string](),
 				TopologyPairToPodCounts: map[topologyPair]*int64{
-					{key: "zone", value: "zone1"}: pointer.Int64(0),
-					{key: "zone", value: "zone2"}: pointer.Int64(0),
+					{key: "zone", value: "zone1"}: ptr.To[int64](0),
+					{key: "zone", value: "zone2"}: ptr.To[int64](0),
 				},
 				TopologyNormalizingWeight: []float64{topologyNormalizingWeight(2)},
 			},
@@ -462,8 +462,8 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
 				},
 				IgnoredNodes: sets.New[string](),
 				TopologyPairToPodCounts: map[topologyPair]*int64{
-					{key: "zone", value: "zone1"}: pointer.Int64(0),
-					{key: "zone", value: "zone2"}: pointer.Int64(0),
+					{key: "zone", value: "zone1"}: ptr.To[int64](0),
+					{key: "zone", value: "zone2"}: ptr.To[int64](0),
 				},
 				TopologyNormalizingWeight: []float64{topologyNormalizingWeight(2)},
 			},
@@ -496,8 +496,8 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
 				},
 				IgnoredNodes: sets.New[string](),
 				TopologyPairToPodCounts: map[topologyPair]*int64{
-					{key: "zone", value: "zone1"}: pointer.Int64(0),
-					{key: "zone", value: "zone2"}: pointer.Int64(0),
+					{key: "zone", value: "zone1"}: ptr.To[int64](0),
+					{key: "zone", value: "zone2"}: ptr.To[int64](0),
 				},
 				TopologyNormalizingWeight: []float64{topologyNormalizingWeight(2)},
 			},
@@ -529,8 +529,8 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
 				},
 				IgnoredNodes: sets.New[string](),
 				TopologyPairToPodCounts: map[topologyPair]*int64{
-					{key: "zone", value: "zone1"}: pointer.Int64(0),
-					{key: "zone", value: "zone2"}: pointer.Int64(0),
+					{key: "zone", value: "zone1"}: ptr.To[int64](0),
+					{key: "zone", value: "zone2"}: ptr.To[int64](0),
 				},
 				TopologyNormalizingWeight: []float64{topologyNormalizingWeight(2)},
 			},
@@ -562,8 +562,8 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
 				},
 				IgnoredNodes: sets.New[string](),
 				TopologyPairToPodCounts: map[topologyPair]*int64{
-					{key: "zone", value: "zone1"}: pointer.Int64(0),
-					{key: "zone", value: "zone2"}: pointer.Int64(0),
+					{key: "zone", value: "zone1"}: ptr.To[int64](0),
+					{key: "zone", value: "zone2"}: ptr.To[int64](0),
 				},
 				TopologyNormalizingWeight: []float64{topologyNormalizingWeight(2)},
 			},
@@ -1089,7 +1089,7 @@ func TestPodTopologySpreadScore(t *testing.T) {
 				"node",
 				v1.ScheduleAnyway,
 				fooSelector,
-				pointer.Int32(10), // larger than the number of domains(3)
+				ptr.To[int32](10), // larger than the number of domains(3)
 				nil,
 				nil,
 				nil,
diff --git a/pkg/scheduler/framework/plugins/volumebinding/test_utils.go b/pkg/scheduler/framework/plugins/volumebinding/test_utils.go
index 4ec84825b7e..a1c968d76fc 100644
--- a/pkg/scheduler/framework/plugins/volumebinding/test_utils.go
+++ b/pkg/scheduler/framework/plugins/volumebinding/test_utils.go
@@ -23,7 +23,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/component-helpers/storage/volume"
-	"k8s.io/utils/pointer"
+	"k8s.io/utils/ptr"
 )

 type nodeBuilder struct {
@@ -115,7 +115,7 @@ func makePVC(name string, storageClassName string) pvcBuilder {
 			Namespace: v1.NamespaceDefault,
 		},
 		Spec: v1.PersistentVolumeClaimSpec{
-			StorageClassName: pointer.String(storageClassName),
+			StorageClassName: ptr.To(storageClassName),
 		},
 	}}
 }
diff --git a/pkg/scheduler/framework/runtime/framework_test.go b/pkg/scheduler/framework/runtime/framework_test.go
index 115411dc007..a99f92f3f38 100644
--- a/pkg/scheduler/framework/runtime/framework_test.go
+++ b/pkg/scheduler/framework/runtime/framework_test.go
@@ -37,7 +37,7 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/framework"
 	internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
 	"k8s.io/kubernetes/pkg/scheduler/metrics"
-	"k8s.io/utils/pointer"
+	"k8s.io/utils/ptr"
 )

 const (
@@ -2838,7 +2838,7 @@ func TestRecordingMetrics(t *testing.T) {
 			recorder := metrics.NewMetricsAsyncRecorder(100, time.Nanosecond, ctx.Done())
 			profile := config.KubeSchedulerProfile{
-				PercentageOfNodesToScore: pointer.Int32(testPercentageOfNodesToScore),
+				PercentageOfNodesToScore: ptr.To[int32](testPercentageOfNodesToScore),
 				SchedulerName:            testProfileName,
 				Plugins:                  plugins,
 			}
@@ -2952,7 +2952,7 @@ func TestRunBindPlugins(t *testing.T) {
 			recorder := metrics.NewMetricsAsyncRecorder(100, time.Nanosecond, ctx.Done())
 			profile := config.KubeSchedulerProfile{
 				SchedulerName:            testProfileName,
-				PercentageOfNodesToScore: pointer.Int32(testPercentageOfNodesToScore),
+				PercentageOfNodesToScore: ptr.To[int32](testPercentageOfNodesToScore),
 				Plugins:                  plugins,
 			}
 			fwk, err := newFrameworkWithQueueSortAndBind(ctx, r, profile, withMetricsRecorder(recorder))
diff --git a/pkg/scheduler/metrics/resources/resources_test.go b/pkg/scheduler/metrics/resources/resources_test.go
index 34f82837a1a..a1d2c1f3c44 100644
--- a/pkg/scheduler/metrics/resources/resources_test.go
+++ b/pkg/scheduler/metrics/resources/resources_test.go
@@ -32,6 +32,7 @@ import (
 	corelisters "k8s.io/client-go/listers/core/v1"
 	"k8s.io/component-base/metrics"
 	"k8s.io/component-base/metrics/testutil"
+	"k8s.io/utils/ptr"
 )

 type fakePodLister struct {
@@ -108,10 +109,6 @@ kube_pod_resource_request{namespace="test",node="node-one",pod="foo",priority=""
 }

 func Test_podResourceCollector_CollectWithStability(t *testing.T) {
-	int32p := func(i int32) *int32 {
-		return &i
-	}
-
 	tests := []struct {
 		name string
@@ -291,7 +288,7 @@ func Test_podResourceCollector_CollectWithStability(t *testing.T) {
 				ObjectMeta: metav1.ObjectMeta{Namespace: "test", Name: "foo"},
 				Spec: v1.PodSpec{
 					SchedulerName: "default-scheduler",
-					Priority: int32p(0),
+					Priority: ptr.To[int32](0),
 					NodeName: "node-one",
 					Containers: []v1.Container{
 						{Resources: v1.ResourceRequirements{Requests: v1.ResourceList{"cpu": resource.MustParse("1")}}},
diff --git a/pkg/scheduler/schedule_one_test.go b/pkg/scheduler/schedule_one_test.go
index d7be9094c44..bbf68c12d42 100644
--- a/pkg/scheduler/schedule_one_test.go
+++ b/pkg/scheduler/schedule_one_test.go
@@ -66,7 +66,7 @@ import (
 	st "k8s.io/kubernetes/pkg/scheduler/testing"
 	tf "k8s.io/kubernetes/pkg/scheduler/testing/framework"
 	schedutil "k8s.io/kubernetes/pkg/scheduler/util"
-	"k8s.io/utils/pointer"
+	"k8s.io/utils/ptr"
 )

 const (
@@ -2179,7 +2179,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
 			nodes: []string{"node1", "node2", "node3"},
 			pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
 			wantNodes: sets.New("node2"),
-			wantEvaluatedNodes: pointer.Int32(1),
+			wantEvaluatedNodes: ptr.To[int32](1),
 		},
 		{
 			name: "test prefilter plugin returning non-intersecting nodes",
@@ -2277,7 +2277,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
 			nodes: []string{"node1", "node2", "node3"},
 			pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
 			wantNodes: sets.New("node2", "node3"),
-			wantEvaluatedNodes: pointer.Int32(3),
+			wantEvaluatedNodes: ptr.To[int32](3),
 		},
 		{
 			name: "test all prescore plugins return skip",
@@ -2923,7 +2923,7 @@ func TestNumFeasibleNodesToFind(t *testing.T) {
 		},
 		{
 			name: "set profile percentageOfNodesToScore and nodes number not more than 50",
-			profilePercentage: pointer.Int32(40),
+			profilePercentage: ptr.To[int32](40),
 			numAllNodes: 10,
 			wantNumNodes: 10,
 		},
@@ -2934,14 +2934,14 @@ func TestNumFeasibleNodesToFind(t *testing.T) {
 		},
 		{
 			name: "set profile percentageOfNodesToScore and nodes number more than 50",
-			profilePercentage: pointer.Int32(40),
+			profilePercentage: ptr.To[int32](40),
 			numAllNodes: 1000,
 			wantNumNodes: 400,
 		},
 		{
 			name: "set global and profile percentageOfNodesToScore and nodes number more than 50",
 			globalPercentage: 100,
-			profilePercentage: pointer.Int32(40),
+			profilePercentage: ptr.To[int32](40),
 			numAllNodes: 1000,
 			wantNumNodes: 400,
 		},
@@ -2958,7 +2958,7 @@ func TestNumFeasibleNodesToFind(t *testing.T) {
 		},
 		{
 			name: "set profile percentageOfNodesToScore and nodes number more than 50*125",
-			profilePercentage: pointer.Int32(40),
+			profilePercentage: ptr.To[int32](40),
 			numAllNodes: 6000,
 			wantNumNodes: 2400,
 		},
diff --git a/pkg/scheduler/scheduler_test.go b/pkg/scheduler/scheduler_test.go
index d05a3a58b1d..bc85e70eaeb 100644
--- a/pkg/scheduler/scheduler_test.go
+++ b/pkg/scheduler/scheduler_test.go
@@ -54,7 +54,7 @@ import (
 	st "k8s.io/kubernetes/pkg/scheduler/testing"
 	tf "k8s.io/kubernetes/pkg/scheduler/testing/framework"
 	testingclock "k8s.io/utils/clock/testing"
-	"k8s.io/utils/pointer"
+	"k8s.io/utils/ptr"
 )

 func TestSchedulerCreation(t *testing.T) {
@@ -438,7 +438,7 @@ func TestWithPercentageOfNodesToScore(t *testing.T) {
 		},
 		{
 			name: "percentageOfNodesScore is not nil",
-			percentageOfNodesToScoreConfig: pointer.Int32(10),
+			percentageOfNodesToScoreConfig: ptr.To[int32](10),
 			wantedPercentageOfNodesToScore: 10,
 		},
 	}
diff --git a/pkg/scheduler/testing/wrappers.go b/pkg/scheduler/testing/wrappers.go
index 8d94118df10..336e99e7e6d 100644
--- a/pkg/scheduler/testing/wrappers.go
+++ b/pkg/scheduler/testing/wrappers.go
@@ -26,7 +26,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
 	imageutils "k8s.io/kubernetes/test/utils/image"
-	"k8s.io/utils/pointer"
+	"k8s.io/utils/ptr"
 )

 var zero int64
@@ -255,7 +255,7 @@ func (p *PodWrapper) OwnerReference(name string, gvk schema.GroupVersionKind) *P
 			APIVersion: gvk.GroupVersion().String(),
 			Kind: gvk.Kind,
 			Name: name,
-			Controller: pointer.Bool(true),
+			Controller: ptr.To(true),
 		},
 	}
 	return p
@@ -889,7 +889,7 @@ func (wrapper *ResourceClaimWrapper) OwnerReference(name, uid string, gvk schema
 			Kind: gvk.Kind,
 			Name: name,
 			UID: types.UID(uid),
-			Controller: pointer.Bool(true),
+			Controller: ptr.To(true),
 		},
 	}
 	return wrapper
@@ -971,8 +971,8 @@ func (wrapper *PodSchedulingWrapper) OwnerReference(name, uid string, gvk schema
 			Kind: gvk.Kind,
 			Name: name,
 			UID: types.UID(uid),
-			Controller: pointer.Bool(true),
-			BlockOwnerDeletion: pointer.Bool(true),
+			Controller: ptr.To(true),
+			BlockOwnerDeletion: ptr.To(true),
 		},
 	}
 	return wrapper