From 7297f48f12f96cea4e2ca76a8be35baff6056666 Mon Sep 17 00:00:00 2001 From: Yuan Chen Date: Fri, 16 Sep 2022 13:18:12 -0700 Subject: [PATCH] Add profile level percentageOfNodesToScore Fix conversion errors Changed the order update update fix manual conversions keep the global parameter for backward compatibility Address Wei's comments Fix an error Fix issues Add unit tests for validation Fix a comment Address comments Update comments fix verification errors Add tests for scheme_test.go Convert percentageOfNodesToScore to pointer Fix errors Resolve conflicts Fix testing errors Address Wei's comments Revert IntPtr to Int changes Address comments Do not overwrite percentageOfNodesToScore Fix a bug Fix a bug change errs to err Fix a nit Remove duplication Address comments Fix lint warning Fix an issue Update comments Clean up Address comments Revert changes to defaults fix unit test error Update Fix tests Use default PluginConfigs --- .../app/options/options_test.go | 3 +- pkg/generated/openapi/zz_generated.openapi.go | 9 +- .../apis/config/scheme/scheme_test.go | 80 ++++++++++- pkg/scheduler/apis/config/types.go | 15 +- pkg/scheduler/apis/config/v1/defaults.go | 3 +- pkg/scheduler/apis/config/v1/defaults_test.go | 133 +++++++++++++++++- .../apis/config/v1/zz_generated.conversion.go | 10 +- .../apis/config/v1beta2/conversion.go | 6 + .../apis/config/v1beta2/defaults_test.go | 9 +- .../config/v1beta2/zz_generated.conversion.go | 24 ++-- .../apis/config/v1beta3/conversion.go | 6 + .../apis/config/v1beta3/defaults_test.go | 11 +- .../config/v1beta3/zz_generated.conversion.go | 24 ++-- .../apis/config/validation/validation.go | 17 ++- .../apis/config/validation/validation_test.go | 92 ++++++++++-- .../apis/config/zz_generated.deepcopy.go | 10 ++ pkg/scheduler/framework/interface.go | 5 +- pkg/scheduler/framework/runtime/framework.go | 18 ++- .../framework/runtime/framework_test.go | 16 ++- pkg/scheduler/schedule_one.go | 26 ++-- pkg/scheduler/schedule_one_test.go | 55 +++++--- pkg/scheduler/scheduler.go | 14 +- pkg/scheduler/scheduler_test.go | 45 ++++++ .../k8s.io/kube-scheduler/config/v1/types.go | 13 +- .../config/v1/zz_generated.deepcopy.go | 5 + 25 files changed, 523 insertions(+), 126 deletions(-) diff --git a/cmd/kube-scheduler/app/options/options_test.go b/cmd/kube-scheduler/app/options/options_test.go index c6af802e5fe..772f017293f 100644 --- a/cmd/kube-scheduler/app/options/options_test.go +++ b/cmd/kube-scheduler/app/options/options_test.go @@ -42,6 +42,7 @@ import ( configtesting "k8s.io/kubernetes/pkg/scheduler/apis/config/testing" "k8s.io/kubernetes/pkg/scheduler/apis/config/testing/defaults" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/names" + "k8s.io/utils/pointer" ) func TestSchedulerOptions(t *testing.T) { @@ -395,7 +396,7 @@ profiles: defaultPodInitialBackoffSeconds := int64(1) defaultPodMaxBackoffSeconds := int64(10) - defaultPercentageOfNodesToScore := int32(0) + defaultPercentageOfNodesToScore := pointer.Int32(0) testcases := []struct { name string diff --git a/pkg/generated/openapi/zz_generated.openapi.go b/pkg/generated/openapi/zz_generated.openapi.go index 3de8cacef21..7adbf279f0c 100644 --- a/pkg/generated/openapi/zz_generated.openapi.go +++ b/pkg/generated/openapi/zz_generated.openapi.go @@ -51793,7 +51793,7 @@ func schema_k8sio_kube_scheduler_config_v1_KubeSchedulerConfiguration(ref common }, "percentageOfNodesToScore": { SchemaProps: spec.SchemaProps{ - Description: "PercentageOfNodesToScore is the percentage of all nodes that once found feasible for running a pod,
the scheduler stops its search for more feasible nodes in the cluster. This helps improve scheduler's performance. Scheduler always tries to find at least \"minFeasibleNodesToFind\" feasible nodes no matter what the value of this flag is. Example: if the cluster size is 500 nodes and the value of this flag is 30, then scheduler stops finding further feasible nodes once it finds 150 feasible ones. When the value is 0, default percentage (5%--50% based on the size of the cluster) of the nodes will be scored.", + Description: "PercentageOfNodesToScore is the percentage of all nodes that once found feasible for running a pod, the scheduler stops its search for more feasible nodes in the cluster. This helps improve scheduler's performance. Scheduler always tries to find at least \"minFeasibleNodesToFind\" feasible nodes no matter what the value of this flag is. Example: if the cluster size is 500 nodes and the value of this flag is 30, then scheduler stops finding further feasible nodes once it finds 150 feasible ones. When the value is 0, default percentage (5%--50% based on the size of the cluster) of the nodes will be scored. It is overridden by profile level PercentageofNodesToScore.", Type: []string{"integer"}, Format: "int32", }, @@ -51876,6 +51876,13 @@ func schema_k8sio_kube_scheduler_config_v1_KubeSchedulerProfile(ref common.Refer Format: "", }, }, + "percentageOfNodesToScore": { + SchemaProps: spec.SchemaProps{ + Description: "PercentageOfNodesToScore is the percentage of all nodes that once found feasible for running a pod, the scheduler stops its search for more feasible nodes in the cluster. This helps improve scheduler's performance. Scheduler always tries to find at least \"minFeasibleNodesToFind\" feasible nodes no matter what the value of this flag is. Example: if the cluster size is 500 nodes and the value of this flag is 30, then scheduler stops finding further feasible nodes once it finds 150 feasible ones. When the value is 0, default percentage (5%--50% based on the size of the cluster) of the nodes will be scored. It will override global PercentageOfNodesToScore. If it is empty, global PercentageOfNodesToScore will be used.", + Type: []string{"integer"}, + Format: "int32", + }, + }, "plugins": { SchemaProps: spec.SchemaProps{ Description: "Plugins specify the set of plugins that should be enabled or disabled. Enabled plugins are the ones that should be enabled in addition to the default plugins. Disabled plugins are any of the default plugins that should be disabled. When no enabled or disabled plugin is specified for an extension point, default plugins for that extension point will be used if there is any. 
If a QueueSort plugin is specified, the same QueueSort Plugin and PluginConfig must be specified for all profiles.", diff --git a/pkg/scheduler/apis/config/scheme/scheme_test.go b/pkg/scheduler/apis/config/scheme/scheme_test.go index 3d934c3a160..84e51bf53a0 100644 --- a/pkg/scheduler/apis/config/scheme/scheme_test.go +++ b/pkg/scheduler/apis/config/scheme/scheme_test.go @@ -48,6 +48,7 @@ func TestCodecsDecodePluginConfig(t *testing.T) { data: []byte(` apiVersion: kubescheduler.config.k8s.io/v1beta2 kind: KubeSchedulerConfiguration +percentageOfNodesToScore: 0 profiles: - pluginConfig: - name: DefaultPreemption @@ -94,8 +95,9 @@ profiles: `), wantProfiles: []config.KubeSchedulerProfile{ { - SchedulerName: "default-scheduler", - Plugins: defaults.PluginsV1beta2, + SchedulerName: "default-scheduler", + PercentageOfNodesToScore: nil, + Plugins: defaults.PluginsV1beta2, PluginConfig: []config.PluginConfig{ { Name: "DefaultPreemption", @@ -169,6 +171,22 @@ profiles: }, }, }, + { + name: "v1beta2 with non-default global percentageOfNodesToScore", + data: []byte(` +apiVersion: kubescheduler.config.k8s.io/v1beta2 +kind: KubeSchedulerConfiguration +percentageOfNodesToScore: 10 +`), + wantProfiles: []config.KubeSchedulerProfile{ + { + SchedulerName: "default-scheduler", + PercentageOfNodesToScore: nil, + Plugins: defaults.PluginsV1beta2, + PluginConfig: defaults.PluginConfigsV1beta2, + }, + }, + }, { name: "v1beta2 plugins can include version and kind", data: []byte(` @@ -510,6 +528,22 @@ profiles: }, }, }, + { + name: "v1beta3 with non-default global percentageOfNodesToScore", + data: []byte(` +apiVersion: kubescheduler.config.k8s.io/v1beta3 +kind: KubeSchedulerConfiguration +percentageOfNodesToScore: 10 +`), + wantProfiles: []config.KubeSchedulerProfile{ + { + SchedulerName: "default-scheduler", + PercentageOfNodesToScore: nil, + Plugins: defaults.PluginsV1beta3, + PluginConfig: defaults.PluginConfigsV1beta3, + }, + }, + }, { name: "v1beta3 plugins can include version and kind", data: []byte(` @@ -776,8 +810,9 @@ profiles: `), wantProfiles: []config.KubeSchedulerProfile{ { - SchedulerName: "default-scheduler", - Plugins: defaults.PluginsV1, + SchedulerName: "default-scheduler", + PercentageOfNodesToScore: nil, + Plugins: defaults.PluginsV1, PluginConfig: []config.PluginConfig{ { Name: "DefaultPreemption", @@ -851,6 +886,40 @@ profiles: }, }, }, + { + name: "v1 with non-default global percentageOfNodesToScore", + data: []byte(` +apiVersion: kubescheduler.config.k8s.io/v1 +kind: KubeSchedulerConfiguration +percentageOfNodesToScore: 10 +`), + wantProfiles: []config.KubeSchedulerProfile{ + { + SchedulerName: "default-scheduler", + PercentageOfNodesToScore: nil, + Plugins: defaults.PluginsV1, + PluginConfig: defaults.PluginConfigsV1, + }, + }, + }, + { + name: "v1 with non-default global and profile percentageOfNodesToScore", + data: []byte(` +apiVersion: kubescheduler.config.k8s.io/v1 +kind: KubeSchedulerConfiguration +percentageOfNodesToScore: 10 +profiles: +- percentageOfNodesToScore: 20 +`), + wantProfiles: []config.KubeSchedulerProfile{ + { + SchedulerName: "default-scheduler", + PercentageOfNodesToScore: pointer.Int32(20), + Plugins: defaults.PluginsV1, + PluginConfig: defaults.PluginConfigsV1, + }, + }, + }, { name: "v1 plugins can include version and kind", data: []byte(` @@ -1284,7 +1353,6 @@ leaderElection: retryPeriod: 0s metricsBindAddress: "" parallelism: 8 -percentageOfNodesToScore: 0 podInitialBackoffSeconds: 0 podMaxBackoffSeconds: 0 profiles: @@ -1503,7 +1571,6 @@ leaderElection: 
resourceNamespace: "" retryPeriod: 0s parallelism: 8 -percentageOfNodesToScore: 0 podInitialBackoffSeconds: 0 podMaxBackoffSeconds: 0 profiles: @@ -1722,7 +1789,6 @@ leaderElection: resourceNamespace: "" retryPeriod: 0s parallelism: 8 -percentageOfNodesToScore: 0 podInitialBackoffSeconds: 0 podMaxBackoffSeconds: 0 profiles: diff --git a/pkg/scheduler/apis/config/types.go b/pkg/scheduler/apis/config/types.go index 75fdb1e23d2..2c5567f2189 100644 --- a/pkg/scheduler/apis/config/types.go +++ b/pkg/scheduler/apis/config/types.go @@ -74,8 +74,8 @@ type KubeSchedulerConfiguration struct { // Example: if the cluster size is 500 nodes and the value of this flag is 30, // then scheduler stops finding further feasible nodes once it finds 150 feasible ones. // When the value is 0, default percentage (5%--50% based on the size of the cluster) of the - // nodes will be scored. - PercentageOfNodesToScore int32 + // nodes will be scored. It is overridden by profile level PercentageOfNodesToScore. + PercentageOfNodesToScore *int32 // PodInitialBackoffSeconds is the initial backoff for unschedulable pods. // If specified, it must be greater than 0. If this value is null, the default value (1s) @@ -105,6 +105,17 @@ type KubeSchedulerProfile struct { // is scheduled with this profile. SchedulerName string + // PercentageOfNodesToScore is the percentage of all nodes that once found feasible + // for running a pod, the scheduler stops its search for more feasible nodes in + // the cluster. This helps improve scheduler's performance. Scheduler always tries to find + // at least "minFeasibleNodesToFind" feasible nodes no matter what the value of this flag is. + // Example: if the cluster size is 500 nodes and the value of this flag is 30, + // then scheduler stops finding further feasible nodes once it finds 150 feasible ones. + // When the value is 0, default percentage (5%--50% based on the size of the cluster) of the + // nodes will be scored. It will override global PercentageOfNodesToScore. If it is empty, + // global PercentageOfNodesToScore will be used. + PercentageOfNodesToScore *int32 + // Plugins specify the set of plugins that should be enabled or disabled. // Enabled plugins are the ones that should be enabled in addition to the // default plugins. 
Disabled plugins are any of the default plugins that diff --git a/pkg/scheduler/apis/config/v1/defaults.go b/pkg/scheduler/apis/config/v1/defaults.go index 6916123f281..0f03d21d7c8 100644 --- a/pkg/scheduler/apis/config/v1/defaults.go +++ b/pkg/scheduler/apis/config/v1/defaults.go @@ -121,8 +121,7 @@ func SetDefaults_KubeSchedulerConfiguration(obj *configv1.KubeSchedulerConfigura } if obj.PercentageOfNodesToScore == nil { - percentageOfNodesToScore := int32(config.DefaultPercentageOfNodesToScore) - obj.PercentageOfNodesToScore = &percentageOfNodesToScore + obj.PercentageOfNodesToScore = pointer.Int32(config.DefaultPercentageOfNodesToScore) } if len(obj.LeaderElection.ResourceLock) == 0 { diff --git a/pkg/scheduler/apis/config/v1/defaults_test.go b/pkg/scheduler/apis/config/v1/defaults_test.go index e8d0d8e367c..b0780475542 100644 --- a/pkg/scheduler/apis/config/v1/defaults_test.go +++ b/pkg/scheduler/apis/config/v1/defaults_test.go @@ -32,6 +32,7 @@ import ( featuregatetesting "k8s.io/component-base/featuregate/testing" configv1 "k8s.io/kube-scheduler/config/v1" "k8s.io/kubernetes/pkg/features" + "k8s.io/kubernetes/pkg/scheduler/apis/config" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/names" "k8s.io/utils/pointer" ) @@ -144,7 +145,7 @@ func TestSchedulerDefaults(t *testing.T) { Burst: 100, ContentType: "application/vnd.kubernetes.protobuf", }, - PercentageOfNodesToScore: pointer.Int32Ptr(0), + PercentageOfNodesToScore: pointer.Int32(config.DefaultPercentageOfNodesToScore), PodInitialBackoffSeconds: pointer.Int64Ptr(1), PodMaxBackoffSeconds: pointer.Int64Ptr(10), Profiles: []configv1.KubeSchedulerProfile{ @@ -181,7 +182,7 @@ func TestSchedulerDefaults(t *testing.T) { Burst: 100, ContentType: "application/vnd.kubernetes.protobuf", }, - PercentageOfNodesToScore: pointer.Int32Ptr(0), + PercentageOfNodesToScore: pointer.Int32(config.DefaultPercentageOfNodesToScore), PodInitialBackoffSeconds: pointer.Int64Ptr(1), PodMaxBackoffSeconds: pointer.Int64Ptr(10), Profiles: []configv1.KubeSchedulerProfile{ @@ -237,7 +238,7 @@ func TestSchedulerDefaults(t *testing.T) { Burst: 100, ContentType: "application/vnd.kubernetes.protobuf", }, - PercentageOfNodesToScore: pointer.Int32Ptr(0), + PercentageOfNodesToScore: pointer.Int32(config.DefaultPercentageOfNodesToScore), PodInitialBackoffSeconds: pointer.Int64Ptr(1), PodMaxBackoffSeconds: pointer.Int64Ptr(10), Profiles: []configv1.KubeSchedulerProfile{ @@ -388,7 +389,7 @@ func TestSchedulerDefaults(t *testing.T) { Burst: 100, ContentType: "application/vnd.kubernetes.protobuf", }, - PercentageOfNodesToScore: pointer.Int32Ptr(0), + PercentageOfNodesToScore: pointer.Int32(config.DefaultPercentageOfNodesToScore), PodInitialBackoffSeconds: pointer.Int64Ptr(1), PodMaxBackoffSeconds: pointer.Int64Ptr(10), Profiles: []configv1.KubeSchedulerProfile{ @@ -425,7 +426,7 @@ func TestSchedulerDefaults(t *testing.T) { Burst: 100, ContentType: "application/vnd.kubernetes.protobuf", }, - PercentageOfNodesToScore: pointer.Int32Ptr(0), + PercentageOfNodesToScore: pointer.Int32(config.DefaultPercentageOfNodesToScore), PodInitialBackoffSeconds: pointer.Int64Ptr(1), PodMaxBackoffSeconds: pointer.Int64Ptr(10), Profiles: []configv1.KubeSchedulerProfile{ @@ -437,6 +438,128 @@ func TestSchedulerDefaults(t *testing.T) { }, }, }, + { + name: "set non default global percentageOfNodesToScore", + config: &configv1.KubeSchedulerConfiguration{ + PercentageOfNodesToScore: pointer.Int32Ptr(50), + }, + expected: &configv1.KubeSchedulerConfiguration{ + Parallelism: pointer.Int32Ptr(16), + 
DebuggingConfiguration: componentbaseconfig.DebuggingConfiguration{ + EnableProfiling: &enable, + EnableContentionProfiling: &enable, + }, + LeaderElection: componentbaseconfig.LeaderElectionConfiguration{ + LeaderElect: pointer.BoolPtr(true), + LeaseDuration: metav1.Duration{Duration: 15 * time.Second}, + RenewDeadline: metav1.Duration{Duration: 10 * time.Second}, + RetryPeriod: metav1.Duration{Duration: 2 * time.Second}, + ResourceLock: "leases", + ResourceNamespace: "kube-system", + ResourceName: "kube-scheduler", + }, + ClientConnection: componentbaseconfig.ClientConnectionConfiguration{ + QPS: 50, + Burst: 100, + ContentType: "application/vnd.kubernetes.protobuf", + }, + PercentageOfNodesToScore: pointer.Int32Ptr(50), + PodInitialBackoffSeconds: pointer.Int64Ptr(1), + PodMaxBackoffSeconds: pointer.Int64Ptr(10), + Profiles: []configv1.KubeSchedulerProfile{ + { + Plugins: getDefaultPlugins(), + PluginConfig: pluginConfigs, + SchedulerName: pointer.StringPtr("default-scheduler"), + }, + }, + }, + }, + { + name: "set non default profile percentageOfNodesToScore", + config: &configv1.KubeSchedulerConfiguration{ + Profiles: []configv1.KubeSchedulerProfile{ + { + PercentageOfNodesToScore: pointer.Int32Ptr(50), + }, + }, + }, + expected: &configv1.KubeSchedulerConfiguration{ + Parallelism: pointer.Int32Ptr(16), + DebuggingConfiguration: componentbaseconfig.DebuggingConfiguration{ + EnableProfiling: &enable, + EnableContentionProfiling: &enable, + }, + LeaderElection: componentbaseconfig.LeaderElectionConfiguration{ + LeaderElect: pointer.BoolPtr(true), + LeaseDuration: metav1.Duration{Duration: 15 * time.Second}, + RenewDeadline: metav1.Duration{Duration: 10 * time.Second}, + RetryPeriod: metav1.Duration{Duration: 2 * time.Second}, + ResourceLock: "leases", + ResourceNamespace: "kube-system", + ResourceName: "kube-scheduler", + }, + ClientConnection: componentbaseconfig.ClientConnectionConfiguration{ + QPS: 50, + Burst: 100, + ContentType: "application/vnd.kubernetes.protobuf", + }, + PercentageOfNodesToScore: pointer.Int32(config.DefaultPercentageOfNodesToScore), + PodInitialBackoffSeconds: pointer.Int64Ptr(1), + PodMaxBackoffSeconds: pointer.Int64Ptr(10), + Profiles: []configv1.KubeSchedulerProfile{ + { + Plugins: getDefaultPlugins(), + PluginConfig: pluginConfigs, + SchedulerName: pointer.StringPtr("default-scheduler"), + PercentageOfNodesToScore: pointer.Int32Ptr(50), + }, + }, + }, + }, + { + name: "set non default global and profile percentageOfNodesToScore", + config: &configv1.KubeSchedulerConfiguration{ + PercentageOfNodesToScore: pointer.Int32Ptr(10), + Profiles: []configv1.KubeSchedulerProfile{ + { + PercentageOfNodesToScore: pointer.Int32Ptr(50), + }, + }, + }, + expected: &configv1.KubeSchedulerConfiguration{ + Parallelism: pointer.Int32Ptr(16), + DebuggingConfiguration: componentbaseconfig.DebuggingConfiguration{ + EnableProfiling: &enable, + EnableContentionProfiling: &enable, + }, + LeaderElection: componentbaseconfig.LeaderElectionConfiguration{ + LeaderElect: pointer.BoolPtr(true), + LeaseDuration: metav1.Duration{Duration: 15 * time.Second}, + RenewDeadline: metav1.Duration{Duration: 10 * time.Second}, + RetryPeriod: metav1.Duration{Duration: 2 * time.Second}, + ResourceLock: "leases", + ResourceNamespace: "kube-system", + ResourceName: "kube-scheduler", + }, + ClientConnection: componentbaseconfig.ClientConnectionConfiguration{ + QPS: 50, + Burst: 100, + ContentType: "application/vnd.kubernetes.protobuf", + }, + PercentageOfNodesToScore: pointer.Int32Ptr(10), + 
PodInitialBackoffSeconds: pointer.Int64Ptr(1), + PodMaxBackoffSeconds: pointer.Int64Ptr(10), + Profiles: []configv1.KubeSchedulerProfile{ + { + Plugins: getDefaultPlugins(), + PluginConfig: pluginConfigs, + SchedulerName: pointer.StringPtr("default-scheduler"), + PercentageOfNodesToScore: pointer.Int32Ptr(50), + }, + }, + }, + }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { diff --git a/pkg/scheduler/apis/config/v1/zz_generated.conversion.go b/pkg/scheduler/apis/config/v1/zz_generated.conversion.go index 792131e5eec..3fca82d3a1f 100644 --- a/pkg/scheduler/apis/config/v1/zz_generated.conversion.go +++ b/pkg/scheduler/apis/config/v1/zz_generated.conversion.go @@ -408,9 +408,7 @@ func autoConvert_v1_KubeSchedulerConfiguration_To_config_KubeSchedulerConfigurat if err := v1alpha1.Convert_v1alpha1_DebuggingConfiguration_To_config_DebuggingConfiguration(&in.DebuggingConfiguration, &out.DebuggingConfiguration, s); err != nil { return err } - if err := metav1.Convert_Pointer_int32_To_int32(&in.PercentageOfNodesToScore, &out.PercentageOfNodesToScore, s); err != nil { - return err - } + out.PercentageOfNodesToScore = (*int32)(unsafe.Pointer(in.PercentageOfNodesToScore)) if err := metav1.Convert_Pointer_int64_To_int64(&in.PodInitialBackoffSeconds, &out.PodInitialBackoffSeconds, s); err != nil { return err } @@ -447,9 +445,7 @@ func autoConvert_config_KubeSchedulerConfiguration_To_v1_KubeSchedulerConfigurat if err := v1alpha1.Convert_config_DebuggingConfiguration_To_v1alpha1_DebuggingConfiguration(&in.DebuggingConfiguration, &out.DebuggingConfiguration, s); err != nil { return err } - if err := metav1.Convert_int32_To_Pointer_int32(&in.PercentageOfNodesToScore, &out.PercentageOfNodesToScore, s); err != nil { - return err - } + out.PercentageOfNodesToScore = (*int32)(unsafe.Pointer(in.PercentageOfNodesToScore)) if err := metav1.Convert_int64_To_Pointer_int64(&in.PodInitialBackoffSeconds, &out.PodInitialBackoffSeconds, s); err != nil { return err } @@ -475,6 +471,7 @@ func autoConvert_v1_KubeSchedulerProfile_To_config_KubeSchedulerProfile(in *v1.K if err := metav1.Convert_Pointer_string_To_string(&in.SchedulerName, &out.SchedulerName, s); err != nil { return err } + out.PercentageOfNodesToScore = (*int32)(unsafe.Pointer(in.PercentageOfNodesToScore)) if in.Plugins != nil { in, out := &in.Plugins, &out.Plugins *out = new(config.Plugins) @@ -507,6 +504,7 @@ func autoConvert_config_KubeSchedulerProfile_To_v1_KubeSchedulerProfile(in *conf if err := metav1.Convert_string_To_Pointer_string(&in.SchedulerName, &out.SchedulerName, s); err != nil { return err } + out.PercentageOfNodesToScore = (*int32)(unsafe.Pointer(in.PercentageOfNodesToScore)) if in.Plugins != nil { in, out := &in.Plugins, &out.Plugins *out = new(v1.Plugins) diff --git a/pkg/scheduler/apis/config/v1beta2/conversion.go b/pkg/scheduler/apis/config/v1beta2/conversion.go index 9a656f5655f..0a6b6e779f2 100644 --- a/pkg/scheduler/apis/config/v1beta2/conversion.go +++ b/pkg/scheduler/apis/config/v1beta2/conversion.go @@ -105,3 +105,9 @@ func convertToExternalPluginConfigArgs(out *v1beta2.KubeSchedulerConfiguration) } return nil } + +// Convert_config_KubeSchedulerProfile_To_v1beta2_KubeSchedulerProfile uses auto coversion by +// ignoring per profile PercentageOfNodesToScore. 
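+// The per-profile field has no counterpart in v1beta2 (the generated code below flags it
+// with "WARNING: in.PercentageOfNodesToScore requires manual conversion: does not exist in
+// peer-type"), so the value is intentionally dropped when converting the internal type out
+// to v1beta2.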
+func Convert_config_KubeSchedulerProfile_To_v1beta2_KubeSchedulerProfile(in *config.KubeSchedulerProfile, out *v1beta2.KubeSchedulerProfile, s conversion.Scope) error { + return autoConvert_config_KubeSchedulerProfile_To_v1beta2_KubeSchedulerProfile(in, out, s) +} diff --git a/pkg/scheduler/apis/config/v1beta2/defaults_test.go b/pkg/scheduler/apis/config/v1beta2/defaults_test.go index 08085cf2896..e76ddea41d9 100644 --- a/pkg/scheduler/apis/config/v1beta2/defaults_test.go +++ b/pkg/scheduler/apis/config/v1beta2/defaults_test.go @@ -32,6 +32,7 @@ import ( featuregatetesting "k8s.io/component-base/featuregate/testing" "k8s.io/kube-scheduler/config/v1beta2" "k8s.io/kubernetes/pkg/features" + "k8s.io/kubernetes/pkg/scheduler/apis/config" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/names" "k8s.io/utils/pointer" ) @@ -144,7 +145,7 @@ func TestSchedulerDefaults(t *testing.T) { Burst: 100, ContentType: "application/vnd.kubernetes.protobuf", }, - PercentageOfNodesToScore: pointer.Int32(0), + PercentageOfNodesToScore: pointer.Int32(config.DefaultPercentageOfNodesToScore), PodInitialBackoffSeconds: pointer.Int64(1), PodMaxBackoffSeconds: pointer.Int64(10), Profiles: []v1beta2.KubeSchedulerProfile{ @@ -181,7 +182,7 @@ func TestSchedulerDefaults(t *testing.T) { Burst: 100, ContentType: "application/vnd.kubernetes.protobuf", }, - PercentageOfNodesToScore: pointer.Int32(0), + PercentageOfNodesToScore: pointer.Int32(config.DefaultPercentageOfNodesToScore), PodInitialBackoffSeconds: pointer.Int64(1), PodMaxBackoffSeconds: pointer.Int64(10), Profiles: []v1beta2.KubeSchedulerProfile{ @@ -237,7 +238,7 @@ func TestSchedulerDefaults(t *testing.T) { Burst: 100, ContentType: "application/vnd.kubernetes.protobuf", }, - PercentageOfNodesToScore: pointer.Int32(0), + PercentageOfNodesToScore: pointer.Int32(config.DefaultPercentageOfNodesToScore), PodInitialBackoffSeconds: pointer.Int64(1), PodMaxBackoffSeconds: pointer.Int64(10), Profiles: []v1beta2.KubeSchedulerProfile{ @@ -430,7 +431,7 @@ func TestSchedulerDefaults(t *testing.T) { Burst: 100, ContentType: "application/vnd.kubernetes.protobuf", }, - PercentageOfNodesToScore: pointer.Int32(0), + PercentageOfNodesToScore: pointer.Int32(config.DefaultPercentageOfNodesToScore), PodInitialBackoffSeconds: pointer.Int64(1), PodMaxBackoffSeconds: pointer.Int64(10), Profiles: []v1beta2.KubeSchedulerProfile{ diff --git a/pkg/scheduler/apis/config/v1beta2/zz_generated.conversion.go b/pkg/scheduler/apis/config/v1beta2/zz_generated.conversion.go index 07764f9c6d8..953d61b19f9 100644 --- a/pkg/scheduler/apis/config/v1beta2/zz_generated.conversion.go +++ b/pkg/scheduler/apis/config/v1beta2/zz_generated.conversion.go @@ -95,11 +95,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*config.KubeSchedulerProfile)(nil), (*v1beta2.KubeSchedulerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_KubeSchedulerProfile_To_v1beta2_KubeSchedulerProfile(a.(*config.KubeSchedulerProfile), b.(*v1beta2.KubeSchedulerProfile), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v1beta2.NodeAffinityArgs)(nil), (*config.NodeAffinityArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta2_NodeAffinityArgs_To_config_NodeAffinityArgs(a.(*v1beta2.NodeAffinityArgs), b.(*config.NodeAffinityArgs), scope) }); err != nil { @@ -235,6 +230,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return 
err } + if err := s.AddConversionFunc((*config.KubeSchedulerProfile)(nil), (*v1beta2.KubeSchedulerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_KubeSchedulerProfile_To_v1beta2_KubeSchedulerProfile(a.(*config.KubeSchedulerProfile), b.(*v1beta2.KubeSchedulerProfile), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1beta2.KubeSchedulerConfiguration)(nil), (*config.KubeSchedulerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta2_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration(a.(*v1beta2.KubeSchedulerConfiguration), b.(*config.KubeSchedulerConfiguration), scope) }); err != nil { @@ -414,9 +414,7 @@ func autoConvert_v1beta2_KubeSchedulerConfiguration_To_config_KubeSchedulerConfi if err := v1alpha1.Convert_v1alpha1_DebuggingConfiguration_To_config_DebuggingConfiguration(&in.DebuggingConfiguration, &out.DebuggingConfiguration, s); err != nil { return err } - if err := v1.Convert_Pointer_int32_To_int32(&in.PercentageOfNodesToScore, &out.PercentageOfNodesToScore, s); err != nil { - return err - } + out.PercentageOfNodesToScore = (*int32)(unsafe.Pointer(in.PercentageOfNodesToScore)) if err := v1.Convert_Pointer_int64_To_int64(&in.PodInitialBackoffSeconds, &out.PodInitialBackoffSeconds, s); err != nil { return err } @@ -457,9 +455,7 @@ func autoConvert_config_KubeSchedulerConfiguration_To_v1beta2_KubeSchedulerConfi if err := v1alpha1.Convert_config_DebuggingConfiguration_To_v1alpha1_DebuggingConfiguration(&in.DebuggingConfiguration, &out.DebuggingConfiguration, s); err != nil { return err } - if err := v1.Convert_int32_To_Pointer_int32(&in.PercentageOfNodesToScore, &out.PercentageOfNodesToScore, s); err != nil { - return err - } + out.PercentageOfNodesToScore = (*int32)(unsafe.Pointer(in.PercentageOfNodesToScore)) if err := v1.Convert_int64_To_Pointer_int64(&in.PodInitialBackoffSeconds, &out.PodInitialBackoffSeconds, s); err != nil { return err } @@ -517,6 +513,7 @@ func autoConvert_config_KubeSchedulerProfile_To_v1beta2_KubeSchedulerProfile(in if err := v1.Convert_string_To_Pointer_string(&in.SchedulerName, &out.SchedulerName, s); err != nil { return err } + // WARNING: in.PercentageOfNodesToScore requires manual conversion: does not exist in peer-type if in.Plugins != nil { in, out := &in.Plugins, &out.Plugins *out = new(v1beta2.Plugins) @@ -540,11 +537,6 @@ func autoConvert_config_KubeSchedulerProfile_To_v1beta2_KubeSchedulerProfile(in return nil } -// Convert_config_KubeSchedulerProfile_To_v1beta2_KubeSchedulerProfile is an autogenerated conversion function. 
-func Convert_config_KubeSchedulerProfile_To_v1beta2_KubeSchedulerProfile(in *config.KubeSchedulerProfile, out *v1beta2.KubeSchedulerProfile, s conversion.Scope) error { - return autoConvert_config_KubeSchedulerProfile_To_v1beta2_KubeSchedulerProfile(in, out, s) -} - func autoConvert_v1beta2_NodeAffinityArgs_To_config_NodeAffinityArgs(in *v1beta2.NodeAffinityArgs, out *config.NodeAffinityArgs, s conversion.Scope) error { out.AddedAffinity = (*corev1.NodeAffinity)(unsafe.Pointer(in.AddedAffinity)) return nil diff --git a/pkg/scheduler/apis/config/v1beta3/conversion.go b/pkg/scheduler/apis/config/v1beta3/conversion.go index e37daf09c55..262f1e6ef2e 100644 --- a/pkg/scheduler/apis/config/v1beta3/conversion.go +++ b/pkg/scheduler/apis/config/v1beta3/conversion.go @@ -105,3 +105,9 @@ func convertToExternalPluginConfigArgs(out *v1beta3.KubeSchedulerConfiguration) } return nil } + +// Convert_config_KubeSchedulerProfile_To_v1beta3_KubeSchedulerProfile called auto coversion by +// ignoring per profile PercentageOfNodesToScore. +func Convert_config_KubeSchedulerProfile_To_v1beta3_KubeSchedulerProfile(in *config.KubeSchedulerProfile, out *v1beta3.KubeSchedulerProfile, s conversion.Scope) error { + return autoConvert_config_KubeSchedulerProfile_To_v1beta3_KubeSchedulerProfile(in, out, s) +} diff --git a/pkg/scheduler/apis/config/v1beta3/defaults_test.go b/pkg/scheduler/apis/config/v1beta3/defaults_test.go index beac99f530c..5b555da6a89 100644 --- a/pkg/scheduler/apis/config/v1beta3/defaults_test.go +++ b/pkg/scheduler/apis/config/v1beta3/defaults_test.go @@ -32,6 +32,7 @@ import ( featuregatetesting "k8s.io/component-base/featuregate/testing" "k8s.io/kube-scheduler/config/v1beta3" "k8s.io/kubernetes/pkg/features" + "k8s.io/kubernetes/pkg/scheduler/apis/config" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/names" "k8s.io/utils/pointer" ) @@ -144,7 +145,7 @@ func TestSchedulerDefaults(t *testing.T) { Burst: 100, ContentType: "application/vnd.kubernetes.protobuf", }, - PercentageOfNodesToScore: pointer.Int32(0), + PercentageOfNodesToScore: pointer.Int32(config.DefaultPercentageOfNodesToScore), PodInitialBackoffSeconds: pointer.Int64(1), PodMaxBackoffSeconds: pointer.Int64(10), Profiles: []v1beta3.KubeSchedulerProfile{ @@ -181,7 +182,7 @@ func TestSchedulerDefaults(t *testing.T) { Burst: 100, ContentType: "application/vnd.kubernetes.protobuf", }, - PercentageOfNodesToScore: pointer.Int32(0), + PercentageOfNodesToScore: pointer.Int32(config.DefaultPercentageOfNodesToScore), PodInitialBackoffSeconds: pointer.Int64(1), PodMaxBackoffSeconds: pointer.Int64(10), Profiles: []v1beta3.KubeSchedulerProfile{ @@ -237,7 +238,7 @@ func TestSchedulerDefaults(t *testing.T) { Burst: 100, ContentType: "application/vnd.kubernetes.protobuf", }, - PercentageOfNodesToScore: pointer.Int32(0), + PercentageOfNodesToScore: pointer.Int32(config.DefaultPercentageOfNodesToScore), PodInitialBackoffSeconds: pointer.Int64(1), PodMaxBackoffSeconds: pointer.Int64(10), Profiles: []v1beta3.KubeSchedulerProfile{ @@ -388,7 +389,7 @@ func TestSchedulerDefaults(t *testing.T) { Burst: 100, ContentType: "application/vnd.kubernetes.protobuf", }, - PercentageOfNodesToScore: pointer.Int32(0), + PercentageOfNodesToScore: pointer.Int32(config.DefaultPercentageOfNodesToScore), PodInitialBackoffSeconds: pointer.Int64(1), PodMaxBackoffSeconds: pointer.Int64(10), Profiles: []v1beta3.KubeSchedulerProfile{ @@ -425,7 +426,7 @@ func TestSchedulerDefaults(t *testing.T) { Burst: 100, ContentType: "application/vnd.kubernetes.protobuf", }, - 
PercentageOfNodesToScore: pointer.Int32(0), + PercentageOfNodesToScore: pointer.Int32(config.DefaultPercentageOfNodesToScore), PodInitialBackoffSeconds: pointer.Int64(1), PodMaxBackoffSeconds: pointer.Int64(10), Profiles: []v1beta3.KubeSchedulerProfile{ diff --git a/pkg/scheduler/apis/config/v1beta3/zz_generated.conversion.go b/pkg/scheduler/apis/config/v1beta3/zz_generated.conversion.go index 6a854ad09c2..48800c0ec75 100644 --- a/pkg/scheduler/apis/config/v1beta3/zz_generated.conversion.go +++ b/pkg/scheduler/apis/config/v1beta3/zz_generated.conversion.go @@ -95,11 +95,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*config.KubeSchedulerProfile)(nil), (*v1beta3.KubeSchedulerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_KubeSchedulerProfile_To_v1beta3_KubeSchedulerProfile(a.(*config.KubeSchedulerProfile), b.(*v1beta3.KubeSchedulerProfile), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v1beta3.NodeAffinityArgs)(nil), (*config.NodeAffinityArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta3_NodeAffinityArgs_To_config_NodeAffinityArgs(a.(*v1beta3.NodeAffinityArgs), b.(*config.NodeAffinityArgs), scope) }); err != nil { @@ -235,6 +230,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*config.KubeSchedulerProfile)(nil), (*v1beta3.KubeSchedulerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_KubeSchedulerProfile_To_v1beta3_KubeSchedulerProfile(a.(*config.KubeSchedulerProfile), b.(*v1beta3.KubeSchedulerProfile), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1beta3.KubeSchedulerConfiguration)(nil), (*config.KubeSchedulerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta3_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration(a.(*v1beta3.KubeSchedulerConfiguration), b.(*config.KubeSchedulerConfiguration), scope) }); err != nil { @@ -408,9 +408,7 @@ func autoConvert_v1beta3_KubeSchedulerConfiguration_To_config_KubeSchedulerConfi if err := v1alpha1.Convert_v1alpha1_DebuggingConfiguration_To_config_DebuggingConfiguration(&in.DebuggingConfiguration, &out.DebuggingConfiguration, s); err != nil { return err } - if err := v1.Convert_Pointer_int32_To_int32(&in.PercentageOfNodesToScore, &out.PercentageOfNodesToScore, s); err != nil { - return err - } + out.PercentageOfNodesToScore = (*int32)(unsafe.Pointer(in.PercentageOfNodesToScore)) if err := v1.Convert_Pointer_int64_To_int64(&in.PodInitialBackoffSeconds, &out.PodInitialBackoffSeconds, s); err != nil { return err } @@ -447,9 +445,7 @@ func autoConvert_config_KubeSchedulerConfiguration_To_v1beta3_KubeSchedulerConfi if err := v1alpha1.Convert_config_DebuggingConfiguration_To_v1alpha1_DebuggingConfiguration(&in.DebuggingConfiguration, &out.DebuggingConfiguration, s); err != nil { return err } - if err := v1.Convert_int32_To_Pointer_int32(&in.PercentageOfNodesToScore, &out.PercentageOfNodesToScore, s); err != nil { - return err - } + out.PercentageOfNodesToScore = (*int32)(unsafe.Pointer(in.PercentageOfNodesToScore)) if err := v1.Convert_int64_To_Pointer_int64(&in.PodInitialBackoffSeconds, &out.PodInitialBackoffSeconds, s); err != nil { return err } @@ -507,6 +503,7 @@ func autoConvert_config_KubeSchedulerProfile_To_v1beta3_KubeSchedulerProfile(in if err := 
v1.Convert_string_To_Pointer_string(&in.SchedulerName, &out.SchedulerName, s); err != nil { return err } + // WARNING: in.PercentageOfNodesToScore requires manual conversion: does not exist in peer-type if in.Plugins != nil { in, out := &in.Plugins, &out.Plugins *out = new(v1beta3.Plugins) @@ -530,11 +527,6 @@ func autoConvert_config_KubeSchedulerProfile_To_v1beta3_KubeSchedulerProfile(in return nil } -// Convert_config_KubeSchedulerProfile_To_v1beta3_KubeSchedulerProfile is an autogenerated conversion function. -func Convert_config_KubeSchedulerProfile_To_v1beta3_KubeSchedulerProfile(in *config.KubeSchedulerProfile, out *v1beta3.KubeSchedulerProfile, s conversion.Scope) error { - return autoConvert_config_KubeSchedulerProfile_To_v1beta3_KubeSchedulerProfile(in, out, s) -} - func autoConvert_v1beta3_NodeAffinityArgs_To_config_NodeAffinityArgs(in *v1beta3.NodeAffinityArgs, out *config.NodeAffinityArgs, s conversion.Scope) error { out.AddedAffinity = (*corev1.NodeAffinity)(unsafe.Pointer(in.AddedAffinity)) return nil diff --git a/pkg/scheduler/apis/config/validation/validation.go b/pkg/scheduler/apis/config/validation/validation.go index 277714c38a0..47d85717be2 100644 --- a/pkg/scheduler/apis/config/validation/validation.go +++ b/pkg/scheduler/apis/config/validation/validation.go @@ -88,10 +88,9 @@ func ValidateKubeSchedulerConfiguration(cc *config.KubeSchedulerConfiguration) u } } } - if cc.PercentageOfNodesToScore < 0 || cc.PercentageOfNodesToScore > 100 { - errs = append(errs, field.Invalid(field.NewPath("percentageOfNodesToScore"), - cc.PercentageOfNodesToScore, "not in valid range [0-100]")) - } + + errs = append(errs, validatePercentageOfNodesToScore(field.NewPath("percentageOfNodesToScore"), cc.PercentageOfNodesToScore)) + if cc.PodInitialBackoffSeconds <= 0 { errs = append(errs, field.Invalid(field.NewPath("podInitialBackoffSeconds"), cc.PodInitialBackoffSeconds, "must be greater than 0")) @@ -117,6 +116,15 @@ func splitHostIntPort(s string) (string, int, error) { return host, portInt, err } +func validatePercentageOfNodesToScore(path *field.Path, percentageOfNodesToScore *int32) error { + if percentageOfNodesToScore != nil { + if *percentageOfNodesToScore < 0 || *percentageOfNodesToScore > 100 { + return field.Invalid(path, *percentageOfNodesToScore, "not in valid range [0-100]") + } + } + return nil +} + type invalidPlugins struct { schemeGroupVersion string plugins []string @@ -171,6 +179,7 @@ func validateKubeSchedulerProfile(path *field.Path, apiVersion string, profile * if len(profile.SchedulerName) == 0 { errs = append(errs, field.Required(path.Child("schedulerName"), "")) } + errs = append(errs, validatePercentageOfNodesToScore(path.Child("percentageOfNodesToScore"), profile.PercentageOfNodesToScore)) errs = append(errs, validatePluginConfig(path, apiVersion, profile)...) 
return errs } diff --git a/pkg/scheduler/apis/config/validation/validation_test.go b/pkg/scheduler/apis/config/validation/validation_test.go index f99ba0bbbca..c2fb27a002d 100644 --- a/pkg/scheduler/apis/config/validation/validation_test.go +++ b/pkg/scheduler/apis/config/validation/validation_test.go @@ -27,6 +27,7 @@ import ( configv1 "k8s.io/kubernetes/pkg/scheduler/apis/config/v1" "k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2" "k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3" + "k8s.io/utils/pointer" ) func TestValidateKubeSchedulerConfigurationV1beta2(t *testing.T) { @@ -54,7 +55,7 @@ func TestValidateKubeSchedulerConfigurationV1beta2(t *testing.T) { }, PodInitialBackoffSeconds: podInitialBackoffSeconds, PodMaxBackoffSeconds: podMaxBackoffSeconds, - PercentageOfNodesToScore: 35, + PercentageOfNodesToScore: pointer.Int32(35), Profiles: []config.KubeSchedulerProfile{ { SchedulerName: "me", @@ -113,7 +114,10 @@ func TestValidateKubeSchedulerConfigurationV1beta2(t *testing.T) { healthzBindAddrInvalid.HealthzBindAddress = "0.0.0.0:9090" percentageOfNodesToScore101 := validConfig.DeepCopy() - percentageOfNodesToScore101.PercentageOfNodesToScore = int32(101) + percentageOfNodesToScore101.PercentageOfNodesToScore = pointer.Int32(101) + + percentageOfNodesToScoreNegative := validConfig.DeepCopy() + percentageOfNodesToScoreNegative.PercentageOfNodesToScore = pointer.Int32(-1) schedulerNameNotSet := validConfig.DeepCopy() schedulerNameNotSet.Profiles[1].SchedulerName = "" @@ -121,6 +125,12 @@ func TestValidateKubeSchedulerConfigurationV1beta2(t *testing.T) { repeatedSchedulerName := validConfig.DeepCopy() repeatedSchedulerName.Profiles[0].SchedulerName = "other" + profilePercentageOfNodesToScore101 := validConfig.DeepCopy() + profilePercentageOfNodesToScore101.Profiles[1].PercentageOfNodesToScore = pointer.Int32(101) + + profilePercentageOfNodesToScoreNegative := validConfig.DeepCopy() + profilePercentageOfNodesToScoreNegative.Profiles[1].PercentageOfNodesToScore = pointer.Int32(-1) + differentQueueSort := validConfig.DeepCopy() differentQueueSort.Profiles[1].Plugins.QueueSort.Enabled[0].Name = "AnotherSort" @@ -237,11 +247,16 @@ func TestValidateKubeSchedulerConfigurationV1beta2(t *testing.T) { config: healthzBindAddrInvalid, errorString: "must be empty or with an explicit 0 port", }, - "bad-percentage-of-nodes-to-score": { + "greater-than-100-percentage-of-nodes-to-score": { expectedToFail: true, config: percentageOfNodesToScore101, errorString: "not in valid range [0-100]", }, + "negative-percentage-of-nodes-to-score": { + expectedToFail: true, + config: percentageOfNodesToScoreNegative, + errorString: "not in valid range [0-100]", + }, "scheduler-name-not-set": { expectedToFail: true, config: schedulerNameNotSet, @@ -252,6 +267,16 @@ func TestValidateKubeSchedulerConfigurationV1beta2(t *testing.T) { config: repeatedSchedulerName, errorString: "Duplicate value", }, + "greater-than-100-profile-percentage-of-nodes-to-score": { + expectedToFail: true, + config: profilePercentageOfNodesToScore101, + errorString: "not in valid range [0-100]", + }, + "negative-100-profile-percentage-of-nodes-to-score": { + expectedToFail: true, + config: profilePercentageOfNodesToScoreNegative, + errorString: "not in valid range [0-100]", + }, "different-queue-sort": { expectedToFail: true, config: differentQueueSort, @@ -345,7 +370,7 @@ func TestValidateKubeSchedulerConfigurationV1beta3(t *testing.T) { }, PodInitialBackoffSeconds: podInitialBackoffSeconds, PodMaxBackoffSeconds: podMaxBackoffSeconds, - 
PercentageOfNodesToScore: 35, + PercentageOfNodesToScore: pointer.Int32(35), Profiles: []config.KubeSchedulerProfile{ { SchedulerName: "me", @@ -404,7 +429,10 @@ func TestValidateKubeSchedulerConfigurationV1beta3(t *testing.T) { healthzBindAddrInvalid.HealthzBindAddress = "0.0.0.0:9090" percentageOfNodesToScore101 := validConfig.DeepCopy() - percentageOfNodesToScore101.PercentageOfNodesToScore = int32(101) + percentageOfNodesToScore101.PercentageOfNodesToScore = pointer.Int32(101) + + percentageOfNodesToScoreNegative := validConfig.DeepCopy() + percentageOfNodesToScoreNegative.Profiles[1].PercentageOfNodesToScore = pointer.Int32(-1) schedulerNameNotSet := validConfig.DeepCopy() schedulerNameNotSet.Profiles[1].SchedulerName = "" @@ -412,6 +440,12 @@ func TestValidateKubeSchedulerConfigurationV1beta3(t *testing.T) { repeatedSchedulerName := validConfig.DeepCopy() repeatedSchedulerName.Profiles[0].SchedulerName = "other" + profilePercentageOfNodesToScore101 := validConfig.DeepCopy() + profilePercentageOfNodesToScore101.Profiles[1].PercentageOfNodesToScore = pointer.Int32(101) + + profilePercentageOfNodesToScoreNegative := validConfig.DeepCopy() + profilePercentageOfNodesToScoreNegative.Profiles[1].PercentageOfNodesToScore = pointer.Int32(-1) + differentQueueSort := validConfig.DeepCopy() differentQueueSort.Profiles[1].Plugins.QueueSort.Enabled[0].Name = "AnotherSort" @@ -533,6 +567,11 @@ func TestValidateKubeSchedulerConfigurationV1beta3(t *testing.T) { config: percentageOfNodesToScore101, errorString: "not in valid range [0-100]", }, + "negative-percentage-of-nodes-to-score": { + expectedToFail: true, + config: percentageOfNodesToScoreNegative, + errorString: "not in valid range [0-100]", + }, "scheduler-name-not-set": { expectedToFail: true, config: schedulerNameNotSet, @@ -543,6 +582,16 @@ func TestValidateKubeSchedulerConfigurationV1beta3(t *testing.T) { config: repeatedSchedulerName, errorString: "Duplicate value", }, + "greater-than-100-profile-percentage-of-nodes-to-score": { + expectedToFail: true, + config: profilePercentageOfNodesToScore101, + errorString: "not in valid range [0-100]", + }, + "negative-100-profile-percentage-of-nodes-to-score": { + expectedToFail: true, + config: profilePercentageOfNodesToScoreNegative, + errorString: "not in valid range [0-100]", + }, "different-queue-sort": { expectedToFail: true, config: differentQueueSort, @@ -636,10 +685,10 @@ func TestValidateKubeSchedulerConfigurationV1(t *testing.T) { }, PodInitialBackoffSeconds: podInitialBackoffSeconds, PodMaxBackoffSeconds: podMaxBackoffSeconds, - PercentageOfNodesToScore: 35, Profiles: []config.KubeSchedulerProfile{ { - SchedulerName: "me", + SchedulerName: "me", + PercentageOfNodesToScore: pointer.Int32(35), Plugins: &config.Plugins{ QueueSort: config.PluginSet{ Enabled: []config.Plugin{{Name: "CustomSort"}}, @@ -656,7 +705,8 @@ func TestValidateKubeSchedulerConfigurationV1(t *testing.T) { }, }, { - SchedulerName: "other", + SchedulerName: "other", + PercentageOfNodesToScore: pointer.Int32(35), Plugins: &config.Plugins{ QueueSort: config.PluginSet{ Enabled: []config.Plugin{{Name: "CustomSort"}}, @@ -695,7 +745,10 @@ func TestValidateKubeSchedulerConfigurationV1(t *testing.T) { healthzBindAddrInvalid.HealthzBindAddress = "0.0.0.0:9090" percentageOfNodesToScore101 := validConfig.DeepCopy() - percentageOfNodesToScore101.PercentageOfNodesToScore = int32(101) + percentageOfNodesToScore101.PercentageOfNodesToScore = pointer.Int32(101) + + percentageOfNodesToScoreNegative := validConfig.DeepCopy() + 
percentageOfNodesToScoreNegative.PercentageOfNodesToScore = pointer.Int32(-1) schedulerNameNotSet := validConfig.DeepCopy() schedulerNameNotSet.Profiles[1].SchedulerName = "" @@ -703,6 +756,12 @@ func TestValidateKubeSchedulerConfigurationV1(t *testing.T) { repeatedSchedulerName := validConfig.DeepCopy() repeatedSchedulerName.Profiles[0].SchedulerName = "other" + profilePercentageOfNodesToScore101 := validConfig.DeepCopy() + profilePercentageOfNodesToScore101.Profiles[1].PercentageOfNodesToScore = pointer.Int32(101) + + profilePercentageOfNodesToScoreNegative := validConfig.DeepCopy() + profilePercentageOfNodesToScoreNegative.Profiles[1].PercentageOfNodesToScore = pointer.Int32(-1) + differentQueueSort := validConfig.DeepCopy() differentQueueSort.Profiles[1].Plugins.QueueSort.Enabled[0].Name = "AnotherSort" @@ -827,6 +886,11 @@ func TestValidateKubeSchedulerConfigurationV1(t *testing.T) { config: percentageOfNodesToScore101, errorString: "not in valid range [0-100]", }, + "negative-percentage-of-nodes-to-score": { + expectedToFail: true, + config: percentageOfNodesToScoreNegative, + errorString: "not in valid range [0-100]", + }, "scheduler-name-not-set": { expectedToFail: true, config: schedulerNameNotSet, @@ -837,6 +901,16 @@ func TestValidateKubeSchedulerConfigurationV1(t *testing.T) { config: repeatedSchedulerName, errorString: "Duplicate value", }, + "greater-than-100-profile-percentage-of-nodes-to-score": { + expectedToFail: true, + config: profilePercentageOfNodesToScore101, + errorString: "not in valid range [0-100]", + }, + "negative-profile-percentage-of-nodes-to-score": { + expectedToFail: true, + config: profilePercentageOfNodesToScoreNegative, + errorString: "not in valid range [0-100]", + }, "different-queue-sort": { expectedToFail: true, config: differentQueueSort, diff --git a/pkg/scheduler/apis/config/zz_generated.deepcopy.go b/pkg/scheduler/apis/config/zz_generated.deepcopy.go index 40ce4da5878..b139c909333 100644 --- a/pkg/scheduler/apis/config/zz_generated.deepcopy.go +++ b/pkg/scheduler/apis/config/zz_generated.deepcopy.go @@ -157,6 +157,11 @@ func (in *KubeSchedulerConfiguration) DeepCopyInto(out *KubeSchedulerConfigurati out.LeaderElection = in.LeaderElection out.ClientConnection = in.ClientConnection out.DebuggingConfiguration = in.DebuggingConfiguration + if in.PercentageOfNodesToScore != nil { + in, out := &in.PercentageOfNodesToScore, &out.PercentageOfNodesToScore + *out = new(int32) + **out = **in + } if in.Profiles != nil { in, out := &in.Profiles, &out.Profiles *out = make([]KubeSchedulerProfile, len(*in)) @@ -195,6 +200,11 @@ func (in *KubeSchedulerConfiguration) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KubeSchedulerProfile) DeepCopyInto(out *KubeSchedulerProfile) { *out = *in + if in.PercentageOfNodesToScore != nil { + in, out := &in.PercentageOfNodesToScore, &out.PercentageOfNodesToScore + *out = new(int32) + **out = **in + } if in.Plugins != nil { in, out := &in.Plugins, &out.Plugins *out = new(Plugins) diff --git a/pkg/scheduler/framework/interface.go b/pkg/scheduler/framework/interface.go index 10c2776ddb6..3e69551f605 100644 --- a/pkg/scheduler/framework/interface.go +++ b/pkg/scheduler/framework/interface.go @@ -574,8 +574,11 @@ type Framework interface { // ListPlugins returns a map of extension point name to list of configured Plugins. 
ListPlugins() *config.Plugins - // ProfileName returns the profile name associated to this framework. + // ProfileName returns the profile name associated to a profile. ProfileName() string + + // PercentageOfNodesToScore returns percentageOfNodesToScore associated to a profile. + PercentageOfNodesToScore() *int32 } // Handle provides data and some tools that plugins can use. It is diff --git a/pkg/scheduler/framework/runtime/framework.go b/pkg/scheduler/framework/runtime/framework.go index 6ff81c6f947..9584294fb30 100644 --- a/pkg/scheduler/framework/runtime/framework.go +++ b/pkg/scheduler/framework/runtime/framework.go @@ -92,8 +92,9 @@ type frameworkImpl struct { eventRecorder events.EventRecorder informerFactory informers.SharedInformerFactory - metricsRecorder *metricsRecorder - profileName string + metricsRecorder *metricsRecorder + profileName string + percentageOfNodesToScore *int32 extenders []framework.Extender framework.PodNominator @@ -271,6 +272,7 @@ func NewFramework(r Registry, profile *config.KubeSchedulerProfile, stopCh <-cha } f.profileName = profile.SchedulerName + f.percentageOfNodesToScore = profile.PercentageOfNodesToScore if profile.Plugins == nil { return f, nil } @@ -287,9 +289,10 @@ func NewFramework(r Registry, profile *config.KubeSchedulerProfile, stopCh <-cha pluginConfig[name] = profile.PluginConfig[i].Args } outputProfile := config.KubeSchedulerProfile{ - SchedulerName: f.profileName, - Plugins: profile.Plugins, - PluginConfig: make([]config.PluginConfig, 0, len(pg)), + SchedulerName: f.profileName, + PercentageOfNodesToScore: f.percentageOfNodesToScore, + Plugins: profile.Plugins, + PluginConfig: make([]config.PluginConfig, 0, len(pg)), } pluginsMap := make(map[string]framework.Plugin) @@ -1340,6 +1343,11 @@ func (f *frameworkImpl) ProfileName() string { return f.profileName } +// PercentageOfNodesToScore returns percentageOfNodesToScore associated to a profile. +func (f *frameworkImpl) PercentageOfNodesToScore() *int32 { + return f.percentageOfNodesToScore +} + // Parallelizer returns a parallelizer holding parallelism for scheduler. 
func (f *frameworkImpl) Parallelizer() parallelize.Parallelizer { return f.parallelizer diff --git a/pkg/scheduler/framework/runtime/framework_test.go b/pkg/scheduler/framework/runtime/framework_test.go index f1af27a21a3..ddd899f81d1 100644 --- a/pkg/scheduler/framework/runtime/framework_test.go +++ b/pkg/scheduler/framework/runtime/framework_test.go @@ -37,6 +37,7 @@ import ( "k8s.io/kubernetes/pkg/scheduler/framework" internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" "k8s.io/kubernetes/pkg/scheduler/metrics" + "k8s.io/utils/pointer" ) const ( @@ -52,8 +53,9 @@ const ( permitPlugin = "permit-plugin" bindPlugin = "bind-plugin" - testProfileName = "test-profile" - nodeName = "testNode" + testProfileName = "test-profile" + testPercentageOfNodesToScore = 35 + nodeName = "testNode" injectReason = "injected status" injectFilterReason = "injected filter status" @@ -2287,8 +2289,9 @@ func TestRecordingMetrics(t *testing.T) { stopCh := make(chan struct{}) recorder := newMetricsRecorder(100, time.Nanosecond, stopCh) profile := config.KubeSchedulerProfile{ - SchedulerName: testProfileName, - Plugins: plugins, + PercentageOfNodesToScore: pointer.Int32(testPercentageOfNodesToScore), + SchedulerName: testProfileName, + Plugins: plugins, } f, err := newFrameworkWithQueueSortAndBind(r, profile, stopCh, withMetricsRecorder(recorder)) if err != nil { @@ -2398,8 +2401,9 @@ func TestRunBindPlugins(t *testing.T) { stopCh := make(chan struct{}) recorder := newMetricsRecorder(100, time.Nanosecond, stopCh) profile := config.KubeSchedulerProfile{ - SchedulerName: testProfileName, - Plugins: plugins, + SchedulerName: testProfileName, + PercentageOfNodesToScore: pointer.Int32(testPercentageOfNodesToScore), + Plugins: plugins, } fwk, err := newFrameworkWithQueueSortAndBind(r, profile, stopCh, withMetricsRecorder(recorder)) if err != nil { diff --git a/pkg/scheduler/schedule_one.go b/pkg/scheduler/schedule_one.go index c4d5ae75def..83ef673d2c9 100644 --- a/pkg/scheduler/schedule_one.go +++ b/pkg/scheduler/schedule_one.go @@ -511,7 +511,7 @@ func (sched *Scheduler) findNodesThatPassFilters( diagnosis framework.Diagnosis, nodes []*framework.NodeInfo) ([]*v1.Node, error) { numAllNodes := len(nodes) - numNodesToFind := sched.numFeasibleNodesToFind(int32(numAllNodes)) + numNodesToFind := sched.numFeasibleNodesToFind(fwk.PercentageOfNodesToScore(), int32(numAllNodes)) // Create feasible list with enough space to avoid growing it // and allow assigning. @@ -576,21 +576,27 @@ func (sched *Scheduler) findNodesThatPassFilters( // numFeasibleNodesToFind returns the number of feasible nodes that once found, the scheduler stops // its search for more feasible nodes. -func (sched *Scheduler) numFeasibleNodesToFind(numAllNodes int32) (numNodes int32) { - if numAllNodes < minFeasibleNodesToFind || sched.percentageOfNodesToScore >= 100 { +func (sched *Scheduler) numFeasibleNodesToFind(percentageOfNodesToScore *int32, numAllNodes int32) (numNodes int32) { + if numAllNodes < minFeasibleNodesToFind { return numAllNodes } - adaptivePercentage := sched.percentageOfNodesToScore - if adaptivePercentage <= 0 { - basePercentageOfNodesToScore := int32(50) - adaptivePercentage = basePercentageOfNodesToScore - numAllNodes/125 - if adaptivePercentage < minFeasibleNodesPercentageToFind { - adaptivePercentage = minFeasibleNodesPercentageToFind + // Use profile percentageOfNodesToScore if it's set. Otherwise, use global percentageOfNodesToScore. 
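+ // A percentage of 0 falls through to the adaptive default below: 50 - numAllNodes/125,
+ // floored at minFeasibleNodesPercentageToFind. For example, with 6000 nodes that is
+ // max(50-48, 5) = 5%, i.e. 300 nodes (see TestNumFeasibleNodesToFind in schedule_one_test.go).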
+ var percentage int32 + if percentageOfNodesToScore != nil { + percentage = *percentageOfNodesToScore + } else { + percentage = sched.percentageOfNodesToScore + } + + if percentage == 0 { + percentage = int32(50) - numAllNodes/125 + if percentage < minFeasibleNodesPercentageToFind { + percentage = minFeasibleNodesPercentageToFind } } - numNodes = numAllNodes * adaptivePercentage / 100 + numNodes = numAllNodes * percentage / 100 if numNodes < minFeasibleNodesToFind { return minFeasibleNodesToFind } diff --git a/pkg/scheduler/schedule_one_test.go b/pkg/scheduler/schedule_one_test.go index ee245ebf2ab..dd42264f04c 100644 --- a/pkg/scheduler/schedule_one_test.go +++ b/pkg/scheduler/schedule_one_test.go @@ -2346,10 +2346,11 @@ var lowPriority, midPriority, highPriority = int32(0), int32(100), int32(1000) func TestNumFeasibleNodesToFind(t *testing.T) { tests := []struct { - name string - percentageOfNodesToScore int32 - numAllNodes int32 - wantNumNodes int32 + name string + globalPercentage int32 + profilePercentage *int32 + numAllNodes int32 + wantNumNodes int32 }{ { name: "not set percentageOfNodesToScore and nodes number not more than 50", @@ -2357,10 +2358,10 @@ func TestNumFeasibleNodesToFind(t *testing.T) { wantNumNodes: 10, }, { - name: "set percentageOfNodesToScore and nodes number not more than 50", - percentageOfNodesToScore: 40, - numAllNodes: 10, - wantNumNodes: 10, + name: "set profile percentageOfNodesToScore and nodes number not more than 50", + profilePercentage: pointer.Int32(40), + numAllNodes: 10, + wantNumNodes: 10, }, { name: "not set percentageOfNodesToScore and nodes number more than 50", @@ -2368,29 +2369,43 @@ func TestNumFeasibleNodesToFind(t *testing.T) { wantNumNodes: 420, }, { - name: "set percentageOfNodesToScore and nodes number more than 50", - percentageOfNodesToScore: 40, - numAllNodes: 1000, - wantNumNodes: 400, + name: "set profile percentageOfNodesToScore and nodes number more than 50", + profilePercentage: pointer.Int32(40), + numAllNodes: 1000, + wantNumNodes: 400, }, { - name: "not set percentageOfNodesToScore and nodes number more than 50*125", + name: "set global and profile percentageOfNodesToScore and nodes number more than 50", + globalPercentage: 100, + profilePercentage: pointer.Int32(40), + numAllNodes: 1000, + wantNumNodes: 400, + }, + { + name: "set global percentageOfNodesToScore and nodes number more than 50", + globalPercentage: 40, + numAllNodes: 1000, + wantNumNodes: 400, + }, + { + name: "not set profile percentageOfNodesToScore and nodes number more than 50*125", numAllNodes: 6000, wantNumNodes: 300, }, { - name: "set percentageOfNodesToScore and nodes number more than 50*125", - percentageOfNodesToScore: 40, - numAllNodes: 6000, - wantNumNodes: 2400, + name: "set profile percentageOfNodesToScore and nodes number more than 50*125", + profilePercentage: pointer.Int32(40), + numAllNodes: 6000, + wantNumNodes: 2400, }, } + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { sched := &Scheduler{ - percentageOfNodesToScore: tt.percentageOfNodesToScore, + percentageOfNodesToScore: tt.globalPercentage, } - if gotNumNodes := sched.numFeasibleNodesToFind(tt.numAllNodes); gotNumNodes != tt.wantNumNodes { + if gotNumNodes := sched.numFeasibleNodesToFind(tt.profilePercentage, tt.numAllNodes); gotNumNodes != tt.wantNumNodes { t.Errorf("Scheduler.numFeasibleNodesToFind() = %v, want %v", gotNumNodes, tt.wantNumNodes) } }) @@ -2423,7 +2438,7 @@ func TestFairEvaluationForNodes(t *testing.T) { // To make numAllNodes % nodesToFind != 0 
 	sched.percentageOfNodesToScore = 30
-	nodesToFind := int(sched.numFeasibleNodesToFind(int32(numAllNodes)))
+	nodesToFind := int(sched.numFeasibleNodesToFind(fwk.PercentageOfNodesToScore(), int32(numAllNodes)))
 	// Iterating over all nodes more than twice
 	for i := 0; i < 2*(numAllNodes/nodesToFind+1); i++ {
diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go
index 96b0005d5c9..8aee2fb7e70 100644
--- a/pkg/scheduler/scheduler.go
+++ b/pkg/scheduler/scheduler.go
@@ -104,8 +104,9 @@ func (s *Scheduler) applyDefaultHandlers() {
 }
 
 type schedulerOptions struct {
-	componentConfigVersion   string
-	kubeConfig               *restclient.Config
+	componentConfigVersion string
+	kubeConfig             *restclient.Config
+	// Overridden by profile level percentageOfNodesToScore if set in v1.
 	percentageOfNodesToScore int32
 	podInitialBackoffSeconds int64
 	podMaxBackoffSeconds     int64
@@ -166,10 +167,13 @@ func WithParallelism(threads int32) Option {
 	}
 }
 
-// WithPercentageOfNodesToScore sets percentageOfNodesToScore for Scheduler, the default value is 50
-func WithPercentageOfNodesToScore(percentageOfNodesToScore int32) Option {
+// WithPercentageOfNodesToScore sets percentageOfNodesToScore for Scheduler.
+// The default value of 0 will use an adaptive percentage: 50 - (num of nodes)/125.
+func WithPercentageOfNodesToScore(percentageOfNodesToScore *int32) Option {
 	return func(o *schedulerOptions) {
-		o.percentageOfNodesToScore = percentageOfNodesToScore
+		if percentageOfNodesToScore != nil {
+			o.percentageOfNodesToScore = *percentageOfNodesToScore
+		}
 	}
 }
 
diff --git a/pkg/scheduler/scheduler_test.go b/pkg/scheduler/scheduler_test.go
index 140d11c5689..641c7dbe0e0 100644
--- a/pkg/scheduler/scheduler_test.go
+++ b/pkg/scheduler/scheduler_test.go
@@ -46,6 +46,7 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/profile"
 	st "k8s.io/kubernetes/pkg/scheduler/testing"
 	testingclock "k8s.io/utils/clock/testing"
+	"k8s.io/utils/pointer"
 )
 
 func TestSchedulerCreation(t *testing.T) {
@@ -415,6 +416,50 @@ func TestFailureHandler_PodAlreadyBound(t *testing.T) {
 	}
 }
 
+// TestWithPercentageOfNodesToScore tests that the scheduler's percentageOfNodesToScore is set correctly.
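+// A nil value leaves the option at its default, so the scheduler keeps
+// schedulerapi.DefaultPercentageOfNodesToScore; a non-nil value overrides it.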
+func TestWithPercentageOfNodesToScore(t *testing.T) {
+	tests := []struct {
+		name                           string
+		percentageOfNodesToScoreConfig *int32
+		wantedPercentageOfNodesToScore int32
+	}{
+		{
+			name:                           "percentageOfNodesToScore is nil",
+			percentageOfNodesToScoreConfig: nil,
+			wantedPercentageOfNodesToScore: schedulerapi.DefaultPercentageOfNodesToScore,
+		},
+		{
+			name:                           "percentageOfNodesToScore is not nil",
+			percentageOfNodesToScoreConfig: pointer.Int32(10),
+			wantedPercentageOfNodesToScore: 10,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			client := fake.NewSimpleClientset()
+			informerFactory := informers.NewSharedInformerFactory(client, 0)
+			eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()})
+			stopCh := make(chan struct{})
+			defer close(stopCh)
+			sched, err := New(
+				client,
+				informerFactory,
+				nil,
+				profile.NewRecorderFactory(eventBroadcaster),
+				stopCh,
+				WithPercentageOfNodesToScore(tt.percentageOfNodesToScoreConfig),
+			)
+			if err != nil {
+				t.Fatalf("Failed to create scheduler: %v", err)
+			}
+			if sched.percentageOfNodesToScore != tt.wantedPercentageOfNodesToScore {
+				t.Errorf("scheduler.percentageOfNodesToScore = %v, want %v", sched.percentageOfNodesToScore, tt.wantedPercentageOfNodesToScore)
+			}
+		})
+	}
+}
+
 // getPodFromPriorityQueue is the function used in the TestDefaultErrorFunc test to get
 // the specific pod from the given priority queue. It returns the found pod in the priority queue.
 func getPodFromPriorityQueue(queue *internalqueue.PriorityQueue, pod *v1.Pod) *v1.Pod {
diff --git a/staging/src/k8s.io/kube-scheduler/config/v1/types.go b/staging/src/k8s.io/kube-scheduler/config/v1/types.go
index 4e104600a41..9fdec69d1ee 100644
--- a/staging/src/k8s.io/kube-scheduler/config/v1/types.go
+++ b/staging/src/k8s.io/kube-scheduler/config/v1/types.go
@@ -64,7 +64,7 @@ type KubeSchedulerConfiguration struct {
 	// Example: if the cluster size is 500 nodes and the value of this flag is 30,
 	// then scheduler stops finding further feasible nodes once it finds 150 feasible ones.
 	// When the value is 0, default percentage (5%--50% based on the size of the cluster) of the
-	// nodes will be scored.
+	// nodes will be scored. It is overridden by profile level PercentageOfNodesToScore.
 	PercentageOfNodesToScore *int32 `json:"percentageOfNodesToScore,omitempty"`
 
 	// PodInitialBackoffSeconds is the initial backoff for unschedulable pods.
@@ -135,6 +135,17 @@ type KubeSchedulerProfile struct {
 	// is scheduled with this profile.
 	SchedulerName *string `json:"schedulerName,omitempty"`
 
+	// PercentageOfNodesToScore is the percentage of all nodes that once found feasible
+	// for running a pod, the scheduler stops its search for more feasible nodes in
+	// the cluster. This helps improve scheduler's performance. Scheduler always tries to find
+	// at least "minFeasibleNodesToFind" feasible nodes no matter what the value of this flag is.
+	// Example: if the cluster size is 500 nodes and the value of this flag is 30,
+	// then scheduler stops finding further feasible nodes once it finds 150 feasible ones.
+	// When the value is 0, default percentage (5%--50% based on the size of the cluster) of the
+	// nodes will be scored. It will override global PercentageOfNodesToScore. If it is empty,
+	// global PercentageOfNodesToScore will be used.
+	PercentageOfNodesToScore *int32 `json:"percentageOfNodesToScore,omitempty"`
+
 	// Plugins specify the set of plugins that should be enabled or disabled.
// Enabled plugins are the ones that should be enabled in addition to the // default plugins. Disabled plugins are any of the default plugins that diff --git a/staging/src/k8s.io/kube-scheduler/config/v1/zz_generated.deepcopy.go b/staging/src/k8s.io/kube-scheduler/config/v1/zz_generated.deepcopy.go index 0b5f11edc1e..48d2dffaef4 100644 --- a/staging/src/k8s.io/kube-scheduler/config/v1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/kube-scheduler/config/v1/zz_generated.deepcopy.go @@ -235,6 +235,11 @@ func (in *KubeSchedulerProfile) DeepCopyInto(out *KubeSchedulerProfile) { *out = new(string) **out = **in } + if in.PercentageOfNodesToScore != nil { + in, out := &in.PercentageOfNodesToScore, &out.PercentageOfNodesToScore + *out = new(int32) + **out = **in + } if in.Plugins != nil { in, out := &in.Plugins, &out.Plugins *out = new(Plugins)
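With KubeSchedulerProfile.PercentageOfNodesToScore added to the v1 API types and deep-copied above, a configuration can carry both a global and a per-profile value. The sketch below is illustrative only: the profile names and the resolvePercentage helper are hypothetical, while the v1 types and the k8s.io/utils/pointer helpers are the ones referenced elsewhere in this patch.

package main

import (
	"fmt"

	v1 "k8s.io/kube-scheduler/config/v1"
	"k8s.io/utils/pointer"
)

// resolvePercentage illustrates the intended precedence: a profile-level
// percentageOfNodesToScore wins; otherwise the global value applies.
// This helper is hypothetical and not part of the patch.
func resolvePercentage(cfg v1.KubeSchedulerConfiguration, p v1.KubeSchedulerProfile) int32 {
	if p.PercentageOfNodesToScore != nil {
		return *p.PercentageOfNodesToScore
	}
	if cfg.PercentageOfNodesToScore != nil {
		return *cfg.PercentageOfNodesToScore
	}
	return 0 // 0 lets the scheduler fall back to the adaptive default
}

func main() {
	cfg := v1.KubeSchedulerConfiguration{
		PercentageOfNodesToScore: pointer.Int32(20), // global setting
		Profiles: []v1.KubeSchedulerProfile{
			{SchedulerName: pointer.String("default-scheduler")}, // inherits the global 20
			{
				SchedulerName:            pointer.String("bulk-scheduler"), // hypothetical profile
				PercentageOfNodesToScore: pointer.Int32(60),                // overrides the global value
			},
		},
	}
	for _, p := range cfg.Profiles {
		fmt.Printf("%s -> %d%%\n", *p.SchedulerName, resolvePercentage(cfg, p))
	}
}

Keeping both fields as *int32 preserves the distinction between "unset" and an explicit 0, which is also why WithPercentageOfNodesToScore only applies the value when it receives a non-nil pointer.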