Remove enableNonPreempting field from scheduler codebase

Author: Wei Huang
Date:   2020-05-08 16:47:30 -07:00
Parent: 13010d199c
Commit: e283e73994
GPG Key ID: BE5E9752F8B6E005 (no known key found for this signature in database)
8 changed files with 10 additions and 31 deletions
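
The net effect: the scheduler now honors pod.Spec.PreemptionPolicy unconditionally instead of consulting a NonPreemptingPriority feature-gate flag captured at construction time, and NewGenericScheduler loses its trailing enableNonPreempting bool. Below is a minimal sketch of what the surviving check sees; the pod definition is hypothetical and only the PreemptionPolicy field matters here.

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
    )

    func main() {
        // Hypothetical pod that opts out of preempting others. After this
        // commit the eligibility check short-circuits on this field alone,
        // with no enableNonPreempting guard in front of it.
        never := v1.PreemptNever
        pod := &v1.Pod{
            Spec: v1.PodSpec{PreemptionPolicy: &never},
        }

        eligible := !(pod.Spec.PreemptionPolicy != nil && *pod.Spec.PreemptionPolicy == v1.PreemptNever)
        fmt.Println(eligible) // false: this pod never triggers preemption
    }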

----------------------------------------------------------------------

@@ -61,7 +61,6 @@ go_test(
     deps = [
         "//pkg/apis/core:go_default_library",
         "//pkg/controller/volume/scheduling:go_default_library",
-        "//pkg/features:go_default_library",
         "//pkg/scheduler/apis/config:go_default_library",
         "//pkg/scheduler/apis/config/scheme:go_default_library",
         "//pkg/scheduler/core:go_default_library",
@@ -91,7 +90,6 @@ go_test(
         "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
-        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
         "//staging/src/k8s.io/client-go/informers:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",

----------------------------------------------------------------------

@@ -600,8 +600,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
             informerFactory.Core().V1().PersistentVolumeClaims().Lister(),
             informerFactory.Policy().V1beta1().PodDisruptionBudgets().Lister(),
             false,
-            schedulerapi.DefaultPercentageOfNodesToScore,
-            false)
+            schedulerapi.DefaultPercentageOfNodesToScore)
         podIgnored := &v1.Pod{}
         result, err := scheduler.Schedule(context.Background(), prof, framework.NewCycleState(), podIgnored)
         if test.expectsErr {

----------------------------------------------------------------------

@@ -131,7 +131,6 @@ type genericScheduler struct {
     pdbLister                policylisters.PodDisruptionBudgetLister
     disablePreemption        bool
     percentageOfNodesToScore int32
-    enableNonPreempting      bool
     nextStartNodeIndex       int
 }
@@ -259,7 +258,7 @@ func (g *genericScheduler) Preempt(ctx context.Context, prof *profile.Profile, s
     if !ok || fitError == nil {
         return nil, nil, nil, nil
     }
-    if !podEligibleToPreemptOthers(pod, g.nodeInfoSnapshot.NodeInfos(), g.enableNonPreempting) {
+    if !podEligibleToPreemptOthers(pod, g.nodeInfoSnapshot.NodeInfos()) {
         klog.V(5).Infof("Pod %v/%v is not eligible for more preemption.", pod.Namespace, pod.Name)
         return nil, nil, nil, nil
     }
@@ -1053,8 +1052,8 @@ func nodesWherePreemptionMightHelp(nodes []*framework.NodeInfo, fitErr *FitError
 // considered for preemption.
 // We look at the node that is nominated for this pod and as long as there are
 // terminating pods on the node, we don't consider this for preempting more pods.
-func podEligibleToPreemptOthers(pod *v1.Pod, nodeInfos framework.NodeInfoLister, enableNonPreempting bool) bool {
-    if enableNonPreempting && pod.Spec.PreemptionPolicy != nil && *pod.Spec.PreemptionPolicy == v1.PreemptNever {
+func podEligibleToPreemptOthers(pod *v1.Pod, nodeInfos framework.NodeInfoLister) bool {
+    if pod.Spec.PreemptionPolicy != nil && *pod.Spec.PreemptionPolicy == v1.PreemptNever {
         klog.V(5).Infof("Pod %v/%v is not eligible for preemption because it has a preemptionPolicy of %v", pod.Namespace, pod.Name, v1.PreemptNever)
         return false
     }
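
Taken in isolation, the clause that survives above reduces to a single unconditional test. A standalone sketch for reference (wantsToPreempt is a hypothetical name, not part of the scheduler API):

    package sketch

    import v1 "k8s.io/api/core/v1"

    // wantsToPreempt mirrors the first clause of podEligibleToPreemptOthers
    // after this commit: PreemptionPolicy=Never disqualifies the pod outright,
    // with no feature-gate check in front of it.
    func wantsToPreempt(pod *v1.Pod) bool {
        if pod.Spec.PreemptionPolicy != nil && *pod.Spec.PreemptionPolicy == v1.PreemptNever {
            return false
        }
        // The real function also skips pods whose nominated node still has
        // terminating pods from an earlier preemption round.
        return true
    }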
@@ -1108,8 +1107,7 @@ func NewGenericScheduler(
     pvcLister corelisters.PersistentVolumeClaimLister,
     pdbLister policylisters.PodDisruptionBudgetLister,
     disablePreemption bool,
-    percentageOfNodesToScore int32,
-    enableNonPreempting bool) ScheduleAlgorithm {
+    percentageOfNodesToScore int32) ScheduleAlgorithm {
     return &genericScheduler{
         cache:           cache,
         schedulingQueue: podQueue,
@@ -1119,6 +1117,5 @@
         pdbLister:                pdbLister,
         disablePreemption:        disablePreemption,
         percentageOfNodesToScore: percentageOfNodesToScore,
-        enableNonPreempting:      enableNonPreempting,
     }
 }
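
For callers, migration means dropping the trailing bool. Below is a sketch of the new call shape, assuming code living inside the scheduler tree (the internal packages are not importable elsewhere); the argument roles ahead of disablePreemption are inferred from the struct fields and the test call sites in this diff:

    package schedsketch

    import (
        schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
        "k8s.io/kubernetes/pkg/scheduler/core"
        internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
        internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
    )

    // newAlgorithm is a hypothetical wrapper showing the post-commit signature;
    // extenders and the PVC/PDB listers are left nil, as in the tests below.
    func newAlgorithm(
        cache internalcache.Cache,
        queue internalqueue.SchedulingQueue,
        snapshot *internalcache.Snapshot,
    ) core.ScheduleAlgorithm {
        return core.NewGenericScheduler(
            cache,
            queue,
            snapshot,
            nil,   // extenders
            nil,   // PersistentVolumeClaim lister
            nil,   // PodDisruptionBudget lister
            false, // disablePreemption
            schedulerapi.DefaultPercentageOfNodesToScore,
            // the trailing enableNonPreempting bool is gone
        )
    }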

----------------------------------------------------------------------

@@ -817,8 +817,7 @@ func TestGenericScheduler(t *testing.T) {
         pvcLister,
         informerFactory.Policy().V1beta1().PodDisruptionBudgets().Lister(),
         false,
-        schedulerapi.DefaultPercentageOfNodesToScore,
-        false)
+        schedulerapi.DefaultPercentageOfNodesToScore)
     result, err := scheduler.Schedule(context.Background(), prof, framework.NewCycleState(), test.pod)
     if !reflect.DeepEqual(err, test.wErr) {
         t.Errorf("Unexpected error: %v, expected: %v", err.Error(), test.wErr)
@@ -845,7 +844,7 @@ func makeScheduler(nodes []*v1.Node) *genericScheduler {
         internalqueue.NewSchedulingQueue(nil),
         emptySnapshot,
         nil, nil, nil, false,
-        schedulerapi.DefaultPercentageOfNodesToScore, false)
+        schedulerapi.DefaultPercentageOfNodesToScore)
     cache.UpdateSnapshot(s.(*genericScheduler).nodeInfoSnapshot)
     return s.(*genericScheduler)
 }
@@ -1139,8 +1138,7 @@ func TestZeroRequest(t *testing.T) {
         nil,
         nil,
         false,
-        schedulerapi.DefaultPercentageOfNodesToScore,
-        false).(*genericScheduler)
+        schedulerapi.DefaultPercentageOfNodesToScore).(*genericScheduler)
     scheduler.nodeInfoSnapshot = snapshot
     ctx := context.Background()
@@ -1619,8 +1617,7 @@ func TestSelectNodesForPreemption(t *testing.T) {
         nil,
         informerFactory.Policy().V1beta1().PodDisruptionBudgets().Lister(),
         false,
-        schedulerapi.DefaultPercentageOfNodesToScore,
-        false)
+        schedulerapi.DefaultPercentageOfNodesToScore)
     g := scheduler.(*genericScheduler)
     assignDefaultStartTime(test.pods)
@@ -2416,8 +2413,7 @@ func TestPreempt(t *testing.T) {
         informerFactory.Core().V1().PersistentVolumeClaims().Lister(),
         informerFactory.Policy().V1beta1().PodDisruptionBudgets().Lister(),
         false,
-        schedulerapi.DefaultPercentageOfNodesToScore,
-        true)
+        schedulerapi.DefaultPercentageOfNodesToScore)
     state := framework.NewCycleState()
     // Some tests rely on PreFilter plugin to compute its CycleState.
     preFilterStatus := fwk.RunPreFilterPlugins(context.Background(), state, test.pod)

----------------------------------------------------------------------

@@ -101,8 +101,6 @@ type Configurator struct {
     podMaxBackoffSeconds int64
 
-    enableNonPreempting bool
-
     profiles         []schedulerapi.KubeSchedulerProfile
     registry         framework.Registry
     nodeInfoSnapshot *internalcache.Snapshot
@@ -204,7 +202,6 @@ func (c *Configurator) create() (*Scheduler, error) {
         GetPodDisruptionBudgetLister(c.informerFactory),
         c.disablePreemption,
         c.percentageOfNodesToScore,
-        c.enableNonPreempting,
     )
     return &Scheduler{

----------------------------------------------------------------------

@@ -29,7 +29,6 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/util/clock"
-    utilfeature "k8s.io/apiserver/pkg/util/feature"
     "k8s.io/client-go/informers"
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/client-go/kubernetes/fake"
@@ -38,7 +37,6 @@
     "k8s.io/client-go/tools/events"
     extenderv1 "k8s.io/kube-scheduler/extender/v1"
     apicore "k8s.io/kubernetes/pkg/apis/core"
-    kubefeatures "k8s.io/kubernetes/pkg/features"
     schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
     "k8s.io/kubernetes/pkg/scheduler/apis/config/scheme"
     frameworkplugins "k8s.io/kubernetes/pkg/scheduler/framework/plugins"
@@ -471,7 +469,6 @@ func newConfigFactoryWithFrameworkRegistry(
         podInitialBackoffSeconds: podInitialBackoffDurationSeconds,
         podMaxBackoffSeconds:     podMaxBackoffDurationSeconds,
         StopEverything:           stopCh,
-        enableNonPreempting:      utilfeature.DefaultFeatureGate.Enabled(kubefeatures.NonPreemptingPriority),
         registry:                 registry,
         profiles: []schedulerapi.KubeSchedulerProfile{
             {SchedulerName: testSchedulerName},

----------------------------------------------------------------------

@@ -28,7 +28,6 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/util/wait"
-    utilfeature "k8s.io/apiserver/pkg/util/feature"
     "k8s.io/client-go/informers"
     coreinformers "k8s.io/client-go/informers/core/v1"
     clientset "k8s.io/client-go/kubernetes"
@@ -36,7 +35,6 @@
     "k8s.io/klog"
     podutil "k8s.io/kubernetes/pkg/api/v1/pod"
     "k8s.io/kubernetes/pkg/controller/volume/scheduling"
-    kubefeatures "k8s.io/kubernetes/pkg/features"
     schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
     "k8s.io/kubernetes/pkg/scheduler/apis/config/scheme"
     "k8s.io/kubernetes/pkg/scheduler/core"
@@ -279,7 +277,6 @@ func New(client clientset.Interface,
         bindTimeoutSeconds:       options.bindTimeoutSeconds,
         podInitialBackoffSeconds: options.podInitialBackoffSeconds,
         podMaxBackoffSeconds:     options.podMaxBackoffSeconds,
-        enableNonPreempting:      utilfeature.DefaultFeatureGate.Enabled(kubefeatures.NonPreemptingPriority),
         profiles:                 append([]schedulerapi.KubeSchedulerProfile(nil), options.profiles...),
         registry:                 registry,
         nodeInfoSnapshot:         snapshot,

----------------------------------------------------------------------

@@ -820,7 +820,6 @@ func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache internalcache.C
         informerFactory.Policy().V1beta1().PodDisruptionBudgets().Lister(),
         false,
         schedulerapi.DefaultPercentageOfNodesToScore,
-        false,
     )
     errChan := make(chan error, 1)
@@ -1175,7 +1174,6 @@ func TestSchedulerBinding(t *testing.T) {
         nil,
         false,
         0,
-        false,
     )
     sched := Scheduler{
         Algorithm: algo,