Merge pull request #112563 from kerthcet/cleanup/optimize-new-scheduler

Remove newScheduler for reducing complexity
This commit is contained in:
Kubernetes Prow Robot 2022-10-11 12:32:41 -07:00 committed by GitHub
commit 5113b705d2
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 86 additions and 132 deletions

View File

@@ -295,18 +295,16 @@ func TestSchedulerWithExtenders(t *testing.T) {
t.Fatal(err)
}
scheduler := newScheduler(
cache,
extenders,
nil,
nil,
nil,
nil,
nil,
emptySnapshot,
schedulerapi.DefaultPercentageOfNodesToScore)
sched := &Scheduler{
Cache: cache,
nodeInfoSnapshot: emptySnapshot,
percentageOfNodesToScore: schedulerapi.DefaultPercentageOfNodesToScore,
Extenders: extenders,
}
sched.applyDefaultHandlers()
podIgnored := &v1.Pod{}
result, err := scheduler.SchedulePod(ctx, fwk, framework.NewCycleState(), podIgnored)
result, err := sched.SchedulePod(ctx, fwk, framework.NewCycleState(), podIgnored)
if test.expectsErr {
if err == nil {
t.Errorf("Unexpected non-error, result %+v", result)

View File

@@ -584,24 +584,20 @@ func TestSchedulerScheduleOne(t *testing.T) {
t.Fatal(err)
}
s := newScheduler(
cache,
nil,
func() *framework.QueuedPodInfo {
sched := &Scheduler{
Cache: cache,
client: client,
NextPod: func() *framework.QueuedPodInfo {
return &framework.QueuedPodInfo{PodInfo: framework.NewPodInfo(item.sendPod)}
},
nil,
internalqueue.NewTestQueue(ctx, nil),
profile.Map{
testSchedulerName: fwk,
},
client,
nil,
0)
s.SchedulePod = func(ctx context.Context, fwk framework.Framework, state *framework.CycleState, pod *v1.Pod) (ScheduleResult, error) {
SchedulingQueue: internalqueue.NewTestQueue(ctx, nil),
Profiles: profile.Map{testSchedulerName: fwk},
}
sched.SchedulePod = func(ctx context.Context, fwk framework.Framework, state *framework.CycleState, pod *v1.Pod) (ScheduleResult, error) {
return item.mockResult.result, item.mockResult.err
}
s.FailureHandler = func(_ context.Context, fwk framework.Framework, p *framework.QueuedPodInfo, err error, _ string, _ *framework.NominatingInfo) {
sched.FailureHandler = func(_ context.Context, fwk framework.Framework, p *framework.QueuedPodInfo, err error, _ string, _ *framework.NominatingInfo) {
gotPod = p.Pod
gotError = err
@@ -616,7 +612,7 @@ func TestSchedulerScheduleOne(t *testing.T) {
}
close(called)
})
s.scheduleOne(ctx)
sched.scheduleOne(ctx)
<-called
if e, a := item.expectAssumedPod, gotAssumedPod; !reflect.DeepEqual(e, a) {
t.Errorf("assumed pod: wanted %v, got %v", e, a)
@@ -2011,20 +2007,17 @@ func TestSchedulerSchedulePod(t *testing.T) {
t.Fatal(err)
}
scheduler := newScheduler(
cache,
nil,
nil,
nil,
nil,
nil,
nil,
snapshot,
schedulerapi.DefaultPercentageOfNodesToScore)
sched := &Scheduler{
Cache: cache,
nodeInfoSnapshot: snapshot,
percentageOfNodesToScore: schedulerapi.DefaultPercentageOfNodesToScore,
}
sched.applyDefaultHandlers()
informerFactory.Start(ctx.Done())
informerFactory.WaitForCacheSync(ctx.Done())
result, err := scheduler.SchedulePod(ctx, fwk, framework.NewCycleState(), test.pod)
result, err := sched.SchedulePod(ctx, fwk, framework.NewCycleState(), test.pod)
if err != test.wErr {
gotFitErr, gotOK := err.(*framework.FitError)
wantFitErr, wantOK := test.wErr.(*framework.FitError)
@@ -2324,19 +2317,14 @@ func TestZeroRequest(t *testing.T) {
t.Fatalf("error creating framework: %+v", err)
}
scheduler := newScheduler(
nil,
nil,
nil,
nil,
nil,
nil,
nil,
snapshot,
schedulerapi.DefaultPercentageOfNodesToScore)
sched := &Scheduler{
nodeInfoSnapshot: snapshot,
percentageOfNodesToScore: schedulerapi.DefaultPercentageOfNodesToScore,
}
sched.applyDefaultHandlers()
state := framework.NewCycleState()
_, _, err = scheduler.findNodesThatFitPod(ctx, fwk, state, test.pod)
_, _, err = sched.findNodesThatFitPod(ctx, fwk, state, test.pod)
if err != nil {
t.Fatalf("error filtering nodes: %+v", err)
}
@@ -2511,18 +2499,15 @@ func TestPreferNominatedNodeFilterCallCounts(t *testing.T) {
t.Fatal(err)
}
snapshot := internalcache.NewSnapshot(nil, nodes)
scheduler := newScheduler(
cache,
nil,
nil,
nil,
nil,
nil,
nil,
snapshot,
schedulerapi.DefaultPercentageOfNodesToScore)
_, _, err = scheduler.findNodesThatFitPod(ctx, fwk, framework.NewCycleState(), test.pod)
sched := &Scheduler{
Cache: cache,
nodeInfoSnapshot: snapshot,
percentageOfNodesToScore: schedulerapi.DefaultPercentageOfNodesToScore,
}
sched.applyDefaultHandlers()
_, _, err = sched.findNodesThatFitPod(ctx, fwk, framework.NewCycleState(), test.pod)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@@ -2572,18 +2557,14 @@ func makeScheduler(nodes []*v1.Node) *Scheduler {
cache.AddNode(n)
}
s := newScheduler(
cache,
nil,
nil,
nil,
nil,
nil,
nil,
emptySnapshot,
schedulerapi.DefaultPercentageOfNodesToScore)
cache.UpdateSnapshot(s.nodeInfoSnapshot)
return s
sched := &Scheduler{
Cache: cache,
nodeInfoSnapshot: emptySnapshot,
percentageOfNodesToScore: schedulerapi.DefaultPercentageOfNodesToScore,
}
sched.applyDefaultHandlers()
cache.UpdateSnapshot(sched.nodeInfoSnapshot)
return sched
}
func makeNode(node string, milliCPU, memory int64) *v1.Node {
@@ -2671,20 +2652,19 @@ func setupTestScheduler(ctx context.Context, queuedPodStore *clientcache.FIFO, c
)
errChan := make(chan error, 1)
sched := newScheduler(
cache,
nil,
func() *framework.QueuedPodInfo {
sched := &Scheduler{
Cache: cache,
client: client,
nodeInfoSnapshot: internalcache.NewEmptySnapshot(),
percentageOfNodesToScore: schedulerapi.DefaultPercentageOfNodesToScore,
NextPod: func() *framework.QueuedPodInfo {
return &framework.QueuedPodInfo{PodInfo: framework.NewPodInfo(clientcache.Pop(queuedPodStore).(*v1.Pod))}
},
nil,
schedulingQueue,
profile.Map{
testSchedulerName: fwk,
},
client,
internalcache.NewEmptySnapshot(),
schedulerapi.DefaultPercentageOfNodesToScore)
SchedulingQueue: schedulingQueue,
Profiles: profile.Map{testSchedulerName: fwk},
}
sched.SchedulePod = sched.schedulePod
sched.FailureHandler = func(_ context.Context, _ framework.Framework, p *framework.QueuedPodInfo, err error, _ string, _ *framework.NominatingInfo) {
errChan <- err

View File

@@ -98,6 +98,11 @@ type Scheduler struct {
nextStartNodeIndex int
}
func (s *Scheduler) applyDefaultHandlers() {
s.SchedulePod = s.schedulePod
s.FailureHandler = s.handleSchedulingFailure
}
type schedulerOptions struct {
componentConfigVersion string
kubeConfig *restclient.Config
@@ -311,17 +316,18 @@ func New(client clientset.Interface,
debugger := cachedebugger.New(nodeLister, podLister, schedulerCache, podQueue)
debugger.ListenForSignal(stopEverything)
sched := newScheduler(
schedulerCache,
extenders,
internalqueue.MakeNextPodFunc(podQueue),
stopEverything,
podQueue,
profiles,
client,
snapshot,
options.percentageOfNodesToScore,
)
sched := &Scheduler{
Cache: schedulerCache,
client: client,
nodeInfoSnapshot: snapshot,
percentageOfNodesToScore: options.percentageOfNodesToScore,
Extenders: extenders,
NextPod: internalqueue.MakeNextPodFunc(podQueue),
StopEverything: stopEverything,
SchedulingQueue: podQueue,
Profiles: profiles,
}
sched.applyDefaultHandlers()
addAllEventHandlers(sched, informerFactory, dynInformerFactory, unionedGVKs(clusterEventMap))
@@ -412,33 +418,6 @@ func buildExtenders(extenders []schedulerapi.Extender, profiles []schedulerapi.K
type FailureHandlerFn func(ctx context.Context, fwk framework.Framework, podInfo *framework.QueuedPodInfo, err error, reason string, nominatingInfo *framework.NominatingInfo)
// newScheduler creates a Scheduler object.
func newScheduler(
cache internalcache.Cache,
extenders []framework.Extender,
nextPod func() *framework.QueuedPodInfo,
stopEverything <-chan struct{},
schedulingQueue internalqueue.SchedulingQueue,
profiles profile.Map,
client clientset.Interface,
nodeInfoSnapshot *internalcache.Snapshot,
percentageOfNodesToScore int32) *Scheduler {
sched := Scheduler{
Cache: cache,
Extenders: extenders,
NextPod: nextPod,
StopEverything: stopEverything,
SchedulingQueue: schedulingQueue,
Profiles: profiles,
client: client,
nodeInfoSnapshot: nodeInfoSnapshot,
percentageOfNodesToScore: percentageOfNodesToScore,
}
sched.SchedulePod = sched.schedulePod
sched.FailureHandler = sched.handleSchedulingFailure
return &sched
}
func unionedGVKs(m map[framework.ClusterEvent]sets.String) map[framework.GVK]framework.ActionType {
gvkMap := make(map[framework.GVK]framework.ActionType)
for evt := range m {

View File

@@ -460,17 +460,14 @@ func initScheduler(stop <-chan struct{}, cache internalcache.Cache, queue intern
return nil, nil, err
}
s := newScheduler(
cache,
nil,
nil,
stop,
queue,
profile.Map{testSchedulerName: fwk},
client,
nil,
0,
)
s := &Scheduler{
Cache: cache,
client: client,
StopEverything: stop,
SchedulingQueue: queue,
Profiles: profile.Map{testSchedulerName: fwk},
}
s.applyDefaultHandlers()
return s, fwk, nil
}