Fix potential context leaking in scheduler

Author: Wei Huang
Date:   2022-08-11 22:22:40 -07:00
Commit: a07e27082a (parent 7b1b801295)
3 changed files with 4 additions and 2 deletions
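
For background (not part of the commit message): calling context.WithCancel and never invoking the returned cancel function leaks the child context, because the parent keeps a reference to it until the parent itself is done. In a long-lived process like the scheduler, whose root context may never be cancelled, those references accumulate. `go vet`'s lostcancel check flags the pattern. A minimal standard-library sketch of the before/after; the function names here are illustrative, not from the scheduler code:

package main

import (
	"context"
	"fmt"
)

// leaky derives a cancellable child context but never calls cancel, so
// the parent retains a reference to the child until the parent itself
// is done -- the "context leaking" the commit title refers to.
func leaky(parent context.Context) {
	ctx, _ := context.WithCancel(parent) // vet lostcancel: cancel discarded
	_ = ctx
}

// fixed releases the child as soon as the function returns, mirroring
// the `defer cancel()` lines this commit adds.
func fixed(parent context.Context) {
	ctx, cancel := context.WithCancel(parent)
	defer cancel()
	_ = ctx
}

func main() {
	leaky(context.Background())
	fixed(context.Background())
	fmt.Println("only fixed released its child context promptly")
}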

pkg/scheduler/framework/preemption/preemption.go

@@ -572,7 +572,7 @@ func (ev *Evaluator) DryRunPreemption(ctx context.Context, pod *v1.Pod, potentia
 	fh := ev.Handler
 	nonViolatingCandidates := newCandidateList(numCandidates)
 	violatingCandidates := newCandidateList(numCandidates)
-	parallelCtx, cancel := context.WithCancel(ctx)
+	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
 	nodeStatuses := make(framework.NodeToStatusMap)
 	var statusesLock sync.Mutex
@@ -611,6 +611,6 @@ func (ev *Evaluator) DryRunPreemption(ctx context.Context, pod *v1.Pod, potentia
 			nodeStatuses[nodeInfoCopy.Node().Name] = status
 			statusesLock.Unlock()
 		}
-	fh.Parallelizer().Until(parallelCtx, len(potentialNodes), checkNode)
+	fh.Parallelizer().Until(ctx, len(potentialNodes), checkNode)
 	return append(nonViolatingCandidates.get(), violatingCandidates.get()...), nodeStatuses, utilerrors.NewAggregate(errs)
 }
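
A possible reading of the rename above (my inference, not stated in the commit): by shadowing ctx instead of keeping a separate parallelCtx, every later use of ctx inside DryRunPreemption automatically refers to the cancellable child, so no call site in the function can accidentally pass the non-cancellable parent. A small sketch of the shadowing idiom, with illustrative names:

package sketch

import "context"

// process shadows its ctx parameter: after the assignment, every use
// of `ctx` in the body refers to the cancellable child, so nothing
// below can grab the non-cancellable parent by mistake.
func process(ctx context.Context, items []string) {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	for _, item := range items {
		select {
		case <-ctx.Done():
			return // stop early if the child context is cancelled
		default:
			_ = item // placeholder for real per-item work
		}
	}
}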

pkg/scheduler/framework/runtime/framework.go

@@ -907,6 +907,7 @@ func (f *frameworkImpl) RunScorePlugins(ctx context.Context, state *framework.Cy
 		pluginToNodeScores[pl.Name()] = make(framework.NodeScoreList, len(nodes))
 	}
 	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
 	errCh := parallelize.NewErrorChannel()
 	// Run Score method for each node in parallel.

pkg/scheduler/schedule_one.go

@@ -476,6 +476,7 @@ func (sched *Scheduler) findNodesThatPassFilters(
 	var statusesLock sync.Mutex
 	var feasibleNodesLen int32
 	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
 	checkNode := func(i int) {
 		// We check the nodes starting from where we left off in the previous scheduling cycle,
 		// this is to make sure all nodes have the same chance of being examined across pods.
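
For completeness, a self-contained sketch (standard library only; the scheduler itself fans work out through its Parallelizer helper) of the pattern findNodesThatPassFilters uses: workers share a cancellable child context, cancel() stops the remaining workers once enough feasible nodes are found, and the defer added in this commit guarantees the child is released on every return path. All names below are illustrative:

package main

import (
	"context"
	"fmt"
	"sync"
	"sync/atomic"
)

// checkNodesParallel is an illustrative stand-in, not scheduler code:
// it collects up to `want` feasible nodes, cancelling the shared
// context so workers that have not started yet can bail out early.
func checkNodesParallel(ctx context.Context, nodes []string, want int32) []string {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel() // release the child context on every return path

	var (
		mu       sync.Mutex
		feasible []string
		found    int32
		wg       sync.WaitGroup
	)
	for _, n := range nodes {
		wg.Add(1)
		go func(node string) {
			defer wg.Done()
			select {
			case <-ctx.Done():
				return // enough nodes already found; skip the check
			default:
			}
			// Pretend every node passes the filters.
			if atomic.AddInt32(&found, 1) > want {
				cancel() // quota reached; tell remaining workers to stop
				return
			}
			mu.Lock()
			feasible = append(feasible, node)
			mu.Unlock()
		}(n)
	}
	wg.Wait()
	return feasible
}

func main() {
	nodes := []string{"node-a", "node-b", "node-c", "node-d"}
	fmt.Println(checkNodesParallel(context.Background(), nodes, 2))
}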