Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 03:41:45 +00:00)
Fix potential context leaking in scheduler
This commit is contained in:
  parent 7b1b801295
  commit a07e27082a
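What "context leaking" means here, as a minimal self-contained sketch: a context derived with context.WithCancel stays registered with its parent until its cancel function is called, so a code path that never calls cancel keeps the child alive for as long as the parent lives. Deferring cancel releases it as soon as the enclosing function returns. The names below (runParallel, leakyRun, fixedRun) are illustrative stand-ins, not scheduler code; the hunks that follow apply the same defer-based release in three scheduler code paths.

package main

import (
	"context"
	"fmt"
	"sync"
)

// runParallel is a simplified stand-in for the scheduler's
// Parallelizer().Until: run fn for each index, stopping early once
// ctx is cancelled.
func runParallel(ctx context.Context, n int, fn func(i int)) {
	var wg sync.WaitGroup
	for i := 0; i < n; i++ {
		if ctx.Err() != nil {
			break // stop handing out work once the context is cancelled
		}
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			fn(i)
		}(i)
	}
	wg.Wait()
}

// leakyRun mirrors the pre-fix shape: cancel exists but is only reachable
// from some internal path (e.g. an error), so a fully successful run never
// calls it and the derived context stays registered with its parent.
func leakyRun(parent context.Context) context.Context {
	ctx, cancel := context.WithCancel(parent)
	_ = cancel // in the real code, cancel is only invoked on an error path
	runParallel(ctx, 3, func(i int) {})
	return ctx // Err() is still nil: the child context was never released
}

// fixedRun mirrors the post-fix shape: defer cancel() releases the derived
// context as soon as the function returns, error or not.
func fixedRun(parent context.Context) context.Context {
	ctx, cancel := context.WithCancel(parent)
	defer cancel()
	runParallel(ctx, 3, func(i int) {})
	return ctx // Err() is context.Canceled by the time the caller sees it
}

func main() {
	fmt.Println("without defer cancel():", leakyRun(context.Background()).Err())
	fmt.Println("with defer cancel():   ", fixedRun(context.Background()).Err())
}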
@@ -572,7 +572,7 @@ func (ev *Evaluator) DryRunPreemption(ctx context.Context, pod *v1.Pod, potentia
 	fh := ev.Handler
 	nonViolatingCandidates := newCandidateList(numCandidates)
 	violatingCandidates := newCandidateList(numCandidates)
-	parallelCtx, cancel := context.WithCancel(ctx)
+	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
 	nodeStatuses := make(framework.NodeToStatusMap)
 	var statusesLock sync.Mutex
@@ -611,6 +611,6 @@ func (ev *Evaluator) DryRunPreemption(ctx context.Context, pod *v1.Pod, potentia
 		nodeStatuses[nodeInfoCopy.Node().Name] = status
 		statusesLock.Unlock()
 	}
-	fh.Parallelizer().Until(parallelCtx, len(potentialNodes), checkNode)
+	fh.Parallelizer().Until(ctx, len(potentialNodes), checkNode)
 	return append(nonViolatingCandidates.get(), violatingCandidates.get()...), nodeStatuses, utilerrors.NewAggregate(errs)
 }
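In DryRunPreemption the cancellable child is now assigned back to ctx instead of living in a separate parallelCtx variable, so the Parallelizer().Until call and anything inside checkNode that captures ctx all see the same cancellable context. A minimal sketch of that reassignment pattern; dryRun and its body are illustrative, not the real signature or logic:

package main

import (
	"context"
	"fmt"
)

func dryRun(ctx context.Context) {
	// Reuse the ctx variable for the cancellable child: every later use of
	// ctx in this function, including closures that capture it, refers to
	// the cancellable context rather than the caller's original one.
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	checkNode := func(i int) {
		if ctx.Err() != nil { // the closure observes the cancellable ctx
			return
		}
		fmt.Println("dry-run preemption on node", i)
	}
	for i := 0; i < 3; i++ {
		checkNode(i)
	}
}

func main() {
	dryRun(context.Background())
}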
@@ -907,6 +907,7 @@ func (f *frameworkImpl) RunScorePlugins(ctx context.Context, state *framework.Cy
 		pluginToNodeScores[pl.Name()] = make(framework.NodeScoreList, len(nodes))
 	}
 	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
 	errCh := parallelize.NewErrorChannel()

 	// Run Score method for each node in parallel.
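In RunScorePlugins the cancellable context already existed; judging from the hunk, cancel was previously reachable only on an error path (roughly the error channel's SendErrorWithCancel), so a run with no scoring errors never released the derived context. The added defer cancel() covers the happy path too. The sketch below uses a simplified stand-in for parallelize.ErrorChannel, not the scheduler's actual type:

package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
)

// errorChannel is a simplified stand-in for the scheduler's
// parallelize.ErrorChannel: keep the first error and cancel the shared
// context so sibling workers stop early.
type errorChannel struct {
	once sync.Once
	err  error
}

func (e *errorChannel) sendWithCancel(err error, cancel context.CancelFunc) {
	e.once.Do(func() {
		e.err = err
		cancel() // the only call site of cancel before the fix
	})
}

func (e *errorChannel) receive() error { return e.err }

func scoreNodes(parent context.Context, nodes []string, failOn string) error {
	ctx, cancel := context.WithCancel(parent)
	// Without this defer, a run in which no node fails never calls cancel,
	// and ctx stays registered with parent until parent itself is cancelled.
	defer cancel()

	errCh := &errorChannel{}
	var wg sync.WaitGroup
	for _, name := range nodes {
		name := name
		wg.Add(1)
		go func() {
			defer wg.Done()
			if ctx.Err() != nil {
				return // a sibling already failed and cancelled ctx
			}
			if name == failOn {
				errCh.sendWithCancel(errors.New("score failed on "+name), cancel)
			}
		}()
	}
	wg.Wait()
	return errCh.receive()
}

func main() {
	nodes := []string{"node-a", "node-b", "node-c"}
	fmt.Println("happy path:", scoreNodes(context.Background(), nodes, ""))
	fmt.Println("error path:", scoreNodes(context.Background(), nodes, "node-b"))
}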
@@ -476,6 +476,7 @@ func (sched *Scheduler) findNodesThatPassFilters(
 	var statusesLock sync.Mutex
 	var feasibleNodesLen int32
 	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
 	checkNode := func(i int) {
 		// We check the nodes starting from where we left off in the previous scheduling cycle,
 		// this is to make sure all nodes have the same chance of being examined across pods.
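findNodesThatPassFilters gets the same one-line treatment as RunScorePlugins. As a general aside (not part of this commit), go vet's lostcancel check catches the simpler form of the mistake, where the cancel function is discarded or plainly never called; a cancel that is only referenced from a closure is generally treated as used, which is why the unconditional defer is the safer idiom. A small illustration; the vet message quoted in the comment is approximate:

package main

import (
	"context"
	"fmt"
	"time"
)

// go vet (lostcancel) reports something like: "the cancel function returned
// by context.WithCancel should be called, not discarded, to avoid a context
// leak" for the line below.
func leaky(parent context.Context) {
	ctx, _ := context.WithCancel(parent) // cancel discarded
	_ = ctx
}

// The deferred form passes vet and releases the child context no matter how
// the function returns.
func safe(parent context.Context) {
	ctx, cancel := context.WithCancel(parent)
	defer cancel()
	select {
	case <-ctx.Done():
	case <-time.After(time.Millisecond):
	}
}

func main() {
	leaky(context.Background())
	safe(context.Background())
	fmt.Println("done")
}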