Merge pull request #84335 from mrkm4ntr/fix-node-evaluation
Add unit test to catch scheduler's node order evaluation regressions
Commit 979688b5cd
@@ -164,7 +164,7 @@ type genericScheduler struct {
 	disablePreemption        bool
 	percentageOfNodesToScore int32
 	enableNonPreempting      bool
-	lastProcessedNodeIndex   int
+	nextStartNodeIndex       int
 }

 // snapshot snapshots scheduler cache and node infos for all fit and priority
@@ -499,7 +499,7 @@ func (g *genericScheduler) findNodesThatFit(ctx context.Context, state *framewor
 	checkNode := func(i int) {
 		// We check the nodes starting from where we left off in the previous scheduling cycle,
 		// this is to make sure all nodes have the same chance of being examined across pods.
-		nodeInfo := g.nodeInfoSnapshot.NodeInfoList[(g.lastProcessedNodeIndex+i)%allNodes]
+		nodeInfo := g.nodeInfoSnapshot.NodeInfoList[(g.nextStartNodeIndex+i)%allNodes]
		fits, failedPredicates, status, err := g.podFitsOnNode(
 			ctx,
 			state,
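The rotated read above, together with the index advance in the next hunk, is the whole fairness mechanism: each scheduling cycle starts filtering where the previous one stopped, so no prefix of the node list is examined more often than the rest. A minimal, self-contained sketch of the technique (the type, method names, and the trivially-true fit check are hypothetical stand-ins, not the scheduler's real ones):

```go
package main

import "fmt"

// roundRobinScheduler mimics the genericScheduler's rotation: nextStartNodeIndex
// remembers where the previous cycle stopped, and each cycle reads nodes at
// (nextStartNodeIndex+i) % len(nodes) so every node gets an equal chance.
type roundRobinScheduler struct {
	nodes              []string
	nextStartNodeIndex int
}

// findFeasible examines nodes starting at the rotated offset until it has
// `want` of them, then advances the start index by the number it processed.
func (s *roundRobinScheduler) findFeasible(want int) []string {
	allNodes := len(s.nodes)
	feasible := make([]string, 0, want)
	processed := 0
	for i := 0; i < allNodes && len(feasible) < want; i++ {
		node := s.nodes[(s.nextStartNodeIndex+i)%allNodes] // rotated read
		processed++
		feasible = append(feasible, node) // every node "fits" in this sketch
	}
	s.nextStartNodeIndex = (s.nextStartNodeIndex + processed) % allNodes
	return feasible
}

func main() {
	s := &roundRobinScheduler{nodes: []string{"n0", "n1", "n2", "n3", "n4"}}
	for cycle := 0; cycle < 3; cycle++ {
		fmt.Println(s.findFeasible(2), "next start:", s.nextStartNodeIndex)
	}
	// Prints: [n0 n1] next start: 2
	//         [n2 n3] next start: 4
	//         [n4 n0] next start: 1  — the evaluation window rotates.
}
```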
@@ -536,7 +536,7 @@ func (g *genericScheduler) findNodesThatFit(ctx context.Context, state *framewor
 	// are found.
 	workqueue.ParallelizeUntil(ctx, 16, allNodes, checkNode)
 	processedNodes := int(filteredLen) + len(filteredNodesStatuses) + len(failedPredicateMap)
-	g.lastProcessedNodeIndex = (g.lastProcessedNodeIndex + processedNodes) % allNodes
+	g.nextStartNodeIndex = (g.nextStartNodeIndex + processedNodes) % allNodes

 	filtered = filtered[:filteredLen]
 	if err := errCh.ReceiveError(); err != nil {
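In the diff the per-node check is fanned out through workqueue.ParallelizeUntil with 16 workers; only the subsequent advance of nextStartNodeIndex is serial, which is why it adds the total count of processed nodes rather than tracking per-worker state. A rough standard-library stand-in for that fan-out (an approximation of the helper's behavior, not its actual implementation):

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"sync/atomic"
)

// parallelizeUntil approximates k8s.io/client-go/util/workqueue.ParallelizeUntil:
// it hands piece indices 0..pieces-1 to `workers` goroutines and stops
// taking new pieces once ctx is cancelled.
func parallelizeUntil(ctx context.Context, workers, pieces int, doWorkPiece func(int)) {
	toProcess := make(chan int, pieces)
	for i := 0; i < pieces; i++ {
		toProcess <- i
	}
	close(toProcess)

	var wg sync.WaitGroup
	wg.Add(workers)
	for w := 0; w < workers; w++ {
		go func() {
			defer wg.Done()
			for piece := range toProcess {
				select {
				case <-ctx.Done():
					return
				default:
					doWorkPiece(piece)
				}
			}
		}()
	}
	wg.Wait()
}

func main() {
	var checked int64
	parallelizeUntil(context.Background(), 16, 500, func(i int) {
		atomic.AddInt64(&checked, 1) // stands in for checkNode(i)
	})
	fmt.Println("nodes checked:", checked) // nodes checked: 500
}
```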
@@ -2286,3 +2286,32 @@ func assignDefaultStartTime(pods []*v1.Pod) {
 		}
 	}
 }
+
+func TestFairEvaluationForNodes(t *testing.T) {
+	defer algorithmpredicates.SetPredicatesOrderingDuringTest(order)()
+	predicates := map[string]algorithmpredicates.FitPredicate{"true": truePredicate}
+	numAllNodes := 500
+	nodeNames := make([]string, 0, numAllNodes)
+	for i := 0; i < numAllNodes; i++ {
+		nodeNames = append(nodeNames, strconv.Itoa(i))
+	}
+	nodes := makeNodeList(nodeNames)
+	g := makeScheduler(predicates, nodes)
+	// To make numAllNodes % nodesToFind != 0
+	g.percentageOfNodesToScore = 30
+	nodesToFind := int(g.numFeasibleNodesToFind(int32(numAllNodes)))
+
+	// Iterating over all nodes more than twice
+	for i := 0; i < 2*(numAllNodes/nodesToFind+1); i++ {
+		nodesThatFit, _, _, err := g.findNodesThatFit(context.Background(), framework.NewCycleState(), &v1.Pod{})
+		if err != nil {
+			t.Errorf("unexpected error: %v", err)
+		}
+		if len(nodesThatFit) != nodesToFind {
+			t.Errorf("got %d nodes filtered, want %d", len(nodesThatFit), nodesToFind)
+		}
+		if g.nextStartNodeIndex != (i+1)*nodesToFind%numAllNodes {
+			t.Errorf("got %d lastProcessedNodeIndex, want %d", g.nextStartNodeIndex, (i+1)*nodesToFind%numAllNodes)
+		}
+	}
+}
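The constants in this test line up so the rotation actually wraps: assuming numFeasibleNodesToFind returns 500 * 30 / 100 = 150 here, 500 % 150 != 0 (as the test's comment demands), so the expected start index walks through non-zero residues rather than snapping back to 0 each cycle. A quick enumeration of the sequence the assertion checks, using the test's own formula:

```go
package main

import "fmt"

func main() {
	numAllNodes, nodesToFind := 500, 150 // 30% of 500 nodes
	// Expected nextStartNodeIndex after each findNodesThatFit cycle,
	// per the test's assertion: (i+1)*nodesToFind % numAllNodes.
	for i := 0; i < 2*(numAllNodes/nodesToFind+1); i++ {
		fmt.Printf("after cycle %d: %d\n", i, (i+1)*nodesToFind%numAllNodes)
	}
	// Prints 150, 300, 450, 100, 250, 400, 50, 200 — the index only
	// returns to 0 after every ten cycles, so a regression that always
	// restarted filtering at node 0 would fail on the very first cycle.
}
```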