From e6dd36759f07e5967f9f74cbda9c77a59a7977b1 Mon Sep 17 00:00:00 2001
From: Oleg Guba
Date: Thu, 29 Feb 2024 20:43:50 -0800
Subject: [PATCH] [kubernetes/scheduler] use lockless diagnosis collection in
 findNodesThatPassFilters

Replace the mutex-guarded writes to diagnosis.NodeToStatusMap with a
preallocated per-node result slice. Each parallel filter worker writes
only the element at its own index, so no lock is needed while checkNode
runs. The slice is sized by numAllNodes because the worker index ranges
over all nodes, and the collected statuses are merged into diagnosis
only after the parallel phase and its error handling have completed.
---
 pkg/scheduler/schedule_one.go | 21 ++++++++++++++++-----
 1 file changed, 16 insertions(+), 5 deletions(-)

diff --git a/pkg/scheduler/schedule_one.go b/pkg/scheduler/schedule_one.go
index ae4be17bb63..42fd13eb38d 100644
--- a/pkg/scheduler/schedule_one.go
+++ b/pkg/scheduler/schedule_one.go
@@ -595,10 +595,16 @@ func (sched *Scheduler) findNodesThatPassFilters(
 	}
 
 	errCh := parallelize.NewErrorChannel()
-	var statusesLock sync.Mutex
 	var feasibleNodesLen int32
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
+
+	type nodeStatus struct {
+		node   string
+		status *framework.Status
+	}
+	// Each worker writes only result[i] for its own index i, so no lock is needed.
+	result := make([]*nodeStatus, numAllNodes)
 	checkNode := func(i int) {
 		// We check the nodes starting from where we left off in the previous scheduling cycle,
 		// this is to make sure all nodes have the same chance of being examined across pods.
@@ -617,12 +623,9 @@ func (sched *Scheduler) findNodesThatPassFilters(
 				feasibleNodes[length-1] = nodeInfo
 			}
 		} else {
-			statusesLock.Lock()
-			diagnosis.NodeToStatusMap[nodeInfo.Node().Name] = status
-			diagnosis.AddPluginStatus(status)
-			statusesLock.Unlock()
+			result[i] = &nodeStatus{node: nodeInfo.Node().Name, status: status}
 		}
 	}
 
 	beginCheckNode := time.Now()
 	statusCode := framework.Success
@@ -640,5 +643,13 @@ func (sched *Scheduler) findNodesThatPassFilters(
 		statusCode = framework.Error
 		return feasibleNodes, err
 	}
+	// Merge per-node statuses into diagnosis after all workers have finished.
+	for _, item := range result {
+		if item == nil {
+			continue
+		}
+		diagnosis.NodeToStatusMap[item.node] = item.status
+		diagnosis.AddPluginStatus(item.status)
+	}
 	return feasibleNodes, nil
 }
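
-- 
Reviewer note, not part of the patch: below is a minimal, runnable sketch of
the lock-free collection pattern the diff relies on. The names here (items,
result, the sync.WaitGroup worker pool) are illustrative only; in the
scheduler the parallelism comes from fwk.Parallelizer().Until and the merge
target is diagnosis.

	package main

	import (
		"fmt"
		"sync"
	)

	func main() {
		items := []string{"node-a", "node-b", "node-c", "node-d"}

		// Preallocate one slot per item: worker i writes only result[i],
		// so no mutex is needed while the workers run.
		result := make([]*string, len(items))

		var wg sync.WaitGroup
		for i := range items {
			wg.Add(1)
			go func(i int) {
				defer wg.Done()
				if i%2 == 0 { // stand-in for "filter rejected this node"
					msg := fmt.Sprintf("%s: rejected", items[i])
					result[i] = &msg
				}
			}(i)
		}
		// Wait establishes a happens-before edge between every worker's
		// write and the merge loop, so reading result here is race-free.
		wg.Wait()

		for _, item := range result {
			if item != nil {
				fmt.Println(*item)
			}
		}
	}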