Remove direct use of Snapshot's data structures
Signed-off-by: Aldo Culquicondor <acondor@google.com>
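The diff below replaces direct reads of the snapshot's NodeInfoList field with the accessor methods NumNodes() and NodeInfos().List(). As a rough illustration of that pattern only (a minimal sketch with simplified stand-in types, not the actual Kubernetes scheduler types), a caller-side example might look like this:

// Sketch of the accessor-based Snapshot API this commit moves callers toward.
// The types here are simplified stand-ins; only the shape of the calls matches the diff.
package main

import "fmt"

// NodeInfo stands in for the scheduler's per-node bookkeeping type.
type NodeInfo struct {
	Name string
}

// NodeInfoLister mirrors the shape of the lister returned by NodeInfos().
type NodeInfoLister interface {
	List() ([]*NodeInfo, error)
}

// Snapshot holds the node list; callers go through the accessors below
// instead of reading NodeInfoList directly, so the backing data structure
// can change without touching call sites.
type Snapshot struct {
	NodeInfoList []*NodeInfo
}

// NumNodes returns the number of nodes in the snapshot.
func (s *Snapshot) NumNodes() int { return len(s.NodeInfoList) }

// NodeInfos returns a lister view over the snapshot.
func (s *Snapshot) NodeInfos() NodeInfoLister { return &nodeInfoLister{snapshot: s} }

type nodeInfoLister struct{ snapshot *Snapshot }

func (l *nodeInfoLister) List() ([]*NodeInfo, error) { return l.snapshot.NodeInfoList, nil }

func main() {
	snap := &Snapshot{NodeInfoList: []*NodeInfo{{Name: "node-a"}, {Name: "node-b"}}}

	// Callers use the accessors rather than ranging over snap.NodeInfoList.
	fmt.Println("nodes in snapshot:", snap.NumNodes())
	infos, err := snap.NodeInfos().List()
	if err != nil {
		return
	}
	for _, ni := range infos {
		fmt.Println("node:", ni.Name)
	}
}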
@@ -178,7 +178,7 @@ func (g *genericScheduler) Schedule(ctx context.Context, state *framework.CycleS
 	}
 	trace.Step("Snapshotting scheduler cache and node infos done")
 
-	if len(g.nodeInfoSnapshot.NodeInfoList) == 0 {
+	if g.nodeInfoSnapshot.NumNodes() == 0 {
 		return result, ErrNoNodesAvailable
 	}
 
@@ -205,7 +205,7 @@ func (g *genericScheduler) Schedule(ctx context.Context, state *framework.CycleS
 	if len(filteredNodes) == 0 {
 		return result, &FitError{
 			Pod:                   pod,
-			NumAllNodes:           len(g.nodeInfoSnapshot.NodeInfoList),
+			NumAllNodes:           g.nodeInfoSnapshot.NumNodes(),
 			FilteredNodesStatuses: filteredNodesStatuses,
 		}
 	}
@@ -299,19 +299,20 @@ func (g *genericScheduler) Preempt(ctx context.Context, state *framework.CycleSt
 		klog.V(5).Infof("Pod %v/%v is not eligible for more preemption.", pod.Namespace, pod.Name)
 		return nil, nil, nil, nil
 	}
-	if len(g.nodeInfoSnapshot.NodeInfoList) == 0 {
+	allNodes, err := g.nodeInfoSnapshot.NodeInfos().List()
+	if err != nil {
+		return nil, nil, nil, err
+	}
+	if len(allNodes) == 0 {
 		return nil, nil, nil, ErrNoNodesAvailable
 	}
-	potentialNodes := nodesWherePreemptionMightHelp(g.nodeInfoSnapshot.NodeInfoList, fitError)
+	potentialNodes := nodesWherePreemptionMightHelp(allNodes, fitError)
 	if len(potentialNodes) == 0 {
 		klog.V(3).Infof("Preemption will not help schedule pod %v/%v on any node.", pod.Namespace, pod.Name)
 		// In this case, we should clean-up any existing nominated node name of the pod.
 		return nil, nil, []*v1.Pod{pod}, nil
 	}
-	var (
-		pdbs []*policy.PodDisruptionBudget
-		err  error
-	)
+	var pdbs []*policy.PodDisruptionBudget
 	if g.pdbLister != nil {
 		pdbs, err = g.pdbLister.List(labels.Everything())
 		if err != nil {
@@ -435,11 +436,17 @@ func (g *genericScheduler) findNodesThatFit(ctx context.Context, state *framewor
 	var filtered []*v1.Node
 	filteredNodesStatuses := framework.NodeToStatusMap{}
 
+	allNodes, err := g.nodeInfoSnapshot.NodeInfos().List()
+	if err != nil {
+		return nil, nil, err
+	}
 	if !g.framework.HasFilterPlugins() {
-		filtered = g.nodeInfoSnapshot.ListNodes()
+		filtered = make([]*v1.Node, len(allNodes))
+		for i, ni := range allNodes {
+			filtered[i] = ni.Node()
+		}
 	} else {
-		allNodes := len(g.nodeInfoSnapshot.NodeInfoList)
-		numNodesToFind := g.numFeasibleNodesToFind(int32(allNodes))
+		numNodesToFind := g.numFeasibleNodesToFind(int32(len(allNodes)))
 
 		// Create filtered list with enough space to avoid growing it
 		// and allow assigning.
@@ -454,7 +461,7 @@ func (g *genericScheduler) findNodesThatFit(ctx context.Context, state *framewor
 		checkNode := func(i int) {
 			// We check the nodes starting from where we left off in the previous scheduling cycle,
 			// this is to make sure all nodes have the same chance of being examined across pods.
-			nodeInfo := g.nodeInfoSnapshot.NodeInfoList[(g.nextStartNodeIndex+i)%allNodes]
+			nodeInfo := allNodes[(g.nextStartNodeIndex+i)%len(allNodes)]
 			fits, status, err := g.podFitsOnNode(ctx, state, pod, nodeInfo)
 			if err != nil {
 				errCh.SendErrorWithCancel(err, cancel)
@@ -479,9 +486,9 @@ func (g *genericScheduler) findNodesThatFit(ctx context.Context, state *framewor
 
 		// Stops searching for more nodes once the configured number of feasible nodes
 		// are found.
-		workqueue.ParallelizeUntil(ctx, 16, allNodes, checkNode)
+		workqueue.ParallelizeUntil(ctx, 16, len(allNodes), checkNode)
 		processedNodes := int(filteredLen) + len(filteredNodesStatuses)
-		g.nextStartNodeIndex = (g.nextStartNodeIndex + processedNodes) % allNodes
+		g.nextStartNodeIndex = (g.nextStartNodeIndex + processedNodes) % len(allNodes)
 
 		filtered = filtered[:filteredLen]
 		if err := errCh.ReceiveError(); err != nil {
@@ -653,7 +660,7 @@ func (g *genericScheduler) prioritizeNodes(
 	if len(g.extenders) != 0 && nodes != nil {
 		var mu sync.Mutex
 		var wg sync.WaitGroup
-		combinedScores := make(map[string]int64, len(g.nodeInfoSnapshot.NodeInfoList))
+		combinedScores := make(map[string]int64, len(nodes))
 		for i := range g.extenders {
 			if !g.extenders[i].IsInterested(pod) {
 				continue

@@ -132,15 +132,9 @@ func (s *Snapshot) NodeInfos() schedulerlisters.NodeInfoLister {
 	return &nodeInfoLister{snapshot: s}
 }
 
-// ListNodes returns the list of nodes in the snapshot.
-func (s *Snapshot) ListNodes() []*v1.Node {
-	nodes := make([]*v1.Node, 0, len(s.NodeInfoList))
-	for _, n := range s.NodeInfoList {
-		if n.Node() != nil {
-			nodes = append(nodes, n.Node())
-		}
-	}
-	return nodes
+// NumNodes returns the number of nodes in the snapshot.
+func (s *Snapshot) NumNodes() int {
+	return len(s.NodeInfoList)
 }
 
 type podLister struct {