Don't fill in NodeToStatusMap with UnschedulableAndUnresolvable
commit c8f0ea1a54 (parent 7ea3bf4db4)
@@ -49,7 +49,8 @@ type NodeScore struct {
     Score int64
 }
 
-// NodeToStatusMap declares map from node name to its status.
+// NodeToStatusMap contains the statuses of the Nodes where the incoming Pod was not schedulable.
+// A PostFilter plugin that uses this map should interpret absent Nodes as UnschedulableAndUnresolvable.
 type NodeToStatusMap map[string]*Status
 
 // NodePluginScores is a struct with node name and scores for that node.
@@ -448,6 +449,7 @@ type PostFilterPlugin interface {
     // If this scheduling cycle failed at PreFilter, all Nodes have the status from the rejector PreFilter plugin in NodeToStatusMap.
     // Note that the scheduling framework runs PostFilter plugins even when PreFilter returned UnschedulableAndUnresolvable.
     // In that case, NodeToStatusMap contains all Nodes with UnschedulableAndUnresolvable.
+    // If there is no entry in the NodeToStatus map, its implicit status is UnschedulableAndUnresolvable.
     //
     // Also, ignoring Nodes with UnschedulableAndUnresolvable is the responsibility of each PostFilter plugin,
     // meaning NodeToStatusMap obviously could have Nodes with UnschedulableAndUnresolvable
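Under the contract documented above, a PostFilter plugin can no longer assume that every Node has an entry in NodeToStatusMap: anything missing from the map is implicitly UnschedulableAndUnresolvable. The following is a minimal sketch of a plugin honoring that contract; the examplePostFilter type and its behavior are hypothetical and not part of this commit.

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/scheduler/framework"
)

// examplePostFilter is a hypothetical plugin used only to illustrate the new
// NodeToStatusMap contract.
type examplePostFilter struct{}

func (pl *examplePostFilter) Name() string { return "ExamplePostFilter" }

func (pl *examplePostFilter) PostFilter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, m framework.NodeToStatusMap) (*framework.PostFilterResult, *framework.Status) {
	// Only Nodes explicitly marked Unschedulable are worth another attempt.
	// Nodes absent from the map are implicitly UnschedulableAndUnresolvable
	// and are simply never seen in this loop.
	var retriable []string
	for name, status := range m {
		if status.Code() == framework.Unschedulable {
			retriable = append(retriable, name)
		}
	}
	if len(retriable) == 0 {
		return nil, framework.NewStatus(framework.Unschedulable, "no retriable node")
	}
	// A real plugin would now try to make room on one of the retriable Nodes.
	return framework.NewPostFilterResultWithNominatedNode(retriable[0]), framework.NewStatus(framework.Success)
}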
@@ -289,7 +289,8 @@ func TestPostFilter(t *testing.T) {
                 st.MakeNode().Name("node4").Capacity(nodeRes).Obj(),
             },
             filteredNodesStatuses: framework.NodeToStatusMap{
-                "node3": framework.NewStatus(framework.UnschedulableAndUnresolvable),
+                "node1": framework.NewStatus(framework.Unschedulable),
+                "node2": framework.NewStatus(framework.Unschedulable),
                 "node4": framework.NewStatus(framework.UnschedulableAndUnresolvable),
             },
             wantResult: framework.NewPostFilterResultWithNominatedNode(""),
@@ -1776,7 +1777,15 @@ func TestPreempt(t *testing.T) {
                State: state,
                Interface: &pl,
            }
-           res, status := pe.Preempt(ctx, test.pod, make(framework.NodeToStatusMap))
+           // so that these nodes are eligible for preemption, we set their status
+           // to Unschedulable.
+           nodeToStatusMap := make(framework.NodeToStatusMap, len(nodes))
+           for _, n := range nodes {
+               nodeToStatusMap[n.Name] = framework.NewStatus(framework.Unschedulable)
+           }
+
+           res, status := pe.Preempt(ctx, test.pod, nodeToStatusMap)
            if !status.IsSuccess() && !status.IsRejected() {
                t.Errorf("unexpected error in preemption: %v", status.AsError())
            }
@@ -415,15 +415,18 @@ func (ev *Evaluator) prepareCandidate(ctx context.Context, c Candidate, pod *v1.
 func nodesWherePreemptionMightHelp(nodes []*framework.NodeInfo, m framework.NodeToStatusMap) ([]*framework.NodeInfo, framework.NodeToStatusMap) {
     var potentialNodes []*framework.NodeInfo
     nodeStatuses := make(framework.NodeToStatusMap)
+    unresolvableStatus := framework.NewStatus(framework.UnschedulableAndUnresolvable, "Preemption is not helpful for scheduling")
     for _, node := range nodes {
-        name := node.Node().Name
-        // We rely on the status by each plugin - 'Unschedulable' or 'UnschedulableAndUnresolvable'
-        // to determine whether preemption may help or not on the node.
-        if m[name].Code() == framework.UnschedulableAndUnresolvable {
-            nodeStatuses[node.Node().Name] = framework.NewStatus(framework.UnschedulableAndUnresolvable, "Preemption is not helpful for scheduling")
-            continue
+        nodeName := node.Node().Name
+        // We only attempt preemption on nodes with status 'Unschedulable'. For
+        // diagnostic purposes, we propagate UnschedulableAndUnresolvable if either
+        // implied by absence in map or explicitly set.
+        status, ok := m[nodeName]
+        if status.Code() == framework.Unschedulable {
+            potentialNodes = append(potentialNodes, node)
+        } else if !ok || status.Code() == framework.UnschedulableAndUnresolvable {
+            nodeStatuses[nodeName] = unresolvableStatus
         }
-        potentialNodes = append(potentialNodes, node)
     }
     return potentialNodes, nodeStatuses
 }
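The rewritten loop leans on two properties: a map lookup for an absent Node yields a nil *Status, and the framework's (*Status).Code() tolerates a nil receiver, so an absent Node never matches the Unschedulable branch and falls through to the unresolvable one via !ok. A small self-contained sketch of that behavior; the node names and reasons below are made up for illustration.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/scheduler/framework"
)

func main() {
	m := framework.NodeToStatusMap{
		"node-a": framework.NewStatus(framework.Unschedulable, "too many pods"),
		"node-b": framework.NewStatus(framework.UnschedulableAndUnresolvable, "node(s) didn't match node selector"),
		// "node-c" is intentionally absent: under the new contract it is
		// treated as UnschedulableAndUnresolvable.
	}

	for _, name := range []string{"node-a", "node-b", "node-c"} {
		status, ok := m[name]
		switch {
		case status.Code() == framework.Unschedulable:
			fmt.Printf("%s: preemption might help\n", name)
		case !ok || status.Code() == framework.UnschedulableAndUnresolvable:
			// Code() is nil-safe in the scheduler framework, so the absent
			// node-c reaches this branch through !ok.
			fmt.Printf("%s: preemption is not helpful\n", name)
		}
	}
}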
@@ -147,6 +147,7 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
                "node1": framework.NewStatus(framework.Unschedulable, interpodaffinity.ErrReasonAntiAffinityRulesNotMatch),
                "node2": framework.NewStatus(framework.UnschedulableAndUnresolvable, nodename.ErrReason),
                "node3": framework.NewStatus(framework.UnschedulableAndUnresolvable, nodeunschedulable.ErrReasonUnschedulable),
+               "node4": framework.NewStatus(framework.Unschedulable, "Unschedulable"),
            },
            expected: sets.New("node1", "node4"),
        },
@@ -155,6 +156,8 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
            nodesStatuses: framework.NodeToStatusMap{
                "node1": framework.NewStatus(framework.UnschedulableAndUnresolvable, interpodaffinity.ErrReasonAffinityRulesNotMatch),
                "node2": framework.NewStatus(framework.Unschedulable, interpodaffinity.ErrReasonAntiAffinityRulesNotMatch),
+               "node3": framework.NewStatus(framework.Unschedulable, "Unschedulable"),
+               "node4": framework.NewStatus(framework.Unschedulable, "Unschedulable"),
            },
            expected: sets.New("node2", "node3", "node4"),
        },
@@ -163,6 +166,8 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
            nodesStatuses: framework.NodeToStatusMap{
                "node1": framework.NewStatus(framework.UnschedulableAndUnresolvable, volumerestrictions.ErrReasonDiskConflict),
                "node2": framework.NewStatus(framework.Unschedulable, fmt.Sprintf("Insufficient %v", v1.ResourceMemory)),
+               "node3": framework.NewStatus(framework.Unschedulable, "Unschedulable"),
+               "node4": framework.NewStatus(framework.Unschedulable, "Unschedulable"),
            },
            expected: sets.New("node2", "node3", "node4"),
        },
@@ -170,6 +175,9 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
            name: "Node condition errors should be considered unresolvable",
            nodesStatuses: framework.NodeToStatusMap{
                "node1": framework.NewStatus(framework.UnschedulableAndUnresolvable, nodeunschedulable.ErrReasonUnknownCondition),
+               "node2": framework.NewStatus(framework.Unschedulable, "Unschedulable"),
+               "node3": framework.NewStatus(framework.Unschedulable, "Unschedulable"),
+               "node4": framework.NewStatus(framework.Unschedulable, "Unschedulable"),
            },
            expected: sets.New("node2", "node3", "node4"),
        },
@@ -179,6 +187,7 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
                "node1": framework.NewStatus(framework.UnschedulableAndUnresolvable, volumezone.ErrReasonConflict),
                "node2": framework.NewStatus(framework.UnschedulableAndUnresolvable, string(volumebinding.ErrReasonNodeConflict)),
                "node3": framework.NewStatus(framework.UnschedulableAndUnresolvable, string(volumebinding.ErrReasonBindConflict)),
+               "node4": framework.NewStatus(framework.Unschedulable, "Unschedulable"),
            },
            expected: sets.New("node4"),
        },
@@ -188,12 +197,14 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
                "node1": framework.NewStatus(framework.Unschedulable, podtopologyspread.ErrReasonConstraintsNotMatch),
                "node2": framework.NewStatus(framework.UnschedulableAndUnresolvable, nodename.ErrReason),
                "node3": framework.NewStatus(framework.Unschedulable, podtopologyspread.ErrReasonConstraintsNotMatch),
+               "node4": framework.NewStatus(framework.Unschedulable, "Unschedulable"),
            },
            expected: sets.New("node1", "node3", "node4"),
        },
        {
            name: "UnschedulableAndUnresolvable status should be skipped but Unschedulable should be tried",
            nodesStatuses: framework.NodeToStatusMap{
+               "node1": framework.NewStatus(framework.Unschedulable, ""),
                "node2": framework.NewStatus(framework.UnschedulableAndUnresolvable, ""),
                "node3": framework.NewStatus(framework.Unschedulable, ""),
                "node4": framework.NewStatus(framework.UnschedulableAndUnresolvable, ""),
@@ -203,6 +214,7 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
        {
            name: "ErrReasonNodeLabelNotMatch should not be tried as it indicates that the pod is unschedulable due to node doesn't have the required label",
            nodesStatuses: framework.NodeToStatusMap{
+               "node1": framework.NewStatus(framework.Unschedulable, ""),
                "node2": framework.NewStatus(framework.UnschedulableAndUnresolvable, podtopologyspread.ErrReasonNodeLabelNotMatch),
                "node3": framework.NewStatus(framework.Unschedulable, ""),
                "node4": framework.NewStatus(framework.UnschedulableAndUnresolvable, ""),
@@ -325,9 +325,11 @@ const ExtenderName = "Extender"
 
 // Diagnosis records the details to diagnose a scheduling failure.
 type Diagnosis struct {
-    // NodeToStatusMap records the status of each node
+    // NodeToStatusMap records the status of each retriable node (status Unschedulable)
     // if they're rejected in PreFilter (via PreFilterResult) or Filter plugins.
     // Nodes that pass PreFilter/Filter plugins are not included in this map.
+    // While this map may contain UnschedulableAndUnresolvable statuses, the absence of
+    // a node should be interpreted as UnschedulableAndUnresolvable.
     NodeToStatusMap NodeToStatusMap
     // UnschedulablePlugins are plugins that returns Unschedulable or UnschedulableAndUnresolvable.
     UnschedulablePlugins sets.Set[string]
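Any code that consumes Diagnosis.NodeToStatusMap now has to apply the same default for missing entries. A hedged sketch of a small helper a caller might use; the helper is illustrative and not part of this commit.

package example

import "k8s.io/kubernetes/pkg/scheduler/framework"

// statusFor returns the effective status of nodeName recorded in a Diagnosis,
// applying the new default for Nodes that are absent from the map.
func statusFor(d framework.Diagnosis, nodeName string) *framework.Status {
	if s, ok := d.NodeToStatusMap[nodeName]; ok {
		return s
	}
	return framework.NewStatus(framework.UnschedulableAndUnresolvable)
}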
@@ -485,14 +485,12 @@ func (sched *Scheduler) findNodesThatFitPod(ctx context.Context, fwk framework.F
     nodes := allNodes
     if !preRes.AllNodes() {
         nodes = make([]*framework.NodeInfo, 0, len(preRes.NodeNames))
-        for _, n := range allNodes {
-            if !preRes.NodeNames.Has(n.Node().Name) {
-                // We consider Nodes that are filtered out by PreFilterResult as rejected via UnschedulableAndUnresolvable.
-                // We have to record them in NodeToStatusMap so that they won't be considered as candidates in the preemption.
-                diagnosis.NodeToStatusMap[n.Node().Name] = framework.NewStatus(framework.UnschedulableAndUnresolvable, "node is filtered out by the prefilter result")
-                continue
+        for nodeName := range preRes.NodeNames {
+            // PreRes may return nodeName(s) which do not exist; we verify
+            // node exists in the Snapshot.
+            if nodeInfo, err := sched.nodeInfoSnapshot.Get(nodeName); err == nil {
+                nodes = append(nodes, nodeInfo)
             }
-            nodes = append(nodes, n)
         }
     }
     feasibleNodes, err := sched.findNodesThatPassFilters(ctx, fwk, state, pod, &diagnosis, nodes)
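The same caveat applies to any plugin that works from node names rather than NodeInfo objects: a PreFilterResult may name Nodes that are not in the scheduler's snapshot. Below is a sketch of resolving names through the framework handle, analogous to the lookup above; the helper is hypothetical and not part of this commit.

package example

import "k8s.io/kubernetes/pkg/scheduler/framework"

// resolveNodeNames keeps only the names that exist in the scheduling-cycle
// snapshot, mirroring the lookup findNodesThatFitPod performs.
func resolveNodeNames(h framework.Handle, names []string) []*framework.NodeInfo {
	var infos []*framework.NodeInfo
	for _, name := range names {
		if info, err := h.SnapshotSharedLister().NodeInfos().Get(name); err == nil {
			infos = append(infos, info)
		}
	}
	return infos
}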
@@ -2344,7 +2344,6 @@ func TestSchedulerSchedulePod(t *testing.T) {
            NumAllNodes: 2,
            Diagnosis: framework.Diagnosis{
                NodeToStatusMap: framework.NodeToStatusMap{
-                   "node1": framework.NewStatus(framework.UnschedulableAndUnresolvable, "node is filtered out by the prefilter result"),
                    "node2": framework.NewStatus(framework.Unschedulable, "injecting failure for pod test-prefilter").WithPlugin("FakeFilter"),
                },
                UnschedulablePlugins: sets.New("FakeFilter"),
@@ -2453,10 +2452,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
            Pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
            NumAllNodes: 2,
            Diagnosis: framework.Diagnosis{
-               NodeToStatusMap: framework.NodeToStatusMap{
-                   "1": framework.NewStatus(framework.UnschedulableAndUnresolvable, "node is filtered out by the prefilter result"),
-                   "2": framework.NewStatus(framework.UnschedulableAndUnresolvable, "node is filtered out by the prefilter result"),
-               },
+               NodeToStatusMap: framework.NodeToStatusMap{},
            },
        },
    },