nodeAffinity filtered nodes should be excluded when calculating skew in PodTopologySpread
Signed-off-by: kerthcet <kerthcet@gmail.com>
commit 935cbc8e62
parent 108c284a33
@@ -222,6 +222,10 @@ func (pl *PodTopologySpread) calPreFilterState(pod *v1.Pod) (*preFilterState, er
         TpKeyToCriticalPaths: make(map[string]*criticalPaths, len(constraints)),
         TpPairToMatchNum: make(map[topologyPair]*int32, sizeHeuristic(len(allNodes), constraints)),
     }
+
+    // Nodes that pass nodeAffinity check and carry all required topology keys will be
+    // stored in `filteredNodes`, and be looped later to calculate preFilterState.
+    var filteredNodes []*framework.NodeInfo
     requiredSchedulingTerm := nodeaffinity.GetRequiredNodeAffinity(pod)
     for _, n := range allNodes {
         node := n.Node()
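For context on the pre-filtering above: `nodeaffinity.GetRequiredNodeAffinity` folds the incoming pod's `nodeSelector` and required node affinity into a single matcher, and the hunk introduces `filteredNodes` to hold only the nodes that matcher accepts. Below is a hedged, standalone sketch of that filtering step, not the plugin's code: the import path for the helper and the `buildNode`/`buildPod` constructors are assumptions made for illustration; only `GetRequiredNodeAffinity` (and its `Match` method) come from the library the patch calls.

// Sketch only: illustrates the "keep nodes that pass the pod's required node
// affinity" step, under the assumption that the helper lives in
// k8s.io/component-helpers/scheduling/corev1/nodeaffinity.
package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
)

// buildNode is a hypothetical helper, not part of the plugin.
func buildNode(name, zone string) *v1.Node {
    return &v1.Node{
        ObjectMeta: metav1.ObjectMeta{
            Name:   name,
            Labels: map[string]string{"zone": zone, "node": name},
        },
    }
}

// buildPod is a hypothetical helper: a pod requiring nodeAffinity NotIn {node-b}.
func buildPod() *v1.Pod {
    return &v1.Pod{
        Spec: v1.PodSpec{
            Affinity: &v1.Affinity{
                NodeAffinity: &v1.NodeAffinity{
                    RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
                        NodeSelectorTerms: []v1.NodeSelectorTerm{{
                            MatchExpressions: []v1.NodeSelectorRequirement{{
                                Key:      "node",
                                Operator: v1.NodeSelectorOpNotIn,
                                Values:   []string{"node-b"},
                            }},
                        }},
                    },
                },
            },
        },
    }
}

func main() {
    pod := buildPod()
    nodes := []*v1.Node{buildNode("node-a", "zone1"), buildNode("node-b", "zone1")}

    // Same pattern as the patch: evaluate the pod's required node affinity once,
    // then keep only the nodes that match before any skew bookkeeping happens.
    required := nodeaffinity.GetRequiredNodeAffinity(pod)
    var filtered []*v1.Node
    for _, n := range nodes {
        if match, _ := required.Match(n); match { // parse errors ignored in this sketch
            filtered = append(filtered, n)
        }
    }
    for _, n := range filtered {
        fmt.Println("eligible for skew counting:", n.Name) // node-b is excluded
    }
}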
@@ -244,10 +248,12 @@ func (pl *PodTopologySpread) calPreFilterState(pod *v1.Pod) (*preFilterState, er
             pair := topologyPair{key: c.TopologyKey, value: node.Labels[c.TopologyKey]}
             s.TpPairToMatchNum[pair] = new(int32)
         }
+
+        filteredNodes = append(filteredNodes, n)
     }
 
     processNode := func(i int) {
-        nodeInfo := allNodes[i]
+        nodeInfo := filteredNodes[i]
         node := nodeInfo.Node()
 
         for _, constraint := range constraints {
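Only topology pairs observed on nodes that survive the affinity check get a counter registered (`s.TpPairToMatchNum[pair] = new(int32)` above), and the counting pass in the next hunk accumulates into those counters with `atomic.AddInt32`. The sketch below is a toy model of that register-then-count scheme, not the plugin's types; `pair`, `registry`, and `observe` are names made up for illustration.

// Sketch only: a counter is created per eligible topology pair, and counts for
// pairs that were never registered are silently dropped.
package main

import (
    "fmt"
    "sync/atomic"
)

// pair mirrors the idea of the plugin's topologyPair: a topology key plus the
// node's value for that key (e.g. "zone" -> "zone1").
type pair struct{ key, value string }

func main() {
    // Registration pass: only pairs seen on eligible (filtered) nodes get a
    // counter; a pair carried solely by excluded nodes is never created.
    registry := map[pair]*int32{
        {"zone", "zone1"}: new(int32),
        {"zone", "zone2"}: new(int32),
    }

    // Counting pass: an unregistered pair yields a nil counter and is skipped,
    // so nothing accumulates for a domain no eligible node belongs to.
    observe := func(p pair, matchingPods int32) {
        if counter := registry[p]; counter != nil {
            atomic.AddInt32(counter, matchingPods)
        }
    }

    observe(pair{"zone", "zone1"}, 1)
    observe(pair{"zone", "zone3"}, 2) // no counter registered: ignored

    for p, c := range registry {
        fmt.Printf("%s=%s matched %d pod(s)\n", p.key, p.value, atomic.LoadInt32(c))
    }
}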
@@ -260,7 +266,7 @@ func (pl *PodTopologySpread) calPreFilterState(pod *v1.Pod) (*preFilterState, er
             atomic.AddInt32(tpCount, int32(count))
         }
     }
-    pl.parallelizer.Until(context.Background(), len(allNodes), processNode)
+    pl.parallelizer.Until(context.Background(), len(filteredNodes), processNode)
 
     // calculate min match for each topology pair
     for i := 0; i < len(constraints); i++ {
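`processNode` runs under the scheduler's parallelizer, so the slice whose length bounds the fan-out must be the same slice the worker indexes; the patch changes both sides together (`filteredNodes[i]` and `len(filteredNodes)`). Here is a hedged sketch of that fan-out-and-accumulate pattern using client-go's `workqueue.ParallelizeUntil` as a comparable primitive (an assumption; it is not necessarily what `pl.parallelizer` wraps), with made-up per-node counts.

// Sketch only: parallel counting over a filtered slice with atomic accumulation.
package main

import (
    "context"
    "fmt"
    "sync/atomic"

    "k8s.io/client-go/util/workqueue"
)

func main() {
    // Hypothetical per-node counts of pods matching a spread selector; only
    // nodes the incoming pod could actually land on are present.
    filteredMatchCounts := []int32{1, 0, 2}

    var total int32
    processNode := func(i int) {
        // The worker indexes the same slice whose length bounds the fan-out,
        // mirroring the patch's pairing of filteredNodes[i] with len(filteredNodes).
        atomic.AddInt32(&total, filteredMatchCounts[i])
    }

    // 16 workers is an arbitrary choice for the sketch; the scheduler sizes
    // its parallelism from configuration.
    workqueue.ParallelizeUntil(context.Background(), 16, len(filteredMatchCounts), processNode)

    fmt.Println("pods counted toward skew:", atomic.LoadInt32(&total)) // 3
}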
@@ -1400,6 +1400,31 @@ func TestSingleConstraint(t *testing.T) {
                 "node-b": framework.Unschedulable,
             },
         },
+        {
+            // In this unit test, NodeAffinity plugin is not involved, so node-b still fits
+            name: "incoming pod has nodeAffinity, pods spread as 0/~2~/0/1, hence node-a fits",
+            pod: st.MakePod().Name("p").Label("foo", "").
+                NodeAffinityNotIn("node", []string{"node-b"}).
+                SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj()).
+                Obj(),
+            nodes: []*v1.Node{
+                st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(),
+                st.MakeNode().Name("node-b").Label("zone", "zone1").Label("node", "node-b").Obj(),
+                st.MakeNode().Name("node-x").Label("zone", "zone2").Label("node", "node-x").Obj(),
+                st.MakeNode().Name("node-y").Label("zone", "zone2").Label("node", "node-y").Obj(),
+            },
+            existingPods: []*v1.Pod{
+                st.MakePod().Name("p-b1").Node("node-b").Label("foo", "").Obj(),
+                st.MakePod().Name("p-b2").Node("node-b").Label("foo", "").Obj(),
+                st.MakePod().Name("p-y1").Node("node-y").Label("foo", "").Obj(),
+            },
+            wantStatusCode: map[string]framework.Code{
+                "node-a": framework.Success,
+                "node-b": framework.Success, // in real case, it's Unschedulable
+                "node-x": framework.Unschedulable,
+                "node-y": framework.Unschedulable,
+            },
+        },
     }
     for _, tt := range tests {
         t.Run(tt.name, func(t *testing.T) {
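To see why the new case expects node-a to succeed while node-x and node-y do not: with node-b excluded by the pod's nodeAffinity, zone1 has 0 matching pods and zone2 has 1 (p-y1). Assuming the plugin's usual filter rule that placing the pod in a domain must keep `matched + 1 - minMatched` within `maxSkew` (the `+1` is the incoming pod, which matches its own `foo` selector), the short worked sketch below reproduces the expected statuses; `fits` is a simplified stand-in, not the plugin's function.

// Sketch only: worked arithmetic for the test case's expected statuses.
package main

import "fmt"

// fits applies a simplified version of PodTopologySpread's filter rule:
// placing the incoming pod in `zone` must keep skew within maxSkew, where
// skew = (matching pods already in zone + the incoming pod) - smallest zone count.
func fits(counts map[string]int, zone string, maxSkew int) bool {
    min := counts[zone]
    for _, c := range counts {
        if c < min {
            min = c
        }
    }
    return counts[zone]+1-min <= maxSkew
}

func main() {
    const maxSkew = 1

    // After the patch, node-b fails the pod's nodeAffinity, so its two "foo"
    // pods are not counted: zone1 = 0 matching pods, zone2 = 1.
    counts := map[string]int{"zone1": 0, "zone2": 1}

    fmt.Println("node-a (zone1):", fits(counts, "zone1", maxSkew)) // true  -> Success
    fmt.Println("node-x (zone2):", fits(counts, "zone2", maxSkew)) // false -> Unschedulable
    fmt.Println("node-y (zone2):", fits(counts, "zone2", maxSkew)) // false -> Unschedulable

    // If node-b's pods were still counted (counting over all nodes), zone1
    // would be 2 and the minimum 1, flipping the verdicts: node-a would be
    // rejected while node-x and node-y would fit.
    old := map[string]int{"zone1": 2, "zone2": 1}
    fmt.Println("node-a (old counting):", fits(old, "zone1", maxSkew)) // false
}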