Fix PodTopologySpread matching pods counts for constraints with the same topologyKey

This commit is contained in:
Maciej Skoczeń 2024-12-04 13:09:56 +00:00
parent a499facee6
commit c3a54926a4
5 changed files with 671 additions and 653 deletions

View File

@ -27,11 +27,6 @@ import (
"k8s.io/utils/ptr" "k8s.io/utils/ptr"
) )
// topologyPair identified a single topology domain as a (key, value) label
// pair, e.g. ("zone", "zone1"), and served as the map key in
// TpPairToMatchNum / TopologyPairToPodCounts. This commit deletes it (see the
// removal hunk above) in favor of per-constraint slices keyed by topology
// value only, so constraints sharing the same topologyKey no longer collide.
type topologyPair struct {
	key   string // topology key taken from the spread constraint, e.g. "zone"
	value string // the node's label value for that key
}
// topologySpreadConstraint is an internal version for v1.TopologySpreadConstraint // topologySpreadConstraint is an internal version for v1.TopologySpreadConstraint
// and where the selector is parsed. // and where the selector is parsed.
// Fields are exported for comparison during testing. // Fields are exported for comparison during testing.

View File

@ -19,6 +19,7 @@ package podtopologyspread
import ( import (
"context" "context"
"fmt" "fmt"
"maps"
"math" "math"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
@ -31,7 +32,7 @@ import (
const preFilterStateKey = "PreFilter" + Name const preFilterStateKey = "PreFilter" + Name
// preFilterState computed at PreFilter and used at Filter. // preFilterState computed at PreFilter and used at Filter.
// It combines TpKeyToCriticalPaths and TpPairToMatchNum to represent: // It combines CriticalPaths and TpValueToMatchNum to represent:
// (1) critical paths where the least pods are matched on each spread constraint. // (1) critical paths where the least pods are matched on each spread constraint.
// (2) number of pods matched on each spread constraint. // (2) number of pods matched on each spread constraint.
// A nil preFilterState denotes it's not set at all (in PreFilter phase); // A nil preFilterState denotes it's not set at all (in PreFilter phase);
@ -39,29 +40,23 @@ const preFilterStateKey = "PreFilter" + Name
// Fields are exported for comparison during testing. // Fields are exported for comparison during testing.
type preFilterState struct { type preFilterState struct {
Constraints []topologySpreadConstraint Constraints []topologySpreadConstraint
// We record 2 critical paths instead of all critical paths here. // CriticalPaths is a slice indexed by constraint index.
// criticalPaths[0].MatchNum always holds the minimum matching number. // Per each entry, we record 2 critical paths instead of all critical paths.
// criticalPaths[1].MatchNum is always greater or equal to criticalPaths[0].MatchNum, but // CriticalPaths[i][0].MatchNum always holds the minimum matching number.
// CriticalPaths[i][1].MatchNum is always greater or equal to CriticalPaths[i][0].MatchNum, but
// it's not guaranteed to be the 2nd minimum match number. // it's not guaranteed to be the 2nd minimum match number.
TpKeyToCriticalPaths map[string]*criticalPaths CriticalPaths []*criticalPaths
// TpKeyToDomainsNum is keyed with topologyKey, and valued with the number of domains. // TpValueToMatchNum is a slice indexed by constraint index.
TpKeyToDomainsNum map[string]int // Each entry is keyed with topology value, and valued with the number of matching pods.
// TpPairToMatchNum is keyed with topologyPair, and valued with the number of matching pods. TpValueToMatchNum []map[string]int
TpPairToMatchNum map[topologyPair]int
} }
// minMatchNum returns the global minimum for the calculation of skew while taking MinDomains into account. // minMatchNum returns the global minimum for the calculation of skew while taking MinDomains into account.
func (s *preFilterState) minMatchNum(tpKey string, minDomains int32) (int, error) { func (s *preFilterState) minMatchNum(constraintID int, minDomains int32) (int, error) {
paths, ok := s.TpKeyToCriticalPaths[tpKey] paths := s.CriticalPaths[constraintID]
if !ok {
return 0, fmt.Errorf("failed to retrieve path by topology key")
}
minMatchNum := paths[0].MatchNum minMatchNum := paths[0].MatchNum
domainsNum, ok := s.TpKeyToDomainsNum[tpKey] domainsNum := len(s.TpValueToMatchNum[constraintID])
if !ok {
return 0, fmt.Errorf("failed to retrieve the number of domains by topology key")
}
if domainsNum < int(minDomains) { if domainsNum < int(minDomains) {
// When the number of eligible domains with matching topology keys is less than `minDomains`, // When the number of eligible domains with matching topology keys is less than `minDomains`,
@ -80,16 +75,14 @@ func (s *preFilterState) Clone() framework.StateData {
copy := preFilterState{ copy := preFilterState{
// Constraints are shared because they don't change. // Constraints are shared because they don't change.
Constraints: s.Constraints, Constraints: s.Constraints,
TpKeyToCriticalPaths: make(map[string]*criticalPaths, len(s.TpKeyToCriticalPaths)), CriticalPaths: make([]*criticalPaths, len(s.CriticalPaths)),
// The number of domains does not change as a result of AddPod/RemovePod methods on PreFilter Extensions TpValueToMatchNum: make([]map[string]int, len(s.TpValueToMatchNum)),
TpKeyToDomainsNum: s.TpKeyToDomainsNum,
TpPairToMatchNum: make(map[topologyPair]int, len(s.TpPairToMatchNum)),
} }
for tpKey, paths := range s.TpKeyToCriticalPaths { for i, paths := range s.CriticalPaths {
copy.TpKeyToCriticalPaths[tpKey] = &criticalPaths{paths[0], paths[1]} copy.CriticalPaths[i] = &criticalPaths{paths[0], paths[1]}
} }
for tpPair, matchNum := range s.TpPairToMatchNum { for i, tpMap := range s.TpValueToMatchNum {
copy.TpPairToMatchNum[tpPair] = matchNum copy.TpValueToMatchNum[i] = maps.Clone(tpMap)
} }
return &copy return &copy
} }
@ -200,7 +193,7 @@ func (pl *PodTopologySpread) updateWithPod(s *preFilterState, updatedPod, preemp
} }
podLabelSet := labels.Set(updatedPod.Labels) podLabelSet := labels.Set(updatedPod.Labels)
for _, constraint := range s.Constraints { for i, constraint := range s.Constraints {
if !constraint.Selector.Matches(podLabelSet) { if !constraint.Selector.Matches(podLabelSet) {
continue continue
} }
@ -210,10 +203,9 @@ func (pl *PodTopologySpread) updateWithPod(s *preFilterState, updatedPod, preemp
continue continue
} }
k, v := constraint.TopologyKey, node.Labels[constraint.TopologyKey] v := node.Labels[constraint.TopologyKey]
pair := topologyPair{key: k, value: v} s.TpValueToMatchNum[i][v] += delta
s.TpPairToMatchNum[pair] += delta s.CriticalPaths[i].update(v, s.TpValueToMatchNum[i][v])
s.TpKeyToCriticalPaths[k].update(v, s.TpPairToMatchNum[pair])
} }
} }
@ -232,6 +224,12 @@ func getPreFilterState(cycleState *framework.CycleState) (*preFilterState, error
return s, nil return s, nil
} }
// topologyCount records, for one node and one spread constraint, the number
// of pods on that node matching the constraint's selector, attributed to the
// node's topology value. Entries are collected per node in parallel by
// processNode and then merged into s.TpValueToMatchNum[constraintID].
type topologyCount struct {
	topologyValue string // the node's label value for the constraint's topology key
	constraintID  int    // index into preFilterState.Constraints; stored explicitly because skipped constraints mean slice position cannot serve as the ID
	count         int    // matching pods on this node for this constraint
}
// calPreFilterState computes preFilterState describing how pods are spread on topologies. // calPreFilterState computes preFilterState describing how pods are spread on topologies.
func (pl *PodTopologySpread) calPreFilterState(ctx context.Context, pod *v1.Pod) (*preFilterState, error) { func (pl *PodTopologySpread) calPreFilterState(ctx context.Context, pod *v1.Pod) (*preFilterState, error) {
constraints, err := pl.getConstraints(pod) constraints, err := pl.getConstraints(pod)
@ -249,14 +247,17 @@ func (pl *PodTopologySpread) calPreFilterState(ctx context.Context, pod *v1.Pod)
s := preFilterState{ s := preFilterState{
Constraints: constraints, Constraints: constraints,
TpKeyToCriticalPaths: make(map[string]*criticalPaths, len(constraints)), CriticalPaths: make([]*criticalPaths, len(constraints)),
TpPairToMatchNum: make(map[topologyPair]int, sizeHeuristic(len(allNodes), constraints)), TpValueToMatchNum: make([]map[string]int, len(constraints)),
}
for i := 0; i < len(constraints); i++ {
s.TpValueToMatchNum[i] = make(map[string]int, sizeHeuristic(len(allNodes), constraints[i]))
} }
tpCountsByNode := make([]map[topologyPair]int, len(allNodes)) tpCountsByNode := make([][]topologyCount, len(allNodes))
requiredNodeAffinity := nodeaffinity.GetRequiredNodeAffinity(pod) requiredNodeAffinity := nodeaffinity.GetRequiredNodeAffinity(pod)
processNode := func(i int) { processNode := func(n int) {
nodeInfo := allNodes[i] nodeInfo := allNodes[n]
node := nodeInfo.Node() node := nodeInfo.Node()
if !pl.enableNodeInclusionPolicyInPodTopologySpread { if !pl.enableNodeInclusionPolicyInPodTopologySpread {
@ -272,38 +273,39 @@ func (pl *PodTopologySpread) calPreFilterState(ctx context.Context, pod *v1.Pod)
return return
} }
tpCounts := make(map[topologyPair]int, len(constraints)) tpCounts := make([]topologyCount, 0, len(constraints))
for _, c := range constraints { for i, c := range constraints {
if pl.enableNodeInclusionPolicyInPodTopologySpread && if pl.enableNodeInclusionPolicyInPodTopologySpread &&
!c.matchNodeInclusionPolicies(pod, node, requiredNodeAffinity) { !c.matchNodeInclusionPolicies(pod, node, requiredNodeAffinity) {
continue continue
} }
pair := topologyPair{key: c.TopologyKey, value: node.Labels[c.TopologyKey]} value := node.Labels[c.TopologyKey]
count := countPodsMatchSelector(nodeInfo.Pods, c.Selector, pod.Namespace) count := countPodsMatchSelector(nodeInfo.Pods, c.Selector, pod.Namespace)
tpCounts[pair] = count tpCounts = append(tpCounts, topologyCount{
topologyValue: value,
constraintID: i,
count: count,
})
} }
tpCountsByNode[i] = tpCounts tpCountsByNode[n] = tpCounts
} }
pl.parallelizer.Until(ctx, len(allNodes), processNode, pl.Name()) pl.parallelizer.Until(ctx, len(allNodes), processNode, pl.Name())
for _, tpCounts := range tpCountsByNode { for _, tpCounts := range tpCountsByNode {
for tp, count := range tpCounts { // tpCounts might not hold all the constraints, so index can't be used here as constraintID.
s.TpPairToMatchNum[tp] += count for _, tpCount := range tpCounts {
s.TpValueToMatchNum[tpCount.constraintID][tpCount.topologyValue] += tpCount.count
} }
} }
s.TpKeyToDomainsNum = make(map[string]int, len(constraints))
for tp := range s.TpPairToMatchNum {
s.TpKeyToDomainsNum[tp.key]++
}
// calculate min match for each topology pair // calculate min match for each constraint and topology value
for i := 0; i < len(constraints); i++ { for i := 0; i < len(constraints); i++ {
key := constraints[i].TopologyKey s.CriticalPaths[i] = newCriticalPaths()
s.TpKeyToCriticalPaths[key] = newCriticalPaths()
for value, num := range s.TpValueToMatchNum[i] {
s.CriticalPaths[i].update(value, num)
} }
for pair, num := range s.TpPairToMatchNum {
s.TpKeyToCriticalPaths[pair.key].update(pair.value, num)
} }
return &s, nil return &s, nil
@ -325,7 +327,7 @@ func (pl *PodTopologySpread) Filter(ctx context.Context, cycleState *framework.C
logger := klog.FromContext(ctx) logger := klog.FromContext(ctx)
podLabelSet := labels.Set(pod.Labels) podLabelSet := labels.Set(pod.Labels)
for _, c := range s.Constraints { for i, c := range s.Constraints {
tpKey := c.TopologyKey tpKey := c.TopologyKey
tpVal, ok := node.Labels[c.TopologyKey] tpVal, ok := node.Labels[c.TopologyKey]
if !ok { if !ok {
@ -335,9 +337,9 @@ func (pl *PodTopologySpread) Filter(ctx context.Context, cycleState *framework.C
// judging criteria: // judging criteria:
// 'existing matching num' + 'if self-match (1 or 0)' - 'global minimum' <= 'maxSkew' // 'existing matching num' + 'if self-match (1 or 0)' - 'global minimum' <= 'maxSkew'
minMatchNum, err := s.minMatchNum(tpKey, c.MinDomains) minMatchNum, err := s.minMatchNum(i, c.MinDomains)
if err != nil { if err != nil {
logger.Error(err, "Internal error occurred while retrieving value precalculated in PreFilter", "topologyKey", tpKey, "paths", s.TpKeyToCriticalPaths) logger.Error(err, "Internal error occurred while retrieving value precalculated in PreFilter", "topologyKey", tpKey, "paths", s.CriticalPaths[i])
continue continue
} }
@ -346,11 +348,7 @@ func (pl *PodTopologySpread) Filter(ctx context.Context, cycleState *framework.C
selfMatchNum = 1 selfMatchNum = 1
} }
pair := topologyPair{key: tpKey, value: tpVal} matchNum := s.TpValueToMatchNum[i][tpVal]
matchNum := 0
if tpCount, ok := s.TpPairToMatchNum[pair]; ok {
matchNum = tpCount
}
skew := matchNum + selfMatchNum - minMatchNum skew := matchNum + selfMatchNum - minMatchNum
if skew > int(c.MaxSkew) { if skew > int(c.MaxSkew) {
logger.V(5).Info("Node failed spreadConstraint: matchNum + selfMatchNum - minMatchNum > maxSkew", "node", klog.KObj(node), "topologyKey", tpKey, "matchNum", matchNum, "selfMatchNum", selfMatchNum, "minMatchNum", minMatchNum, "maxSkew", c.MaxSkew) logger.V(5).Info("Node failed spreadConstraint: matchNum + selfMatchNum - minMatchNum > maxSkew", "node", klog.KObj(node), "topologyKey", tpKey, "matchNum", matchNum, "selfMatchNum", selfMatchNum, "minMatchNum", minMatchNum, "maxSkew", c.MaxSkew)
@ -361,11 +359,9 @@ func (pl *PodTopologySpread) Filter(ctx context.Context, cycleState *framework.C
return nil return nil
} }
func sizeHeuristic(nodes int, constraints []topologySpreadConstraint) int { func sizeHeuristic(nodes int, constraint topologySpreadConstraint) int {
for _, c := range constraints { if constraint.TopologyKey == v1.LabelHostname {
if c.TopologyKey == v1.LabelHostname {
return nodes return nodes
} }
}
return 0 return 0
} }

View File

@ -37,8 +37,9 @@ type preScoreState struct {
Constraints []topologySpreadConstraint Constraints []topologySpreadConstraint
// IgnoredNodes is a set of node names which miss some Constraints[*].topologyKey. // IgnoredNodes is a set of node names which miss some Constraints[*].topologyKey.
IgnoredNodes sets.Set[string] IgnoredNodes sets.Set[string]
// TopologyPairToPodCounts is keyed with topologyPair, and valued with the number of matching pods. // TopologyValueToPodCounts is a slice indexed by constraint index.
TopologyPairToPodCounts map[topologyPair]*int64 // Each entry is keyed with topology value, and valued with the number of matching pods.
TopologyValueToPodCounts []map[string]*int64
// TopologyNormalizingWeight is the weight we give to the counts per topology. // TopologyNormalizingWeight is the weight we give to the counts per topology.
// This allows the pod counts of smaller topologies to not be watered down by // This allows the pod counts of smaller topologies to not be watered down by
// bigger ones. // bigger ones.
@ -76,6 +77,10 @@ func (pl *PodTopologySpread) initPreScoreState(s *preScoreState, pod *v1.Pod, fi
if len(s.Constraints) == 0 { if len(s.Constraints) == 0 {
return nil return nil
} }
s.TopologyValueToPodCounts = make([]map[string]*int64, len(s.Constraints))
for i := 0; i < len(s.Constraints); i++ {
s.TopologyValueToPodCounts[i] = make(map[string]*int64)
}
topoSize := make([]int, len(s.Constraints)) topoSize := make([]int, len(s.Constraints))
for _, node := range filteredNodes { for _, node := range filteredNodes {
if requireAllTopologies && !nodeLabelsMatchSpreadConstraints(node.Node().Labels, s.Constraints) { if requireAllTopologies && !nodeLabelsMatchSpreadConstraints(node.Node().Labels, s.Constraints) {
@ -89,9 +94,9 @@ func (pl *PodTopologySpread) initPreScoreState(s *preScoreState, pod *v1.Pod, fi
if constraint.TopologyKey == v1.LabelHostname { if constraint.TopologyKey == v1.LabelHostname {
continue continue
} }
pair := topologyPair{key: constraint.TopologyKey, value: node.Node().Labels[constraint.TopologyKey]} value := node.Node().Labels[constraint.TopologyKey]
if s.TopologyPairToPodCounts[pair] == nil { if s.TopologyValueToPodCounts[i][value] == nil {
s.TopologyPairToPodCounts[pair] = new(int64) s.TopologyValueToPodCounts[i][value] = new(int64)
topoSize[i]++ topoSize[i]++
} }
} }
@ -127,7 +132,6 @@ func (pl *PodTopologySpread) PreScore(
state := &preScoreState{ state := &preScoreState{
IgnoredNodes: sets.New[string](), IgnoredNodes: sets.New[string](),
TopologyPairToPodCounts: make(map[topologyPair]*int64),
} }
// Only require that nodes have all the topology labels if using // Only require that nodes have all the topology labels if using
// non-system-default spreading rules. This allows nodes that don't have a // non-system-default spreading rules. This allows nodes that don't have a
@ -145,8 +149,8 @@ func (pl *PodTopologySpread) PreScore(
// Ignore parsing errors for backwards compatibility. // Ignore parsing errors for backwards compatibility.
requiredNodeAffinity := nodeaffinity.GetRequiredNodeAffinity(pod) requiredNodeAffinity := nodeaffinity.GetRequiredNodeAffinity(pod)
processAllNode := func(i int) { processAllNode := func(n int) {
nodeInfo := allNodes[i] nodeInfo := allNodes[n]
node := nodeInfo.Node() node := nodeInfo.Node()
if !pl.enableNodeInclusionPolicyInPodTopologySpread { if !pl.enableNodeInclusionPolicyInPodTopologySpread {
@ -161,17 +165,17 @@ func (pl *PodTopologySpread) PreScore(
return return
} }
for _, c := range state.Constraints { for i, c := range state.Constraints {
if pl.enableNodeInclusionPolicyInPodTopologySpread && if pl.enableNodeInclusionPolicyInPodTopologySpread &&
!c.matchNodeInclusionPolicies(pod, node, requiredNodeAffinity) { !c.matchNodeInclusionPolicies(pod, node, requiredNodeAffinity) {
continue continue
} }
pair := topologyPair{key: c.TopologyKey, value: node.Labels[c.TopologyKey]} value := node.Labels[c.TopologyKey]
// If current topology pair is not associated with any candidate node, // If current topology pair is not associated with any candidate node,
// continue to avoid unnecessary calculation. // continue to avoid unnecessary calculation.
// Per-node counts are also skipped, as they are done during Score. // Per-node counts are also skipped, as they are done during Score.
tpCount := state.TopologyPairToPodCounts[pair] tpCount := state.TopologyValueToPodCounts[i][value]
if tpCount == nil { if tpCount == nil {
continue continue
} }
@ -214,8 +218,7 @@ func (pl *PodTopologySpread) Score(ctx context.Context, cycleState *framework.Cy
if c.TopologyKey == v1.LabelHostname { if c.TopologyKey == v1.LabelHostname {
cnt = int64(countPodsMatchSelector(nodeInfo.Pods, c.Selector, pod.Namespace)) cnt = int64(countPodsMatchSelector(nodeInfo.Pods, c.Selector, pod.Namespace))
} else { } else {
pair := topologyPair{key: c.TopologyKey, value: tpVal} cnt = *s.TopologyValueToPodCounts[i][tpVal]
cnt = *s.TopologyPairToPodCounts[pair]
} }
score += scoreForCount(cnt, c.MaxSkew, s.TopologyNormalizingWeight[i]) score += scoreForCount(cnt, c.MaxSkew, s.TopologyNormalizingWeight[i])
} }

View File

@ -156,9 +156,12 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
}, },
}, },
IgnoredNodes: sets.New[string](), IgnoredNodes: sets.New[string](),
TopologyPairToPodCounts: map[topologyPair]*int64{ TopologyValueToPodCounts: []map[string]*int64{
{key: "zone", value: "zone1"}: ptr.To[int64](0), {
{key: "zone", value: "zone2"}: ptr.To[int64](0), "zone1": ptr.To[int64](0),
"zone2": ptr.To[int64](0),
},
{},
}, },
TopologyNormalizingWeight: []float64{topologyNormalizingWeight(2), topologyNormalizingWeight(3)}, TopologyNormalizingWeight: []float64{topologyNormalizingWeight(2), topologyNormalizingWeight(3)},
}, },
@ -188,10 +191,10 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
}, },
}, },
IgnoredNodes: sets.New[string](), IgnoredNodes: sets.New[string](),
TopologyPairToPodCounts: map[topologyPair]*int64{ TopologyValueToPodCounts: []map[string]*int64{{
{key: "zone", value: "zone1"}: ptr.To[int64](0), "zone1": ptr.To[int64](0),
{key: "zone", value: "zone2"}: ptr.To[int64](0), "zone2": ptr.To[int64](0),
}, }},
TopologyNormalizingWeight: []float64{topologyNormalizingWeight(2)}, TopologyNormalizingWeight: []float64{topologyNormalizingWeight(2)},
}, },
}, },
@ -229,8 +232,11 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
}, },
}, },
IgnoredNodes: sets.New("node-x"), IgnoredNodes: sets.New("node-x"),
TopologyPairToPodCounts: map[topologyPair]*int64{ TopologyValueToPodCounts: []map[string]*int64{
{key: "zone", value: "zone1"}: ptr.To[int64](0), {
"zone1": ptr.To[int64](0),
},
{},
}, },
TopologyNormalizingWeight: []float64{topologyNormalizingWeight(1), topologyNormalizingWeight(2)}, TopologyNormalizingWeight: []float64{topologyNormalizingWeight(1), topologyNormalizingWeight(2)},
}, },
@ -271,9 +277,12 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
}, },
}, },
IgnoredNodes: sets.New[string](), IgnoredNodes: sets.New[string](),
TopologyPairToPodCounts: map[topologyPair]*int64{ TopologyValueToPodCounts: []map[string]*int64{
{key: v1.LabelTopologyZone, value: "mars"}: ptr.To[int64](0), {},
{key: v1.LabelTopologyZone, value: ""}: ptr.To[int64](0), {
"mars": ptr.To[int64](0),
"": ptr.To[int64](0),
},
}, },
TopologyNormalizingWeight: []float64{topologyNormalizingWeight(4), topologyNormalizingWeight(2)}, TopologyNormalizingWeight: []float64{topologyNormalizingWeight(4), topologyNormalizingWeight(2)},
}, },
@ -322,8 +331,11 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
}, },
}, },
IgnoredNodes: sets.New[string](), IgnoredNodes: sets.New[string](),
TopologyPairToPodCounts: map[topologyPair]*int64{ TopologyValueToPodCounts: []map[string]*int64{
{key: "planet", value: "mars"}: ptr.To[int64](0), {},
{
"mars": ptr.To[int64](0),
},
}, },
TopologyNormalizingWeight: []float64{topologyNormalizingWeight(1), topologyNormalizingWeight(1)}, TopologyNormalizingWeight: []float64{topologyNormalizingWeight(1), topologyNormalizingWeight(1)},
}, },
@ -363,9 +375,9 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
}, },
}, },
IgnoredNodes: sets.New[string](), IgnoredNodes: sets.New[string](),
TopologyPairToPodCounts: map[topologyPair]*int64{ TopologyValueToPodCounts: []map[string]*int64{{
{"planet", "mars"}: ptr.To[int64](0), "mars": ptr.To[int64](0),
}, }},
TopologyNormalizingWeight: []float64{topologyNormalizingWeight(1)}, TopologyNormalizingWeight: []float64{topologyNormalizingWeight(1)},
}, },
}, },
@ -395,10 +407,10 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
}, },
}, },
IgnoredNodes: sets.New[string](), IgnoredNodes: sets.New[string](),
TopologyPairToPodCounts: map[topologyPair]*int64{ TopologyValueToPodCounts: []map[string]*int64{{
{key: "zone", value: "zone1"}: ptr.To[int64](0), "zone1": ptr.To[int64](0),
{key: "zone", value: "zone2"}: ptr.To[int64](0), "zone2": ptr.To[int64](0),
}, }},
TopologyNormalizingWeight: []float64{topologyNormalizingWeight(2)}, TopologyNormalizingWeight: []float64{topologyNormalizingWeight(2)},
}, },
enableNodeInclusionPolicy: true, enableNodeInclusionPolicy: true,
@ -429,10 +441,10 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
}, },
}, },
IgnoredNodes: sets.New[string](), IgnoredNodes: sets.New[string](),
TopologyPairToPodCounts: map[topologyPair]*int64{ TopologyValueToPodCounts: []map[string]*int64{{
{key: "zone", value: "zone1"}: ptr.To[int64](0), "zone1": ptr.To[int64](0),
{key: "zone", value: "zone2"}: ptr.To[int64](0), "zone2": ptr.To[int64](0),
}, }},
TopologyNormalizingWeight: []float64{topologyNormalizingWeight(2)}, TopologyNormalizingWeight: []float64{topologyNormalizingWeight(2)},
}, },
enableNodeInclusionPolicy: true, enableNodeInclusionPolicy: true,
@ -463,10 +475,10 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
}, },
}, },
IgnoredNodes: sets.New[string](), IgnoredNodes: sets.New[string](),
TopologyPairToPodCounts: map[topologyPair]*int64{ TopologyValueToPodCounts: []map[string]*int64{{
{key: "zone", value: "zone1"}: ptr.To[int64](0), "zone1": ptr.To[int64](0),
{key: "zone", value: "zone2"}: ptr.To[int64](0), "zone2": ptr.To[int64](0),
}, }},
TopologyNormalizingWeight: []float64{topologyNormalizingWeight(2)}, TopologyNormalizingWeight: []float64{topologyNormalizingWeight(2)},
}, },
enableNodeInclusionPolicy: true, enableNodeInclusionPolicy: true,
@ -497,10 +509,10 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
}, },
}, },
IgnoredNodes: sets.New[string](), IgnoredNodes: sets.New[string](),
TopologyPairToPodCounts: map[topologyPair]*int64{ TopologyValueToPodCounts: []map[string]*int64{{
{key: "zone", value: "zone1"}: ptr.To[int64](0), "zone1": ptr.To[int64](0),
{key: "zone", value: "zone2"}: ptr.To[int64](0), "zone2": ptr.To[int64](0),
}, }},
TopologyNormalizingWeight: []float64{topologyNormalizingWeight(2)}, TopologyNormalizingWeight: []float64{topologyNormalizingWeight(2)},
}, },
enableNodeInclusionPolicy: true, enableNodeInclusionPolicy: true,
@ -530,10 +542,10 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
}, },
}, },
IgnoredNodes: sets.New[string](), IgnoredNodes: sets.New[string](),
TopologyPairToPodCounts: map[topologyPair]*int64{ TopologyValueToPodCounts: []map[string]*int64{{
{key: "zone", value: "zone1"}: ptr.To[int64](0), "zone1": ptr.To[int64](0),
{key: "zone", value: "zone2"}: ptr.To[int64](0), "zone2": ptr.To[int64](0),
}, }},
TopologyNormalizingWeight: []float64{topologyNormalizingWeight(2)}, TopologyNormalizingWeight: []float64{topologyNormalizingWeight(2)},
}, },
enableNodeInclusionPolicy: true, enableNodeInclusionPolicy: true,
@ -563,10 +575,10 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
}, },
}, },
IgnoredNodes: sets.New[string](), IgnoredNodes: sets.New[string](),
TopologyPairToPodCounts: map[topologyPair]*int64{ TopologyValueToPodCounts: []map[string]*int64{{
{key: "zone", value: "zone1"}: ptr.To[int64](0), "zone1": ptr.To[int64](0),
{key: "zone", value: "zone2"}: ptr.To[int64](0), "zone2": ptr.To[int64](0),
}, }},
TopologyNormalizingWeight: []float64{topologyNormalizingWeight(2)}, TopologyNormalizingWeight: []float64{topologyNormalizingWeight(2)},
}, },
enableNodeInclusionPolicy: true, enableNodeInclusionPolicy: true,
@ -953,6 +965,33 @@ func TestPodTopologySpreadScore(t *testing.T) {
{Name: "node-x", Score: 63}, {Name: "node-x", Score: 63},
}, },
}, },
{
name: "two Constraints on zone, 2 out of 4 nodes are candidates",
pod: st.MakePod().Name("p").Label("foo", "").Label("bar", "").
SpreadConstraint(1, "zone", v1.ScheduleAnyway, fooSelector, nil, nil, nil, nil).
SpreadConstraint(1, "zone", v1.ScheduleAnyway, barSelector, nil, nil, nil, nil).
Obj(),
existingPods: []*v1.Pod{
st.MakePod().Name("p-a1").Node("node-a").Label("foo", "").Label("bar", "").Obj(),
st.MakePod().Name("p-a2").Node("node-a").Label("foo", "").Obj(),
st.MakePod().Name("p-b1").Node("node-b").Label("foo", "").Label("bar", "").Obj(),
st.MakePod().Name("p-y1").Node("node-y").Label("foo", "").Obj(),
st.MakePod().Name("p-y2").Node("node-y").Label("foo", "").Label("bar", "").Obj(),
st.MakePod().Name("p-y3").Node("node-y").Label("foo", "").Obj(),
},
nodes: []*v1.Node{
st.MakeNode().Name("node-a").Label("zone", "zone1").Obj(),
st.MakeNode().Name("node-x").Label("zone", "zone2").Obj(),
},
failedNodes: []*v1.Node{
st.MakeNode().Name("node-b").Label("zone", "zone1").Obj(),
st.MakeNode().Name("node-y").Label("zone", "zone2").Obj(),
},
want: []framework.NodeScore{
{Name: "node-a", Score: 85},
{Name: "node-x", Score: 100},
},
},
{ {
// If Constraints hold different labelSelectors, it's a little complex. // If Constraints hold different labelSelectors, it's a little complex.
// +----------------------+------------------------+ // +----------------------+------------------------+