mirror of https://github.com/k3s-io/kubernetes.git
synced 2025-08-03 01:06:27 +00:00

Fixed a scheduler panic on PodAffinity

This commit is contained in:
parent e85473c2df
commit 4c9312fa7e
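Root cause, as reconstructed from the hunks below: podAffinityPriorityMap.counts was a map[string]*int64 whose entries were only allocated lazily, for nodes that host pods with affinity (or when the incoming pod itself had affinity/anti-affinity). processTerm then called atomic.AddInt64(p.counts[node.Name], weight) for every node sharing the topology key, so a node whose entry was never allocated handed a nil *int64 to atomic.AddInt64, which panics. A minimal sketch of that failure mode (illustrative names, not the scheduler's own code):

package main

import "sync/atomic"

func main() {
	// counts mirrors the old, lazily initialized map[string]*int64.
	counts := map[string]*int64{}
	counts["machine2"] = new(int64) // only nodes hosting affinity pods got an entry

	// "machine1" never got an entry, so counts["machine1"] is a nil *int64;
	// atomic.AddInt64 dereferences it and the program panics.
	atomic.AddInt64(counts["machine1"], 1)
}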
@@ -51,17 +51,16 @@ func NewInterPodAffinityPriority(
 }
 
 type podAffinityPriorityMap struct {
-	// nodes contain all nodes that should be considered
+	// nodes contain all nodes that should be considered.
 	nodes []*v1.Node
-	// counts store the mapping from node name to so-far computed score of
-	// the node.
-	counts map[string]*int64
+	// counts store the so-far computed score for each node.
+	counts []int64
 }
 
 func newPodAffinityPriorityMap(nodes []*v1.Node) *podAffinityPriorityMap {
 	return &podAffinityPriorityMap{
 		nodes:  nodes,
-		counts: make(map[string]*int64, len(nodes)),
+		counts: make([]int64, len(nodes)),
 	}
 }
 
@@ -73,9 +72,9 @@ func (p *podAffinityPriorityMap) processTerm(term *v1.PodAffinityTerm, podDefini
 	}
 	match := priorityutil.PodMatchesTermsNamespaceAndSelector(podToCheck, namespaces, selector)
 	if match {
-		for _, node := range p.nodes {
+		for i, node := range p.nodes {
 			if priorityutil.NodesHaveSameTopologyKey(node, fixedNode, term.TopologyKey) {
-				atomic.AddInt64(p.counts[node.Name], weight)
+				atomic.AddInt64(&p.counts[i], weight)
 			}
 		}
 	}
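The fix swaps the pointer map for a plain []int64 kept in lockstep with nodes: counts[i] is the running score of nodes[i], every element is zero-initialized by make, and &p.counts[i] is always a valid address, so the concurrent atomic.AddInt64 in processTerm can no longer see a nil pointer. A small sketch of that parallel-slice pattern under the same assumptions (illustrative names only):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	nodes := []string{"machine1", "machine2", "machine3"}
	// counts[i] scores nodes[i]; make zero-initializes every element,
	// so no per-node allocation or nil check is needed.
	counts := make([]int64, len(nodes))

	var wg sync.WaitGroup
	for i := range nodes {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			atomic.AddInt64(&counts[i], 1) // always a valid address
		}(i)
	}
	wg.Wait()
	fmt.Println(counts) // [1 1 1]
}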
@@ -102,17 +101,11 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node
 	hasAffinityConstraints := affinity != nil && affinity.PodAffinity != nil
 	hasAntiAffinityConstraints := affinity != nil && affinity.PodAntiAffinity != nil
 
-	// priorityMap stores the mapping from node name to so-far computed score of
-	// the node.
+	// pm stores (1) all nodes that should be considered and (2) the so-far computed score for each node.
 	pm := newPodAffinityPriorityMap(nodes)
 	allNodeNames := make([]string, 0, len(nodeNameToInfo))
-	lazyInit := hasAffinityConstraints || hasAntiAffinityConstraints
 	for name := range nodeNameToInfo {
 		allNodeNames = append(allNodeNames, name)
-		// if pod has affinity defined, or target node has affinityPods
-		if lazyInit || len(nodeNameToInfo[name].PodsWithAffinity()) != 0 {
-			pm.counts[name] = new(int64)
-		}
 	}
 
 	// convert the topology key based weights to the node name based weights
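With a dense, zero-valued slice there is nothing to allocate per node, so the lazyInit flag, the PodsWithAffinity() guard, and the pm.counts[name] = new(int64) branch are simply dropped. That branch is exactly where the nil entries came from: when the incoming pod had no affinity and a node hosted no pods with affinity, its entry was never created, yet processTerm could still try to update it for any node in the same topology.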
@@ -216,25 +209,22 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node
 		return nil, err
 	}
 
-	for _, node := range nodes {
-		if pm.counts[node.Name] == nil {
-			continue
+	for i := range nodes {
+		if pm.counts[i] > maxCount {
+			maxCount = pm.counts[i]
 		}
-		if *pm.counts[node.Name] > maxCount {
-			maxCount = *pm.counts[node.Name]
-		}
-		if *pm.counts[node.Name] < minCount {
-			minCount = *pm.counts[node.Name]
+		if pm.counts[i] < minCount {
+			minCount = pm.counts[i]
 		}
 	}
 
 	// calculate final priority score for each node
 	result := make(schedulerapi.HostPriorityList, 0, len(nodes))
 	maxMinDiff := maxCount - minCount
-	for _, node := range nodes {
+	for i, node := range nodes {
 		fScore := float64(0)
-		if maxMinDiff > 0 && pm.counts[node.Name] != nil {
-			fScore = float64(schedulerapi.MaxPriority) * (float64(*pm.counts[node.Name]-minCount) / float64(maxCount-minCount))
+		if maxMinDiff > 0 {
+			fScore = float64(schedulerapi.MaxPriority) * (float64(pm.counts[i]-minCount) / float64(maxCount-minCount))
 		}
 		result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int64(fScore)})
 		if klog.V(10) {
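The scoring pass keeps the same normalization: each node's raw count is min-max scaled into [0, MaxPriority], i.e. fScore = MaxPriority * (count - minCount) / (maxCount - minCount), and every node scores 0 when maxMinDiff is 0. A hedged worked example, assuming MaxPriority is 10 in this scheduler version (the counts here are made up):

package main

import "fmt"

func main() {
	const maxPriority = 10     // stand-in for schedulerapi.MaxPriority (assumed value)
	counts := []int64{0, 4, 8} // made-up raw per-node scores
	minCount, maxCount := int64(0), int64(8)

	for i, c := range counts {
		fScore := float64(0)
		if maxCount-minCount > 0 {
			fScore = float64(maxPriority) * (float64(c-minCount) / float64(maxCount-minCount))
		}
		fmt.Printf("node%d score: %d\n", i+1, int64(fScore)) // 0, 5, 10
	}
}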
@@ -507,6 +507,22 @@ func TestInterPodAffinityPriority(t *testing.T) {
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: schedulerapi.MaxPriority}, {Host: "machine4", Score: 0}},
 			name:         "Affinity and Anti Affinity and symmetry: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity & symmetry",
 		},
+		// Cover https://github.com/kubernetes/kubernetes/issues/82796 which panics upon:
+		// 1. Some nodes in a topology don't have pods with affinity, but other nodes in the same topology have.
+		// 2. The incoming pod doesn't have affinity.
+		{
+			pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
+			pods: []*v1.Pod{
+				{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
+				{Spec: v1.PodSpec{NodeName: "machine2", Affinity: stayWithS1InRegionAwayFromS2InAz}},
+			},
+			nodes: []*v1.Node{
+				{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
+				{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
+			},
+			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
+			name:         "Avoid panic when partial nodes in a topology don't have pods with affinity",
+		},
 	}
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
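The added case reproduces issue 82796 end to end: the incoming pod carries no affinity, machine1 hosts a pod without affinity while machine2 hosts one with affinity, and both nodes share the labelRgChina region. Under the old map-based counts only machine2 got an entry, yet the affinity symmetry handling (scoring existing pods' preferences against the incoming pod) adds weight to every node in that region, including machine1, whose nil entry triggered the panic; with the fix both counters exist and both nodes come out at MaxPriority, as expectedList asserts. To run just this table-driven test locally, something like the following should work (the package path is assumed from the upstream Kubernetes layout, not stated in this diff):

go test ./pkg/scheduler/algorithm/priorities/ -run TestInterPodAffinityPriority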