Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-23 19:56:01 +00:00
PodAffinity code refinements
This commit is contained in:
parent ee9de6457c
commit fad876b6f9
@@ -802,41 +802,84 @@ func (checker *PodAffinityChecker) InterPodAffinityMatches(pod *api.Pod, meta in
if err != nil {
return false, err
}
if checker.NodeMatchPodAffinityAntiAffinity(pod, allPods, node) {
return true, nil
affinity, err := api.GetAffinityFromPodAnnotations(pod.Annotations)
if err != nil {
return false, err
}
return false, ErrPodAffinityNotMatch

// Check if the current node match the inter-pod affinity scheduling constraints.
// Hard inter-pod affinity is not symmetric, check only when affinity.PodAffinity exists.
if affinity.PodAffinity != nil {
if !checker.NodeMatchesHardPodAffinity(pod, allPods, node, affinity.PodAffinity) {
return false, ErrPodAffinityNotMatch
}
}

// Hard inter-pod anti-affinity is symmetric, we should always check it.
if !checker.NodeMatchesHardPodAntiAffinity(pod, allPods, node, affinity.PodAntiAffinity) {
return false, ErrPodAffinityNotMatch
}

return true, nil
}
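For orientation, the refactored predicate reduces to: read the pod's affinity, enforce hard pod affinity only when the pod declares one (it is not symmetric), and always enforce hard anti-affinity (it is symmetric). A minimal sketch of that control flow, using hypothetical stand-in types rather than the commit's own *api.Pod/*api.Node and PodAffinityChecker:

    // Hypothetical, simplified stand-ins for the scheduler types; the real
    // predicate operates on *api.Pod, *api.Node and the PodAffinityChecker above.
    type hardAffinityRules struct{}

    type checkerSketch struct {
        matchesHardAffinity     func() bool // stand-in for NodeMatchesHardPodAffinity
        matchesHardAntiAffinity func() bool // stand-in for NodeMatchesHardPodAntiAffinity
    }

    // interPodAffinityMatches mirrors the new flow: hard affinity is checked only
    // when the pod declares it, hard anti-affinity is checked unconditionally.
    func (c checkerSketch) interPodAffinityMatches(podAffinity *hardAffinityRules) bool {
        if podAffinity != nil && !c.matchesHardAffinity() {
            return false // the real code returns ErrPodAffinityNotMatch here
        }
        return c.matchesHardAntiAffinity() // false likewise maps to ErrPodAffinityNotMatch
    }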
// AnyPodMatchesPodAffinityTerm checks if any of given pods can match the specific podAffinityTerm.
func (checker *PodAffinityChecker) AnyPodMatchesPodAffinityTerm(pod *api.Pod, allPods []*api.Pod, node *api.Node, podAffinityTerm api.PodAffinityTerm) (bool, error) {
// First return value indicates whether a matching pod exists on a node that matches the topology key,
// while the second return value indicates whether a matching pod exists anywhere.
// TODO: Do we really need any pod matching, or all pods matching? I think the latter.
func (checker *PodAffinityChecker) AnyPodMatchesPodAffinityTerm(pod *api.Pod, allPods []*api.Pod, node *api.Node, podAffinityTerm api.PodAffinityTerm) (bool, bool, error) {
matchingPodExists := false
for _, ep := range allPods {
epNode, err := checker.info.GetNodeInfo(ep.Spec.NodeName)
if err != nil {
return false, err
return false, matchingPodExists, err
}
match, err := checker.failureDomains.CheckIfPodMatchPodAffinityTerm(ep, epNode, node, pod, podAffinityTerm)
if err != nil || match {
return match, err
match, err := priorityutil.PodMatchesTermsNamespaceAndSelector(ep, pod, &podAffinityTerm)
if err != nil {
return false, matchingPodExists, err
}

if match {
matchingPodExists = true
if checker.failureDomains.NodesHaveSameTopologyKey(node, epNode, podAffinityTerm.TopologyKey) {
return true, matchingPodExists, nil
}
}
}
return false, nil
return false, matchingPodExists, nil
}
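The reworked AnyPodMatchesPodAffinityTerm now reports two things: whether a matching pod runs on a node that shares the term's topology key with the candidate node, and whether a matching pod exists anywhere at all (the second flag feeds the first-pod special case further down). A rough, self-contained sketch of that two-flag scan, with plain maps and structs standing in for the real pod, node and selector types:

    // Simplified stand-ins; the real code matches namespaces and label selectors
    // via priorityutil.PodMatchesTermsNamespaceAndSelector and compares topology
    // via failureDomains.NodesHaveSameTopologyKey.
    type miniPod struct {
        Labels   map[string]string
        NodeName string
    }

    type miniNode struct {
        Name   string
        Labels map[string]string
    }

    // anyPodMatchesTerm returns (matchOnTopology, matchAnywhere): the first flag
    // is true only when a matching pod sits on a node sharing topologyKey with
    // the candidate node; the second is true whenever any matching pod exists.
    func anyPodMatchesTerm(candidate miniNode, nodes map[string]miniNode,
        allPods []miniPod, selector map[string]string, topologyKey string) (bool, bool) {
        matchingPodExists := false
        for _, ep := range allPods {
            if !labelsMatch(ep.Labels, selector) {
                continue
            }
            matchingPodExists = true
            epNode, ok := nodes[ep.NodeName]
            if ok && sameTopology(candidate, epNode, topologyKey) {
                return true, true
            }
        }
        return false, matchingPodExists
    }

    func labelsMatch(podLabels, selector map[string]string) bool {
        for k, v := range selector {
            if podLabels[k] != v {
                return false
            }
        }
        return true
    }

    func sameTopology(a, b miniNode, key string) bool {
        return a.Labels[key] != "" && a.Labels[key] == b.Labels[key]
    }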
func getPodAffinityTerms(podAffinity *api.PodAffinity) (terms []api.PodAffinityTerm) {
if podAffinity != nil {
if len(podAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 {
terms = podAffinity.RequiredDuringSchedulingIgnoredDuringExecution
}
// TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution.
//if len(podAffinity.RequiredDuringSchedulingRequiredDuringExecution) != 0 {
// terms = append(terms, podAffinity.RequiredDuringSchedulingRequiredDuringExecution...)
//}
}
return terms
}

func getPodAntiAffinityTerms(podAntiAffinity *api.PodAntiAffinity) (terms []api.PodAffinityTerm) {
if podAntiAffinity != nil {
if len(podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 {
terms = podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution
}
// TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution.
//if len(podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution) != 0 {
// terms = append(terms, podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution...)
//}
}
return terms
}
// Checks whether the given node has pods which satisfy all the required pod affinity scheduling rules.
// If node has pods which satisfy all the required pod affinity scheduling rules then return true.
func (checker *PodAffinityChecker) NodeMatchesHardPodAffinity(pod *api.Pod, allPods []*api.Pod, node *api.Node, podAffinity *api.PodAffinity) bool {
var podAffinityTerms []api.PodAffinityTerm
if len(podAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 {
podAffinityTerms = podAffinity.RequiredDuringSchedulingIgnoredDuringExecution
}
// TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution.
//if len(podAffinity.RequiredDuringSchedulingRequiredDuringExecution) != 0 {
// podAffinityTerms = append(podAffinityTerms, podAffinity.RequiredDuringSchedulingRequiredDuringExecution...)
//}

for _, podAffinityTerm := range podAffinityTerms {
podAffinityTermMatches, err := checker.AnyPodMatchesPodAffinityTerm(pod, allPods, node, podAffinityTerm)
for _, podAffinityTerm := range getPodAffinityTerms(podAffinity) {
podAffinityTermMatches, matchingPodExists, err := checker.AnyPodMatchesPodAffinityTerm(pod, allPods, node, podAffinityTerm)
if err != nil {
glog.V(10).Infof("Cannot schedule pod %+v onto node %v, an error ocurred when checking existing pods on the node for PodAffinityTerm %v err: %v",
podName(pod), node.Name, podAffinityTerm, err)
@@ -844,31 +887,16 @@ func (checker *PodAffinityChecker) NodeMatchesHardPodAffinity(pod *api.Pod, allP
}

if !podAffinityTermMatches {
// TODO: Think about whether this can be simplified once we have controllerRef
// Check if it is in special case that the requiredDuringScheduling affinity requirement can be disregarded.
// If the requiredDuringScheduling affinity requirement matches a pod's own labels and namespace, and there are no other such pods
// anywhere, then disregard the requirement.
// This allows rules like "schedule all of the pods of this collection to the same zone" to not block forever
// because the first pod of the collection can't be scheduled.
names := priorityutil.GetNamespacesFromPodAffinityTerm(pod, podAffinityTerm)
labelSelector, err := unversioned.LabelSelectorAsSelector(podAffinityTerm.LabelSelector)
if err != nil || !names.Has(pod.Namespace) || !labelSelector.Matches(labels.Set(pod.Labels)) {
match, err := priorityutil.PodMatchesTermsNamespaceAndSelector(pod, pod, &podAffinityTerm)
if err != nil || !match || matchingPodExists {
glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because none of the existing pods on this node satisfy the PodAffinityTerm %v, err: %+v",
podName(pod), node.Name, podAffinityTerm, err)
return false
}

// the affinity is to put the pod together with other pods from its same service or controller
filteredPods := priorityutil.FilterPodsByNameSpaces(names, allPods)
for _, filteredPod := range filteredPods {
// if found an existing pod from same service or RC,
// the affinity scheduling rules cannot be disregarded.
if labelSelector.Matches(labels.Set(filteredPod.Labels)) {
glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because none of the existing pods on this node satisfy the PodAffinityTerm %v",
podName(pod), node.Name, podAffinityTerm)
return false
}
}
}
}
// all the required pod affinity scheduling rules satisfied
|
||||
// scheduling rules and scheduling the pod onto the node won't
|
||||
// break any existing pods' anti-affinity rules, then return true.
|
||||
func (checker *PodAffinityChecker) NodeMatchesHardPodAntiAffinity(pod *api.Pod, allPods []*api.Pod, node *api.Node, podAntiAffinity *api.PodAntiAffinity) bool {
|
||||
var podAntiAffinityTerms []api.PodAffinityTerm
|
||||
if podAntiAffinity != nil && len(podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 {
|
||||
podAntiAffinityTerms = podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution
|
||||
}
|
||||
// TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution.
|
||||
//if len(podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution) != 0 {
|
||||
// podAntiAffinityTerms = append(podAntiAffinityTerms, podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution...)
|
||||
//}
|
||||
|
||||
// foreach element podAntiAffinityTerm of podAntiAffinityTerms
|
||||
// if the pod matches the term (breaks the anti-affinity),
|
||||
// don't schedule the pod onto this node.
|
||||
for _, podAntiAffinityTerm := range podAntiAffinityTerms {
|
||||
podAntiAffinityTermMatches, err := checker.AnyPodMatchesPodAffinityTerm(pod, allPods, node, podAntiAffinityTerm)
|
||||
if err != nil || podAntiAffinityTermMatches == true {
|
||||
for _, podAntiAffinityTerm := range getPodAntiAffinityTerms(podAntiAffinity) {
|
||||
podAntiAffinityTermMatches, _, err := checker.AnyPodMatchesPodAffinityTerm(pod, allPods, node, podAntiAffinityTerm)
|
||||
if err != nil || podAntiAffinityTermMatches {
|
||||
glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because not all the existing pods on this node satisfy the PodAntiAffinityTerm %v, err: %v",
|
||||
podName(pod), node.Name, podAntiAffinityTerm, err)
|
||||
return false
|
||||
@ -914,32 +933,21 @@ func (checker *PodAffinityChecker) NodeMatchesHardPodAntiAffinity(pod *api.Pod,
|
||||
glog.V(10).Infof("Failed to get Affinity from Pod %+v, err: %+v", podName(pod), err)
|
||||
return false
|
||||
}
|
||||
if epAffinity.PodAntiAffinity != nil {
|
||||
var epAntiAffinityTerms []api.PodAffinityTerm
|
||||
if len(epAffinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 {
|
||||
epAntiAffinityTerms = epAffinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution
|
||||
epNode, err := checker.info.GetNodeInfo(ep.Spec.NodeName)
|
||||
if err != nil {
|
||||
glog.V(10).Infof("Failed to get node from Pod %+v, err: %+v", podName(ep), err)
|
||||
return false
|
||||
}
|
||||
for _, epAntiAffinityTerm := range getPodAntiAffinityTerms(epAffinity.PodAntiAffinity) {
|
||||
match, err := priorityutil.PodMatchesTermsNamespaceAndSelector(pod, ep, &epAntiAffinityTerm)
|
||||
if err != nil {
|
||||
glog.V(10).Infof("Failed to get label selector from anti-affinityterm %+v of existing pod %+v, err: %+v", epAntiAffinityTerm, podName(pod), err)
|
||||
return false
|
||||
}
|
||||
// TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution.
|
||||
//if len(epAffinity.PodAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution) != 0 {
|
||||
// epAntiAffinityTerms = append(epAntiAffinityTerms, epAffinity.PodAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution...)
|
||||
//}
|
||||
|
||||
for _, epAntiAffinityTerm := range epAntiAffinityTerms {
|
||||
labelSelector, err := unversioned.LabelSelectorAsSelector(epAntiAffinityTerm.LabelSelector)
|
||||
if err != nil {
|
||||
glog.V(10).Infof("Failed to get label selector from anti-affinityterm %+v of existing pod %+v, err: %+v", epAntiAffinityTerm, podName(pod), err)
|
||||
return false
|
||||
}
|
||||
|
||||
names := priorityutil.GetNamespacesFromPodAffinityTerm(ep, epAntiAffinityTerm)
|
||||
if (len(names) == 0 || names.Has(pod.Namespace)) && labelSelector.Matches(labels.Set(pod.Labels)) {
|
||||
epNode, err := checker.info.GetNodeInfo(ep.Spec.NodeName)
|
||||
if err != nil || checker.failureDomains.NodesHaveSameTopologyKey(node, epNode, epAntiAffinityTerm.TopologyKey) {
|
||||
glog.V(10).Infof("Cannot schedule Pod %+v, onto node %v because the pod would break the PodAntiAffinityTerm %+v, of existing pod %+v, err: %v",
|
||||
podName(pod), node.Name, epAntiAffinityTerm, podName(ep), err)
|
||||
return false
|
||||
}
|
||||
}
|
||||
if match && checker.failureDomains.NodesHaveSameTopologyKey(node, epNode, epAntiAffinityTerm.TopologyKey) {
|
||||
glog.V(10).Infof("Cannot schedule Pod %+v, onto node %v because the pod would break the PodAntiAffinityTerm %+v, of existing pod %+v, err: %v",
|
||||
podName(pod), node.Name, epAntiAffinityTerm, podName(ep), err)
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -948,28 +956,6 @@ func (checker *PodAffinityChecker) NodeMatchesHardPodAntiAffinity(pod *api.Pod,
|
||||
return true
|
||||
}
|
||||
|
||||
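Hard anti-affinity being symmetric, the second half of NodeMatchesHardPodAntiAffinity also walks the existing pods and rejects the node if placing the new pod would violate any of their anti-affinity terms. A compact stand-alone sketch of that reverse check, using label maps instead of the real selector and node types:

    // miniAntiTerm is a simplified stand-in for api.PodAffinityTerm as used in an
    // existing pod's required (hard) anti-affinity.
    type miniAntiTerm struct {
        Selector    map[string]string // labels the term targets
        TopologyKey string
    }

    // breaksExistingAntiAffinity reports whether placing a new pod with
    // newPodLabels on candidateNode would violate an existing pod's anti-affinity
    // term, given the labels of the node that existing pod runs on.
    func breaksExistingAntiAffinity(newPodLabels, candidateNodeLabels, existingPodNodeLabels map[string]string, term miniAntiTerm) bool {
        for k, v := range term.Selector {
            if newPodLabels[k] != v {
                return false // the new pod does not match the term at all
            }
        }
        // The new pod matches the term; it must not land in the same topology domain.
        val := candidateNodeLabels[term.TopologyKey]
        return val != "" && val == existingPodNodeLabels[term.TopologyKey]
    }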
// NodeMatchPodAffinityAntiAffinity checks if the node matches
// the requiredDuringScheduling affinity/anti-affinity rules indicated by the pod.
func (checker *PodAffinityChecker) NodeMatchPodAffinityAntiAffinity(pod *api.Pod, allPods []*api.Pod, node *api.Node) bool {
// Parse required affinity scheduling rules.
affinity, err := api.GetAffinityFromPodAnnotations(pod.Annotations)
if err != nil {
glog.V(10).Infof("Failed to get Affinity from Pod %+v, err: %+v", podName(pod), err)
return false
}

// check if the current node match the inter-pod affinity scheduling rules.
// hard inter-pod affinity is not symmetric, check only when affinity.PodAffinity is not nil.
if affinity.PodAffinity != nil {
if !checker.NodeMatchesHardPodAffinity(pod, allPods, node, affinity.PodAffinity) {
return false
}
}

// hard inter-pod anti-affinity is symmetric, check both when affinity.PodAntiAffinity is nil and not nil.
return checker.NodeMatchesHardPodAntiAffinity(pod, allPods, node, affinity.PodAntiAffinity)
}

func PodToleratesNodeTaints(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, error) {
node := nodeInfo.Node()
if node == nil {
@@ -19,7 +19,6 @@ package priorities
import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
@@ -52,19 +51,48 @@ func NewInterPodAffinityPriority(
return interPodAffinity.CalculateInterPodAffinityPriority
}
// TODO: Share it with predicates by moving to better location.
// TODO: Can we avoid error handling here - this is only a matter of non-parsable selector?
func podMatchesNamespaceAndSelector(pod *api.Pod, affinityPod *api.Pod, term *api.PodAffinityTerm) (bool, error) {
namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(affinityPod, *term)
if len(namespaces) != 0 && !namespaces.Has(pod.Namespace) {
return false, nil
}
type podAffinityPriorityMap struct {
// nodes contain all nodes that should be considered
nodes []*api.Node
// counts store the mapping from node name to so-far computed score of
// the node.
counts map[string]float64
// failureDomains contain default failure domains keys
failureDomains priorityutil.Topologies
// The first error that we faced.
firstError error
}

selector, err := unversioned.LabelSelectorAsSelector(term.LabelSelector)
if err != nil || !selector.Matches(labels.Set(pod.Labels)) {
return false, err
func newPodAffinityPriorityMap(nodes []*api.Node, failureDomains priorityutil.Topologies) *podAffinityPriorityMap {
return &podAffinityPriorityMap{
nodes: nodes,
counts: make(map[string]float64, len(nodes)),
failureDomains: failureDomains,
}
}

func (p *podAffinityPriorityMap) processTerm(term *api.PodAffinityTerm, podDefiningAffinityTerm, podToCheck *api.Pod, fixedNode *api.Node, weight float64) {
match, err := priorityutil.PodMatchesTermsNamespaceAndSelector(podToCheck, podDefiningAffinityTerm, term)
if err != nil {
if p.firstError == nil {
p.firstError = err
}
return
}
if match {
for _, node := range p.nodes {
if p.failureDomains.NodesHaveSameTopologyKey(node, fixedNode, term.TopologyKey) {
p.counts[node.Name] += weight
}
}
}
}

func (p *podAffinityPriorityMap) processTerms(terms []api.WeightedPodAffinityTerm, podDefiningAffinityTerm, podToCheck *api.Pod, fixedNode *api.Node, multiplier int) {
for i := range terms {
term := &terms[i]
p.processTerm(&term.PodAffinityTerm, podDefiningAffinityTerm, podToCheck, fixedNode, float64(term.Weight*multiplier))
}
return true, nil
}
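The new podAffinityPriorityMap centralizes the scoring loop: whenever podToCheck matches a term, every candidate node that shares the term's topology key with the fixed node has the term's (signed) weight added to its count. A small self-contained version of that accumulation, with simplified stand-in types:

    // scoreNode is a simplified stand-in for *api.Node in this sketch.
    type scoreNode struct {
        Name   string
        Labels map[string]string
    }

    type priorityMapSketch struct {
        nodes  []scoreNode
        counts map[string]float64
    }

    func newPriorityMapSketch(nodes []scoreNode) *priorityMapSketch {
        return &priorityMapSketch{nodes: nodes, counts: make(map[string]float64, len(nodes))}
    }

    // processTerm adds weight to every node that sits in the same topology domain
    // as fixedNode, provided the term matched; weight is negative for
    // anti-affinity terms (mirrors podAffinityPriorityMap.processTerm).
    func (p *priorityMapSketch) processTerm(matched bool, fixedNode scoreNode, topologyKey string, weight float64) {
        if !matched {
            return
        }
        for _, n := range p.nodes {
            v := n.Labels[topologyKey]
            if v != "" && v == fixedNode.Labels[topologyKey] {
                p.counts[n.Name] += weight
            }
        }
    }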
// compute a sum by iterating through the elements of weightedPodAffinityTerm and adding
@@ -85,32 +113,9 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *api.Pod, nod
// convert the topology key based weights to the node name based weights
var maxCount float64
var minCount float64
// counts store the mapping from node name to so-far computed score of
// priorityMap stores the mapping from node name to so-far computed score of
// the node.
counts := make(map[string]float64, len(nodes))

processTerm := func(term *api.PodAffinityTerm, affinityPod, podToCheck *api.Pod, fixedNode *api.Node, weight float64) error {
match, err := podMatchesNamespaceAndSelector(podToCheck, affinityPod, term)
if err != nil {
return err
}
if match {
for _, node := range nodes {
if ipa.failureDomains.NodesHaveSameTopologyKey(node, fixedNode, term.TopologyKey) {
counts[node.Name] += weight
}
}
}
return nil
}
processTerms := func(terms []api.WeightedPodAffinityTerm, affinityPod, podToCheck *api.Pod, fixedNode *api.Node, multiplier int) error {
for _, weightedTerm := range terms {
if err := processTerm(&weightedTerm.PodAffinityTerm, affinityPod, podToCheck, fixedNode, float64(weightedTerm.Weight*multiplier)); err != nil {
return err
}
}
return nil
}
pm := newPodAffinityPriorityMap(nodes, ipa.failureDomains)

for _, existingPod := range allPods {
existingPodNode, err := ipa.info.GetNodeInfo(existingPod.Spec.NodeName)
@@ -124,26 +129,22 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *api.Pod, nod

if affinity.PodAffinity != nil {
// For every soft pod affinity term of <pod>, if <existingPod> matches the term,
// increment <counts> for every node in the cluster with the same <term.TopologyKey>
// increment <pm.counts> for every node in the cluster with the same <term.TopologyKey>
// value as that of <existingPods>`s node by the term`s weight.
terms := affinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution
if err := processTerms(terms, pod, existingPod, existingPodNode, 1); err != nil {
return nil, err
}
pm.processTerms(terms, pod, existingPod, existingPodNode, 1)
}
if affinity.PodAntiAffinity != nil {
// For every soft pod anti-affinity term of <pod>, if <existingPod> matches the term,
// decrement <counts> for every node in the cluster with the same <term.TopologyKey>
// decrement <pm.counts> for every node in the cluster with the same <term.TopologyKey>
// value as that of <existingPod>`s node by the term`s weight.
terms := affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution
if err := processTerms(terms, pod, existingPod, existingPodNode, -1); err != nil {
return nil, err
}
pm.processTerms(terms, pod, existingPod, existingPodNode, -1)
}

if existingPodAffinity.PodAffinity != nil {
// For every hard pod affinity term of <existingPod>, if <pod> matches the term,
// increment <counts> for every node in the cluster with the same <term.TopologyKey>
// increment <pm.counts> for every node in the cluster with the same <term.TopologyKey>
// value as that of <existingPod>'s node by the constant <ipa.hardPodAffinityWeight>
if ipa.hardPodAffinityWeight > 0 {
terms := existingPodAffinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution
@@ -152,36 +153,33 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *api.Pod, nod
// terms = append(terms, existingPodAffinity.PodAffinity.RequiredDuringSchedulingRequiredDuringExecution...)
//}
for _, term := range terms {
if err := processTerm(&term, existingPod, pod, existingPodNode, float64(ipa.hardPodAffinityWeight)); err != nil {
return nil, err
}
pm.processTerm(&term, existingPod, pod, existingPodNode, float64(ipa.hardPodAffinityWeight))
}
}
// For every soft pod affinity term of <existingPod>, if <pod> matches the term,
// increment <counts> for every node in the cluster with the same <term.TopologyKey>
// increment <pm.counts> for every node in the cluster with the same <term.TopologyKey>
// value as that of <existingPod>'s node by the term's weight.
terms := existingPodAffinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution
if err := processTerms(terms, existingPod, pod, existingPodNode, 1); err != nil {
return nil, err
}
pm.processTerms(terms, existingPod, pod, existingPodNode, 1)
}
if existingPodAffinity.PodAntiAffinity != nil {
// For every soft pod anti-affinity term of <existingPod>, if <pod> matches the term,
// decrement <counts> for every node in the cluster with the same <term.TopologyKey>
// decrement <pm.counts> for every node in the cluster with the same <term.TopologyKey>
// value as that of <existingPod>'s node by the term's weight.
terms := existingPodAffinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution
if err := processTerms(terms, existingPod, pod, existingPodNode, -1); err != nil {
return nil, err
}
pm.processTerms(terms, existingPod, pod, existingPodNode, -1)
}
}
if pm.firstError != nil {
return nil, pm.firstError
}

for _, node := range nodes {
if counts[node.Name] > maxCount {
maxCount = counts[node.Name]
if pm.counts[node.Name] > maxCount {
maxCount = pm.counts[node.Name]
}
if counts[node.Name] < minCount {
minCount = counts[node.Name]
if pm.counts[node.Name] < minCount {
minCount = pm.counts[node.Name]
}
}
@@ -190,7 +188,7 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *api.Pod, nod
for _, node := range nodes {
fScore := float64(0)
if (maxCount - minCount) > 0 {
fScore = 10 * ((counts[node.Name] - minCount) / (maxCount - minCount))
fScore = 10 * ((pm.counts[node.Name] - minCount) / (maxCount - minCount))
}
result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int(fScore)})
if glog.V(10) {
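The final mapping onto the 0-10 host priority range is unchanged by this commit apart from reading pm.counts: it is a plain min-max normalization of the accumulated per-node counts. The arithmetic, stated in isolation:

    // normalizeScore maps a raw count into [0, 10] relative to the observed
    // minimum and maximum counts, matching the fScore computation in the diff.
    func normalizeScore(count, minCount, maxCount float64) int {
        if maxCount-minCount <= 0 {
            return 0
        }
        return int(10 * (count - minCount) / (maxCount - minCount))
    }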
@@ -23,26 +23,11 @@ import (
"k8s.io/kubernetes/pkg/util/sets"
)

// FilterPodsByNameSpaces filters the pods based the given list of namespaces,
// empty set of namespaces means all namespaces.
func FilterPodsByNameSpaces(names sets.String, pods []*api.Pod) []*api.Pod {
if len(pods) == 0 || len(names) == 0 {
return pods
}
result := []*api.Pod{}
for _, pod := range pods {
if names.Has(pod.Namespace) {
result = append(result, pod)
}
}
return result
}

// GetNamespacesFromPodAffinityTerm returns a set of names
// according to the namespaces indicated in podAffinityTerm.
// if the NameSpaces is nil considers the given pod's namespace
// if the Namespaces is empty list then considers all the namespaces
func GetNamespacesFromPodAffinityTerm(pod *api.Pod, podAffinityTerm api.PodAffinityTerm) sets.String {
// 1. If the namespaces is nil considers the given pod's namespace
// 2. If the namespaces is empty list then considers all the namespaces
func getNamespacesFromPodAffinityTerm(pod *api.Pod, podAffinityTerm api.PodAffinityTerm) sets.String {
names := sets.String{}
if podAffinityTerm.Namespaces == nil {
names.Insert(pod.Namespace)
@@ -52,6 +37,21 @@ func GetNamespacesFromPodAffinityTerm(pod *api.Pod, podAffinityTerm api.PodAffin
return names
}
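getNamespacesFromPodAffinityTerm keeps the same convention it had before, now spelled out in the numbered comment: a nil Namespaces field means the affinity pod's own namespace, while an explicitly empty list means all namespaces. A hedged stand-alone sketch of that resolution, using plain string slices and a map-based set instead of the api types:

    // namespacesForTerm resolves the namespaces a term applies to: nil selects
    // the owning pod's namespace, an empty (but non-nil) list selects every
    // namespace (returned here as an empty set), and a non-empty list is used as
    // given.
    func namespacesForTerm(podNamespace string, termNamespaces []string) map[string]struct{} {
        names := map[string]struct{}{}
        if termNamespaces == nil {
            names[podNamespace] = struct{}{}
            return names
        }
        for _, ns := range termNamespaces {
            names[ns] = struct{}{}
        }
        return names
    }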
// PodMatchesTermsNamespaceAndSelector returns true if the given <pod>
// matches the namespace and selector defined by <affinityPod>`s <term>.
func PodMatchesTermsNamespaceAndSelector(pod *api.Pod, affinityPod *api.Pod, term *api.PodAffinityTerm) (bool, error) {
namespaces := getNamespacesFromPodAffinityTerm(affinityPod, *term)
if len(namespaces) != 0 && !namespaces.Has(pod.Namespace) {
return false, nil
}

selector, err := unversioned.LabelSelectorAsSelector(term.LabelSelector)
if err != nil || !selector.Matches(labels.Set(pod.Labels)) {
return false, err
}
return true, nil
}

// nodesHaveSameTopologyKeyInternal checks if nodeA and nodeB have same label value with given topologyKey as label key.
func nodesHaveSameTopologyKeyInternal(nodeA, nodeB *api.Node, topologyKey string) bool {
return nodeA.Labels != nil && nodeB.Labels != nil && len(nodeA.Labels[topologyKey]) > 0 && nodeA.Labels[topologyKey] == nodeB.Labels[topologyKey]
@@ -76,20 +76,3 @@ func (tps *Topologies) NodesHaveSameTopologyKey(nodeA, nodeB *api.Node, topology
return nodesHaveSameTopologyKeyInternal(nodeA, nodeB, topologyKey)
}
}

// CheckIfPodMatchPodAffinityTerm checks if podB's affinity request is compatible with podA
// TODO: Get rid this method. We should avoid computing Namespaces and selectors multiple times
// and check them on higher levels and then use NodesHaveSameTopologyKey method.
func (tps *Topologies) CheckIfPodMatchPodAffinityTerm(podA *api.Pod, nodeA, nodeB *api.Node, podB *api.Pod, podBAffinityTerm api.PodAffinityTerm) (bool, error) {
names := GetNamespacesFromPodAffinityTerm(podB, podBAffinityTerm)
if len(names) != 0 && !names.Has(podA.Namespace) {
return false, nil
}

labelSelector, err := unversioned.LabelSelectorAsSelector(podBAffinityTerm.LabelSelector)
if err != nil || !labelSelector.Matches(labels.Set(podA.Labels)) {
return false, err
}

return tps.NodesHaveSameTopologyKey(nodeA, nodeB, podBAffinityTerm.TopologyKey), nil
}
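For reference, nodesHaveSameTopologyKeyInternal treats two nodes as belonging to the same failure domain only when both carry a non-empty, equal value for the topology key (with an empty key apparently handled by the Topologies wrapper above). A tiny usage sketch with plain label maps; the zone and region label keys are just illustrative examples:

    // sameTopologyDomain mirrors nodesHaveSameTopologyKeyInternal with plain
    // label maps: both nodes must carry a non-empty, equal value for the key.
    func sameTopologyDomain(nodeALabels, nodeBLabels map[string]string, topologyKey string) bool {
        return nodeALabels[topologyKey] != "" && nodeALabels[topologyKey] == nodeBLabels[topologyKey]
    }

    // Example: two nodes labeled with the same zone share the zone domain, but
    // not a region domain that neither of them declares.
    func exampleSameZone() (sameZone, sameRegion bool) {
        a := map[string]string{"failure-domain.beta.kubernetes.io/zone": "us-east-1a"}
        b := map[string]string{"failure-domain.beta.kubernetes.io/zone": "us-east-1a"}
        sameZone = sameTopologyDomain(a, b, "failure-domain.beta.kubernetes.io/zone")     // true
        sameRegion = sameTopologyDomain(a, b, "failure-domain.beta.kubernetes.io/region") // false
        return
    }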