Merge pull request #83286 from draveness/feature/refactor-predicate-metadata

feat(scheduler): refactor predicateMetadata into a collection of sub types
Kubernetes Prow Robot 2019-10-08 15:42:42 -07:00 committed by GitHub
commit 464952fcc8
3 changed files with 414 additions and 297 deletions
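At a high level, this commit splits the monolithic predicateMetadata struct into per-predicate sub-structs (serviceAffinityMetadata, podAffinityMetadata, podFitsResourcesMetadata, podFitsHostPortsMetadata, evenPodsSpreadMetadata), each owning its own addPod/removePod/clone logic, with predicateMetadata delegating to them. A minimal, self-contained sketch of that pattern, using hypothetical names rather than the scheduler's actual types:

package main

import "fmt"

// hostPortsMeta and resourcesMeta are hypothetical stand-ins for the
// per-predicate sub-structs introduced by this commit; each owns a
// nil-safe clone().
type hostPortsMeta struct{ ports []int }

func (m *hostPortsMeta) clone() *hostPortsMeta {
    if m == nil { // a predicate's metadata may never have been produced
        return nil
    }
    c := hostPortsMeta{ports: append([]int(nil), m.ports...)}
    return &c
}

type resourcesMeta struct{ milliCPU int64 }

func (m *resourcesMeta) clone() *resourcesMeta {
    if m == nil {
        return nil
    }
    c := *m
    return &c
}

// The aggregate struct only wires the pieces together and delegates.
type metadata struct {
    hostPorts *hostPortsMeta
    resources *resourcesMeta
}

func (m *metadata) shallowCopy() *metadata {
    return &metadata{
        hostPorts: m.hostPorts.clone(),
        resources: m.resources.clone(),
    }
}

func main() {
    m := &metadata{hostPorts: &hostPortsMeta{ports: []int{8080}}}
    c := m.shallowCopy() // resources is nil; its clone() handles that
    fmt.Println(len(c.hostPorts.ports), c.resources == nil) // 1 true
}

The payoff is that changing one predicate's precomputed state no longer touches the shared AddPod/RemovePod/ShallowCopy plumbing for every other predicate.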


@ -24,7 +24,7 @@ import (
"k8s.io/klog"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets"
@ -110,11 +110,11 @@ func (paths *criticalPaths) update(tpVal string, num int32) {
}
}
// podSpreadCache combines tpKeyToCriticalPaths and tpPairToMatchNum
// evenPodsSpreadMetadata combines tpKeyToCriticalPaths and tpPairToMatchNum
// to represent:
// (1) critical paths where the least pods are matched on each spread constraint.
// (2) number of pods matched on each spread constraint.
type podSpreadCache struct {
type evenPodsSpreadMetadata struct {
// We record 2 critical paths instead of all critical paths here.
// criticalPaths[0].matchNum always holds the minimum matching number.
// criticalPaths[1].matchNum is always greater or equal to criticalPaths[0].matchNum, but
@ -124,14 +124,57 @@ type podSpreadCache struct {
tpPairToMatchNum map[topologyPair]int32
}
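The comment above describes a two-slot minimum: slot 0 always holds the topology value with the fewest matching pods, slot 1 the next-lowest count seen. A rough, standalone sketch of that update rule (hypothetical types, not the scheduler's criticalPaths implementation):

package main

import "fmt"

// slot pairs a topology value with its current match count.
type slot struct {
    value string
    num   int32
}

// twoSlotMin keeps only the two smallest counts, with slot 0 the minimum.
type twoSlotMin [2]slot

func (p *twoSlotMin) update(value string, num int32) {
    // If the value already occupies a slot, refresh its count in place.
    i := -1
    if p[0].value == value {
        i = 0
    } else if p[1].value == value {
        i = 1
    }
    if i >= 0 {
        p[i].num = num
    } else if num < p[1].num {
        // A new value can only ever displace the second slot.
        p[1] = slot{value: value, num: num}
    }
    // Restore the invariant: slot 0 holds the minimum.
    if p[0].num > p[1].num {
        p[0], p[1] = p[1], p[0]
    }
}

func main() {
    paths := twoSlotMin{{"zone1", 3}, {"zone2", 5}}
    paths.update("zone3", 1)
    fmt.Println(paths) // [{zone3 1} {zone1 3}]
}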
// NOTE: When new fields are added/removed or logic is changed, please make sure that
// RemovePod, AddPod, and ShallowCopy functions are updated to work with the new changes.
type predicateMetadata struct {
pod *v1.Pod
podBestEffort bool
podRequest *schedulernodeinfo.Resource
podPorts []*v1.ContainerPort
type serviceAffinityMetadata struct {
matchingPodList []*v1.Pod
matchingPodServices []*v1.Service
}
func (m *serviceAffinityMetadata) addPod(addedPod *v1.Pod, pod *v1.Pod, node *v1.Node) {
// If addedPod is in the same namespace as the pod, update the list
// of matching pods if applicable.
if m == nil || addedPod.Namespace != pod.Namespace {
return
}
selector := CreateSelectorFromLabels(pod.Labels)
if selector.Matches(labels.Set(addedPod.Labels)) {
m.matchingPodList = append(m.matchingPodList, addedPod)
}
}
func (m *serviceAffinityMetadata) removePod(deletedPod *v1.Pod, node *v1.Node) {
deletedPodFullName := schedutil.GetPodFullName(deletedPod)
if m == nil ||
len(m.matchingPodList) == 0 ||
deletedPod.Namespace != m.matchingPodList[0].Namespace {
return
}
for i, pod := range m.matchingPodList {
if schedutil.GetPodFullName(pod) == deletedPodFullName {
m.matchingPodList = append(m.matchingPodList[:i], m.matchingPodList[i+1:]...)
break
}
}
}
func (m *serviceAffinityMetadata) clone() *serviceAffinityMetadata {
if m == nil {
return nil
}
copy := serviceAffinityMetadata{}
copy.matchingPodServices = append([]*v1.Service(nil),
m.matchingPodServices...)
copy.matchingPodList = append([]*v1.Pod(nil),
m.matchingPodList...)
return &copy
}
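Note that addPod, removePod, and clone above all tolerate a nil receiver, so callers never need to know whether the service-affinity producer actually ran. A small illustration of that convention with a hypothetical type (not scheduler code):

package main

import "fmt"

type podList struct {
    names []string
}

// addName is nil-safe: if the metadata was never produced, the call is a no-op.
func (m *podList) addName(name string) {
    if m == nil {
        return
    }
    m.names = append(m.names, name)
}

func main() {
    inUse := &podList{}
    var notInUse *podList // nil: this feature's metadata was never computed

    inUse.addName("pod-a")
    notInUse.addName("pod-a") // no panic, nothing to update

    fmt.Println(inUse.names, notInUse == nil) // [pod-a] true
}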
type podAffinityMetadata struct {
topologyPairsAntiAffinityPodsMap *topologyPairsMaps
// A map of topology pairs to a list of Pods that can potentially match
// the affinity terms of the "pod" and its inverse.
@ -139,9 +182,70 @@ type predicateMetadata struct {
// A map of topology pairs to a list of Pods that can potentially match
// the anti-affinity terms of the "pod" and its inverse.
topologyPairsPotentialAntiAffinityPods *topologyPairsMaps
serviceAffinityInUse bool
serviceAffinityMatchingPodList []*v1.Pod
serviceAffinityMatchingPodServices []*v1.Service
}
func (m *podAffinityMetadata) addPod(addedPod *v1.Pod, pod *v1.Pod, node *v1.Node) error {
// Add matching anti-affinity terms of the addedPod to the map.
topologyPairsMaps, err := getMatchingAntiAffinityTopologyPairsOfPod(pod, addedPod, node)
if err != nil {
return err
}
m.topologyPairsAntiAffinityPodsMap.appendMaps(topologyPairsMaps)
// Add the pod to nodeNameToMatchingAffinityPods and nodeNameToMatchingAntiAffinityPods if needed.
affinity := pod.Spec.Affinity
podNodeName := addedPod.Spec.NodeName
if affinity != nil && len(podNodeName) > 0 {
// It is assumed that when the added pod matches affinity of the pod, all the terms must match,
// this should be changed when the implementation of targetPodMatchesAffinityOfPod/podMatchesAffinityTermProperties
// is changed
if targetPodMatchesAffinityOfPod(pod, addedPod) {
affinityTerms := GetPodAffinityTerms(affinity.PodAffinity)
for _, term := range affinityTerms {
if topologyValue, ok := node.Labels[term.TopologyKey]; ok {
pair := topologyPair{key: term.TopologyKey, value: topologyValue}
m.topologyPairsPotentialAffinityPods.addTopologyPair(pair, addedPod)
}
}
}
if targetPodMatchesAntiAffinityOfPod(pod, addedPod) {
antiAffinityTerms := GetPodAntiAffinityTerms(affinity.PodAntiAffinity)
for _, term := range antiAffinityTerms {
if topologyValue, ok := node.Labels[term.TopologyKey]; ok {
pair := topologyPair{key: term.TopologyKey, value: topologyValue}
m.topologyPairsPotentialAntiAffinityPods.addTopologyPair(pair, addedPod)
}
}
}
}
return nil
}
func (m *podAffinityMetadata) removePod(deletedPod *v1.Pod) {
if m == nil {
return
}
m.topologyPairsAntiAffinityPodsMap.removePod(deletedPod)
// Delete pod from the matching affinity or anti-affinity topology pairs maps.
m.topologyPairsPotentialAffinityPods.removePod(deletedPod)
m.topologyPairsPotentialAntiAffinityPods.removePod(deletedPod)
}
func (m *podAffinityMetadata) clone() *podAffinityMetadata {
if m == nil {
return nil
}
copy := podAffinityMetadata{}
copy.topologyPairsPotentialAffinityPods = m.topologyPairsPotentialAffinityPods.clone()
copy.topologyPairsPotentialAntiAffinityPods = m.topologyPairsPotentialAntiAffinityPods.clone()
copy.topologyPairsAntiAffinityPodsMap = m.topologyPairsAntiAffinityPodsMap.clone()
return &copy
}
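podAffinityMetadata indexes pods by topologyPair (a topology key plus the value a node carries for that key), so affinity checks become map lookups instead of per-pod label scans. A rough illustration of that indexing with simplified types (not the real topologyPairsMaps):

package main

import "fmt"

// pair mirrors the idea of topologyPair, e.g. {"zone", "zone1"}.
type pair struct {
    key, value string
}

// pairIndex maps a topology pair to the set of pod names matched there.
type pairIndex map[pair]map[string]struct{}

func (idx pairIndex) add(p pair, podName string) {
    if idx[p] == nil {
        idx[p] = map[string]struct{}{}
    }
    idx[p][podName] = struct{}{}
}

func main() {
    idx := pairIndex{}
    idx.add(pair{"zone", "zone1"}, "p1")
    idx.add(pair{"zone", "zone1"}, "p2")
    idx.add(pair{"node", "node-a"}, "p1")

    // "Is any matching pod already in zone1?" is now a single lookup.
    _, found := idx[pair{"zone", "zone1"}]
    fmt.Println(found, len(idx[pair{"zone", "zone1"}])) // true 2
}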
type podFitsResourcesMetadata struct {
// ignoredExtendedResources is a set of extended resource names that will
// be ignored in the PodFitsResources predicate.
//
@ -149,9 +253,50 @@ type predicateMetadata struct {
// which should be accounted only by the extenders. This set is synthesized
// from scheduler extender configuration and does not change per pod.
ignoredExtendedResources sets.String
// podSpreadCache holds info of the minimum match number on each topology spread constraint,
podRequest *schedulernodeinfo.Resource
}
func (m *podFitsResourcesMetadata) clone() *podFitsResourcesMetadata {
if m == nil {
return nil
}
copy := podFitsResourcesMetadata{}
copy.ignoredExtendedResources = m.ignoredExtendedResources
copy.podRequest = m.podRequest
return &copy
}
type podFitsHostPortsMetadata struct {
podPorts []*v1.ContainerPort
}
func (m *podFitsHostPortsMetadata) clone() *podFitsHostPortsMetadata {
if m == nil {
return nil
}
copy := podFitsHostPortsMetadata{}
copy.podPorts = append([]*v1.ContainerPort(nil), m.podPorts...)
return &copy
}
// NOTE: When new fields are added/removed or logic is changed, please make sure that
// RemovePod, AddPod, and ShallowCopy functions are updated to work with the new changes.
type predicateMetadata struct {
pod *v1.Pod
podBestEffort bool
// evenPodsSpreadMetadata holds info of the minimum match number on each topology spread constraint,
// and the match number of all valid topology pairs.
podSpreadCache *podSpreadCache
evenPodsSpreadMetadata *evenPodsSpreadMetadata
serviceAffinityMetadata *serviceAffinityMetadata
podAffinityMetadata *podAffinityMetadata
podFitsResourcesMetadata *podFitsResourcesMetadata
podFitsHostPortsMetadata *podFitsHostPortsMetadata
}
// Ensure that predicateMetadata implements algorithm.PredicateMetadata.
@ -180,7 +325,7 @@ func EmptyPredicateMetadataProducer(pod *v1.Pod, nodeNameToInfo map[string]*sche
// See the comments in "predicateMetadata" for the explanation of the options.
func RegisterPredicateMetadataProducerWithExtendedResourceOptions(ignoredExtendedResources sets.String) {
RegisterPredicateMetadataProducer("PredicateWithExtendedResourceOptions", func(pm *predicateMetadata) {
pm.ignoredExtendedResources = ignoredExtendedResources
pm.podFitsResourcesMetadata.ignoredExtendedResources = ignoredExtendedResources
})
}
@ -190,35 +335,27 @@ func GetPredicateMetadata(pod *v1.Pod, nodeNameToInfoMap map[string]*schedulerno
if pod == nil {
return nil
}
// existingPodSpreadCache represents how existing pods match "pod"
// evenPodsSpreadMetadata represents how existing pods match "pod"
// on its spread constraints
existingPodSpreadCache, err := getExistingPodSpreadCache(pod, nodeNameToInfoMap)
evenPodsSpreadMetadata, err := getEvenPodsSpreadMetadata(pod, nodeNameToInfoMap)
if err != nil {
klog.Errorf("Error calculating spreadConstraintsMap: %v", err)
return nil
}
// existingPodAntiAffinityMap will be used later for efficient check on existing pods' anti-affinity
existingPodAntiAffinityMap, err := getTPMapMatchingExistingAntiAffinity(pod, nodeNameToInfoMap)
podAffinityMetadata, err := getPodAffinityMetadata(pod, nodeNameToInfoMap)
if err != nil {
klog.Errorf("Error calculating existingPodAntiAffinityMap: %v", err)
return nil
}
// incomingPodAffinityMap will be used later for efficient check on incoming pod's affinity
// incomingPodAntiAffinityMap will be used later for efficient check on incoming pod's anti-affinity
incomingPodAffinityMap, incomingPodAntiAffinityMap, err := getTPMapMatchingIncomingAffinityAntiAffinity(pod, nodeNameToInfoMap)
if err != nil {
klog.Errorf("Error calculating incomingPod(Anti)AffinityMap: %v", err)
klog.Errorf("Error calculating podAffinityMetadata: %v", err)
return nil
}
predicateMetadata := &predicateMetadata{
pod: pod,
podBestEffort: isPodBestEffort(pod),
podRequest: GetResourceRequest(pod),
podPorts: schedutil.GetContainerPorts(pod),
topologyPairsPotentialAffinityPods: incomingPodAffinityMap,
topologyPairsPotentialAntiAffinityPods: incomingPodAntiAffinityMap,
topologyPairsAntiAffinityPodsMap: existingPodAntiAffinityMap,
podSpreadCache: existingPodSpreadCache,
pod: pod,
podBestEffort: isPodBestEffort(pod),
evenPodsSpreadMetadata: evenPodsSpreadMetadata,
podAffinityMetadata: podAffinityMetadata,
podFitsResourcesMetadata: getPodFitsResourcesMetadata(pod),
podFitsHostPortsMetadata: getPodFitsHostPortsMetadata(pod),
}
for predicateName, precomputeFunc := range predicateMetadataProducers {
klog.V(10).Infof("Precompute: %v", predicateName)
@ -227,7 +364,39 @@ func GetPredicateMetadata(pod *v1.Pod, nodeNameToInfoMap map[string]*schedulerno
return predicateMetadata
}
func getExistingPodSpreadCache(pod *v1.Pod, nodeInfoMap map[string]*schedulernodeinfo.NodeInfo) (*podSpreadCache, error) {
func getPodFitsHostPortsMetadata(pod *v1.Pod) *podFitsHostPortsMetadata {
return &podFitsHostPortsMetadata{
podPorts: schedutil.GetContainerPorts(pod),
}
}
func getPodFitsResourcesMetadata(pod *v1.Pod) *podFitsResourcesMetadata {
return &podFitsResourcesMetadata{
podRequest: GetResourceRequest(pod),
}
}
func getPodAffinityMetadata(pod *v1.Pod, nodeNameToInfoMap map[string]*schedulernodeinfo.NodeInfo) (*podAffinityMetadata, error) {
// existingPodAntiAffinityMap will be used later for efficient check on existing pods' anti-affinity
existingPodAntiAffinityMap, err := getTPMapMatchingExistingAntiAffinity(pod, nodeNameToInfoMap)
if err != nil {
return nil, err
}
// incomingPodAffinityMap will be used later for efficient check on incoming pod's affinity
// incomingPodAntiAffinityMap will be used later for efficient check on incoming pod's anti-affinity
incomingPodAffinityMap, incomingPodAntiAffinityMap, err := getTPMapMatchingIncomingAffinityAntiAffinity(pod, nodeNameToInfoMap)
if err != nil {
return nil, err
}
return &podAffinityMetadata{
topologyPairsPotentialAffinityPods: incomingPodAffinityMap,
topologyPairsPotentialAntiAffinityPods: incomingPodAntiAffinityMap,
topologyPairsAntiAffinityPodsMap: existingPodAntiAffinityMap,
}, nil
}
func getEvenPodsSpreadMetadata(pod *v1.Pod, nodeInfoMap map[string]*schedulernodeinfo.NodeInfo) (*evenPodsSpreadMetadata, error) {
// We have feature gating in APIServer to strip the spec
// so don't need to re-check feature gate, just check length of constraints.
constraints := getHardTopologySpreadConstraints(pod)
@ -245,7 +414,7 @@ func getExistingPodSpreadCache(pod *v1.Pod, nodeInfoMap map[string]*schedulernod
// TODO(Huang-Wei): It might be possible to use "make(map[topologyPair]*int32)".
// In that case, need to consider how to init each tpPairToCount[pair] in an atomic fashion.
m := podSpreadCache{
m := evenPodsSpreadMetadata{
tpKeyToCriticalPaths: make(map[string]*criticalPaths, len(constraints)),
tpPairToMatchNum: make(map[topologyPair]int32),
}
@ -396,15 +565,15 @@ func (m *topologyPairsMaps) clone() *topologyPairsMaps {
return copy
}
func (c *podSpreadCache) addPod(addedPod, preemptorPod *v1.Pod, node *v1.Node) error {
func (c *evenPodsSpreadMetadata) addPod(addedPod, preemptorPod *v1.Pod, node *v1.Node) error {
return c.updatePod(addedPod, preemptorPod, node, 1)
}
func (c *podSpreadCache) removePod(deletedPod, preemptorPod *v1.Pod, node *v1.Node) error {
func (c *evenPodsSpreadMetadata) removePod(deletedPod, preemptorPod *v1.Pod, node *v1.Node) error {
return c.updatePod(deletedPod, preemptorPod, node, -1)
}
func (c *podSpreadCache) updatePod(updatedPod, preemptorPod *v1.Pod, node *v1.Node, delta int32) error {
func (c *evenPodsSpreadMetadata) updatePod(updatedPod, preemptorPod *v1.Pod, node *v1.Node, delta int32) error {
if updatedPod.Namespace != preemptorPod.Namespace || node == nil {
return nil
}
@ -430,12 +599,12 @@ func (c *podSpreadCache) updatePod(updatedPod, preemptorPod *v1.Pod, node *v1.No
return nil
}
func (c *podSpreadCache) clone() *podSpreadCache {
func (c *evenPodsSpreadMetadata) clone() *evenPodsSpreadMetadata {
// c could be nil when EvenPodsSpread feature is disabled
if c == nil {
return nil
}
copy := podSpreadCache{
copy := evenPodsSpreadMetadata{
tpKeyToCriticalPaths: make(map[string]*criticalPaths),
tpPairToMatchNum: make(map[topologyPair]int32),
}
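Both fields here are maps, so clone() must allocate fresh maps and copy entries; simply assigning the field would alias the same underlying table. A generic illustration of why (plain Go, not the scheduler's code), including the extra care needed when map values are pointers:

package main

import "fmt"

func main() {
    one := int32(1)
    orig := map[string]*int32{"zone": &one}

    // Assigning a map copies only its header: both names now refer to the
    // same underlying table, so writes through one show up in the other.
    alias := orig
    alias["node"] = new(int32)
    fmt.Println(len(orig)) // 2

    // A proper clone allocates a new map and copies entries; when the
    // values are pointers, the pointed-to data must be copied too if the
    // clone is meant to be mutated independently.
    cloned := make(map[string]*int32, len(orig))
    for k, v := range orig {
        val := *v
        cloned[k] = &val
    }
    *cloned["zone"] = 99
    fmt.Println(*orig["zone"]) // 1 -- the original is unaffected
}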
@ -456,29 +625,13 @@ func (meta *predicateMetadata) RemovePod(deletedPod *v1.Pod, node *v1.Node) erro
if deletedPodFullName == schedutil.GetPodFullName(meta.pod) {
return fmt.Errorf("deletedPod and meta.pod must not be the same")
}
meta.topologyPairsAntiAffinityPodsMap.removePod(deletedPod)
// Delete pod from the matching affinity or anti-affinity topology pairs maps.
meta.topologyPairsPotentialAffinityPods.removePod(deletedPod)
meta.topologyPairsPotentialAntiAffinityPods.removePod(deletedPod)
meta.podAffinityMetadata.removePod(deletedPod)
// Delete pod from the pod spread topology maps.
if err := meta.podSpreadCache.removePod(deletedPod, meta.pod, node); err != nil {
if err := meta.evenPodsSpreadMetadata.removePod(deletedPod, meta.pod, node); err != nil {
return err
}
// All pods in the serviceAffinityMatchingPodList are in the same namespace.
// So, if the namespace of the first one is not the same as the namespace of the
// deletedPod, we don't need to check the list, as deletedPod isn't in the list.
if meta.serviceAffinityInUse &&
len(meta.serviceAffinityMatchingPodList) > 0 &&
deletedPod.Namespace == meta.serviceAffinityMatchingPodList[0].Namespace {
for i, pod := range meta.serviceAffinityMatchingPodList {
if schedutil.GetPodFullName(pod) == deletedPodFullName {
meta.serviceAffinityMatchingPodList = append(
meta.serviceAffinityMatchingPodList[:i],
meta.serviceAffinityMatchingPodList[i+1:]...)
break
}
}
}
meta.serviceAffinityMetadata.removePod(deletedPod, node)
return nil
}
@ -492,53 +645,18 @@ func (meta *predicateMetadata) AddPod(addedPod *v1.Pod, node *v1.Node) error {
if node == nil {
return fmt.Errorf("node not found")
}
// Add matching anti-affinity terms of the addedPod to the map.
topologyPairsMaps, err := getMatchingAntiAffinityTopologyPairsOfPod(meta.pod, addedPod, node)
if err != nil {
if err := meta.podAffinityMetadata.addPod(addedPod, meta.pod, node); err != nil {
return err
}
meta.topologyPairsAntiAffinityPodsMap.appendMaps(topologyPairsMaps)
// Add the pod to nodeNameToMatchingAffinityPods and nodeNameToMatchingAntiAffinityPods if needed.
affinity := meta.pod.Spec.Affinity
podNodeName := addedPod.Spec.NodeName
if affinity != nil && len(podNodeName) > 0 {
// It is assumed that when the added pod matches affinity of the meta.pod, all the terms must match,
// this should be changed when the implementation of targetPodMatchesAffinityOfPod/podMatchesAffinityTermProperties
// is changed
if targetPodMatchesAffinityOfPod(meta.pod, addedPod) {
affinityTerms := GetPodAffinityTerms(affinity.PodAffinity)
for _, term := range affinityTerms {
if topologyValue, ok := node.Labels[term.TopologyKey]; ok {
pair := topologyPair{key: term.TopologyKey, value: topologyValue}
meta.topologyPairsPotentialAffinityPods.addTopologyPair(pair, addedPod)
}
}
}
if targetPodMatchesAntiAffinityOfPod(meta.pod, addedPod) {
antiAffinityTerms := GetPodAntiAffinityTerms(affinity.PodAntiAffinity)
for _, term := range antiAffinityTerms {
if topologyValue, ok := node.Labels[term.TopologyKey]; ok {
pair := topologyPair{key: term.TopologyKey, value: topologyValue}
meta.topologyPairsPotentialAntiAffinityPods.addTopologyPair(pair, addedPod)
}
}
}
}
// Update meta.podSpreadCache if meta.pod has hard spread constraints
// Update meta.evenPodsSpreadMetadata if meta.pod has hard spread constraints
// and addedPod matches that
if err := meta.podSpreadCache.addPod(addedPod, meta.pod, node); err != nil {
if err := meta.evenPodsSpreadMetadata.addPod(addedPod, meta.pod, node); err != nil {
return err
}
// If addedPod is in the same namespace as the meta.pod, update the list
// of matching pods if applicable.
if meta.serviceAffinityInUse && addedPod.Namespace == meta.pod.Namespace {
selector := CreateSelectorFromLabels(meta.pod.Labels)
if selector.Matches(labels.Set(addedPod.Labels)) {
meta.serviceAffinityMatchingPodList = append(meta.serviceAffinityMatchingPodList,
addedPod)
}
}
meta.serviceAffinityMetadata.addPod(addedPod, meta.pod, node)
return nil
}
@ -546,21 +664,14 @@ func (meta *predicateMetadata) AddPod(addedPod *v1.Pod, node *v1.Node) error {
// its maps and slices, but it does not copy the contents of pointer values.
func (meta *predicateMetadata) ShallowCopy() PredicateMetadata {
newPredMeta := &predicateMetadata{
pod: meta.pod,
podBestEffort: meta.podBestEffort,
podRequest: meta.podRequest,
serviceAffinityInUse: meta.serviceAffinityInUse,
ignoredExtendedResources: meta.ignoredExtendedResources,
pod: meta.pod,
podBestEffort: meta.podBestEffort,
}
newPredMeta.podPorts = append([]*v1.ContainerPort(nil), meta.podPorts...)
newPredMeta.topologyPairsPotentialAffinityPods = meta.topologyPairsPotentialAffinityPods.clone()
newPredMeta.topologyPairsPotentialAntiAffinityPods = meta.topologyPairsPotentialAntiAffinityPods.clone()
newPredMeta.topologyPairsAntiAffinityPodsMap = meta.topologyPairsAntiAffinityPodsMap.clone()
newPredMeta.podSpreadCache = meta.podSpreadCache.clone()
newPredMeta.serviceAffinityMatchingPodServices = append([]*v1.Service(nil),
meta.serviceAffinityMatchingPodServices...)
newPredMeta.serviceAffinityMatchingPodList = append([]*v1.Pod(nil),
meta.serviceAffinityMatchingPodList...)
newPredMeta.podFitsHostPortsMetadata = meta.podFitsHostPortsMetadata.clone()
newPredMeta.podAffinityMetadata = meta.podAffinityMetadata.clone()
newPredMeta.evenPodsSpreadMetadata = meta.evenPodsSpreadMetadata.clone()
newPredMeta.serviceAffinityMetadata = meta.serviceAffinityMetadata.clone()
newPredMeta.podFitsResourcesMetadata = meta.podFitsResourcesMetadata.clone()
return (PredicateMetadata)(newPredMeta)
}
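These AddPod/RemovePod/ShallowCopy hooks exist so that callers such as preemption can copy the metadata per candidate node and simulate removing or re-adding pods without recomputing everything from scratch. Roughly, usage looks like the toy sketch below (a hypothetical stand-in, not the scheduler's preemption code):

package main

import "fmt"

// meta is a toy stand-in supporting the same operations.
type meta struct {
    matchCount int
}

func (m *meta) shallowCopy() *meta { c := *m; return &c }
func (m *meta) removePod()         { m.matchCount-- }

func fits(m *meta) bool { return m.matchCount <= 1 }

func main() {
    base := &meta{matchCount: 2}

    // Copy the metadata, pretend a victim pod is gone, and re-evaluate
    // the predicate; the original metadata stays untouched.
    trial := base.shallowCopy()
    trial.removePod()
    fmt.Println(fits(trial), fits(base)) // true false
}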


@ -22,7 +22,7 @@ import (
"sort"
"testing"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
@ -62,41 +62,38 @@ func predicateMetadataEquivalent(meta1, meta2 *predicateMetadata) error {
if meta1.podBestEffort != meta2.podBestEffort {
return fmt.Errorf("podBestEfforts are not equal")
}
if meta1.serviceAffinityInUse != meta2.serviceAffinityInUse {
return fmt.Errorf("serviceAffinityInUses are not equal")
}
if len(meta1.podPorts) != len(meta2.podPorts) {
if len(meta1.podFitsHostPortsMetadata.podPorts) != len(meta2.podFitsHostPortsMetadata.podPorts) {
return fmt.Errorf("podPorts are not equal")
}
for !reflect.DeepEqual(meta1.podPorts, meta2.podPorts) {
for !reflect.DeepEqual(meta1.podFitsHostPortsMetadata.podPorts, meta2.podFitsHostPortsMetadata.podPorts) {
return fmt.Errorf("podPorts are not equal")
}
if !reflect.DeepEqual(meta1.topologyPairsPotentialAffinityPods, meta2.topologyPairsPotentialAffinityPods) {
if !reflect.DeepEqual(meta1.podAffinityMetadata.topologyPairsPotentialAffinityPods, meta2.podAffinityMetadata.topologyPairsPotentialAffinityPods) {
return fmt.Errorf("topologyPairsPotentialAffinityPods are not equal")
}
if !reflect.DeepEqual(meta1.topologyPairsPotentialAntiAffinityPods, meta2.topologyPairsPotentialAntiAffinityPods) {
if !reflect.DeepEqual(meta1.podAffinityMetadata.topologyPairsPotentialAntiAffinityPods, meta2.podAffinityMetadata.topologyPairsPotentialAntiAffinityPods) {
return fmt.Errorf("topologyPairsPotentialAntiAffinityPods are not equal")
}
if !reflect.DeepEqual(meta1.topologyPairsAntiAffinityPodsMap.podToTopologyPairs,
meta2.topologyPairsAntiAffinityPodsMap.podToTopologyPairs) {
if !reflect.DeepEqual(meta1.podAffinityMetadata.topologyPairsAntiAffinityPodsMap.podToTopologyPairs,
meta2.podAffinityMetadata.topologyPairsAntiAffinityPodsMap.podToTopologyPairs) {
return fmt.Errorf("topologyPairsAntiAffinityPodsMap.podToTopologyPairs are not equal")
}
if !reflect.DeepEqual(meta1.topologyPairsAntiAffinityPodsMap.topologyPairToPods,
meta2.topologyPairsAntiAffinityPodsMap.topologyPairToPods) {
if !reflect.DeepEqual(meta1.podAffinityMetadata.topologyPairsAntiAffinityPodsMap.topologyPairToPods,
meta2.podAffinityMetadata.topologyPairsAntiAffinityPodsMap.topologyPairToPods) {
return fmt.Errorf("topologyPairsAntiAffinityPodsMap.topologyPairToPods are not equal")
}
if meta1.serviceAffinityInUse {
sortablePods1 := sortablePods(meta1.serviceAffinityMatchingPodList)
if meta1.serviceAffinityMetadata != nil {
sortablePods1 := sortablePods(meta1.serviceAffinityMetadata.matchingPodList)
sort.Sort(sortablePods1)
sortablePods2 := sortablePods(meta2.serviceAffinityMatchingPodList)
sortablePods2 := sortablePods(meta2.serviceAffinityMetadata.matchingPodList)
sort.Sort(sortablePods2)
if !reflect.DeepEqual(sortablePods1, sortablePods2) {
return fmt.Errorf("serviceAffinityMatchingPodLists are not euqal")
}
sortableServices1 := sortableServices(meta1.serviceAffinityMatchingPodServices)
sortableServices1 := sortableServices(meta1.serviceAffinityMetadata.matchingPodServices)
sort.Sort(sortableServices1)
sortableServices2 := sortableServices(meta2.serviceAffinityMatchingPodServices)
sortableServices2 := sortableServices(meta2.serviceAffinityMetadata.matchingPodServices)
sort.Sort(sortableServices2)
if !reflect.DeepEqual(sortableServices1, sortableServices2) {
return fmt.Errorf("serviceAffinityMatchingPodServices are not euqal")
@ -407,111 +404,117 @@ func TestPredicateMetadata_ShallowCopy(t *testing.T) {
},
},
podBestEffort: true,
podRequest: &schedulernodeinfo.Resource{
MilliCPU: 1000,
Memory: 300,
AllowedPodNumber: 4,
},
podPorts: []*v1.ContainerPort{
{
Name: "name",
HostPort: 10,
ContainerPort: 20,
Protocol: "TCP",
HostIP: "1.2.3.4",
podFitsResourcesMetadata: &podFitsResourcesMetadata{
podRequest: &schedulernodeinfo.Resource{
MilliCPU: 1000,
Memory: 300,
AllowedPodNumber: 4,
},
},
topologyPairsAntiAffinityPodsMap: &topologyPairsMaps{
topologyPairToPods: map[topologyPair]podSet{
{key: "name", value: "machine1"}: {
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p2", Labels: selector1},
Spec: v1.PodSpec{NodeName: "nodeC"},
}: struct{}{},
},
{key: "name", value: "machine2"}: {
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1", Labels: selector1},
Spec: v1.PodSpec{NodeName: "nodeA"},
}: struct{}{},
},
},
podToTopologyPairs: map[string]topologyPairSet{
"p2_": {
topologyPair{key: "name", value: "machine1"}: struct{}{},
},
"p1_": {
topologyPair{key: "name", value: "machine2"}: struct{}{},
podFitsHostPortsMetadata: &podFitsHostPortsMetadata{
podPorts: []*v1.ContainerPort{
{
Name: "name",
HostPort: 10,
ContainerPort: 20,
Protocol: "TCP",
HostIP: "1.2.3.4",
},
},
},
topologyPairsPotentialAffinityPods: &topologyPairsMaps{
topologyPairToPods: map[topologyPair]podSet{
{key: "name", value: "nodeA"}: {
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1", Labels: selector1},
Spec: v1.PodSpec{NodeName: "nodeA"},
}: struct{}{},
podAffinityMetadata: &podAffinityMetadata{
topologyPairsAntiAffinityPodsMap: &topologyPairsMaps{
topologyPairToPods: map[topologyPair]podSet{
{key: "name", value: "machine1"}: {
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p2", Labels: selector1},
Spec: v1.PodSpec{NodeName: "nodeC"},
}: struct{}{},
},
{key: "name", value: "machine2"}: {
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1", Labels: selector1},
Spec: v1.PodSpec{NodeName: "nodeA"},
}: struct{}{},
},
},
{key: "name", value: "nodeC"}: {
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p2"},
Spec: v1.PodSpec{
NodeName: "nodeC",
},
}: struct{}{},
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p6", Labels: selector1},
Spec: v1.PodSpec{NodeName: "nodeC"},
}: struct{}{},
podToTopologyPairs: map[string]topologyPairSet{
"p2_": {
topologyPair{key: "name", value: "machine1"}: struct{}{},
},
"p1_": {
topologyPair{key: "name", value: "machine2"}: struct{}{},
},
},
},
podToTopologyPairs: map[string]topologyPairSet{
"p1_": {
topologyPair{key: "name", value: "nodeA"}: struct{}{},
topologyPairsPotentialAffinityPods: &topologyPairsMaps{
topologyPairToPods: map[topologyPair]podSet{
{key: "name", value: "nodeA"}: {
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1", Labels: selector1},
Spec: v1.PodSpec{NodeName: "nodeA"},
}: struct{}{},
},
{key: "name", value: "nodeC"}: {
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p2"},
Spec: v1.PodSpec{
NodeName: "nodeC",
},
}: struct{}{},
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p6", Labels: selector1},
Spec: v1.PodSpec{NodeName: "nodeC"},
}: struct{}{},
},
},
"p2_": {
topologyPair{key: "name", value: "nodeC"}: struct{}{},
podToTopologyPairs: map[string]topologyPairSet{
"p1_": {
topologyPair{key: "name", value: "nodeA"}: struct{}{},
},
"p2_": {
topologyPair{key: "name", value: "nodeC"}: struct{}{},
},
"p6_": {
topologyPair{key: "name", value: "nodeC"}: struct{}{},
},
},
"p6_": {
topologyPair{key: "name", value: "nodeC"}: struct{}{},
},
topologyPairsPotentialAntiAffinityPods: &topologyPairsMaps{
topologyPairToPods: map[topologyPair]podSet{
{key: "name", value: "nodeN"}: {
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1", Labels: selector1},
Spec: v1.PodSpec{NodeName: "nodeN"},
}: struct{}{},
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p2"},
Spec: v1.PodSpec{
NodeName: "nodeM",
},
}: struct{}{},
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p3"},
Spec: v1.PodSpec{
NodeName: "nodeM",
},
}: struct{}{},
},
{key: "name", value: "nodeM"}: {
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p6", Labels: selector1},
Spec: v1.PodSpec{NodeName: "nodeM"},
}: struct{}{},
},
},
podToTopologyPairs: map[string]topologyPairSet{
"p1_": {
topologyPair{key: "name", value: "nodeN"}: struct{}{},
},
"p2_": {
topologyPair{key: "name", value: "nodeN"}: struct{}{},
},
"p3_": {
topologyPair{key: "name", value: "nodeN"}: struct{}{},
},
"p6_": {
topologyPair{key: "name", value: "nodeM"}: struct{}{},
},
},
},
},
topologyPairsPotentialAntiAffinityPods: &topologyPairsMaps{
topologyPairToPods: map[topologyPair]podSet{
{key: "name", value: "nodeN"}: {
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1", Labels: selector1},
Spec: v1.PodSpec{NodeName: "nodeN"},
}: struct{}{},
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p2"},
Spec: v1.PodSpec{
NodeName: "nodeM",
},
}: struct{}{},
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p3"},
Spec: v1.PodSpec{
NodeName: "nodeM",
},
}: struct{}{},
},
{key: "name", value: "nodeM"}: {
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p6", Labels: selector1},
Spec: v1.PodSpec{NodeName: "nodeM"},
}: struct{}{},
},
},
podToTopologyPairs: map[string]topologyPairSet{
"p1_": {
topologyPair{key: "name", value: "nodeN"}: struct{}{},
},
"p2_": {
topologyPair{key: "name", value: "nodeN"}: struct{}{},
},
"p3_": {
topologyPair{key: "name", value: "nodeN"}: struct{}{},
},
"p6_": {
topologyPair{key: "name", value: "nodeM"}: struct{}{},
},
},
},
podSpreadCache: &podSpreadCache{
evenPodsSpreadMetadata: &evenPodsSpreadMetadata{
tpKeyToCriticalPaths: map[string]*criticalPaths{
"name": {{"nodeA", 1}, {"nodeC", 2}},
},
@ -520,13 +523,14 @@ func TestPredicateMetadata_ShallowCopy(t *testing.T) {
{key: "name", value: "nodeC"}: 2,
},
},
serviceAffinityInUse: true,
serviceAffinityMatchingPodList: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "pod1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "pod2"}},
},
serviceAffinityMatchingPodServices: []*v1.Service{
{ObjectMeta: metav1.ObjectMeta{Name: "service1"}},
serviceAffinityMetadata: &serviceAffinityMetadata{
matchingPodList: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "pod1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "pod2"}},
},
matchingPodServices: []*v1.Service{
{ObjectMeta: metav1.ObjectMeta{Name: "service1"}},
},
},
}
@ -896,7 +900,7 @@ func TestGetTPMapMatchingSpreadConstraints(t *testing.T) {
pod *v1.Pod
nodes []*v1.Node
existingPods []*v1.Pod
want *podSpreadCache
want *evenPodsSpreadMetadata
}{
{
name: "clean cluster with one spreadConstraint",
@ -909,7 +913,7 @@ func TestGetTPMapMatchingSpreadConstraints(t *testing.T) {
st.MakeNode().Name("node-x").Label("zone", "zone2").Label("node", "node-x").Obj(),
st.MakeNode().Name("node-y").Label("zone", "zone2").Label("node", "node-y").Obj(),
},
want: &podSpreadCache{
want: &evenPodsSpreadMetadata{
tpKeyToCriticalPaths: map[string]*criticalPaths{
"zone": {{"zone1", 0}, {"zone2", 0}},
},
@ -937,7 +941,7 @@ func TestGetTPMapMatchingSpreadConstraints(t *testing.T) {
st.MakePod().Name("p-y1").Node("node-y").Label("foo", "").Obj(),
st.MakePod().Name("p-y2").Node("node-y").Label("foo", "").Obj(),
},
want: &podSpreadCache{
want: &evenPodsSpreadMetadata{
tpKeyToCriticalPaths: map[string]*criticalPaths{
"zone": {{"zone2", 2}, {"zone1", 3}},
},
@ -967,7 +971,7 @@ func TestGetTPMapMatchingSpreadConstraints(t *testing.T) {
st.MakePod().Name("p-y1").Node("node-y").Label("foo", "").Obj(),
st.MakePod().Name("p-y2").Node("node-y").Label("foo", "").Obj(),
},
want: &podSpreadCache{
want: &evenPodsSpreadMetadata{
tpKeyToCriticalPaths: map[string]*criticalPaths{
"zone": {{"zone3", 0}, {"zone2", 2}},
},
@ -996,7 +1000,7 @@ func TestGetTPMapMatchingSpreadConstraints(t *testing.T) {
st.MakePod().Name("p-y1").Namespace("ns2").Node("node-y").Label("foo", "").Obj(),
st.MakePod().Name("p-y2").Node("node-y").Label("foo", "").Obj(),
},
want: &podSpreadCache{
want: &evenPodsSpreadMetadata{
tpKeyToCriticalPaths: map[string]*criticalPaths{
"zone": {{"zone2", 1}, {"zone1", 2}},
},
@ -1027,7 +1031,7 @@ func TestGetTPMapMatchingSpreadConstraints(t *testing.T) {
st.MakePod().Name("p-y3").Node("node-y").Label("foo", "").Obj(),
st.MakePod().Name("p-y4").Node("node-y").Label("foo", "").Obj(),
},
want: &podSpreadCache{
want: &evenPodsSpreadMetadata{
tpKeyToCriticalPaths: map[string]*criticalPaths{
"zone": {{"zone1", 3}, {"zone2", 4}},
"node": {{"node-x", 0}, {"node-b", 1}},
@ -1064,7 +1068,7 @@ func TestGetTPMapMatchingSpreadConstraints(t *testing.T) {
st.MakePod().Name("p-y3").Node("node-y").Label("foo", "").Obj(),
st.MakePod().Name("p-y4").Node("node-y").Label("foo", "").Obj(),
},
want: &podSpreadCache{
want: &evenPodsSpreadMetadata{
tpKeyToCriticalPaths: map[string]*criticalPaths{
"zone": {{"zone1", 3}, {"zone2", 4}},
"node": {{"node-b", 1}, {"node-a", 2}},
@ -1093,7 +1097,7 @@ func TestGetTPMapMatchingSpreadConstraints(t *testing.T) {
st.MakePod().Name("p-a").Node("node-a").Label("foo", "").Obj(),
st.MakePod().Name("p-b").Node("node-b").Label("bar", "").Obj(),
},
want: &podSpreadCache{
want: &evenPodsSpreadMetadata{
tpKeyToCriticalPaths: map[string]*criticalPaths{
"zone": {{"zone2", 0}, {"zone1", 1}},
"node": {{"node-a", 0}, {"node-y", 0}},
@ -1127,7 +1131,7 @@ func TestGetTPMapMatchingSpreadConstraints(t *testing.T) {
st.MakePod().Name("p-y3").Node("node-y").Label("foo", "").Obj(),
st.MakePod().Name("p-y4").Node("node-y").Label("foo", "").Label("bar", "").Obj(),
},
want: &podSpreadCache{
want: &evenPodsSpreadMetadata{
tpKeyToCriticalPaths: map[string]*criticalPaths{
"zone": {{"zone1", 3}, {"zone2", 4}},
"node": {{"node-b", 0}, {"node-a", 1}},
@ -1163,7 +1167,7 @@ func TestGetTPMapMatchingSpreadConstraints(t *testing.T) {
st.MakePod().Name("p-y3").Node("node-y").Label("foo", "").Obj(),
st.MakePod().Name("p-y4").Node("node-y").Label("foo", "").Obj(),
},
want: &podSpreadCache{
want: &evenPodsSpreadMetadata{
tpKeyToCriticalPaths: map[string]*criticalPaths{
"zone": {{"zone1", 3}, {"zone2", 4}},
"node": {{"node-b", 1}, {"node-a", 2}},
@ -1181,10 +1185,10 @@ func TestGetTPMapMatchingSpreadConstraints(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
nodeInfoMap := schedulernodeinfo.CreateNodeNameToInfoMap(tt.existingPods, tt.nodes)
got, _ := getExistingPodSpreadCache(tt.pod, nodeInfoMap)
got, _ := getEvenPodsSpreadMetadata(tt.pod, nodeInfoMap)
got.sortCriticalPaths()
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("getExistingPodSpreadCache() = %v, want %v", *got, *tt.want)
t.Errorf("getEvenPodsSpreadMetadata() = %v, want %v", *got, *tt.want)
}
})
}
@ -1198,7 +1202,7 @@ func TestPodSpreadCache_addPod(t *testing.T) {
existingPods []*v1.Pod
nodeIdx int // denotes which node 'addedPod' belongs to
nodes []*v1.Node
want *podSpreadCache
want *evenPodsSpreadMetadata
}{
{
name: "node a and b both impact current min match",
@ -1212,7 +1216,7 @@ func TestPodSpreadCache_addPod(t *testing.T) {
st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(),
st.MakeNode().Name("node-b").Label("zone", "zone1").Label("node", "node-b").Obj(),
},
want: &podSpreadCache{
want: &evenPodsSpreadMetadata{
tpKeyToCriticalPaths: map[string]*criticalPaths{
"node": {{"node-b", 0}, {"node-a", 1}},
},
@ -1236,7 +1240,7 @@ func TestPodSpreadCache_addPod(t *testing.T) {
st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(),
st.MakeNode().Name("node-b").Label("zone", "zone1").Label("node", "node-b").Obj(),
},
want: &podSpreadCache{
want: &evenPodsSpreadMetadata{
tpKeyToCriticalPaths: map[string]*criticalPaths{
"node": {{"node-a", 1}, {"node-b", 1}},
},
@ -1260,7 +1264,7 @@ func TestPodSpreadCache_addPod(t *testing.T) {
st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(),
st.MakeNode().Name("node-b").Label("zone", "zone1").Label("node", "node-b").Obj(),
},
want: &podSpreadCache{
want: &evenPodsSpreadMetadata{
tpKeyToCriticalPaths: map[string]*criticalPaths{
"node": {{"node-a", 0}, {"node-b", 1}},
},
@ -1284,7 +1288,7 @@ func TestPodSpreadCache_addPod(t *testing.T) {
st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(),
st.MakeNode().Name("node-b").Label("zone", "zone1").Label("node", "node-b").Obj(),
},
want: &podSpreadCache{
want: &evenPodsSpreadMetadata{
tpKeyToCriticalPaths: map[string]*criticalPaths{
"node": {{"node-a", 0}, {"node-b", 2}},
},
@ -1307,7 +1311,7 @@ func TestPodSpreadCache_addPod(t *testing.T) {
st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(),
st.MakeNode().Name("node-x").Label("zone", "zone2").Label("node", "node-x").Obj(),
},
want: &podSpreadCache{
want: &evenPodsSpreadMetadata{
tpKeyToCriticalPaths: map[string]*criticalPaths{
"zone": {{"zone2", 0}, {"zone1", 1}},
"node": {{"node-x", 0}, {"node-a", 1}},
@ -1335,7 +1339,7 @@ func TestPodSpreadCache_addPod(t *testing.T) {
st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(),
st.MakeNode().Name("node-x").Label("zone", "zone2").Label("node", "node-x").Obj(),
},
want: &podSpreadCache{
want: &evenPodsSpreadMetadata{
tpKeyToCriticalPaths: map[string]*criticalPaths{
"zone": {{"zone1", 1}, {"zone2", 1}},
"node": {{"node-a", 1}, {"node-x", 1}},
@ -1366,7 +1370,7 @@ func TestPodSpreadCache_addPod(t *testing.T) {
st.MakeNode().Name("node-b").Label("zone", "zone1").Label("node", "node-b").Obj(),
st.MakeNode().Name("node-x").Label("zone", "zone2").Label("node", "node-x").Obj(),
},
want: &podSpreadCache{
want: &evenPodsSpreadMetadata{
tpKeyToCriticalPaths: map[string]*criticalPaths{
"zone": {{"zone2", 1}, {"zone1", 3}},
"node": {{"node-a", 1}, {"node-x", 1}},
@ -1398,7 +1402,7 @@ func TestPodSpreadCache_addPod(t *testing.T) {
st.MakeNode().Name("node-b").Label("zone", "zone1").Label("node", "node-b").Obj(),
st.MakeNode().Name("node-x").Label("zone", "zone2").Label("node", "node-x").Obj(),
},
want: &podSpreadCache{
want: &evenPodsSpreadMetadata{
tpKeyToCriticalPaths: map[string]*criticalPaths{
"zone": {{"zone2", 1}, {"zone1", 2}},
"node": {{"node-a", 0}, {"node-b", 1}},
@ -1430,7 +1434,7 @@ func TestPodSpreadCache_addPod(t *testing.T) {
st.MakeNode().Name("node-b").Label("zone", "zone1").Label("node", "node-b").Obj(),
st.MakeNode().Name("node-x").Label("zone", "zone2").Label("node", "node-x").Obj(),
},
want: &podSpreadCache{
want: &evenPodsSpreadMetadata{
tpKeyToCriticalPaths: map[string]*criticalPaths{
"zone": {{"zone1", 1}, {"zone2", 1}},
"node": {{"node-a", 1}, {"node-b", 1}},
@ -1448,12 +1452,12 @@ func TestPodSpreadCache_addPod(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
nodeInfoMap := schedulernodeinfo.CreateNodeNameToInfoMap(tt.existingPods, tt.nodes)
podSpreadCache, _ := getExistingPodSpreadCache(tt.preemptor, nodeInfoMap)
evenPodsSpreadMetadata, _ := getEvenPodsSpreadMetadata(tt.preemptor, nodeInfoMap)
podSpreadCache.addPod(tt.addedPod, tt.preemptor, tt.nodes[tt.nodeIdx])
podSpreadCache.sortCriticalPaths()
if !reflect.DeepEqual(podSpreadCache, tt.want) {
t.Errorf("podSpreadCache#addPod() = %v, want %v", podSpreadCache, tt.want)
evenPodsSpreadMetadata.addPod(tt.addedPod, tt.preemptor, tt.nodes[tt.nodeIdx])
evenPodsSpreadMetadata.sortCriticalPaths()
if !reflect.DeepEqual(evenPodsSpreadMetadata, tt.want) {
t.Errorf("evenPodsSpreadMetadata#addPod() = %v, want %v", evenPodsSpreadMetadata, tt.want)
}
})
}
@ -1468,7 +1472,7 @@ func TestPodSpreadCache_removePod(t *testing.T) {
deletedPodIdx int // need to reuse *Pod of existingPods[i]
deletedPod *v1.Pod // this field is used only when deletedPodIdx is -1
nodeIdx int // denotes which node "deletedPod" belongs to
want *podSpreadCache
want *evenPodsSpreadMetadata
}{
{
// A high priority pod may not be scheduled due to node taints or resource shortage.
@ -1489,7 +1493,7 @@ func TestPodSpreadCache_removePod(t *testing.T) {
},
deletedPodIdx: 0, // remove pod "p-a1"
nodeIdx: 0, // node-a
want: &podSpreadCache{
want: &evenPodsSpreadMetadata{
tpKeyToCriticalPaths: map[string]*criticalPaths{
"zone": {{"zone1", 1}, {"zone2", 1}},
},
@ -1518,7 +1522,7 @@ func TestPodSpreadCache_removePod(t *testing.T) {
},
deletedPodIdx: 0, // remove pod "p-a1"
nodeIdx: 0, // node-a
want: &podSpreadCache{
want: &evenPodsSpreadMetadata{
tpKeyToCriticalPaths: map[string]*criticalPaths{
"zone": {{"zone1", 1}, {"zone2", 2}},
},
@ -1548,7 +1552,7 @@ func TestPodSpreadCache_removePod(t *testing.T) {
},
deletedPodIdx: 0, // remove pod "p-a0"
nodeIdx: 0, // node-a
want: &podSpreadCache{
want: &evenPodsSpreadMetadata{
tpKeyToCriticalPaths: map[string]*criticalPaths{
"zone": {{"zone1", 2}, {"zone2", 2}},
},
@ -1578,7 +1582,7 @@ func TestPodSpreadCache_removePod(t *testing.T) {
deletedPodIdx: -1,
deletedPod: st.MakePod().Name("p-a0").Node("node-a").Label("bar", "").Obj(),
nodeIdx: 0, // node-a
want: &podSpreadCache{
want: &evenPodsSpreadMetadata{
tpKeyToCriticalPaths: map[string]*criticalPaths{
"zone": {{"zone1", 2}, {"zone2", 2}},
},
@ -1608,7 +1612,7 @@ func TestPodSpreadCache_removePod(t *testing.T) {
},
deletedPodIdx: 3, // remove pod "p-x1"
nodeIdx: 2, // node-x
want: &podSpreadCache{
want: &evenPodsSpreadMetadata{
tpKeyToCriticalPaths: map[string]*criticalPaths{
"zone": {{"zone2", 1}, {"zone1", 3}},
"node": {{"node-b", 1}, {"node-x", 1}},
@ -1626,7 +1630,7 @@ func TestPodSpreadCache_removePod(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
nodeInfoMap := schedulernodeinfo.CreateNodeNameToInfoMap(tt.existingPods, tt.nodes)
podSpreadCache, _ := getExistingPodSpreadCache(tt.preemptor, nodeInfoMap)
evenPodsSpreadMetadata, _ := getEvenPodsSpreadMetadata(tt.preemptor, nodeInfoMap)
var deletedPod *v1.Pod
if tt.deletedPodIdx < len(tt.existingPods) && tt.deletedPodIdx >= 0 {
@ -1634,10 +1638,10 @@ func TestPodSpreadCache_removePod(t *testing.T) {
} else {
deletedPod = tt.deletedPod
}
podSpreadCache.removePod(deletedPod, tt.preemptor, tt.nodes[tt.nodeIdx])
podSpreadCache.sortCriticalPaths()
if !reflect.DeepEqual(podSpreadCache, tt.want) {
t.Errorf("podSpreadCache#removePod() = %v, want %v", podSpreadCache, tt.want)
evenPodsSpreadMetadata.removePod(deletedPod, tt.preemptor, tt.nodes[tt.nodeIdx])
evenPodsSpreadMetadata.sortCriticalPaths()
if !reflect.DeepEqual(evenPodsSpreadMetadata, tt.want) {
t.Errorf("evenPodsSpreadMetadata#removePod() = %v, want %v", evenPodsSpreadMetadata, tt.want)
}
})
}
@ -1686,7 +1690,7 @@ func BenchmarkTestGetTPMapMatchingSpreadConstraints(b *testing.B) {
nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(existingPods, allNodes)
b.ResetTimer()
for i := 0; i < b.N; i++ {
getExistingPodSpreadCache(tt.pod, nodeNameToInfo)
getEvenPodsSpreadMetadata(tt.pod, nodeNameToInfo)
}
})
}
@ -1698,7 +1702,7 @@ var (
)
// sortCriticalPaths is only used for testing purposes.
func (c *podSpreadCache) sortCriticalPaths() {
func (c *evenPodsSpreadMetadata) sortCriticalPaths() {
for _, paths := range c.tpKeyToCriticalPaths {
// If two paths both hold minimum matching number, and topologyValue is unordered.
if paths[0].matchNum == paths[1].matchNum && paths[0].topologyValue > paths[1].topologyValue {


@ -867,10 +867,10 @@ func PodFitsResources(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulerno
ignoredExtendedResources := sets.NewString()
var podRequest *schedulernodeinfo.Resource
if predicateMeta, ok := meta.(*predicateMetadata); ok {
podRequest = predicateMeta.podRequest
if predicateMeta.ignoredExtendedResources != nil {
ignoredExtendedResources = predicateMeta.ignoredExtendedResources
if predicateMeta, ok := meta.(*predicateMetadata); ok && predicateMeta.podFitsResourcesMetadata != nil {
podRequest = predicateMeta.podFitsResourcesMetadata.podRequest
if predicateMeta.podFitsResourcesMetadata.ignoredExtendedResources != nil {
ignoredExtendedResources = predicateMeta.podFitsResourcesMetadata.ignoredExtendedResources
}
} else {
// We couldn't parse metadata - fallback to computing it.
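With the sub-structs being pointers, the predicates now guard both the type assertion and the relevant sub-metadata before trusting precomputed state, and otherwise fall back to computing it on the spot. The shape of that guard in isolation (hypothetical types, not the actual predicate):

package main

import "fmt"

type portsMeta struct{ ports []int }

type schedMeta struct {
    hostPorts *portsMeta // may be nil if its producer never ran
}

func computePorts() []int { return []int{8080} } // stand-in for the slow path

func wantPorts(m interface{}) []int {
    // Use the precomputed value only when the concrete type matches AND
    // the relevant sub-metadata was actually populated.
    if pm, ok := m.(*schedMeta); ok && pm.hostPorts != nil {
        return pm.hostPorts.ports
    }
    return computePorts() // fall back to recomputing
}

func main() {
    fmt.Println(wantPorts(&schedMeta{hostPorts: &portsMeta{ports: []int{80}}})) // [80]
    fmt.Println(wantPorts(&schedMeta{}))                                        // [8080]
    fmt.Println(wantPorts(nil))                                                 // [8080]
}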
@ -1062,10 +1062,8 @@ func (s *ServiceAffinity) serviceAffinityMetadataProducer(pm *predicateMetadata)
klog.Errorf("Cannot precompute service affinity, a pod is required to calculate service affinity.")
return
}
pm.serviceAffinityInUse = true
var err error
// Store services which match the pod.
pm.serviceAffinityMatchingPodServices, err = s.serviceLister.GetPodServices(pm.pod)
matchingPodServices, err := s.serviceLister.GetPodServices(pm.pod)
if err != nil {
klog.Errorf("Error precomputing service affinity: could not list services: %v", err)
}
@ -1076,7 +1074,11 @@ func (s *ServiceAffinity) serviceAffinityMetadataProducer(pm *predicateMetadata)
}
// consider only the pods that belong to the same namespace
pm.serviceAffinityMatchingPodList = FilterPodsByNamespace(allMatches, pm.pod.Namespace)
matchingPodList := FilterPodsByNamespace(allMatches, pm.pod.Namespace)
pm.serviceAffinityMetadata = &serviceAffinityMetadata{
matchingPodList: matchingPodList,
matchingPodServices: matchingPodServices,
}
}
// NewServiceAffinityPredicate creates a ServiceAffinity.
@ -1120,14 +1122,14 @@ func NewServiceAffinityPredicate(podLister algorithm.PodLister, serviceLister al
func (s *ServiceAffinity) checkServiceAffinity(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
var services []*v1.Service
var pods []*v1.Pod
if pm, ok := meta.(*predicateMetadata); ok && (pm.serviceAffinityMatchingPodList != nil || pm.serviceAffinityMatchingPodServices != nil) {
services = pm.serviceAffinityMatchingPodServices
pods = pm.serviceAffinityMatchingPodList
if pm, ok := meta.(*predicateMetadata); ok && pm.serviceAffinityMetadata != nil && (pm.serviceAffinityMetadata.matchingPodList != nil || pm.serviceAffinityMetadata.matchingPodServices != nil) {
services = pm.serviceAffinityMetadata.matchingPodServices
pods = pm.serviceAffinityMetadata.matchingPodList
} else {
// Make the predicate resilient in case metadata is missing.
pm = &predicateMetadata{pod: pod}
s.serviceAffinityMetadataProducer(pm)
pods, services = pm.serviceAffinityMatchingPodList, pm.serviceAffinityMatchingPodServices
pods, services = pm.serviceAffinityMetadata.matchingPodList, pm.serviceAffinityMetadata.matchingPodServices
}
filteredPods := nodeInfo.FilterOutPods(pods)
node := nodeInfo.Node()
@ -1158,8 +1160,8 @@ func (s *ServiceAffinity) checkServiceAffinity(pod *v1.Pod, meta PredicateMetada
// PodFitsHostPorts checks if a node has free ports for the requested pod ports.
func PodFitsHostPorts(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
var wantPorts []*v1.ContainerPort
if predicateMeta, ok := meta.(*predicateMetadata); ok {
wantPorts = predicateMeta.podPorts
if predicateMeta, ok := meta.(*predicateMetadata); ok && predicateMeta.podFitsHostPortsMetadata != nil {
wantPorts = predicateMeta.podFitsHostPortsMetadata.podPorts
} else {
// We couldn't parse metadata - fallback to computing it.
wantPorts = schedutil.GetContainerPorts(pod)
@ -1410,7 +1412,7 @@ func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta
}
var topologyMaps *topologyPairsMaps
if predicateMeta, ok := meta.(*predicateMetadata); ok {
topologyMaps = predicateMeta.topologyPairsAntiAffinityPodsMap
topologyMaps = predicateMeta.podAffinityMetadata.topologyPairsAntiAffinityPodsMap
} else {
// Filter out pods whose nodeName is equal to nodeInfo.node.Name, but are not
// present in nodeInfo. Pods on other nodes pass the filter.
@ -1486,7 +1488,7 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
}
if predicateMeta, ok := meta.(*predicateMetadata); ok {
// Check all affinity terms.
topologyPairsPotentialAffinityPods := predicateMeta.topologyPairsPotentialAffinityPods
topologyPairsPotentialAffinityPods := predicateMeta.podAffinityMetadata.topologyPairsPotentialAffinityPods
if affinityTerms := GetPodAffinityTerms(affinity.PodAffinity); len(affinityTerms) > 0 {
matchExists := c.nodeMatchesAllTopologyTerms(pod, topologyPairsPotentialAffinityPods, nodeInfo, affinityTerms)
if !matchExists {
@ -1503,7 +1505,7 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
}
// Check all anti-affinity terms.
topologyPairsPotentialAntiAffinityPods := predicateMeta.topologyPairsPotentialAntiAffinityPods
topologyPairsPotentialAntiAffinityPods := predicateMeta.podAffinityMetadata.topologyPairsPotentialAntiAffinityPods
if antiAffinityTerms := GetPodAntiAffinityTerms(affinity.PodAntiAffinity); len(antiAffinityTerms) > 0 {
matchExists := c.nodeMatchesAnyTopologyTerm(pod, topologyPairsPotentialAntiAffinityPods, nodeInfo, antiAffinityTerms)
if matchExists {
@ -1783,15 +1785,15 @@ func EvenPodsSpreadPredicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *sche
return true, nil, nil
}
var podSpreadCache *podSpreadCache
var evenPodsSpreadMetadata *evenPodsSpreadMetadata
if predicateMeta, ok := meta.(*predicateMetadata); ok {
podSpreadCache = predicateMeta.podSpreadCache
evenPodsSpreadMetadata = predicateMeta.evenPodsSpreadMetadata
} else { // We don't have precomputed metadata. We have to follow a slow path to check spread constraints.
// TODO(autoscaler): get it implemented
return false, nil, errors.New("metadata not pre-computed for EvenPodsSpreadPredicate")
}
if podSpreadCache == nil || len(podSpreadCache.tpPairToMatchNum) == 0 {
if evenPodsSpreadMetadata == nil || len(evenPodsSpreadMetadata.tpPairToMatchNum) == 0 {
return true, nil, nil
}
@ -1814,16 +1816,16 @@ func EvenPodsSpreadPredicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *sche
}
pair := topologyPair{key: tpKey, value: tpVal}
paths, ok := podSpreadCache.tpKeyToCriticalPaths[tpKey]
paths, ok := evenPodsSpreadMetadata.tpKeyToCriticalPaths[tpKey]
if !ok {
// error which should not happen
klog.Errorf("internal error: get paths from key %q of %#v", tpKey, podSpreadCache.tpKeyToCriticalPaths)
klog.Errorf("internal error: get paths from key %q of %#v", tpKey, evenPodsSpreadMetadata.tpKeyToCriticalPaths)
continue
}
// judging criteria:
// 'existing matching num' + 'if self-match (1 or 0)' - 'global min matching num' <= 'maxSkew'
minMatchNum := paths[0].matchNum
matchNum := podSpreadCache.tpPairToMatchNum[pair]
matchNum := evenPodsSpreadMetadata.tpPairToMatchNum[pair]
skew := matchNum + selfMatchNum - minMatchNum
if skew > constraint.MaxSkew {
klog.V(5).Infof("node '%s' failed spreadConstraint[%s]: matchNum(%d) + selfMatchNum(%d) - minMatchNum(%d) > maxSkew(%d)", node.Name, tpKey, matchNum, selfMatchNum, minMatchNum, constraint.MaxSkew)
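The skew criterion above ('existing matching num' + 'self-match (1 or 0)' - 'global min matching num' <= maxSkew) is easy to sanity-check with a tiny worked example; the numbers below are hypothetical, not taken from any test in this PR:

package main

import "fmt"

// skewOK applies the same inequality the predicate uses.
func skewOK(matchNum, selfMatchNum, minMatchNum, maxSkew int32) bool {
    return matchNum+selfMatchNum-minMatchNum <= maxSkew
}

func main() {
    // zone1 already matches 3 pods, the global minimum across zones is 1,
    // the incoming pod matches its own constraint (selfMatch = 1), and
    // maxSkew is 1: 3 + 1 - 1 = 3 > 1, so nodes in zone1 are rejected.
    fmt.Println(skewOK(3, 1, 1, 1)) // false

    // A zone sitting at the minimum passes: 1 + 1 - 1 = 1 <= 1.
    fmt.Println(skewOK(1, 1, 1, 1)) // true
}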