Merging selectors for spreading into one

Signed-off-by: Aldo Culquicondor <acondor@google.com>
Aldo Culquicondor 2019-11-11 16:02:36 -05:00
parent bcb171b375
commit c35fe2c801
6 changed files with 48 additions and 44 deletions


@@ -92,6 +92,7 @@ go_test(
         "//staging/src/k8s.io/client-go/informers:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
         "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
+        "//vendor/github.com/google/go-cmp/cmp:go_default_library",
         "//vendor/github.com/stretchr/testify/assert:go_default_library",
     ],
 )


@@ -58,7 +58,7 @@ type priorityMetadata struct {
 	podLimits               *schedulernodeinfo.Resource
 	podTolerations          []v1.Toleration
 	affinity                *v1.Affinity
-	podSelectors            []labels.Selector
+	podSelector             labels.Selector
 	controllerRef           *metav1.OwnerReference
 	podFirstServiceSelector labels.Selector
 	totalNumNodes           int
@@ -88,7 +88,7 @@ func (pmf *MetadataFactory) PriorityMetadata(
 		podLimits:               getResourceLimits(pod),
 		podTolerations:          getAllTolerationPreferNoSchedule(pod.Spec.Tolerations),
 		affinity:                pod.Spec.Affinity,
-		podSelectors:            getSelectors(pod, pmf.serviceLister, pmf.controllerLister, pmf.replicaSetLister, pmf.statefulSetLister),
+		podSelector:             getSelector(pod, pmf.serviceLister, pmf.controllerLister, pmf.replicaSetLister, pmf.statefulSetLister),
 		controllerRef:           metav1.GetControllerOf(pod),
 		podFirstServiceSelector: getFirstServiceSelector(pod, pmf.serviceLister),
 		totalNumNodes:           totalNumNodes,
@@ -105,37 +105,48 @@ func getFirstServiceSelector(pod *v1.Pod, sl corelisters.ServiceLister) (firstSe
 	return nil
 }
 
-// getSelectors returns selectors of services, RCs and RSs matching the given pod.
-func getSelectors(pod *v1.Pod, sl corelisters.ServiceLister, cl corelisters.ReplicationControllerLister, rsl appslisters.ReplicaSetLister, ssl appslisters.StatefulSetLister) []labels.Selector {
-	var selectors []labels.Selector
+// getSelector returns a selector for the services, RCs, RSs, and SSs matching the given pod.
+func getSelector(pod *v1.Pod, sl corelisters.ServiceLister, cl corelisters.ReplicationControllerLister, rsl appslisters.ReplicaSetLister, ssl appslisters.StatefulSetLister) labels.Selector {
+	labelSet := make(labels.Set)
+	// Since services, RCs, RSs and SSs match the pod, they won't have conflicting
+	// labels. Merging is safe.
 
 	if services, err := sl.GetPodServices(pod); err == nil {
 		for _, service := range services {
-			selectors = append(selectors, labels.SelectorFromSet(service.Spec.Selector))
+			labelSet = labels.Merge(labelSet, service.Spec.Selector)
 		}
 	}
 
 	if rcs, err := cl.GetPodControllers(pod); err == nil {
 		for _, rc := range rcs {
-			selectors = append(selectors, labels.SelectorFromSet(rc.Spec.Selector))
+			labelSet = labels.Merge(labelSet, rc.Spec.Selector)
 		}
 	}
 
+	selector := labels.NewSelector()
+	if len(labelSet) != 0 {
+		selector = labelSet.AsSelector()
+	}
+
 	if rss, err := rsl.GetPodReplicaSets(pod); err == nil {
 		for _, rs := range rss {
-			if selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector); err == nil {
-				selectors = append(selectors, selector)
+			if other, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector); err == nil {
+				if r, ok := other.Requirements(); ok {
+					selector = selector.Add(r...)
+				}
 			}
 		}
 	}
 
 	if sss, err := ssl.GetPodStatefulSets(pod); err == nil {
 		for _, ss := range sss {
-			if selector, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector); err == nil {
-				selectors = append(selectors, selector)
+			if other, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector); err == nil {
+				if r, ok := other.Requirements(); ok {
+					selector = selector.Add(r...)
+				}
 			}
 		}
 	}
 
-	return selectors
+	return selector
 }
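
The old getSelectors returned one selector per matching Service, RC, RS, and StatefulSet; the new getSelector folds them all into a single labels.Selector. Below is a minimal, standalone sketch (not part of this commit; the labels are made up for illustration) of the two merging steps it uses: set-based selectors are combined with labels.Merge and converted via Set.AsSelector, while requirement-based selectors from metav1.LabelSelectorAsSelector are folded in through Requirements and Add.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Set-based selectors (Services, ReplicationControllers) are plain
	// label maps; merging them yields a single labels.Set.
	labelSet := make(labels.Set)
	labelSet = labels.Merge(labelSet, labels.Set{"app": "web"})
	labelSet = labels.Merge(labelSet, labels.Set{"tier": "frontend"})

	selector := labels.NewSelector()
	if len(labelSet) != 0 {
		selector = labelSet.AsSelector()
	}

	// Requirement-based selectors (ReplicaSets, StatefulSets) are converted
	// with metav1.LabelSelectorAsSelector and folded in via Requirements/Add.
	rsSelector := &metav1.LabelSelector{MatchLabels: map[string]string{"release": "stable"}}
	if other, err := metav1.LabelSelectorAsSelector(rsSelector); err == nil {
		if r, ok := other.Requirements(); ok {
			selector = selector.Add(r...)
		}
	}

	podLabels := labels.Set{"app": "web", "tier": "frontend", "release": "stable"}
	fmt.Println(selector.Matches(podLabels))                 // true: every merged requirement matches
	fmt.Println(selector.Matches(labels.Set{"app": "web"})) // false: tier and release are missing
}

Merging is safe here for the reason the commit's own comment gives: every selector already matches the same pod, so the merged label set cannot hold conflicting values for a key.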


@@ -23,6 +23,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/client-go/informers"
 	clientsetfake "k8s.io/client-go/kubernetes/fake"
 	priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
@@ -140,6 +141,7 @@ func TestPriorityMetadata(t *testing.T) {
 				podLimits:      nonPodLimits,
 				podTolerations: tolerations,
 				affinity:       podAffinity,
+				podSelector:    labels.NewSelector(),
 			},
 			name: "Produce a priorityMetadata with default requests",
 		},
@@ -149,8 +151,9 @@ func TestPriorityMetadata(t *testing.T) {
 				podLimits:      nonPodLimits,
 				podTolerations: tolerations,
 				affinity:       nil,
+				podSelector:    labels.NewSelector(),
 			},
-			name: "Produce a priorityMetadata with specified requests",
+			name: "Produce a priorityMetadata with tolerations and requests",
 		},
 		{
 			pod: podWithAffinityAndRequests,
@@ -158,8 +161,9 @@ func TestPriorityMetadata(t *testing.T) {
 				podLimits:      specifiedPodLimits,
 				podTolerations: nil,
 				affinity:       podAffinity,
+				podSelector:    labels.NewSelector(),
 			},
-			name: "Produce a priorityMetadata with specified requests",
+			name: "Produce a priorityMetadata with affinity and requests",
 		},
 	}
 	client := clientsetfake.NewSimpleClientset()


@@ -66,7 +66,7 @@ func NewSelectorSpreadPriority(
 // i.e. it pushes the scheduler towards a node where there's the smallest number of
 // pods which match the same service, RC,RSs or StatefulSets selectors as the pod being scheduled.
 func (s *SelectorSpread) CalculateSpreadPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) {
-	var selectors []labels.Selector
+	var selector labels.Selector
 	node := nodeInfo.Node()
 	if node == nil {
 		return framework.NodeScore{}, fmt.Errorf("node not found")
@@ -74,20 +74,12 @@ func (s *SelectorSpread) CalculateSpreadPriorityMap(pod *v1.Pod, meta interface{
 
 	priorityMeta, ok := meta.(*priorityMetadata)
 	if ok {
-		selectors = priorityMeta.podSelectors
+		selector = priorityMeta.podSelector
 	} else {
-		selectors = getSelectors(pod, s.serviceLister, s.controllerLister, s.replicaSetLister, s.statefulSetLister)
+		selector = getSelector(pod, s.serviceLister, s.controllerLister, s.replicaSetLister, s.statefulSetLister)
 	}
 
-	if len(selectors) == 0 {
-		return framework.NodeScore{
-			Name:  node.Name,
-			Score: 0,
-		}, nil
-	}
-
-	count := countMatchingPods(pod.Namespace, selectors, nodeInfo)
-
+	count := countMatchingPods(pod.Namespace, selector, nodeInfo)
 	return framework.NodeScore{
 		Name:  node.Name,
 		Score: int64(count),
@@ -179,9 +171,9 @@ func NewServiceAntiAffinityPriority(podLister schedulerlisters.PodLister, servic
 	return antiAffinity.CalculateAntiAffinityPriorityMap, antiAffinity.CalculateAntiAffinityPriorityReduce
 }
 
-// countMatchingPods cout pods based on namespace and matching all selectors
-func countMatchingPods(namespace string, selectors []labels.Selector, nodeInfo *schedulernodeinfo.NodeInfo) int {
-	if nodeInfo.Pods() == nil || len(nodeInfo.Pods()) == 0 || len(selectors) == 0 {
+// countMatchingPods counts pods based on namespace and matching all selectors
+func countMatchingPods(namespace string, selector labels.Selector, nodeInfo *schedulernodeinfo.NodeInfo) int {
+	if nodeInfo.Pods() == nil || len(nodeInfo.Pods()) == 0 || selector.Empty() {
 		return 0
 	}
 	count := 0
@@ -189,14 +181,7 @@ func countMatchingPods(namespace string, selectors []labels.Selector, nodeInfo *
 		// Ignore pods being deleted for spreading purposes
 		// Similar to how it is done for SelectorSpreadPriority
 		if namespace == pod.Namespace && pod.DeletionTimestamp == nil {
-			matches := true
-			for _, selector := range selectors {
-				if !selector.Matches(labels.Set(pod.Labels)) {
-					matches = false
-					break
-				}
-			}
-			if matches {
+			if selector.Matches(labels.Set(pod.Labels)) {
 				count++
 			}
 		}
@@ -219,12 +204,14 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityMap(pod *v1.Pod, meta
 	} else {
 		firstServiceSelector = getFirstServiceSelector(pod, s.serviceLister)
 	}
-	//pods matched namespace,selector on current node
-	var selectors []labels.Selector
+	// Pods matched namespace,selector on current node.
+	var selector labels.Selector
 	if firstServiceSelector != nil {
-		selectors = append(selectors, firstServiceSelector)
+		selector = firstServiceSelector
+	} else {
+		selector = labels.NewSelector()
 	}
 
-	score := countMatchingPods(pod.Namespace, selectors, nodeInfo)
+	score := countMatchingPods(pod.Namespace, selector, nodeInfo)
 	return framework.NodeScore{
 		Name: node.Name,
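
With one merged selector, countMatchingPods makes a single Matches call per pod, and selector.Empty() preserves the old behaviour of scoring 0 when no Service, RC, RS, or StatefulSet matched the incoming pod. A small standalone sketch of that counting logic (not the scheduler function itself; it counts over made-up pod label sets rather than NodeInfo pods):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

// countMatching mirrors the single-selector counting loop: an empty selector
// (no owners matched) contributes 0, and a non-empty one requires every
// merged requirement to match a pod's labels.
func countMatching(selector labels.Selector, podLabelSets []labels.Set) int {
	if selector.Empty() {
		return 0
	}
	count := 0
	for _, ls := range podLabelSets {
		if selector.Matches(ls) {
			count++
		}
	}
	return count
}

func main() {
	pods := []labels.Set{
		{"app": "web", "tier": "frontend"},
		{"app": "web"},
	}

	empty := labels.NewSelector() // Empty() == true, as when nothing matched the pod
	merged := labels.Set{"app": "web", "tier": "frontend"}.AsSelector()

	fmt.Println(countMatching(empty, pods))  // 0
	fmt.Println(countMatching(merged, pods)) // 1: only the first pod carries both labels
}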


@@ -20,6 +20,7 @@ import (
 	"reflect"
 	"testing"
 
+	"github.com/google/go-cmp/cmp"
 	apps "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -359,8 +360,8 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			if err != nil {
 				t.Errorf("unexpected error: %v \n", err)
 			}
-			if !reflect.DeepEqual(test.expectedList, list) {
-				t.Errorf("expected %#v, got %#v", test.expectedList, list)
+			if diff := cmp.Diff(test.expectedList, list); diff != "" {
+				t.Errorf("wrong priorities produced (-want, +got): %s", diff)
 			}
 		})
 	}
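
The test assertion moves from reflect.DeepEqual to go-cmp, which reports a field-level -want/+got diff instead of dumping both values. A minimal illustration of the pattern, using a hypothetical local nodeScore type rather than the scheduler's framework.NodeScore (this would live in a _test.go file):

package example

import (
	"testing"

	"github.com/google/go-cmp/cmp"
)

type nodeScore struct {
	Name  string
	Score int64
}

// TestDiffStyle shows the assertion style the commit switches to: cmp.Diff
// returns an empty string on equality and a readable -want/+got report otherwise.
func TestDiffStyle(t *testing.T) {
	want := []nodeScore{{Name: "node1", Score: 10}}
	got := []nodeScore{{Name: "node1", Score: 10}}
	if diff := cmp.Diff(want, got); diff != "" {
		t.Errorf("wrong priorities produced (-want, +got): %s", diff)
	}
}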


@@ -96,7 +96,7 @@ func BenchmarkTestSelectorSpreadPriority(b *testing.B) {
 		for i := 0; i < b.N; i++ {
 			meta := &priorityMetadata{
-				podSelectors: getSelectors(pod, ss.serviceLister, ss.controllerLister, ss.replicaSetLister, ss.statefulSetLister),
+				podSelector: getSelector(pod, ss.serviceLister, ss.controllerLister, ss.replicaSetLister, ss.statefulSetLister),
 			}
 			_, err := runMapReducePriority(ss.CalculateSpreadPriorityMap, ss.CalculateSpreadPriorityReduce, meta, pod, snapshot, filteredNodes)
 			if err != nil {