From 73ad38593a0ebd78a3e826f60ebd99c50619cc79 Mon Sep 17 00:00:00 2001 From: Aldo Culquicondor Date: Tue, 25 Feb 2020 14:44:44 -0500 Subject: [PATCH] Add default constraints to PodTopologySpread And update benchmark for even pod spreading to use default constraints Signed-off-by: Aldo Culquicondor --- .../plugins/defaultpodtopologyspread/BUILD | 5 +- .../default_pod_topology_spread.go | 55 +--- .../default_pod_topology_spread_perf_test.go | 17 +- pkg/scheduler/framework/plugins/helper/BUILD | 7 + .../framework/plugins/helper/spread.go | 95 +++++++ .../plugins/helper/spread_test.go} | 4 +- .../framework/plugins/podtopologyspread/BUILD | 10 + .../plugins/podtopologyspread/common.go | 19 ++ .../plugins/podtopologyspread/filtering.go | 40 +-- .../podtopologyspread/filtering_test.go | 267 ++++++++++++------ .../plugins/podtopologyspread/plugin.go | 121 +++++++- .../plugins/podtopologyspread/plugin_test.go | 160 +++++++++++ .../plugins/podtopologyspread/scoring.go | 27 +- .../plugins/podtopologyspread/scoring_test.go | 207 ++++++++++---- .../framework/plugins/serviceaffinity/BUILD | 1 + .../serviceaffinity/service_affinity.go | 5 +- pkg/scheduler/listers/BUILD | 14 +- 17 files changed, 801 insertions(+), 253 deletions(-) create mode 100644 pkg/scheduler/framework/plugins/helper/spread.go rename pkg/scheduler/{listers/listers_test.go => framework/plugins/helper/spread_test.go} (97%) create mode 100644 pkg/scheduler/framework/plugins/podtopologyspread/plugin_test.go diff --git a/pkg/scheduler/framework/plugins/defaultpodtopologyspread/BUILD b/pkg/scheduler/framework/plugins/defaultpodtopologyspread/BUILD index b63d0938c67..3547d3f9c47 100644 --- a/pkg/scheduler/framework/plugins/defaultpodtopologyspread/BUILD +++ b/pkg/scheduler/framework/plugins/defaultpodtopologyspread/BUILD @@ -6,16 +6,13 @@ go_library( importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpodtopologyspread", visibility = ["//visibility:public"], deps = [ + "//pkg/scheduler/framework/plugins/helper:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library", - "//pkg/scheduler/listers:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", "//pkg/util/node:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//staging/src/k8s.io/client-go/listers/apps/v1:go_default_library", - "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread.go b/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread.go index 1de9363cccd..15def49ae53 100644 --- a/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread.go +++ b/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread.go @@ -23,13 +23,10 @@ import ( "k8s.io/klog" v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" - appslisters "k8s.io/client-go/listers/apps/v1" - corelisters "k8s.io/client-go/listers/core/v1" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - schedulerlisters 
"k8s.io/kubernetes/pkg/scheduler/listers" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" utilnode "k8s.io/kubernetes/pkg/util/node" ) @@ -69,6 +66,8 @@ func (s *preScoreState) Clone() framework.StateData { } // skipDefaultPodTopologySpread returns true if the pod's TopologySpreadConstraints are specified. +// Note that this doesn't take into account default constraints defined for +// the PodTopologySpread plugin. func skipDefaultPodTopologySpread(pod *v1.Pod) bool { return len(pod.Spec.TopologySpreadConstraints) != 0 } @@ -182,7 +181,7 @@ func (pl *DefaultPodTopologySpread) ScoreExtensions() framework.ScoreExtensions func (pl *DefaultPodTopologySpread) PreScore(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodes []*v1.Node) *framework.Status { var selector labels.Selector informerFactory := pl.handle.SharedInformerFactory() - selector = getSelector( + selector = helper.DefaultSelector( pod, informerFactory.Core().V1().Services().Lister(), informerFactory.Core().V1().ReplicationControllers().Lister(), @@ -220,49 +219,3 @@ func countMatchingPods(namespace string, selector labels.Selector, nodeInfo *sch } return count } - -// getSelector returns a selector for the services, RCs, RSs, and SSs matching the given pod. -func getSelector(pod *v1.Pod, sl corelisters.ServiceLister, cl corelisters.ReplicationControllerLister, rsl appslisters.ReplicaSetLister, ssl appslisters.StatefulSetLister) labels.Selector { - labelSet := make(labels.Set) - // Since services, RCs, RSs and SSs match the pod, they won't have conflicting - // labels. Merging is safe. - - if services, err := schedulerlisters.GetPodServices(sl, pod); err == nil { - for _, service := range services { - labelSet = labels.Merge(labelSet, service.Spec.Selector) - } - } - - if rcs, err := cl.GetPodControllers(pod); err == nil { - for _, rc := range rcs { - labelSet = labels.Merge(labelSet, rc.Spec.Selector) - } - } - - selector := labels.NewSelector() - if len(labelSet) != 0 { - selector = labelSet.AsSelector() - } - - if rss, err := rsl.GetPodReplicaSets(pod); err == nil { - for _, rs := range rss { - if other, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector); err == nil { - if r, ok := other.Requirements(); ok { - selector = selector.Add(r...) - } - } - } - } - - if sss, err := ssl.GetPodStatefulSets(pod); err == nil { - for _, ss := range sss { - if other, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector); err == nil { - if r, ok := other.Requirements(); ok { - selector = selector.Add(r...) 
- } - } - } - } - - return selector -} diff --git a/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread_perf_test.go b/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread_perf_test.go index 9f87b27ccdf..5e03d3bce9d 100644 --- a/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread_perf_test.go +++ b/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread_perf_test.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/client-go/informers" - clientsetfake "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/kubernetes/fake" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" "k8s.io/kubernetes/pkg/scheduler/internal/cache" st "k8s.io/kubernetes/pkg/scheduler/testing" @@ -53,10 +53,9 @@ func BenchmarkTestSelectorSpreadPriority(b *testing.B) { pod := st.MakePod().Name("p").Label("foo", "").Obj() existingPods, allNodes, filteredNodes := st.MakeNodesAndPodsForEvenPodsSpread(pod.Labels, tt.existingPodsNum, tt.allNodesNum, tt.allNodesNum) snapshot := cache.NewSnapshot(existingPods, allNodes) - services := &v1.ServiceList{ - Items: []v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"foo": ""}}}}, - } - client := clientsetfake.NewSimpleClientset(services) + client := fake.NewSimpleClientset( + &v1.Service{Spec: v1.ServiceSpec{Selector: map[string]string{"foo": ""}}}, + ) ctx := context.Background() informerFactory := informers.NewSharedInformerFactory(client, 0) _ = informerFactory.Core().V1().Services().Lister() @@ -77,11 +76,17 @@ func BenchmarkTestSelectorSpreadPriority(b *testing.B) { if !status.IsSuccess() { b.Fatalf("unexpected error: %v", status) } + var gotList framework.NodeScoreList for _, node := range filteredNodes { - _, status := plugin.Score(ctx, state, pod, node.Name) + score, status := plugin.Score(ctx, state, pod, node.Name) if !status.IsSuccess() { b.Errorf("unexpected error: %v", status) } + gotList = append(gotList, framework.NodeScore{Name: node.Name, Score: score}) + } + status = plugin.NormalizeScore(context.Background(), state, pod, gotList) + if !status.IsSuccess() { + b.Fatal(status) } } }) diff --git a/pkg/scheduler/framework/plugins/helper/BUILD b/pkg/scheduler/framework/plugins/helper/BUILD index fb519fe4116..a4639e8e000 100644 --- a/pkg/scheduler/framework/plugins/helper/BUILD +++ b/pkg/scheduler/framework/plugins/helper/BUILD @@ -5,6 +5,7 @@ go_library( srcs = [ "node_affinity.go", "normalize_score.go", + "spread.go", ], importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper", visibility = ["//visibility:public"], @@ -12,8 +13,11 @@ go_library( "//pkg/apis/core/v1/helper:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", + "//staging/src/k8s.io/client-go/listers/apps/v1:go_default_library", + "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", ], ) @@ -22,6 +26,7 @@ go_test( srcs = [ "node_affinity_test.go", "normalize_score_test.go", + "spread_test.go", ], embed = [":go_default_library"], deps = [ @@ -29,6 +34,8 @@ go_test( "//pkg/scheduler/framework/v1alpha1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/client-go/informers:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", ], ) diff --git a/pkg/scheduler/framework/plugins/helper/spread.go b/pkg/scheduler/framework/plugins/helper/spread.go new file mode 100644 index 00000000000..4f06f1f5326 --- /dev/null +++ b/pkg/scheduler/framework/plugins/helper/spread.go @@ -0,0 +1,95 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package helper + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + appslisters "k8s.io/client-go/listers/apps/v1" + corelisters "k8s.io/client-go/listers/core/v1" +) + +// DefaultSelector returns a selector deduced from the Services, Replication +// Controllers, Replica Sets, and Stateful Sets matching the given pod. +func DefaultSelector(pod *v1.Pod, sl corelisters.ServiceLister, cl corelisters.ReplicationControllerLister, rsl appslisters.ReplicaSetLister, ssl appslisters.StatefulSetLister) labels.Selector { + labelSet := make(labels.Set) + // Since services, RCs, RSs and SSs match the pod, they won't have conflicting + // labels. Merging is safe. + + if services, err := GetPodServices(sl, pod); err == nil { + for _, service := range services { + labelSet = labels.Merge(labelSet, service.Spec.Selector) + } + } + + if rcs, err := cl.GetPodControllers(pod); err == nil { + for _, rc := range rcs { + labelSet = labels.Merge(labelSet, rc.Spec.Selector) + } + } + + selector := labels.NewSelector() + if len(labelSet) != 0 { + selector = labelSet.AsSelector() + } + + if rss, err := rsl.GetPodReplicaSets(pod); err == nil { + for _, rs := range rss { + if other, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector); err == nil { + if r, ok := other.Requirements(); ok { + selector = selector.Add(r...) + } + } + } + } + + if sss, err := ssl.GetPodStatefulSets(pod); err == nil { + for _, ss := range sss { + if other, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector); err == nil { + if r, ok := other.Requirements(); ok { + selector = selector.Add(r...) + } + } + } + } + + return selector +} + +// GetPodServices gets the services that have the selector that match the labels on the given pod. +func GetPodServices(sl corelisters.ServiceLister, pod *v1.Pod) ([]*v1.Service, error) { + allServices, err := sl.Services(pod.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var services []*v1.Service + for i := range allServices { + service := allServices[i] + if service.Spec.Selector == nil { + // services with nil selectors match nothing, not everything. 
+ continue + } + selector := labels.Set(service.Spec.Selector).AsSelectorPreValidated() + if selector.Matches(labels.Set(pod.Labels)) { + services = append(services, service) + } + } + + return services, nil +} diff --git a/pkg/scheduler/listers/listers_test.go b/pkg/scheduler/framework/plugins/helper/spread_test.go similarity index 97% rename from pkg/scheduler/listers/listers_test.go rename to pkg/scheduler/framework/plugins/helper/spread_test.go index 238df6325a5..dda1679a6e2 100644 --- a/pkg/scheduler/listers/listers_test.go +++ b/pkg/scheduler/framework/plugins/helper/spread_test.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Kubernetes Authors. +Copyright 2020 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package listers +package helper import ( "fmt" diff --git a/pkg/scheduler/framework/plugins/podtopologyspread/BUILD b/pkg/scheduler/framework/plugins/podtopologyspread/BUILD index e65cfbaddf5..2829a8ad5d4 100644 --- a/pkg/scheduler/framework/plugins/podtopologyspread/BUILD +++ b/pkg/scheduler/framework/plugins/podtopologyspread/BUILD @@ -17,9 +17,14 @@ go_library( "//pkg/scheduler/nodeinfo:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/validation:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + "//staging/src/k8s.io/client-go/informers:go_default_library", + "//staging/src/k8s.io/client-go/listers/apps/v1:go_default_library", + "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], @@ -29,6 +34,7 @@ go_test( name = "go_default_test", srcs = [ "filtering_test.go", + "plugin_test.go", "scoring_test.go", ], embed = [":go_default_library"], @@ -37,10 +43,14 @@ go_test( "//pkg/scheduler/internal/cache:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", "//pkg/scheduler/testing:go_default_library", + "//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/client-go/informers:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//vendor/github.com/google/go-cmp/cmp:go_default_library", "//vendor/k8s.io/utils/pointer:go_default_library", ], diff --git a/pkg/scheduler/framework/plugins/podtopologyspread/common.go b/pkg/scheduler/framework/plugins/podtopologyspread/common.go index 97bc9006472..b87af00c88e 100644 --- a/pkg/scheduler/framework/plugins/podtopologyspread/common.go +++ b/pkg/scheduler/framework/plugins/podtopologyspread/common.go @@ -20,6 +20,7 @@ import ( v1 "k8s.io/api/core/v1" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper" ) type topologyPair struct { @@ -36,6 +37,24 @@ type topologySpreadConstraint struct { Selector labels.Selector } +// defaultConstraints builds the constraints for a pod using +// .DefaultConstraints and the selectors from the services, replication +// controllers, replica sets and stateful sets that match the pod. +func (pl *PodTopologySpread) defaultConstraints(p *v1.Pod, action v1.UnsatisfiableConstraintAction) ([]topologySpreadConstraint, error) { + constraints, err := filterTopologySpreadConstraints(pl.DefaultConstraints, action) + if err != nil || len(constraints) == 0 { + return nil, err + } + selector := helper.DefaultSelector(p, pl.services, pl.replicationCtrls, pl.replicaSets, pl.statefulSets) + if selector.Empty() { + return nil, nil + } + for i := range constraints { + constraints[i].Selector = selector + } + return constraints, nil +} + // nodeLabelsMatchSpreadConstraints checks if ALL topology keys in spread Constraints are present in node labels. func nodeLabelsMatchSpreadConstraints(nodeLabels map[string]string, constraints []topologySpreadConstraint) bool { for _, c := range constraints { diff --git a/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go b/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go index 36505b00c91..448d47f309b 100644 --- a/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go +++ b/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go @@ -91,32 +91,32 @@ func newCriticalPaths() *criticalPaths { return &criticalPaths{{MatchNum: math.MaxInt32}, {MatchNum: math.MaxInt32}} } -func (paths *criticalPaths) update(tpVal string, num int32) { +func (p *criticalPaths) update(tpVal string, num int32) { // first verify if `tpVal` exists or not i := -1 - if tpVal == paths[0].TopologyValue { + if tpVal == p[0].TopologyValue { i = 0 - } else if tpVal == paths[1].TopologyValue { + } else if tpVal == p[1].TopologyValue { i = 1 } if i >= 0 { // `tpVal` exists - paths[i].MatchNum = num - if paths[0].MatchNum > paths[1].MatchNum { + p[i].MatchNum = num + if p[0].MatchNum > p[1].MatchNum { // swap paths[0] and paths[1] - paths[0], paths[1] = paths[1], paths[0] + p[0], p[1] = p[1], p[0] } } else { // `tpVal` doesn't exist - if num < paths[0].MatchNum { + if num < p[0].MatchNum { // update paths[1] with paths[0] - paths[1] = paths[0] + p[1] = p[0] // update paths[0] - paths[0].TopologyValue, paths[0].MatchNum = tpVal, num - } else if num < paths[1].MatchNum { + p[0].TopologyValue, p[0].MatchNum = tpVal, num + } else if num < p[1].MatchNum { // update paths[1] - paths[1].TopologyValue, paths[1].MatchNum = tpVal, num + p[1].TopologyValue, p[1].MatchNum = tpVal, num } } } @@ -201,11 +201,19 @@ func (pl *PodTopologySpread) calPreFilterState(pod *v1.Pod) (*preFilterState, er if err != nil { return nil, fmt.Errorf("listing NodeInfos: %v", err) } - // We have feature gating in APIServer to strip the spec - // so don't need to re-check feature gate, just check length of Constraints. - constraints, err := filterTopologySpreadConstraints(pod.Spec.TopologySpreadConstraints, v1.DoNotSchedule) - if err != nil { - return nil, err + var constraints []topologySpreadConstraint + if len(pod.Spec.TopologySpreadConstraints) > 0 { + // We have feature gating in APIServer to strip the spec + // so don't need to re-check feature gate, just check length of Constraints. 
+ constraints, err = filterTopologySpreadConstraints(pod.Spec.TopologySpreadConstraints, v1.DoNotSchedule) + if err != nil { + return nil, fmt.Errorf("obtaining pod's hard topology spread constraints: %v", err) + } + } else { + constraints, err = pl.defaultConstraints(pod, v1.DoNotSchedule) + if err != nil { + return nil, fmt.Errorf("setting default hard topology spread constraints: %v", err) + } } if len(constraints) == 0 { return &preFilterState{}, nil diff --git a/pkg/scheduler/framework/plugins/podtopologyspread/filtering_test.go b/pkg/scheduler/framework/plugins/podtopologyspread/filtering_test.go index f4a59370ac6..574b8e953fa 100644 --- a/pkg/scheduler/framework/plugins/podtopologyspread/filtering_test.go +++ b/pkg/scheduler/framework/plugins/podtopologyspread/filtering_test.go @@ -25,48 +25,49 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes/fake" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" "k8s.io/kubernetes/pkg/scheduler/internal/cache" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" st "k8s.io/kubernetes/pkg/scheduler/testing" ) -var ( - hardSpread = v1.DoNotSchedule - softSpread = v1.ScheduleAnyway -) +var cmpOpts = []cmp.Option{ + cmp.Comparer(func(s1 labels.Selector, s2 labels.Selector) bool { + return reflect.DeepEqual(s1, s2) + }), + cmp.Comparer(func(p1, p2 criticalPaths) bool { + p1.sort() + p2.sort() + return p1[0] == p2[0] && p1[1] == p2[1] + }), +} -func (s preFilterState) Equal(o preFilterState) bool { - type internal preFilterState - type internalCP criticalPaths - return cmp.Equal(internal(s), internal(o), - cmp.Comparer(func(s1 labels.Selector, s2 labels.Selector) bool { - return reflect.DeepEqual(s1, s2) - }), - cmp.Transformer("sort", func(p criticalPaths) internalCP { - if p[0].MatchNum == p[1].MatchNum && p[0].TopologyValue > p[1].TopologyValue { - // Swap TopologyValue to make them sorted alphabetically. - p[0].TopologyValue, p[1].TopologyValue = p[1].TopologyValue, p[0].TopologyValue - } - return internalCP(p) - }), - ) +func (p *criticalPaths) sort() { + if p[0].MatchNum == p[1].MatchNum && p[0].TopologyValue > p[1].TopologyValue { + // Swap TopologyValue to make them sorted alphabetically. 
+ p[0].TopologyValue, p[1].TopologyValue = p[1].TopologyValue, p[0].TopologyValue + } } func TestPreFilterState(t *testing.T) { fooSelector := st.MakeLabelSelector().Exists("foo").Obj() barSelector := st.MakeLabelSelector().Exists("bar").Obj() tests := []struct { - name string - pod *v1.Pod - nodes []*v1.Node - existingPods []*v1.Pod - want *preFilterState + name string + pod *v1.Pod + nodes []*v1.Node + existingPods []*v1.Pod + objs []runtime.Object + defaultConstraints []v1.TopologySpreadConstraint + want *preFilterState }{ { name: "clean cluster with one spreadConstraint", pod: st.MakePod().Name("p").Label("foo", "").SpreadConstraint( - 5, "zone", hardSpread, st.MakeLabelSelector().Label("foo", "bar").Obj(), + 5, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Label("foo", "bar").Obj(), ).Obj(), nodes: []*v1.Node{ st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(), @@ -94,7 +95,7 @@ func TestPreFilterState(t *testing.T) { { name: "normal case with one spreadConstraint", pod: st.MakePod().Name("p").Label("foo", "").SpreadConstraint( - 1, "zone", hardSpread, fooSelector, + 1, "zone", v1.DoNotSchedule, fooSelector, ).Obj(), nodes: []*v1.Node{ st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(), @@ -129,7 +130,7 @@ func TestPreFilterState(t *testing.T) { { name: "normal case with one spreadConstraint, on a 3-zone cluster", pod: st.MakePod().Name("p").Label("foo", "").SpreadConstraint( - 1, "zone", hardSpread, st.MakeLabelSelector().Exists("foo").Obj(), + 1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj(), ).Obj(), nodes: []*v1.Node{ st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(), @@ -167,7 +168,7 @@ func TestPreFilterState(t *testing.T) { { name: "namespace mismatch doesn't count", pod: st.MakePod().Name("p").Label("foo", "").SpreadConstraint( - 1, "zone", hardSpread, fooSelector, + 1, "zone", v1.DoNotSchedule, fooSelector, ).Obj(), nodes: []*v1.Node{ st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(), @@ -202,8 +203,8 @@ func TestPreFilterState(t *testing.T) { { name: "normal case with two spreadConstraints", pod: st.MakePod().Name("p").Label("foo", ""). - SpreadConstraint(1, "zone", hardSpread, fooSelector). - SpreadConstraint(1, "node", hardSpread, fooSelector). + SpreadConstraint(1, "zone", v1.DoNotSchedule, fooSelector). + SpreadConstraint(1, "node", v1.DoNotSchedule, fooSelector). Obj(), nodes: []*v1.Node{ st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(), @@ -250,10 +251,10 @@ func TestPreFilterState(t *testing.T) { { name: "soft spreadConstraints should be bypassed", pod: st.MakePod().Name("p").Label("foo", ""). - SpreadConstraint(1, "zone", softSpread, fooSelector). - SpreadConstraint(1, "zone", hardSpread, fooSelector). - SpreadConstraint(1, "node", softSpread, fooSelector). - SpreadConstraint(1, "node", hardSpread, fooSelector). + SpreadConstraint(1, "zone", v1.ScheduleAnyway, fooSelector). + SpreadConstraint(1, "zone", v1.DoNotSchedule, fooSelector). + SpreadConstraint(1, "node", v1.ScheduleAnyway, fooSelector). + SpreadConstraint(1, "node", v1.DoNotSchedule, fooSelector). Obj(), nodes: []*v1.Node{ st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(), @@ -298,8 +299,8 @@ func TestPreFilterState(t *testing.T) { { name: "different labelSelectors - simple version", pod: st.MakePod().Name("p").Label("foo", "").Label("bar", ""). 
- SpreadConstraint(1, "zone", hardSpread, fooSelector). - SpreadConstraint(1, "node", hardSpread, barSelector). + SpreadConstraint(1, "zone", v1.DoNotSchedule, fooSelector). + SpreadConstraint(1, "node", v1.DoNotSchedule, barSelector). Obj(), nodes: []*v1.Node{ st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(), @@ -339,8 +340,8 @@ func TestPreFilterState(t *testing.T) { { name: "different labelSelectors - complex pods", pod: st.MakePod().Name("p").Label("foo", "").Label("bar", ""). - SpreadConstraint(1, "zone", hardSpread, fooSelector). - SpreadConstraint(1, "node", hardSpread, barSelector). + SpreadConstraint(1, "zone", v1.DoNotSchedule, fooSelector). + SpreadConstraint(1, "node", v1.DoNotSchedule, barSelector). Obj(), nodes: []*v1.Node{ st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(), @@ -386,8 +387,8 @@ func TestPreFilterState(t *testing.T) { name: "two spreadConstraints, and with podAffinity", pod: st.MakePod().Name("p").Label("foo", ""). NodeAffinityNotIn("node", []string{"node-x"}). // exclude node-x - SpreadConstraint(1, "zone", hardSpread, fooSelector). - SpreadConstraint(1, "node", hardSpread, fooSelector). + SpreadConstraint(1, "zone", v1.DoNotSchedule, fooSelector). + SpreadConstraint(1, "node", v1.DoNotSchedule, fooSelector). Obj(), nodes: []*v1.Node{ st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(), @@ -430,21 +431,107 @@ func TestPreFilterState(t *testing.T) { }, }, }, + { + name: "default constraints and a service", + pod: st.MakePod().Name("p").Label("foo", "bar").Label("baz", "kar").Obj(), + defaultConstraints: []v1.TopologySpreadConstraint{ + {MaxSkew: 3, TopologyKey: "node", WhenUnsatisfiable: v1.DoNotSchedule}, + {MaxSkew: 2, TopologyKey: "node", WhenUnsatisfiable: v1.ScheduleAnyway}, + {MaxSkew: 5, TopologyKey: "rack", WhenUnsatisfiable: v1.DoNotSchedule}, + }, + objs: []runtime.Object{ + &v1.Service{Spec: v1.ServiceSpec{Selector: map[string]string{"foo": "bar"}}}, + }, + want: &preFilterState{ + Constraints: []topologySpreadConstraint{ + { + MaxSkew: 3, + TopologyKey: "node", + Selector: mustConvertLabelSelectorAsSelector(t, st.MakeLabelSelector().Label("foo", "bar").Obj()), + }, + { + MaxSkew: 5, + TopologyKey: "rack", + Selector: mustConvertLabelSelectorAsSelector(t, st.MakeLabelSelector().Label("foo", "bar").Obj()), + }, + }, + TpKeyToCriticalPaths: map[string]*criticalPaths{ + "node": newCriticalPaths(), + "rack": newCriticalPaths(), + }, + TpPairToMatchNum: make(map[topologyPair]int32), + }, + }, + { + name: "default constraints and a service that doesn't match", + pod: st.MakePod().Name("p").Label("foo", "bar").Obj(), + defaultConstraints: []v1.TopologySpreadConstraint{ + {MaxSkew: 3, TopologyKey: "node", WhenUnsatisfiable: v1.DoNotSchedule}, + }, + objs: []runtime.Object{ + &v1.Service{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "kep"}}}, + }, + want: &preFilterState{}, + }, + { + name: "default constraints and a service, but pod has constraints", + pod: st.MakePod().Name("p").Label("foo", "bar").Label("baz", "tar"). + SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Label("baz", "tar").Obj()). 
+ SpreadConstraint(2, "planet", v1.ScheduleAnyway, st.MakeLabelSelector().Label("fot", "rok").Obj()).Obj(), + defaultConstraints: []v1.TopologySpreadConstraint{ + {MaxSkew: 2, TopologyKey: "node", WhenUnsatisfiable: v1.DoNotSchedule}, + }, + objs: []runtime.Object{ + &v1.Service{Spec: v1.ServiceSpec{Selector: map[string]string{"foo": "bar"}}}, + }, + want: &preFilterState{ + Constraints: []topologySpreadConstraint{ + { + MaxSkew: 1, + TopologyKey: "zone", + Selector: mustConvertLabelSelectorAsSelector(t, st.MakeLabelSelector().Label("baz", "tar").Obj()), + }, + }, + TpKeyToCriticalPaths: map[string]*criticalPaths{ + "zone": newCriticalPaths(), + }, + TpPairToMatchNum: make(map[topologyPair]int32), + }, + }, + { + name: "default soft constraints and a service", + pod: st.MakePod().Name("p").Label("foo", "bar").Obj(), + defaultConstraints: []v1.TopologySpreadConstraint{ + {MaxSkew: 2, TopologyKey: "node", WhenUnsatisfiable: v1.ScheduleAnyway}, + }, + objs: []runtime.Object{ + &v1.Service{Spec: v1.ServiceSpec{Selector: map[string]string{"foo": "bar"}}}, + }, + want: &preFilterState{}, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + ctx := context.Background() + informerFactory := informers.NewSharedInformerFactory(fake.NewSimpleClientset(tt.objs...), 0) pl := PodTopologySpread{ sharedLister: cache.NewSnapshot(tt.existingPods, tt.nodes), + Args: Args{ + DefaultConstraints: tt.defaultConstraints, + }, } + pl.setListers(informerFactory) + informerFactory.Start(ctx.Done()) + informerFactory.WaitForCacheSync(ctx.Done()) cs := framework.NewCycleState() - if s := pl.PreFilter(context.Background(), cs, tt.pod); !s.IsSuccess() { + if s := pl.PreFilter(ctx, cs, tt.pod); !s.IsSuccess() { t.Fatal(s.AsError()) } got, err := getPreFilterState(cs) if err != nil { t.Fatal(err) } - if diff := cmp.Diff(got, tt.want); diff != "" { + if diff := cmp.Diff(tt.want, got, cmpOpts...); diff != "" { t.Errorf("PodTopologySpread#PreFilter() returned diff (-want,+got):\n%s", diff) } }) @@ -471,7 +558,7 @@ func TestPreFilterStateAddPod(t *testing.T) { { name: "node a and b both impact current min match", preemptor: st.MakePod().Name("p").Label("foo", ""). - SpreadConstraint(1, "node", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj()). Obj(), addedPod: st.MakePod().Name("p-a1").Node("node-a").Label("foo", "").Obj(), existingPods: nil, // it's an empty cluster @@ -494,7 +581,7 @@ func TestPreFilterStateAddPod(t *testing.T) { { name: "only node a impacts current min match", preemptor: st.MakePod().Name("p").Label("foo", ""). - SpreadConstraint(1, "node", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj()). Obj(), addedPod: st.MakePod().Name("p-a1").Node("node-a").Label("foo", "").Obj(), existingPods: []*v1.Pod{ @@ -519,7 +606,7 @@ func TestPreFilterStateAddPod(t *testing.T) { { name: "add a pod in a different namespace doesn't change topologyKeyToMinPodsMap", preemptor: st.MakePod().Name("p").Label("foo", ""). - SpreadConstraint(1, "node", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj()). 
Obj(), addedPod: st.MakePod().Name("p-a1").Namespace("ns1").Node("node-a").Label("foo", "").Obj(), existingPods: []*v1.Pod{ @@ -544,7 +631,7 @@ func TestPreFilterStateAddPod(t *testing.T) { { name: "add pod on non-critical node won't trigger re-calculation", preemptor: st.MakePod().Name("p").Label("foo", ""). - SpreadConstraint(1, "node", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj()). Obj(), addedPod: st.MakePod().Name("p-b2").Node("node-b").Label("foo", "").Obj(), existingPods: []*v1.Pod{ @@ -569,8 +656,8 @@ func TestPreFilterStateAddPod(t *testing.T) { { name: "node a and x both impact topologyKeyToMinPodsMap on zone and node", preemptor: st.MakePod().Name("p").Label("foo", ""). - SpreadConstraint(1, "zone", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()). - SpreadConstraint(1, "node", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj()). Obj(), addedPod: st.MakePod().Name("p-a1").Node("node-a").Label("foo", "").Obj(), existingPods: nil, // it's an empty cluster @@ -596,8 +683,8 @@ func TestPreFilterStateAddPod(t *testing.T) { { name: "only node a impacts topologyKeyToMinPodsMap on zone and node", preemptor: st.MakePod().Name("p").Label("foo", ""). - SpreadConstraint(1, "zone", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()). - SpreadConstraint(1, "node", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj()). Obj(), addedPod: st.MakePod().Name("p-a1").Node("node-a").Label("foo", "").Obj(), existingPods: []*v1.Pod{ @@ -625,8 +712,8 @@ func TestPreFilterStateAddPod(t *testing.T) { { name: "node a impacts topologyKeyToMinPodsMap on node, node x impacts topologyKeyToMinPodsMap on zone", preemptor: st.MakePod().Name("p").Label("foo", ""). - SpreadConstraint(1, "zone", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()). - SpreadConstraint(1, "node", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj()). Obj(), addedPod: st.MakePod().Name("p-a1").Node("node-a").Label("foo", "").Obj(), existingPods: []*v1.Pod{ @@ -658,8 +745,8 @@ func TestPreFilterStateAddPod(t *testing.T) { { name: "Constraints hold different labelSelectors, node a impacts topologyKeyToMinPodsMap on zone", preemptor: st.MakePod().Name("p").Label("foo", "").Label("bar", ""). - SpreadConstraint(1, "zone", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()). - SpreadConstraint(1, "node", hardSpread, st.MakeLabelSelector().Exists("bar").Obj()). + SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("bar").Obj()). 
Obj(), addedPod: st.MakePod().Name("p-a1").Node("node-a").Label("foo", "").Obj(), existingPods: []*v1.Pod{ @@ -698,8 +785,8 @@ func TestPreFilterStateAddPod(t *testing.T) { { name: "Constraints hold different labelSelectors, node a impacts topologyKeyToMinPodsMap on both zone and node", preemptor: st.MakePod().Name("p").Label("foo", "").Label("bar", ""). - SpreadConstraint(1, "zone", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()). - SpreadConstraint(1, "node", hardSpread, st.MakeLabelSelector().Exists("bar").Obj()). + SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("bar").Obj()). Obj(), addedPod: st.MakePod().Name("p-a1").Node("node-a").Label("foo", "").Label("bar", "").Obj(), existingPods: []*v1.Pod{ @@ -758,7 +845,7 @@ func TestPreFilterStateAddPod(t *testing.T) { if err != nil { t.Fatal(err) } - if diff := cmp.Diff(state, tt.want); diff != "" { + if diff := cmp.Diff(state, tt.want, cmpOpts...); diff != "" { t.Errorf("PodTopologySpread.AddPod() returned diff (-want,+got):\n%s", diff) } }) @@ -788,7 +875,7 @@ func TestPreFilterStateRemovePod(t *testing.T) { // So preemption is triggered. name: "one spreadConstraint on zone, topologyKeyToMinPodsMap unchanged", preemptor: st.MakePod().Name("p").Label("foo", ""). - SpreadConstraint(1, "zone", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj()). Obj(), nodes: []*v1.Node{ st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(), @@ -816,7 +903,7 @@ func TestPreFilterStateRemovePod(t *testing.T) { { name: "one spreadConstraint on node, topologyKeyToMinPodsMap changed", preemptor: st.MakePod().Name("p").Label("foo", ""). - SpreadConstraint(1, "zone", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj()). Obj(), nodes: []*v1.Node{ st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(), @@ -846,7 +933,7 @@ func TestPreFilterStateRemovePod(t *testing.T) { { name: "delete an irrelevant pod won't help", preemptor: st.MakePod().Name("p").Label("foo", ""). - SpreadConstraint(1, "zone", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj()). Obj(), nodes: []*v1.Node{ st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(), @@ -877,7 +964,7 @@ func TestPreFilterStateRemovePod(t *testing.T) { { name: "delete a non-existing pod won't help", preemptor: st.MakePod().Name("p").Label("foo", ""). - SpreadConstraint(1, "zone", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj()). Obj(), nodes: []*v1.Node{ st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(), @@ -908,8 +995,8 @@ func TestPreFilterStateRemovePod(t *testing.T) { { name: "two spreadConstraints", preemptor: st.MakePod().Name("p").Label("foo", ""). - SpreadConstraint(1, "zone", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()). - SpreadConstraint(1, "node", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj()). 
+ SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj()). Obj(), nodes: []*v1.Node{ st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(), @@ -971,7 +1058,7 @@ func TestPreFilterStateRemovePod(t *testing.T) { if err != nil { t.Fatal(err) } - if diff := cmp.Diff(state, tt.want); diff != "" { + if diff := cmp.Diff(state, tt.want, cmpOpts...); diff != "" { t.Errorf("PodTopologySpread.RemovePod() returned diff (-want,+got):\n%s", diff) } }) @@ -989,7 +1076,7 @@ func BenchmarkTestCalPreFilterState(b *testing.B) { { name: "1000nodes/single-constraint-zone", pod: st.MakePod().Name("p").Label("foo", ""). - SpreadConstraint(1, v1.LabelZoneFailureDomain, hardSpread, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, v1.LabelZoneFailureDomain, v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj()). Obj(), existingPodsNum: 10000, allNodesNum: 1000, @@ -998,7 +1085,7 @@ func BenchmarkTestCalPreFilterState(b *testing.B) { { name: "1000nodes/single-constraint-node", pod: st.MakePod().Name("p").Label("foo", ""). - SpreadConstraint(1, v1.LabelHostname, hardSpread, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, v1.LabelHostname, v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj()). Obj(), existingPodsNum: 10000, allNodesNum: 1000, @@ -1007,8 +1094,8 @@ func BenchmarkTestCalPreFilterState(b *testing.B) { { name: "1000nodes/two-Constraints-zone-node", pod: st.MakePod().Name("p").Label("foo", "").Label("bar", ""). - SpreadConstraint(1, v1.LabelZoneFailureDomain, hardSpread, st.MakeLabelSelector().Exists("foo").Obj()). - SpreadConstraint(1, v1.LabelHostname, hardSpread, st.MakeLabelSelector().Exists("bar").Obj()). + SpreadConstraint(1, v1.LabelZoneFailureDomain, v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, v1.LabelHostname, v1.DoNotSchedule, st.MakeLabelSelector().Exists("bar").Obj()). 
Obj(), existingPodsNum: 10000, allNodesNum: 1000, @@ -1052,7 +1139,7 @@ func TestSingleConstraint(t *testing.T) { { name: "no existing pods", pod: st.MakePod().Name("p").Label("foo", "").SpreadConstraint( - 1, "zone", hardSpread, st.MakeLabelSelector().Exists("foo").Obj(), + 1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj(), ).Obj(), nodes: []*v1.Node{ st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(), @@ -1070,7 +1157,7 @@ func TestSingleConstraint(t *testing.T) { { name: "no existing pods, incoming pod doesn't match itself", pod: st.MakePod().Name("p").Label("foo", "").SpreadConstraint( - 1, "zone", hardSpread, st.MakeLabelSelector().Exists("bar").Obj(), + 1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("bar").Obj(), ).Obj(), nodes: []*v1.Node{ st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(), @@ -1088,7 +1175,7 @@ func TestSingleConstraint(t *testing.T) { { name: "existing pods in a different namespace do not count", pod: st.MakePod().Name("p").Label("foo", "").SpreadConstraint( - 1, "zone", hardSpread, st.MakeLabelSelector().Exists("foo").Obj(), + 1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj(), ).Obj(), nodes: []*v1.Node{ st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(), @@ -1112,7 +1199,7 @@ func TestSingleConstraint(t *testing.T) { { name: "pods spread across zones as 3/3, all nodes fit", pod: st.MakePod().Name("p").Label("foo", "").SpreadConstraint( - 1, "zone", hardSpread, st.MakeLabelSelector().Exists("foo").Obj(), + 1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj(), ).Obj(), nodes: []*v1.Node{ st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(), @@ -1140,7 +1227,7 @@ func TestSingleConstraint(t *testing.T) { // can cause unexpected behavior name: "pods spread across zones as 1/2 due to absence of label 'zone' on node-b", pod: st.MakePod().Name("p").Label("foo", "").SpreadConstraint( - 1, "zone", hardSpread, st.MakeLabelSelector().Exists("foo").Obj(), + 1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj(), ).Obj(), nodes: []*v1.Node{ st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(), @@ -1164,7 +1251,7 @@ func TestSingleConstraint(t *testing.T) { { name: "pods spread across nodes as 2/1/0/3, only node-x fits", pod: st.MakePod().Name("p").Label("foo", "").SpreadConstraint( - 1, "node", hardSpread, st.MakeLabelSelector().Exists("foo").Obj(), + 1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj(), ).Obj(), nodes: []*v1.Node{ st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(), @@ -1190,7 +1277,7 @@ func TestSingleConstraint(t *testing.T) { { name: "pods spread across nodes as 2/1/0/3, maxSkew is 2, node-b and node-x fit", pod: st.MakePod().Name("p").Label("foo", "").SpreadConstraint( - 2, "node", hardSpread, st.MakeLabelSelector().Exists("foo").Obj(), + 2, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj(), ).Obj(), nodes: []*v1.Node{ st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(), @@ -1220,7 +1307,7 @@ func TestSingleConstraint(t *testing.T) { // as the incoming pod doesn't have label "foo" name: "pods spread across nodes as 2/1/0/3, but pod doesn't match itself", pod: st.MakePod().Name("p").Label("bar", "").SpreadConstraint( - 1, "node", hardSpread, st.MakeLabelSelector().Exists("foo").Obj(), + 1, "node", v1.DoNotSchedule, 
st.MakeLabelSelector().Exists("foo").Obj(), ).Obj(), nodes: []*v1.Node{ st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(), @@ -1252,7 +1339,7 @@ func TestSingleConstraint(t *testing.T) { name: "incoming pod has nodeAffinity, pods spread as 2/~1~/~0~/3, hence node-a fits", pod: st.MakePod().Name("p").Label("foo", ""). NodeAffinityIn("node", []string{"node-a", "node-y"}). - SpreadConstraint(1, "node", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj()). Obj(), nodes: []*v1.Node{ st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(), @@ -1278,7 +1365,7 @@ func TestSingleConstraint(t *testing.T) { { name: "terminating Pods should be excluded", pod: st.MakePod().Name("p").Label("foo", "").SpreadConstraint( - 1, "node", hardSpread, st.MakeLabelSelector().Exists("foo").Obj(), + 1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj(), ).Obj(), nodes: []*v1.Node{ st.MakeNode().Name("node-a").Label("node", "node-a").Obj(), @@ -1329,8 +1416,8 @@ func TestMultipleConstraints(t *testing.T) { // intersection of (1) and (2) returns node-x name: "two Constraints on zone and node, spreads = [3/3, 2/1/0/3]", pod: st.MakePod().Name("p").Label("foo", ""). - SpreadConstraint(1, "zone", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()). - SpreadConstraint(1, "node", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj()). Obj(), nodes: []*v1.Node{ st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(), @@ -1359,8 +1446,8 @@ func TestMultipleConstraints(t *testing.T) { // intersection of (1) and (2) returns no node name: "two Constraints on zone and node, spreads = [3/4, 2/1/0/4]", pod: st.MakePod().Name("p").Label("foo", ""). - SpreadConstraint(1, "zone", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()). - SpreadConstraint(1, "node", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj()). Obj(), nodes: []*v1.Node{ st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(), @@ -1390,8 +1477,8 @@ func TestMultipleConstraints(t *testing.T) { // intersection of (1) and (2) returns node-x name: "Constraints hold different labelSelectors, spreads = [1/0, 1/0/0/1]", pod: st.MakePod().Name("p").Label("foo", "").Label("bar", ""). - SpreadConstraint(1, "zone", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()). - SpreadConstraint(1, "node", hardSpread, st.MakeLabelSelector().Exists("bar").Obj()). + SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("bar").Obj()). Obj(), nodes: []*v1.Node{ st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(), @@ -1416,8 +1503,8 @@ func TestMultipleConstraints(t *testing.T) { // intersection of (1) and (2) returns no node name: "Constraints hold different labelSelectors, spreads = [1/0, 0/0/1/1]", pod: st.MakePod().Name("p").Label("foo", "").Label("bar", ""). 
- SpreadConstraint(1, "zone", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()). - SpreadConstraint(1, "node", hardSpread, st.MakeLabelSelector().Exists("bar").Obj()). + SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("bar").Obj()). Obj(), nodes: []*v1.Node{ st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(), @@ -1443,8 +1530,8 @@ func TestMultipleConstraints(t *testing.T) { // intersection of (1) and (2) returns node-b name: "Constraints hold different labelSelectors, spreads = [2/3, 1/0/0/1]", pod: st.MakePod().Name("p").Label("foo", "").Label("bar", ""). - SpreadConstraint(1, "zone", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()). - SpreadConstraint(1, "node", hardSpread, st.MakeLabelSelector().Exists("bar").Obj()). + SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("bar").Obj()). Obj(), nodes: []*v1.Node{ st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(), @@ -1472,8 +1559,8 @@ func TestMultipleConstraints(t *testing.T) { // intersection of (1) and (2) returns node-a and node-b name: "Constraints hold different labelSelectors but pod doesn't match itself on 'zone' constraint", pod: st.MakePod().Name("p").Label("bar", ""). - SpreadConstraint(1, "zone", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()). - SpreadConstraint(1, "node", hardSpread, st.MakeLabelSelector().Exists("bar").Obj()). + SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("bar").Obj()). Obj(), nodes: []*v1.Node{ st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(), diff --git a/pkg/scheduler/framework/plugins/podtopologyspread/plugin.go b/pkg/scheduler/framework/plugins/podtopologyspread/plugin.go index 85338fdc5d9..e68eacbca86 100644 --- a/pkg/scheduler/framework/plugins/podtopologyspread/plugin.go +++ b/pkg/scheduler/framework/plugins/podtopologyspread/plugin.go @@ -19,7 +19,14 @@ package podtopologyspread import ( "fmt" + "k8s.io/api/core/v1" + metav1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/client-go/informers" + appslisters "k8s.io/client-go/listers/apps/v1" + corelisters "k8s.io/client-go/listers/core/v1" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" ) @@ -29,9 +36,31 @@ const ( ErrReasonConstraintsNotMatch = "node(s) didn't match pod topology spread constraints" ) +var ( + supportedScheduleActions = sets.NewString(string(v1.DoNotSchedule), string(v1.ScheduleAnyway)) +) + +// Args holds the arguments to configure the plugin. +type Args struct { + // DefaultConstraints defines topology spread constraints to be applied to + // pods that don't define any in `pod.spec.topologySpreadConstraints`. + // `topologySpreadConstraint.labelSelectors` must be empty, as they are + // deduced the pods' membership to Services, Replication Controllers, Replica + // Sets or Stateful Sets. + // Empty by default. 
+	// +optional
+	// +listType=atomic
+	DefaultConstraints []v1.TopologySpreadConstraint `json:"defaultConstraints"`
+}
+
 // PodTopologySpread is a plugin that ensures pod's topologySpreadConstraints is satisfied.
 type PodTopologySpread struct {
-	sharedLister schedulerlisters.SharedLister
+	Args
+	sharedLister     schedulerlisters.SharedLister
+	services         corelisters.ServiceLister
+	replicationCtrls corelisters.ReplicationControllerLister
+	replicaSets      appslisters.ReplicaSetLister
+	statefulSets     appslisters.StatefulSetLister
 }
 
 var _ framework.PreFilterPlugin = &PodTopologySpread{}
@@ -49,10 +78,96 @@ func (pl *PodTopologySpread) Name() string {
 	return Name
 }
 
+// BuildArgs returns the arguments used to build the plugin.
+func (pl *PodTopologySpread) BuildArgs() interface{} {
+	return pl.Args
+}
+
 // New initializes a new plugin and returns it.
-func New(_ *runtime.Unknown, h framework.FrameworkHandle) (framework.Plugin, error) {
+func New(args *runtime.Unknown, h framework.FrameworkHandle) (framework.Plugin, error) {
 	if h.SnapshotSharedLister() == nil {
 		return nil, fmt.Errorf("SnapshotSharedlister is nil")
 	}
-	return &PodTopologySpread{sharedLister: h.SnapshotSharedLister()}, nil
+	pl := &PodTopologySpread{sharedLister: h.SnapshotSharedLister()}
+	if err := framework.DecodeInto(args, &pl.Args); err != nil {
+		return nil, err
+	}
+	if err := validateArgs(&pl.Args); err != nil {
+		return nil, err
+	}
+	if len(pl.DefaultConstraints) != 0 {
+		if h.SharedInformerFactory() == nil {
+			return nil, fmt.Errorf("SharedInformerFactory is nil")
+		}
+		pl.setListers(h.SharedInformerFactory())
+	}
+	return pl, nil
+}
+
+func (pl *PodTopologySpread) setListers(factory informers.SharedInformerFactory) {
+	pl.services = factory.Core().V1().Services().Lister()
+	pl.replicationCtrls = factory.Core().V1().ReplicationControllers().Lister()
+	pl.replicaSets = factory.Apps().V1().ReplicaSets().Lister()
+	pl.statefulSets = factory.Apps().V1().StatefulSets().Lister()
+}
+
+// validateArgs replicates the validation from
+// pkg/apis/core/validation.validateTopologySpreadConstraints.
+// This has the additional check for .labelSelector to be nil.
+func validateArgs(args *Args) error {
+	var allErrs field.ErrorList
+	path := field.NewPath("defaultConstraints")
+	for i, c := range args.DefaultConstraints {
+		p := path.Index(i)
+		if c.MaxSkew <= 0 {
+			f := p.Child("maxSkew")
+			allErrs = append(allErrs, field.Invalid(f, c.MaxSkew, "must be greater than zero"))
+		}
+		allErrs = append(allErrs, validateTopologyKey(p.Child("topologyKey"), c.TopologyKey)...)
+		if err := validateWhenUnsatisfiable(p.Child("whenUnsatisfiable"), c.WhenUnsatisfiable); err != nil {
+			allErrs = append(allErrs, err)
+		}
+		if c.LabelSelector != nil {
+			f := field.Forbidden(p.Child("labelSelector"), "constraint must not define a selector, as they are deduced for each pod")
+			allErrs = append(allErrs, f)
+		}
+		if err := validateConstraintNotRepeat(path, args.DefaultConstraints, i); err != nil {
+			allErrs = append(allErrs, err)
+		}
+	}
+	if len(allErrs) == 0 {
+		return nil
+	}
+	return allErrs.ToAggregate()
+}
+
+func validateTopologyKey(p *field.Path, v string) field.ErrorList {
+	var allErrs field.ErrorList
+	if len(v) == 0 {
+		allErrs = append(allErrs, field.Required(p, "can not be empty"))
+	} else {
+		allErrs = append(allErrs, metav1validation.ValidateLabelName(v, p)...)
+ } + return allErrs +} + +func validateWhenUnsatisfiable(p *field.Path, v v1.UnsatisfiableConstraintAction) *field.Error { + if len(v) == 0 { + return field.Required(p, "can not be empty") + } + if !supportedScheduleActions.Has(string(v)) { + return field.NotSupported(p, v, supportedScheduleActions.List()) + } + return nil +} + +func validateConstraintNotRepeat(path *field.Path, constraints []v1.TopologySpreadConstraint, idx int) *field.Error { + c := &constraints[idx] + for i := range constraints[:idx] { + other := &constraints[i] + if c.TopologyKey == other.TopologyKey && c.WhenUnsatisfiable == other.WhenUnsatisfiable { + return field.Duplicate(path.Index(idx), fmt.Sprintf("{%v, %v}", c.TopologyKey, c.WhenUnsatisfiable)) + } + } + return nil } diff --git a/pkg/scheduler/framework/plugins/podtopologyspread/plugin_test.go b/pkg/scheduler/framework/plugins/podtopologyspread/plugin_test.go new file mode 100644 index 00000000000..989cfdffacd --- /dev/null +++ b/pkg/scheduler/framework/plugins/podtopologyspread/plugin_test.go @@ -0,0 +1,160 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package podtopologyspread + +import ( + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes/fake" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + "k8s.io/kubernetes/pkg/scheduler/internal/cache" +) + +func TestNew(t *testing.T) { + cases := []struct { + name string + args runtime.Unknown + wantErr string + wantArgs Args + }{ + {name: "empty args"}, + { + name: "valid constraints", + args: runtime.Unknown{ + ContentType: runtime.ContentTypeYAML, + Raw: []byte(`defaultConstraints: + - maxSkew: 1 + topologyKey: "node" + whenUnsatisfiable: "ScheduleAnyway" + - maxSkew: 5 + topologyKey: "zone" + whenUnsatisfiable: "DoNotSchedule" +`), + }, + wantArgs: Args{ + DefaultConstraints: []v1.TopologySpreadConstraint{ + { + MaxSkew: 1, + TopologyKey: "node", + WhenUnsatisfiable: v1.ScheduleAnyway, + }, + { + MaxSkew: 5, + TopologyKey: "zone", + WhenUnsatisfiable: v1.DoNotSchedule, + }, + }, + }, + }, + { + name: "repeated constraints", + args: runtime.Unknown{ + ContentType: runtime.ContentTypeYAML, + Raw: []byte(`defaultConstraints: + - maxSkew: 1 + topologyKey: "node" + whenUnsatisfiable: "ScheduleAnyway" + - maxSkew: 5 + topologyKey: "node" + whenUnsatisfiable: "ScheduleAnyway" +`), + }, + wantErr: "Duplicate value", + }, + { + name: "unknown whenUnsatisfiable", + args: runtime.Unknown{ + ContentType: runtime.ContentTypeYAML, + Raw: []byte(`defaultConstraints: + - maxSkew: 1 + topologyKey: "node" + whenUnsatisfiable: "Unknown" +`), + }, + wantErr: "Unsupported value", + }, + { + name: "negative maxSkew", + args: runtime.Unknown{ + ContentType: runtime.ContentTypeYAML, + Raw: []byte(`defaultConstraints: + - maxSkew: -1 + topologyKey: "node" + whenUnsatisfiable: "ScheduleAnyway" +`), + }, + wantErr: "must be greater than zero", + }, + { + name: 
"empty topologyKey", + args: runtime.Unknown{ + ContentType: runtime.ContentTypeYAML, + Raw: []byte(`defaultConstraints: + - maxSkew: 1 + whenUnsatisfiable: "ScheduleAnyway" +`), + }, + wantErr: "can not be empty", + }, + { + name: "with label selector", + args: runtime.Unknown{ + ContentType: runtime.ContentTypeYAML, + Raw: []byte(`defaultConstraints: + - maxSkew: 1 + topologyKey: "rack" + whenUnsatisfiable: "ScheduleAnyway" + labelSelector: + matchLabels: + foo: "bar" +`), + }, + wantErr: "constraint must not define a selector", + }, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + informerFactory := informers.NewSharedInformerFactory(fake.NewSimpleClientset(), 0) + f, err := framework.NewFramework(nil, nil, nil, + framework.WithSnapshotSharedLister(cache.NewSnapshot(nil, nil)), + framework.WithInformerFactory(informerFactory), + ) + if err != nil { + t.Fatal(err) + } + pl, err := New(&tc.args, f) + if len(tc.wantErr) != 0 { + if err == nil || !strings.Contains(err.Error(), tc.wantErr) { + t.Errorf("must fail, got error %q, want %q", err, tc.wantErr) + } + return + } + if err != nil { + t.Fatal(err) + } + plObj := pl.(*PodTopologySpread) + if diff := cmp.Diff(tc.wantArgs, plObj.BuildArgs()); diff != "" { + t.Errorf("wrong plugin build args (-want,+got):\n%s", diff) + } + }) + } +} diff --git a/pkg/scheduler/framework/plugins/podtopologyspread/scoring.go b/pkg/scheduler/framework/plugins/podtopologyspread/scoring.go index 929d2b077ba..ccdc048c4dc 100644 --- a/pkg/scheduler/framework/plugins/podtopologyspread/scoring.go +++ b/pkg/scheduler/framework/plugins/podtopologyspread/scoring.go @@ -49,19 +49,26 @@ func (s *preScoreState) Clone() framework.StateData { return s } -// initialize iterates "filteredNodes" to filter out the nodes which don't have required topologyKey(s), -// and initialize two maps: +// initPreScoreState iterates "filteredNodes" to filter out the nodes which +// don't have required topologyKey(s), and initialize two maps: // 1) s.TopologyPairToPodCounts: keyed with both eligible topology pair and node names. // 2) s.NodeNameSet: keyed with node name, and valued with a *int64 pointer for eligible node only. 
-func (s *preScoreState) initialize(pod *v1.Pod, filteredNodes []*v1.Node) error { - constraints, err := filterTopologySpreadConstraints(pod.Spec.TopologySpreadConstraints, v1.ScheduleAnyway) - if err != nil { - return err +func (pl *PodTopologySpread) initPreScoreState(s *preScoreState, pod *v1.Pod, filteredNodes []*v1.Node) error { + var err error + if len(pod.Spec.TopologySpreadConstraints) > 0 { + s.Constraints, err = filterTopologySpreadConstraints(pod.Spec.TopologySpreadConstraints, v1.ScheduleAnyway) + if err != nil { + return fmt.Errorf("obtaining pod's soft topology spread constraints: %v", err) + } + } else { + s.Constraints, err = pl.defaultConstraints(pod, v1.ScheduleAnyway) + if err != nil { + return fmt.Errorf("setting default soft topology spread constraints: %v", err) + } } - if constraints == nil { + if len(s.Constraints) == 0 { return nil } - s.Constraints = constraints for _, node := range filteredNodes { if !nodeLabelsMatchSpreadConstraints(node.Labels, s.Constraints) { continue @@ -100,13 +107,13 @@ func (pl *PodTopologySpread) PreScore( NodeNameSet: sets.String{}, TopologyPairToPodCounts: make(map[topologyPair]*int64), } - err = state.initialize(pod, filteredNodes) + err = pl.initPreScoreState(state, pod, filteredNodes) if err != nil { return framework.NewStatus(framework.Error, fmt.Sprintf("error when calculating preScoreState: %v", err)) } // return if incoming pod doesn't have soft topology spread Constraints. - if state.Constraints == nil { + if len(state.Constraints) == 0 { cycleState.Write(preScoreStateKey, state) return nil } diff --git a/pkg/scheduler/framework/plugins/podtopologyspread/scoring_test.go b/pkg/scheduler/framework/plugins/podtopologyspread/scoring_test.go index 4cca9cde0e0..60583b7f021 100644 --- a/pkg/scheduler/framework/plugins/podtopologyspread/scoring_test.go +++ b/pkg/scheduler/framework/plugins/podtopologyspread/scoring_test.go @@ -18,40 +18,35 @@ package podtopologyspread import ( "context" - "reflect" "testing" "github.com/google/go-cmp/cmp" + appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes/fake" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" "k8s.io/kubernetes/pkg/scheduler/internal/cache" st "k8s.io/kubernetes/pkg/scheduler/testing" "k8s.io/utils/pointer" ) -func (s preScoreState) Equal(o preScoreState) bool { - type internal preScoreState - return cmp.Equal(internal(s), internal(o), - cmp.Comparer(func(s1 labels.Selector, s2 labels.Selector) bool { - return reflect.DeepEqual(s1, s2) - }), - ) -} - func TestPreScoreStateEmptyNodes(t *testing.T) { tests := []struct { - name string - pod *v1.Pod - nodes []*v1.Node - want *preScoreState + name string + pod *v1.Pod + nodes []*v1.Node + objs []runtime.Object + defaultConstraints []v1.TopologySpreadConstraint + want *preScoreState }{ { name: "normal case", pod: st.MakePod().Name("p").Label("foo", ""). - SpreadConstraint(1, "zone", softSpread, st.MakeLabelSelector().Exists("foo").Obj()). - SpreadConstraint(1, "node", softSpread, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "zone", v1.ScheduleAnyway, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "node", v1.ScheduleAnyway, st.MakeLabelSelector().Exists("foo").Obj()). 
Obj(), nodes: []*v1.Node{ st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(), @@ -84,8 +79,8 @@ func TestPreScoreStateEmptyNodes(t *testing.T) { { name: "node-x doesn't have label zone", pod: st.MakePod().Name("p").Label("foo", ""). - SpreadConstraint(1, "zone", softSpread, st.MakeLabelSelector().Exists("foo").Obj()). - SpreadConstraint(1, "node", softSpread, st.MakeLabelSelector().Exists("bar").Obj()). + SpreadConstraint(1, "zone", v1.ScheduleAnyway, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "node", v1.ScheduleAnyway, st.MakeLabelSelector().Exists("bar").Obj()). Obj(), nodes: []*v1.Node{ st.MakeNode().Name("node-a").Label("zone", "zone1").Label("node", "node-a").Obj(), @@ -113,12 +108,98 @@ func TestPreScoreStateEmptyNodes(t *testing.T) { }, }, }, + { + name: "defaults constraints and a replica set", + pod: st.MakePod().Name("p").Label("foo", "tar").Label("baz", "sup").Obj(), + defaultConstraints: []v1.TopologySpreadConstraint{ + {MaxSkew: 1, TopologyKey: "node", WhenUnsatisfiable: v1.ScheduleAnyway}, + {MaxSkew: 2, TopologyKey: "rack", WhenUnsatisfiable: v1.DoNotSchedule}, + {MaxSkew: 2, TopologyKey: "planet", WhenUnsatisfiable: v1.ScheduleAnyway}, + }, + nodes: []*v1.Node{ + st.MakeNode().Name("node-a").Label("rack", "rack1").Label("node", "node-a").Label("planet", "mars").Obj(), + }, + objs: []runtime.Object{ + &appsv1.ReplicaSet{Spec: appsv1.ReplicaSetSpec{Selector: st.MakeLabelSelector().Exists("foo").Obj()}}, + }, + want: &preScoreState{ + Constraints: []topologySpreadConstraint{ + { + MaxSkew: 1, + TopologyKey: "node", + Selector: mustConvertLabelSelectorAsSelector(t, st.MakeLabelSelector().Exists("foo").Obj()), + }, + { + MaxSkew: 2, + TopologyKey: "planet", + Selector: mustConvertLabelSelectorAsSelector(t, st.MakeLabelSelector().Exists("foo").Obj()), + }, + }, + NodeNameSet: sets.NewString("node-a"), + TopologyPairToPodCounts: map[topologyPair]*int64{ + {key: "node", value: "node-a"}: pointer.Int64Ptr(0), + {key: "planet", value: "mars"}: pointer.Int64Ptr(0), + }, + }, + }, + { + name: "defaults constraints and a replica set that doesn't match", + pod: st.MakePod().Name("p").Label("foo", "bar").Label("baz", "sup").Obj(), + defaultConstraints: []v1.TopologySpreadConstraint{ + {MaxSkew: 2, TopologyKey: "planet", WhenUnsatisfiable: v1.ScheduleAnyway}, + }, + nodes: []*v1.Node{ + st.MakeNode().Name("node-a").Label("planet", "mars").Obj(), + }, + objs: []runtime.Object{ + &appsv1.ReplicaSet{Spec: appsv1.ReplicaSetSpec{Selector: st.MakeLabelSelector().Exists("tar").Obj()}}, + }, + want: &preScoreState{ + TopologyPairToPodCounts: make(map[topologyPair]*int64), + }, + }, + { + name: "defaults constraints and a replica set, but pod has constraints", + pod: st.MakePod().Name("p").Label("foo", "bar").Label("baz", "sup"). + SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Label("foo", "bar").Obj()). 
+ SpreadConstraint(2, "planet", v1.ScheduleAnyway, st.MakeLabelSelector().Label("baz", "sup").Obj()).Obj(), + defaultConstraints: []v1.TopologySpreadConstraint{ + {MaxSkew: 2, TopologyKey: "galaxy", WhenUnsatisfiable: v1.ScheduleAnyway}, + }, + nodes: []*v1.Node{ + st.MakeNode().Name("node-a").Label("planet", "mars").Label("galaxy", "andromeda").Obj(), + }, + objs: []runtime.Object{ + &appsv1.ReplicaSet{Spec: appsv1.ReplicaSetSpec{Selector: st.MakeLabelSelector().Exists("foo").Obj()}}, + }, + want: &preScoreState{ + Constraints: []topologySpreadConstraint{ + { + MaxSkew: 2, + TopologyKey: "planet", + Selector: mustConvertLabelSelectorAsSelector(t, st.MakeLabelSelector().Label("baz", "sup").Obj()), + }, + }, + NodeNameSet: sets.NewString("node-a"), + TopologyPairToPodCounts: map[topologyPair]*int64{ + {"planet", "mars"}: pointer.Int64Ptr(0), + }, + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + ctx := context.Background() + informerFactory := informers.NewSharedInformerFactory(fake.NewSimpleClientset(tt.objs...), 0) pl := PodTopologySpread{ sharedLister: cache.NewSnapshot(nil, tt.nodes), + Args: Args{ + DefaultConstraints: tt.defaultConstraints, + }, } + pl.setListers(informerFactory) + informerFactory.Start(ctx.Done()) + informerFactory.WaitForCacheSync(ctx.Done()) cs := framework.NewCycleState() if s := pl.PreScore(context.Background(), cs, tt.pod, tt.nodes); !s.IsSuccess() { t.Fatal(s.AsError()) @@ -128,7 +209,7 @@ func TestPreScoreStateEmptyNodes(t *testing.T) { if err != nil { t.Fatal(err) } - if diff := cmp.Diff(tt.want, got); diff != "" { + if diff := cmp.Diff(tt.want, got, cmpOpts...); diff != "" { t.Errorf("PodTopologySpread#PreScore() returned (-want, +got):\n%s", diff) } }) @@ -155,7 +236,7 @@ func TestPodTopologySpreadScore(t *testing.T) { // if there is only one candidate node, it should be scored to 10 name: "one constraint on node, no existing pods", pod: st.MakePod().Name("p").Label("foo", ""). - SpreadConstraint(1, "node", softSpread, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "node", v1.ScheduleAnyway, st.MakeLabelSelector().Exists("foo").Obj()). Obj(), nodes: []*v1.Node{ st.MakeNode().Name("node-a").Label("node", "node-a").Obj(), @@ -170,7 +251,7 @@ func TestPodTopologySpreadScore(t *testing.T) { // if there is only one candidate node, it should be scored to 10 name: "one constraint on node, only one node is candidate", pod: st.MakePod().Name("p").Label("foo", ""). - SpreadConstraint(1, "node", softSpread, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "node", v1.ScheduleAnyway, st.MakeLabelSelector().Exists("foo").Obj()). Obj(), existingPods: []*v1.Pod{ st.MakePod().Name("p-a1").Node("node-a").Label("foo", "").Obj(), @@ -190,7 +271,7 @@ func TestPodTopologySpreadScore(t *testing.T) { { name: "one constraint on node, all nodes have the same number of matching pods", pod: st.MakePod().Name("p").Label("foo", ""). - SpreadConstraint(1, "node", softSpread, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "node", v1.ScheduleAnyway, st.MakeLabelSelector().Exists("foo").Obj()). Obj(), existingPods: []*v1.Pod{ st.MakePod().Name("p-a1").Node("node-a").Label("foo", "").Obj(), @@ -211,7 +292,7 @@ func TestPodTopologySpreadScore(t *testing.T) { // so scores = 400/6, 500/6, 600/6, 300/6 name: "one constraint on node, all 4 nodes are candidates", pod: st.MakePod().Name("p").Label("foo", ""). - SpreadConstraint(1, "node", softSpread, st.MakeLabelSelector().Exists("foo").Obj()). 
+ SpreadConstraint(1, "node", v1.ScheduleAnyway, st.MakeLabelSelector().Exists("foo").Obj()). Obj(), existingPods: []*v1.Pod{ st.MakePod().Name("p-a1").Node("node-a").Label("foo", "").Obj(), @@ -241,7 +322,7 @@ func TestPodTopologySpreadScore(t *testing.T) { // so scores = 300/6, 500/6, 600/6 name: "one constraint on node, 3 out of 4 nodes are candidates", pod: st.MakePod().Name("p").Label("foo", ""). - SpreadConstraint(1, "node", softSpread, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "node", v1.ScheduleAnyway, st.MakeLabelSelector().Exists("foo").Obj()). Obj(), existingPods: []*v1.Pod{ st.MakePod().Name("p-a1").Node("node-a").Label("foo", "").Obj(), @@ -275,7 +356,7 @@ func TestPodTopologySpreadScore(t *testing.T) { // so scores = 100/4, 0, 400/4 name: "one constraint on node, 3 out of 4 nodes are candidates", pod: st.MakePod().Name("p").Label("foo", ""). - SpreadConstraint(1, "node", softSpread, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "node", v1.ScheduleAnyway, st.MakeLabelSelector().Exists("foo").Obj()). Obj(), existingPods: []*v1.Pod{ st.MakePod().Name("p-a1").Node("node-a").Label("foo", "").Obj(), @@ -309,7 +390,7 @@ func TestPodTopologySpreadScore(t *testing.T) { // so scores = 1000/12, 1000/12, 1200/12 name: "one constraint on zone, 3 out of 4 nodes are candidates", pod: st.MakePod().Name("p").Label("foo", ""). - SpreadConstraint(1, "zone", softSpread, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "zone", v1.ScheduleAnyway, st.MakeLabelSelector().Exists("foo").Obj()). Obj(), existingPods: []*v1.Pod{ st.MakePod().Name("p-a1").Node("node-a").Label("foo", "").Obj(), @@ -343,8 +424,8 @@ func TestPodTopologySpreadScore(t *testing.T) { // so scores = 800/8, 500/8 name: "two Constraints on zone and node, 2 out of 4 nodes are candidates", pod: st.MakePod().Name("p").Label("foo", ""). - SpreadConstraint(1, "zone", softSpread, st.MakeLabelSelector().Exists("foo").Obj()). - SpreadConstraint(1, "node", softSpread, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "zone", v1.ScheduleAnyway, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "node", v1.ScheduleAnyway, st.MakeLabelSelector().Exists("foo").Obj()). Obj(), existingPods: []*v1.Pod{ st.MakePod().Name("p-a1").Node("node-a").Label("foo", "").Obj(), @@ -386,8 +467,8 @@ func TestPodTopologySpreadScore(t *testing.T) { // so scores = 600/7, 500/7, 700/7, 600/7 name: "two Constraints on zone and node, with different labelSelectors", pod: st.MakePod().Name("p").Label("foo", "").Label("bar", ""). - SpreadConstraint(1, "zone", softSpread, st.MakeLabelSelector().Exists("foo").Obj()). - SpreadConstraint(1, "node", softSpread, st.MakeLabelSelector().Exists("bar").Obj()). + SpreadConstraint(1, "zone", v1.ScheduleAnyway, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "node", v1.ScheduleAnyway, st.MakeLabelSelector().Exists("bar").Obj()). Obj(), existingPods: []*v1.Pod{ st.MakePod().Name("p-a1").Node("node-a").Label("foo", "").Obj(), @@ -417,8 +498,8 @@ func TestPodTopologySpreadScore(t *testing.T) { // so scores = 600/6, 500/6, 400/6, 300/6 name: "two Constraints on zone and node, with different labelSelectors, some nodes have 0 pods", pod: st.MakePod().Name("p").Label("foo", "").Label("bar", ""). - SpreadConstraint(1, "zone", softSpread, st.MakeLabelSelector().Exists("foo").Obj()). - SpreadConstraint(1, "node", softSpread, st.MakeLabelSelector().Exists("bar").Obj()). 
+ SpreadConstraint(1, "zone", v1.ScheduleAnyway, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "node", v1.ScheduleAnyway, st.MakeLabelSelector().Exists("bar").Obj()). Obj(), existingPods: []*v1.Pod{ st.MakePod().Name("p-b1").Node("node-b").Label("bar", "").Obj(), @@ -447,8 +528,8 @@ func TestPodTopologySpreadScore(t *testing.T) { // so scores = 400/5, 300/5, 500/5 name: "two Constraints on zone and node, with different labelSelectors, 3 out of 4 nodes are candidates", pod: st.MakePod().Name("p").Label("foo", "").Label("bar", ""). - SpreadConstraint(1, "zone", softSpread, st.MakeLabelSelector().Exists("foo").Obj()). - SpreadConstraint(1, "node", softSpread, st.MakeLabelSelector().Exists("bar").Obj()). + SpreadConstraint(1, "zone", v1.ScheduleAnyway, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "node", v1.ScheduleAnyway, st.MakeLabelSelector().Exists("bar").Obj()). Obj(), existingPods: []*v1.Pod{ st.MakePod().Name("p-a1").Node("node-a").Label("foo", "").Obj(), @@ -473,7 +554,7 @@ func TestPodTopologySpreadScore(t *testing.T) { { name: "existing pods in a different namespace do not count", pod: st.MakePod().Name("p").Label("foo", ""). - SpreadConstraint(1, "node", softSpread, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, "node", v1.ScheduleAnyway, st.MakeLabelSelector().Exists("foo").Obj()). Obj(), existingPods: []*v1.Pod{ st.MakePod().Name("p-a1").Namespace("ns1").Node("node-a").Label("foo", "").Obj(), @@ -493,7 +574,7 @@ func TestPodTopologySpreadScore(t *testing.T) { { name: "terminating Pods should be excluded", pod: st.MakePod().Name("p").Label("foo", "").SpreadConstraint( - 1, "node", softSpread, st.MakeLabelSelector().Exists("foo").Obj(), + 1, "node", v1.ScheduleAnyway, st.MakeLabelSelector().Exists("foo").Obj(), ).Obj(), nodes: []*v1.Node{ st.MakeNode().Name("node-a").Label("node", "node-a").Obj(), @@ -536,7 +617,7 @@ func TestPodTopologySpreadScore(t *testing.T) { if !status.IsSuccess() { t.Errorf("unexpected error: %v", status) } - if diff := cmp.Diff(tt.want, gotList); diff != "" { + if diff := cmp.Diff(tt.want, gotList, cmpOpts...); diff != "" { t.Errorf("unexpected scores (-want,+got):\n%s", diff) } }) @@ -554,7 +635,7 @@ func BenchmarkTestPodTopologySpreadScore(b *testing.B) { { name: "1000nodes/single-constraint-zone", pod: st.MakePod().Name("p").Label("foo", ""). - SpreadConstraint(1, v1.LabelZoneFailureDomain, softSpread, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, v1.LabelZoneFailureDomain, v1.ScheduleAnyway, st.MakeLabelSelector().Exists("foo").Obj()). Obj(), existingPodsNum: 10000, allNodesNum: 1000, @@ -563,7 +644,7 @@ func BenchmarkTestPodTopologySpreadScore(b *testing.B) { { name: "1000nodes/single-constraint-node", pod: st.MakePod().Name("p").Label("foo", ""). - SpreadConstraint(1, v1.LabelHostname, softSpread, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, v1.LabelHostname, v1.ScheduleAnyway, st.MakeLabelSelector().Exists("foo").Obj()). Obj(), existingPodsNum: 10000, allNodesNum: 1000, @@ -572,8 +653,8 @@ func BenchmarkTestPodTopologySpreadScore(b *testing.B) { { name: "1000nodes/two-Constraints-zone-node", pod: st.MakePod().Name("p").Label("foo", "").Label("bar", ""). - SpreadConstraint(1, v1.LabelZoneFailureDomain, softSpread, st.MakeLabelSelector().Exists("foo").Obj()). - SpreadConstraint(1, v1.LabelHostname, softSpread, st.MakeLabelSelector().Exists("bar").Obj()). 
+ SpreadConstraint(1, v1.LabelZoneFailureDomain, v1.ScheduleAnyway, st.MakeLabelSelector().Exists("foo").Obj()). + SpreadConstraint(1, v1.LabelHostname, v1.ScheduleAnyway, st.MakeLabelSelector().Exists("bar").Obj()). Obj(), existingPodsNum: 10000, allNodesNum: 1000, @@ -613,8 +694,10 @@ func BenchmarkTestPodTopologySpreadScore(b *testing.B) { } } -// The tests in this file compare the performance of SelectorSpreadPriority -// against EvenPodsSpreadPriority with a similar rule. +// The following test allows to compare PodTopologySpread.Score with +// DefaultPodTopologySpread.Score by using a similar rule. +// See pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread_perf_test.go +// for the equivalent test. var ( tests = []struct { @@ -638,31 +721,43 @@ var ( func BenchmarkTestDefaultEvenPodsSpreadPriority(b *testing.B) { for _, tt := range tests { b.Run(tt.name, func(b *testing.B) { - pod := st.MakePod().Name("p").Label("foo", ""). - SpreadConstraint(1, v1.LabelHostname, softSpread, st.MakeLabelSelector().Exists("foo").Obj()). - SpreadConstraint(1, v1.LabelZoneFailureDomain, softSpread, st.MakeLabelSelector().Exists("foo").Obj()).Obj() + pod := st.MakePod().Name("p").Label("foo", "").Obj() existingPods, allNodes, filteredNodes := st.MakeNodesAndPodsForEvenPodsSpread(pod.Labels, tt.existingPodsNum, tt.allNodesNum, tt.allNodesNum) state := framework.NewCycleState() snapshot := cache.NewSnapshot(existingPods, allNodes) - p := &PodTopologySpread{sharedLister: snapshot} - - status := p.PreScore(context.Background(), state, pod, filteredNodes) - if !status.IsSuccess() { - b.Fatalf("unexpected error: %v", status) + p := &PodTopologySpread{ + sharedLister: snapshot, + Args: Args{ + DefaultConstraints: []v1.TopologySpreadConstraint{ + {MaxSkew: 1, TopologyKey: v1.LabelHostname, WhenUnsatisfiable: v1.ScheduleAnyway}, + {MaxSkew: 1, TopologyKey: v1.LabelZoneFailureDomain, WhenUnsatisfiable: v1.ScheduleAnyway}, + }, + }, } + client := fake.NewSimpleClientset( + &v1.Service{Spec: v1.ServiceSpec{Selector: map[string]string{"foo": ""}}}, + ) + ctx := context.Background() + informerFactory := informers.NewSharedInformerFactory(client, 0) + p.setListers(informerFactory) + informerFactory.Start(ctx.Done()) + informerFactory.WaitForCacheSync(ctx.Done()) + b.ResetTimer() for i := 0; i < b.N; i++ { var gotList framework.NodeScoreList + status := p.PreScore(ctx, state, pod, filteredNodes) + if !status.IsSuccess() { + b.Fatalf("unexpected error: %v", status) + } for _, n := range filteredNodes { - nodeName := n.Name - score, status := p.Score(context.Background(), state, pod, nodeName) + score, status := p.Score(context.Background(), state, pod, n.Name) if !status.IsSuccess() { b.Fatalf("unexpected error: %v", status) } - gotList = append(gotList, framework.NodeScore{Name: nodeName, Score: score}) + gotList = append(gotList, framework.NodeScore{Name: n.Name, Score: score}) } - status = p.NormalizeScore(context.Background(), state, pod, gotList) if !status.IsSuccess() { b.Fatal(status) diff --git a/pkg/scheduler/framework/plugins/serviceaffinity/BUILD b/pkg/scheduler/framework/plugins/serviceaffinity/BUILD index 31e0779f982..b2b8552cb95 100644 --- a/pkg/scheduler/framework/plugins/serviceaffinity/BUILD +++ b/pkg/scheduler/framework/plugins/serviceaffinity/BUILD @@ -6,6 +6,7 @@ go_library( importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/serviceaffinity", visibility = ["//visibility:public"], deps = [ + "//pkg/scheduler/framework/plugins/helper:go_default_library", 
"//pkg/scheduler/framework/v1alpha1:go_default_library", "//pkg/scheduler/listers:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", diff --git a/pkg/scheduler/framework/plugins/serviceaffinity/service_affinity.go b/pkg/scheduler/framework/plugins/serviceaffinity/service_affinity.go index 1edc43bf3fa..7c0aa4e43b1 100644 --- a/pkg/scheduler/framework/plugins/serviceaffinity/service_affinity.go +++ b/pkg/scheduler/framework/plugins/serviceaffinity/service_affinity.go @@ -24,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" corelisters "k8s.io/client-go/listers/core/v1" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" "k8s.io/kubernetes/pkg/scheduler/nodeinfo" @@ -109,7 +110,7 @@ func (pl *ServiceAffinity) createPreFilterState(pod *v1.Pod) (*preFilterState, e return nil, fmt.Errorf("a pod is required to calculate service affinity preFilterState") } // Store services which match the pod. - matchingPodServices, err := schedulerlisters.GetPodServices(pl.serviceLister, pod) + matchingPodServices, err := helper.GetPodServices(pl.serviceLister, pod) if err != nil { return nil, fmt.Errorf("listing pod services: %v", err.Error()) } @@ -282,7 +283,7 @@ func (pl *ServiceAffinity) Score(ctx context.Context, state *framework.CycleStat // Pods matched namespace,selector on current node. var selector labels.Selector - if services, err := schedulerlisters.GetPodServices(pl.serviceLister, pod); err == nil && len(services) > 0 { + if services, err := helper.GetPodServices(pl.serviceLister, pod); err == nil && len(services) > 0 { selector = labels.SelectorFromSet(services[0].Spec.Selector) } else { selector = labels.NewSelector() diff --git a/pkg/scheduler/listers/BUILD b/pkg/scheduler/listers/BUILD index 698e5f8c91a..162c80d4a3c 100644 --- a/pkg/scheduler/listers/BUILD +++ b/pkg/scheduler/listers/BUILD @@ -1,4 +1,4 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", @@ -13,18 +13,6 @@ go_library( ], ) -go_test( - name = "go_default_test", - srcs = ["listers_test.go"], - embed = [":go_default_library"], - deps = [ - "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/client-go/informers:go_default_library", - "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", - ], -) - filegroup( name = "package-srcs", srcs = glob(["**"]),