diff --git a/pkg/scheduler/apis/config/testing/compatibility_test.go b/pkg/scheduler/apis/config/testing/compatibility_test.go index 1ac540bbf6b..1b5ebbdafe5 100644 --- a/pkg/scheduler/apis/config/testing/compatibility_test.go +++ b/pkg/scheduler/apis/config/testing/compatibility_test.go @@ -149,7 +149,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) { ), wantPrioritizers: sets.NewString( "EqualPriority", - "SelectorSpreadPriority", "TestServiceAntiAffinity", ), wantPlugins: map[string][]config.Plugin{ @@ -167,6 +166,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) { {Name: "NodeResourcesBalancedAllocation", Weight: 2}, {Name: "NodeResourcesLeastAllocated", Weight: 2}, {Name: "NodeLabel", Weight: 4}, + {Name: "DefaultPodTopologySpread", Weight: 2}, }, }, }, @@ -205,7 +205,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) { ), wantPrioritizers: sets.NewString( "EqualPriority", - "SelectorSpreadPriority", "TestServiceAntiAffinity", ), wantPlugins: map[string][]config.Plugin{ @@ -229,6 +228,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) { {Name: "NodeResourcesLeastAllocated", Weight: 2}, {Name: "NodeAffinity", Weight: 2}, {Name: "NodeLabel", Weight: 4}, + {Name: "DefaultPodTopologySpread", Weight: 2}, }, }, }, @@ -270,7 +270,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) { ), wantPrioritizers: sets.NewString( "EqualPriority", - "SelectorSpreadPriority", "InterPodAffinityPriority", ), wantPlugins: map[string][]config.Plugin{ @@ -294,6 +293,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) { {Name: "ImageLocality", Weight: 2}, {Name: "NodeResourcesLeastAllocated", Weight: 2}, {Name: "NodeAffinity", Weight: 2}, + {Name: "DefaultPodTopologySpread", Weight: 2}, {Name: "TaintToleration", Weight: 2}, }, }, @@ -338,7 +338,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) { ), wantPrioritizers: sets.NewString( "EqualPriority", - "SelectorSpreadPriority", "InterPodAffinityPriority", ), wantPlugins: map[string][]config.Plugin{ @@ -364,6 
+363,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) { {Name: "NodeResourcesMostAllocated", Weight: 2}, {Name: "NodeAffinity", Weight: 2}, {Name: "NodePreferAvoidPods", Weight: 2}, + {Name: "DefaultPodTopologySpread", Weight: 2}, {Name: "TaintToleration", Weight: 2}, }, }, @@ -417,7 +417,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) { ), wantPrioritizers: sets.NewString( "EqualPriority", - "SelectorSpreadPriority", "InterPodAffinityPriority", ), wantPlugins: map[string][]config.Plugin{ @@ -443,6 +442,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) { {Name: "NodeResourcesMostAllocated", Weight: 2}, {Name: "NodeAffinity", Weight: 2}, {Name: "NodePreferAvoidPods", Weight: 2}, + {Name: "DefaultPodTopologySpread", Weight: 2}, {Name: "TaintToleration", Weight: 2}, }, }, @@ -507,7 +507,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) { ), wantPrioritizers: sets.NewString( "EqualPriority", - "SelectorSpreadPriority", "InterPodAffinityPriority", ), wantPlugins: map[string][]config.Plugin{ @@ -533,6 +532,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) { {Name: "NodeResourcesMostAllocated", Weight: 2}, {Name: "NodeAffinity", Weight: 2}, {Name: "NodePreferAvoidPods", Weight: 2}, + {Name: "DefaultPodTopologySpread", Weight: 2}, {Name: "TaintToleration", Weight: 2}, }, }, @@ -598,7 +598,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) { ), wantPrioritizers: sets.NewString( "EqualPriority", - "SelectorSpreadPriority", "InterPodAffinityPriority", ), wantPlugins: map[string][]config.Plugin{ @@ -625,6 +624,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) { {Name: "NodeResourcesMostAllocated", Weight: 2}, {Name: "NodeAffinity", Weight: 2}, {Name: "NodePreferAvoidPods", Weight: 2}, + {Name: "DefaultPodTopologySpread", Weight: 2}, {Name: "TaintToleration", Weight: 2}, }, }, @@ -693,7 +693,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) { ), wantPrioritizers: sets.NewString( "EqualPriority", - "SelectorSpreadPriority", 
"InterPodAffinityPriority", ), wantPlugins: map[string][]config.Plugin{ @@ -720,6 +719,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) { {Name: "NodeResourcesMostAllocated", Weight: 2}, {Name: "NodeAffinity", Weight: 2}, {Name: "NodePreferAvoidPods", Weight: 2}, + {Name: "DefaultPodTopologySpread", Weight: 2}, {Name: "TaintToleration", Weight: 2}, }, }, @@ -800,7 +800,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) { ), wantPrioritizers: sets.NewString( "EqualPriority", - "SelectorSpreadPriority", "InterPodAffinityPriority", ), wantPlugins: map[string][]config.Plugin{ @@ -828,6 +827,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) { {Name: "NodeAffinity", Weight: 2}, {Name: "NodePreferAvoidPods", Weight: 2}, {Name: "RequestedToCapacityRatio", Weight: 2}, + {Name: "DefaultPodTopologySpread", Weight: 2}, {Name: "TaintToleration", Weight: 2}, }, }, @@ -909,7 +909,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) { ), wantPrioritizers: sets.NewString( "EqualPriority", - "SelectorSpreadPriority", "InterPodAffinityPriority", ), wantPlugins: map[string][]config.Plugin{ @@ -938,6 +937,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) { {Name: "NodeAffinity", Weight: 2}, {Name: "NodePreferAvoidPods", Weight: 2}, {Name: "RequestedToCapacityRatio", Weight: 2}, + {Name: "DefaultPodTopologySpread", Weight: 2}, {Name: "TaintToleration", Weight: 2}, }, }, @@ -1018,7 +1018,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) { ), wantPrioritizers: sets.NewString( "EqualPriority", - "SelectorSpreadPriority", "InterPodAffinityPriority", ), wantPlugins: map[string][]config.Plugin{ @@ -1048,6 +1047,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) { {Name: "NodeAffinity", Weight: 2}, {Name: "NodePreferAvoidPods", Weight: 2}, {Name: "RequestedToCapacityRatio", Weight: 2}, + {Name: "DefaultPodTopologySpread", Weight: 2}, {Name: "TaintToleration", Weight: 2}, }, }, @@ -1132,7 +1132,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) { ), 
wantPrioritizers: sets.NewString( "EqualPriority", - "SelectorSpreadPriority", "InterPodAffinityPriority", ), wantPlugins: map[string][]config.Plugin{ @@ -1162,6 +1161,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) { {Name: "NodeAffinity", Weight: 2}, {Name: "NodePreferAvoidPods", Weight: 2}, {Name: "RequestedToCapacityRatio", Weight: 2}, + {Name: "DefaultPodTopologySpread", Weight: 2}, {Name: "TaintToleration", Weight: 2}, }, }, @@ -1255,6 +1255,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) { "PodTopologySpread": "EvenPodsSpread", } scoreToPriorityMap := map[string]string{ + "DefaultPodTopologySpread": "SelectorSpreadPriority", "ImageLocality": "ImageLocalityPriority", "NodeAffinity": "NodeAffinityPriority", "NodePreferAvoidPods": "NodePreferAvoidPodsPriority", diff --git a/pkg/scheduler/framework/plugins/BUILD b/pkg/scheduler/framework/plugins/BUILD index 457637ad657..2a61f860b34 100644 --- a/pkg/scheduler/framework/plugins/BUILD +++ b/pkg/scheduler/framework/plugins/BUILD @@ -9,6 +9,7 @@ go_library( "//pkg/scheduler/algorithm/predicates:go_default_library", "//pkg/scheduler/algorithm/priorities:go_default_library", "//pkg/scheduler/apis/config:go_default_library", + "//pkg/scheduler/framework/plugins/defaultpodtopologyspread:go_default_library", "//pkg/scheduler/framework/plugins/imagelocality:go_default_library", "//pkg/scheduler/framework/plugins/interpodaffinity:go_default_library", "//pkg/scheduler/framework/plugins/nodeaffinity:go_default_library", @@ -43,6 +44,7 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", + "//pkg/scheduler/framework/plugins/defaultpodtopologyspread:all-srcs", "//pkg/scheduler/framework/plugins/examples:all-srcs", "//pkg/scheduler/framework/plugins/imagelocality:all-srcs", "//pkg/scheduler/framework/plugins/interpodaffinity:all-srcs", diff --git a/pkg/scheduler/framework/plugins/default_registry.go b/pkg/scheduler/framework/plugins/default_registry.go index 8a470cb125e..a25a91261bf 100644 --- 
a/pkg/scheduler/framework/plugins/default_registry.go +++ b/pkg/scheduler/framework/plugins/default_registry.go @@ -25,6 +25,7 @@ import ( "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities" "k8s.io/kubernetes/pkg/scheduler/apis/config" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpodtopologyspread" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/imagelocality" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity" @@ -55,6 +56,7 @@ type RegistryArgs struct { // plugins can register additional plugins through the WithFrameworkOutOfTreeRegistry option. func NewDefaultRegistry(args *RegistryArgs) framework.Registry { return framework.Registry{ + defaultpodtopologyspread.Name: defaultpodtopologyspread.New, imagelocality.Name: imagelocality.New, tainttoleration.Name: tainttoleration.New, nodename.Name: nodename.New, @@ -209,6 +211,11 @@ func NewDefaultConfigProducerRegistry() *ConfigProducerRegistry { }) // Register Priorities. 
+ registry.RegisterPriority(priorities.SelectorSpreadPriority, + func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { + plugins.Score = appendToPluginSet(plugins.Score, defaultpodtopologyspread.Name, &args.Weight) + return + }) registry.RegisterPriority(priorities.TaintTolerationPriority, func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { plugins.Score = appendToPluginSet(plugins.Score, tainttoleration.Name, &args.Weight) diff --git a/pkg/scheduler/framework/plugins/defaultpodtopologyspread/BUILD b/pkg/scheduler/framework/plugins/defaultpodtopologyspread/BUILD new file mode 100644 index 00000000000..630262587a5 --- /dev/null +++ b/pkg/scheduler/framework/plugins/defaultpodtopologyspread/BUILD @@ -0,0 +1,45 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["default_pod_topology_spread.go"], + importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpodtopologyspread", + visibility = ["//visibility:public"], + deps = [ + "//pkg/scheduler/algorithm/priorities:go_default_library", + "//pkg/scheduler/framework/plugins/migration:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["default_pod_topology_spread_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/scheduler/algorithm/priorities:go_default_library", + "//pkg/scheduler/framework/plugins/migration:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/listers/fake:go_default_library", + "//pkg/scheduler/nodeinfo/snapshot:go_default_library", + "//staging/src/k8s.io/api/apps/v1:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + 
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread.go b/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread.go new file mode 100644 index 00000000000..1b0945dbe55 --- /dev/null +++ b/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread.go @@ -0,0 +1,88 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package defaultpodtopologyspread + +import ( + "context" + "fmt" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" +) + +// DefaultPodTopologySpread is a plugin that calculates selector spread priority. 
+type DefaultPodTopologySpread struct { + handle framework.FrameworkHandle + calculateSpreadPriorityMap priorities.PriorityMapFunction + calculateSpreadPriorityReduce priorities.PriorityReduceFunction +} + +var _ framework.ScorePlugin = &DefaultPodTopologySpread{} + +// Name is the name of the plugin used in the plugin registry and configurations. +const Name = "DefaultPodTopologySpread" + +// Name returns name of the plugin. It is used in logs, etc. +func (pl *DefaultPodTopologySpread) Name() string { + return Name +} + +// Score invoked at the Score extension point. +// The "score" returned in this function is the matching number of pods on the `nodeName`, +// it is normalized later. +func (pl *DefaultPodTopologySpread) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) { + nodeInfo, err := pl.handle.SnapshotSharedLister().NodeInfos().Get(nodeName) + if err != nil { + return 0, framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v", nodeName, err)) + } + + meta := migration.PriorityMetadata(state) + s, err := pl.calculateSpreadPriorityMap(pod, meta, nodeInfo) + return s.Score, migration.ErrorToFrameworkStatus(err) +} + +// NormalizeScore invoked after scoring all nodes. +func (pl *DefaultPodTopologySpread) NormalizeScore(ctx context.Context, state *framework.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status { + meta := migration.PriorityMetadata(state) + err := pl.calculateSpreadPriorityReduce(pod, meta, pl.handle.SnapshotSharedLister(), scores) + return migration.ErrorToFrameworkStatus(err) +} + +// ScoreExtensions of the Score plugin. +func (pl *DefaultPodTopologySpread) ScoreExtensions() framework.ScoreExtensions { + return pl +} + +// New initializes a new plugin and returns it. 
+func New(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin, error) { + informerFactory := handle.SharedInformerFactory() + calculateSpreadPriorityMap, calculateSpreadPriorityReduce := priorities.NewSelectorSpreadPriority( + informerFactory.Core().V1().Services().Lister(), + informerFactory.Core().V1().ReplicationControllers().Lister(), + informerFactory.Apps().V1().ReplicaSets().Lister(), + informerFactory.Apps().V1().StatefulSets().Lister(), + ) + + return &DefaultPodTopologySpread{ + handle: handle, + calculateSpreadPriorityMap: calculateSpreadPriorityMap, + calculateSpreadPriorityReduce: calculateSpreadPriorityReduce, + }, nil +} diff --git a/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread_test.go b/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread_test.go new file mode 100644 index 00000000000..cc786f4864d --- /dev/null +++ b/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread_test.go @@ -0,0 +1,673 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package defaultpodtopologyspread + +import ( + "context" + "reflect" + "sort" + "testing" + + apps "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake" + nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot" +) + +func controllerRef(kind, name, uid string) []metav1.OwnerReference { + // TODO: When ControllerRef will be implemented uncomment code below. + return nil + //trueVar := true + //return []metav1.OwnerReference{ + // {Kind: kind, Name: name, UID: types.UID(uid), Controller: &trueVar}, + //} +} + +func TestDefaultPodTopologySpreadScore(t *testing.T) { + labels1 := map[string]string{ + "foo": "bar", + "baz": "blah", + } + labels2 := map[string]string{ + "bar": "foo", + "baz": "blah", + } + zone1Spec := v1.PodSpec{ + NodeName: "machine1", + } + zone2Spec := v1.PodSpec{ + NodeName: "machine2", + } + tests := []struct { + pod *v1.Pod + pods []*v1.Pod + nodes []string + rcs []*v1.ReplicationController + rss []*apps.ReplicaSet + services []*v1.Service + sss []*apps.StatefulSet + expectedList framework.NodeScoreList + name string + }{ + { + pod: new(v1.Pod), + nodes: []string{"machine1", "machine2"}, + expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}}, + name: "nothing scheduled", + }, + { + pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, + pods: []*v1.Pod{{Spec: zone1Spec}}, + nodes: []string{"machine1", "machine2"}, + expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}}, + name: "no services", + }, + { + pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, + pods: 
[]*v1.Pod{{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}}, + nodes: []string{"machine1", "machine2"}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"key": "value"}}}}, + expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}}, + name: "different services", + }, + { + pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}, + {Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, + }, + nodes: []string{"machine1", "machine2"}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, + expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 0}}, + name: "two pods, one service pod", + }, + { + pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}, + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: metav1.NamespaceDefault}}, + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: "ns1"}}, + {Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, + {Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}, + }, + nodes: []string{"machine1", "machine2"}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, + expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 0}}, + name: "five pods, one service pod in no namespace", + }, + { + pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: metav1.NamespaceDefault}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: "ns1"}}, + {Spec: zone2Spec, ObjectMeta: 
metav1.ObjectMeta{Labels: labels1, Namespace: metav1.NamespaceDefault}}, + {Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}, + }, + nodes: []string{"machine1", "machine2"}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault}}}, + expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 0}}, + name: "four pods, one service pod in default namespace", + }, + { + pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: "ns1"}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: metav1.NamespaceDefault}}, + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: "ns2"}}, + {Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: "ns1"}}, + {Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}, + }, + nodes: []string{"machine1", "machine2"}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: metav1.ObjectMeta{Namespace: "ns1"}}}, + expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 0}}, + name: "five pods, one service pod in specific namespace", + }, + { + pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}, + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, + {Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, + }, + nodes: []string{"machine1", "machine2"}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, + expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}}, + name: "three pods, two service pods on different machines", + }, + { + pod: &v1.Pod{ObjectMeta: 
metav1.ObjectMeta{Labels: labels1}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}, + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, + {Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, + {Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, + }, + nodes: []string{"machine1", "machine2"}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, + expectedList: []framework.NodeScore{{Name: "machine1", Score: 50}, {Name: "machine2", Score: 0}}, + name: "four pods, three service pods", + }, + { + pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}, + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, + {Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, + }, + nodes: []string{"machine1", "machine2"}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}}, + expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 50}}, + name: "service with partial pod label matches", + }, + { + pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}, + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, + {Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, + }, + nodes: []string{"machine1", "machine2"}, + rcs: []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}}, + // "baz=blah" matches both 
labels1 and labels2, and "foo=bar" matches only labels 1. This means that we assume that we want to + // do spreading pod2 and pod3 and not pod1. + expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}}, + name: "service with partial pod label matches with service and replication controller", + }, + { + pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}, + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, + {Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, + }, + nodes: []string{"machine1", "machine2"}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}}, + rss: []*apps.ReplicaSet{{Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}}, + // We use ReplicaSet, instead of ReplicationController. The result should be exactly as above. 
+ expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}}, + name: "service with partial pod label matches with service and replica set", + }, + { + pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}, + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}}, + {Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}}, + }, + nodes: []string{"machine1", "machine2"}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}}, + sss: []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}}, + expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}}, + name: "service with partial pod label matches with service and stateful set", + }, + { + pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}, + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, + {Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, + }, + nodes: []string{"machine1", "machine2"}, + rcs: []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}}, + // Taken together Service and Replication 
Controller should match no pods. + expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}}, + name: "disjoined service and replication controller matches no pods", + }, + { + pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}, + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, + {Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, + }, + nodes: []string{"machine1", "machine2"}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}}, + rss: []*apps.ReplicaSet{{Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}}, + // We use ReplicaSet, instead of ReplicationController. The result should be exactly as above. 
+ expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}}, + name: "disjoined service and replica set matches no pods", + }, + { + pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}, + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}}, + {Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}}, + }, + nodes: []string{"machine1", "machine2"}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}}, + sss: []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}}, + expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}}, + name: "disjoined service and stateful set matches no pods", + }, + { + pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}, + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, + {Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, + }, + nodes: []string{"machine1", "machine2"}, + rcs: []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}}, + // Both Nodes have one pod from the given RC, hence both get 0 score. 
+ expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}}, + name: "Replication controller with partial pod label matches", + }, + { + pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}, + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, + {Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, + }, + nodes: []string{"machine1", "machine2"}, + rss: []*apps.ReplicaSet{{Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}}, + // We use ReplicaSet, instead of ReplicationController. The result should be exactly as above. + expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}}, + name: "Replica set with partial pod label matches", + }, + { + pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}, + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}}, + {Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}}, + }, + nodes: []string{"machine1", "machine2"}, + sss: []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}}, + // We use StatefulSet, instead of ReplicationController. The result should be exactly as above. 
+ expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}}, + name: "StatefulSet with partial pod label matches", + }, + { + pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, + {Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, + }, + nodes: []string{"machine1", "machine2"}, + rcs: []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"baz": "blah"}}}}, + expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 50}}, + name: "Another replication controller with partial pod label matches", + }, + { + pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, + {Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, + }, + nodes: []string{"machine1", "machine2"}, + rss: []*apps.ReplicaSet{{Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"baz": "blah"}}}}}, + // We use ReplicaSet, instead of ReplicationController. The result should be exactly as above. 
+ expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 50}}, + name: "Another replica set with partial pod label matches", + }, + { + pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}}, + {Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}}, + {Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}}, + }, + nodes: []string{"machine1", "machine2"}, + sss: []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"baz": "blah"}}}}}, + // We use StatefulSet, instead of ReplicationController. The result should be exactly as above. 
+ expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 50}}, + name: "Another stateful set with partial pod label matches", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + nodes := makeNodeList(test.nodes) + snapshot := nodeinfosnapshot.NewSnapshot(test.pods, nodes) + fh, _ := framework.NewFramework(nil, nil, nil, framework.WithNodeInfoSnapshot(snapshot)) + + mapFunction, reduceFunction := priorities.NewSelectorSpreadPriority( + fakelisters.ServiceLister(test.services), + fakelisters.ControllerLister(test.rcs), + fakelisters.ReplicaSetLister(test.rss), + fakelisters.StatefulSetLister(test.sss), + ) + + metaDataProducer := priorities.NewPriorityMetadataFactory( + fakelisters.ServiceLister(test.services), + fakelisters.ControllerLister(test.rcs), + fakelisters.ReplicaSetLister(test.rss), + fakelisters.StatefulSetLister(test.sss)) + metaData := metaDataProducer(test.pod, nodes, snapshot) + + state := framework.NewCycleState() + state.Write(migration.PrioritiesStateKey, &migration.PrioritiesStateData{Reference: metaData}) + + plugin := &DefaultPodTopologySpread{ + handle: fh, + calculateSpreadPriorityMap: mapFunction, + calculateSpreadPriorityReduce: reduceFunction, + } + + var gotList framework.NodeScoreList + for _, nodeName := range test.nodes { + score, status := plugin.Score(context.Background(), state, test.pod, nodeName) + if !status.IsSuccess() { + t.Errorf("unexpected error: %v", status) + } + gotList = append(gotList, framework.NodeScore{Name: nodeName, Score: score}) + } + + status := plugin.ScoreExtensions().NormalizeScore(context.Background(), state, test.pod, gotList) + if !status.IsSuccess() { + t.Errorf("unexpected error: %v", status) + } + + if !reflect.DeepEqual(test.expectedList, gotList) { + t.Errorf("expected:\n\t%+v,\ngot:\n\t%+v", test.expectedList, gotList) + } + }) + } +} + +func buildPod(nodeName string, labels map[string]string, ownerRefs []metav1.OwnerReference) 
*v1.Pod { + return &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Labels: labels, OwnerReferences: ownerRefs}, + Spec: v1.PodSpec{NodeName: nodeName}, + } +} + +func TestZoneSelectorSpreadPriority(t *testing.T) { + labels1 := map[string]string{ + "label1": "l1", + "baz": "blah", + } + labels2 := map[string]string{ + "label2": "l2", + "baz": "blah", + } + + const nodeMachine1Zone1 = "machine1.zone1" + const nodeMachine1Zone2 = "machine1.zone2" + const nodeMachine2Zone2 = "machine2.zone2" + const nodeMachine1Zone3 = "machine1.zone3" + const nodeMachine2Zone3 = "machine2.zone3" + const nodeMachine3Zone3 = "machine3.zone3" + + buildNodeLabels := func(failureDomain string) map[string]string { + labels := map[string]string{ + v1.LabelZoneFailureDomain: failureDomain, + } + return labels + } + labeledNodes := map[string]map[string]string{ + nodeMachine1Zone1: buildNodeLabels("zone1"), + nodeMachine1Zone2: buildNodeLabels("zone2"), + nodeMachine2Zone2: buildNodeLabels("zone2"), + nodeMachine1Zone3: buildNodeLabels("zone3"), + nodeMachine2Zone3: buildNodeLabels("zone3"), + nodeMachine3Zone3: buildNodeLabels("zone3"), + } + + tests := []struct { + pod *v1.Pod + pods []*v1.Pod + rcs []*v1.ReplicationController + rss []*apps.ReplicaSet + services []*v1.Service + sss []*apps.StatefulSet + expectedList framework.NodeScoreList + name string + }{ + { + pod: new(v1.Pod), + expectedList: []framework.NodeScore{ + {Name: nodeMachine1Zone1, Score: framework.MaxNodeScore}, + {Name: nodeMachine1Zone2, Score: framework.MaxNodeScore}, + {Name: nodeMachine2Zone2, Score: framework.MaxNodeScore}, + {Name: nodeMachine1Zone3, Score: framework.MaxNodeScore}, + {Name: nodeMachine2Zone3, Score: framework.MaxNodeScore}, + {Name: nodeMachine3Zone3, Score: framework.MaxNodeScore}, + }, + name: "nothing scheduled", + }, + { + pod: buildPod("", labels1, nil), + pods: []*v1.Pod{buildPod(nodeMachine1Zone1, nil, nil)}, + expectedList: []framework.NodeScore{ + {Name: nodeMachine1Zone1, Score: 
framework.MaxNodeScore}, + {Name: nodeMachine1Zone2, Score: framework.MaxNodeScore}, + {Name: nodeMachine2Zone2, Score: framework.MaxNodeScore}, + {Name: nodeMachine1Zone3, Score: framework.MaxNodeScore}, + {Name: nodeMachine2Zone3, Score: framework.MaxNodeScore}, + {Name: nodeMachine3Zone3, Score: framework.MaxNodeScore}, + }, + name: "no services", + }, + { + pod: buildPod("", labels1, nil), + pods: []*v1.Pod{buildPod(nodeMachine1Zone1, labels2, nil)}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"key": "value"}}}}, + expectedList: []framework.NodeScore{ + {Name: nodeMachine1Zone1, Score: framework.MaxNodeScore}, + {Name: nodeMachine1Zone2, Score: framework.MaxNodeScore}, + {Name: nodeMachine2Zone2, Score: framework.MaxNodeScore}, + {Name: nodeMachine1Zone3, Score: framework.MaxNodeScore}, + {Name: nodeMachine2Zone3, Score: framework.MaxNodeScore}, + {Name: nodeMachine3Zone3, Score: framework.MaxNodeScore}, + }, + name: "different services", + }, + { + pod: buildPod("", labels1, nil), + pods: []*v1.Pod{ + buildPod(nodeMachine1Zone1, labels2, nil), + buildPod(nodeMachine1Zone2, labels2, nil), + }, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, + expectedList: []framework.NodeScore{ + {Name: nodeMachine1Zone1, Score: framework.MaxNodeScore}, + {Name: nodeMachine1Zone2, Score: framework.MaxNodeScore}, + {Name: nodeMachine2Zone2, Score: framework.MaxNodeScore}, + {Name: nodeMachine1Zone3, Score: framework.MaxNodeScore}, + {Name: nodeMachine2Zone3, Score: framework.MaxNodeScore}, + {Name: nodeMachine3Zone3, Score: framework.MaxNodeScore}, + }, + name: "two pods, 0 matching", + }, + { + pod: buildPod("", labels1, nil), + pods: []*v1.Pod{ + buildPod(nodeMachine1Zone1, labels2, nil), + buildPod(nodeMachine1Zone2, labels1, nil), + }, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, + expectedList: []framework.NodeScore{ + {Name: nodeMachine1Zone1, Score: framework.MaxNodeScore}, + {Name: 
nodeMachine1Zone2, Score: 0}, // Already have pod on machine + {Name: nodeMachine2Zone2, Score: 33}, // Already have pod in zone + {Name: nodeMachine1Zone3, Score: framework.MaxNodeScore}, + {Name: nodeMachine2Zone3, Score: framework.MaxNodeScore}, + {Name: nodeMachine3Zone3, Score: framework.MaxNodeScore}, + }, + name: "two pods, 1 matching (in z2)", + }, + { + pod: buildPod("", labels1, nil), + pods: []*v1.Pod{ + buildPod(nodeMachine1Zone1, labels2, nil), + buildPod(nodeMachine1Zone2, labels1, nil), + buildPod(nodeMachine2Zone2, labels1, nil), + buildPod(nodeMachine1Zone3, labels2, nil), + buildPod(nodeMachine2Zone3, labels1, nil), + }, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, + expectedList: []framework.NodeScore{ + {Name: nodeMachine1Zone1, Score: framework.MaxNodeScore}, + {Name: nodeMachine1Zone2, Score: 0}, // Pod on node + {Name: nodeMachine2Zone2, Score: 0}, // Pod on node + {Name: nodeMachine1Zone3, Score: 66}, // Pod in zone + {Name: nodeMachine2Zone3, Score: 33}, // Pod on node + {Name: nodeMachine3Zone3, Score: 66}, // Pod in zone + }, + name: "five pods, 3 matching (z2=2, z3=1)", + }, + { + pod: buildPod("", labels1, nil), + pods: []*v1.Pod{ + buildPod(nodeMachine1Zone1, labels1, nil), + buildPod(nodeMachine1Zone2, labels1, nil), + buildPod(nodeMachine2Zone2, labels2, nil), + buildPod(nodeMachine1Zone3, labels1, nil), + }, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, + expectedList: []framework.NodeScore{ + {Name: nodeMachine1Zone1, Score: 0}, // Pod on node + {Name: nodeMachine1Zone2, Score: 0}, // Pod on node + {Name: nodeMachine2Zone2, Score: 33}, // Pod in zone + {Name: nodeMachine1Zone3, Score: 0}, // Pod on node + {Name: nodeMachine2Zone3, Score: 33}, // Pod in zone + {Name: nodeMachine3Zone3, Score: 33}, // Pod in zone + }, + name: "four pods, 3 matching (z1=1, z2=1, z3=1)", + }, + { + pod: buildPod("", labels1, nil), + pods: []*v1.Pod{ + buildPod(nodeMachine1Zone1, labels1, nil), + 
buildPod(nodeMachine1Zone2, labels1, nil), + buildPod(nodeMachine1Zone3, labels1, nil), + buildPod(nodeMachine2Zone2, labels2, nil), + }, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, + expectedList: []framework.NodeScore{ + {Name: nodeMachine1Zone1, Score: 0}, // Pod on node + {Name: nodeMachine1Zone2, Score: 0}, // Pod on node + {Name: nodeMachine2Zone2, Score: 33}, // Pod in zone + {Name: nodeMachine1Zone3, Score: 0}, // Pod on node + {Name: nodeMachine2Zone3, Score: 33}, // Pod in zone + {Name: nodeMachine3Zone3, Score: 33}, // Pod in zone + }, + name: "four pods, 3 matching (z1=1, z2=1, z3=1), different pod order", + }, + { + pod: buildPod("", labels1, controllerRef("ReplicationController", "name", "abc123")), + pods: []*v1.Pod{ + buildPod(nodeMachine1Zone3, labels1, controllerRef("ReplicationController", "name", "abc123")), + buildPod(nodeMachine1Zone2, labels1, controllerRef("ReplicationController", "name", "abc123")), + buildPod(nodeMachine1Zone3, labels1, controllerRef("ReplicationController", "name", "abc123")), + }, + rcs: []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: labels1}}}, + expectedList: []framework.NodeScore{ + // Note that because we put two pods on the same node (nodeMachine1Zone3), + // the values here are questionable for zone2, in particular for nodeMachine1Zone2. + // However they kind of make sense; zone1 is still most-highly favored. + // zone3 is in general least favored, and m1.z3 particularly low priority. + // We would probably prefer to see a bigger gap between putting a second + // pod on m1.z2 and putting a pod on m2.z2, but the ordering is correct. + // This is also consistent with what we have already. 
+ {Name: nodeMachine1Zone1, Score: framework.MaxNodeScore}, // No pods in zone + {Name: nodeMachine1Zone2, Score: 50}, // Pod on node + {Name: nodeMachine2Zone2, Score: 66}, // Pod in zone + {Name: nodeMachine1Zone3, Score: 0}, // Two pods on node + {Name: nodeMachine2Zone3, Score: 33}, // Pod in zone + {Name: nodeMachine3Zone3, Score: 33}, // Pod in zone + }, + name: "Replication controller spreading (z1=0, z2=1, z3=2)", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + nodes := makeLabeledNodeList(labeledNodes) + snapshot := nodeinfosnapshot.NewSnapshot(test.pods, nodes) + fh, _ := framework.NewFramework(nil, nil, nil, framework.WithNodeInfoSnapshot(snapshot)) + + mapFunction, reduceFunction := priorities.NewSelectorSpreadPriority( + fakelisters.ServiceLister(test.services), + fakelisters.ControllerLister(test.rcs), + fakelisters.ReplicaSetLister(test.rss), + fakelisters.StatefulSetLister(test.sss), + ) + metaDataProducer := priorities.NewPriorityMetadataFactory( + fakelisters.ServiceLister(test.services), + fakelisters.ControllerLister(test.rcs), + fakelisters.ReplicaSetLister(test.rss), + fakelisters.StatefulSetLister(test.sss)) + metaData := metaDataProducer(test.pod, nodes, snapshot) + + plugin := &DefaultPodTopologySpread{ + handle: fh, + calculateSpreadPriorityMap: mapFunction, + calculateSpreadPriorityReduce: reduceFunction, + } + + state := framework.NewCycleState() + state.Write(migration.PrioritiesStateKey, &migration.PrioritiesStateData{Reference: metaData}) + var gotList framework.NodeScoreList + for _, n := range nodes { + nodeName := n.ObjectMeta.Name + score, status := plugin.Score(context.Background(), state, test.pod, nodeName) + if !status.IsSuccess() { + t.Errorf("unexpected error: %v", status) + } + gotList = append(gotList, framework.NodeScore{Name: nodeName, Score: score}) + } + + status := plugin.ScoreExtensions().NormalizeScore(context.Background(), state, test.pod, gotList) + if !status.IsSuccess() { + 
t.Errorf("unexpected error: %v", status) + } + + sortNodeScoreList(test.expectedList) + sortNodeScoreList(gotList) + if !reflect.DeepEqual(test.expectedList, gotList) { + t.Errorf("expected:\n\t%+v,\ngot:\n\t%+v", test.expectedList, gotList) + } + }) + } +} + +func makeLabeledNodeList(nodeMap map[string]map[string]string) []*v1.Node { + nodes := make([]*v1.Node, 0, len(nodeMap)) + for nodeName, labels := range nodeMap { + nodes = append(nodes, &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName, Labels: labels}}) + } + return nodes +} + +func makeNodeList(nodeNames []string) []*v1.Node { + nodes := make([]*v1.Node, 0, len(nodeNames)) + for _, nodeName := range nodeNames { + nodes = append(nodes, &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}}) + } + return nodes +} + +func sortNodeScoreList(out framework.NodeScoreList) { + sort.Slice(out, func(i, j int) bool { + if out[i].Score == out[j].Score { + return out[i].Name < out[j].Name + } + return out[i].Score < out[j].Score + }) +} diff --git a/test/integration/scheduler/scheduler_test.go b/test/integration/scheduler/scheduler_test.go index 343f60ab2b3..0a23ae3aa8b 100644 --- a/test/integration/scheduler/scheduler_test.go +++ b/test/integration/scheduler/scheduler_test.go @@ -132,7 +132,6 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) { }`, expectedPrioritizers: sets.NewString( "InterPodAffinityPriority", - "SelectorSpreadPriority", ), expectedPlugins: map[string][]kubeschedulerconfig.Plugin{ "FilterPlugin": { @@ -157,6 +156,7 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) { {Name: "NodeResourcesLeastAllocated", Weight: 1}, {Name: "NodeAffinity", Weight: 1}, {Name: "NodePreferAvoidPods", Weight: 10000}, + {Name: "DefaultPodTopologySpread", Weight: 1}, {Name: "TaintToleration", Weight: 1}, }, }, @@ -210,7 +210,6 @@ kind: Policy `, expectedPrioritizers: sets.NewString( "InterPodAffinityPriority", - "SelectorSpreadPriority", ), expectedPlugins: map[string][]kubeschedulerconfig.Plugin{ 
"FilterPlugin": { @@ -235,6 +234,7 @@ kind: Policy {Name: "NodeResourcesLeastAllocated", Weight: 1}, {Name: "NodeAffinity", Weight: 1}, {Name: "NodePreferAvoidPods", Weight: 10000}, + {Name: "DefaultPodTopologySpread", Weight: 1}, {Name: "TaintToleration", Weight: 1}, }, },