Merge pull request #117720 from kerthcet/feat/remove-selector-spread

Remove deprecated selectorSpread
Kubernetes Prow Robot authored 2023-08-29 00:25:22 -07:00, committed by GitHub
commit cd91351dff
9 changed files with 4 additions and 1201 deletions
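
For context: the SelectorSpread score plugin had been deprecated because PodTopologySpread with system default constraints (DefaultingType: SystemDefaulting, the same mode the removed comparison benchmark further down constructs its plugin with) provides equivalent spreading. The sketch below shows roughly what those cluster-level defaults look like; the MaxSkew values and label keys are the commonly documented defaults and are assumptions here, not values read from this commit.

package example

import v1 "k8s.io/api/core/v1"

// Rough sketch of the cluster-level defaults PodTopologySpread applies when
// DefaultingType is SystemDefaulting. MaxSkew values and label keys are the
// commonly documented defaults, assumed here for illustration; verify them
// against the podtopologyspread plugin in your tree.
var assumedSystemDefaults = []v1.TopologySpreadConstraint{
    {MaxSkew: 3, TopologyKey: v1.LabelHostname, WhenUnsatisfiable: v1.ScheduleAnyway},
    {MaxSkew: 5, TopologyKey: v1.LabelTopologyZone, WhenUnsatisfiable: v1.ScheduleAnyway},
}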

View File

@@ -142,7 +142,7 @@ type invalidPlugins struct {
var invalidPluginsByVersion = []invalidPlugins{
{
schemeGroupVersion: v1.SchemeGroupVersion.String(),
plugins: []string{"SelectorSpread"},
plugins: []string{},
},
}
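
With the v1 entry now empty, the table above remains as the place to record future per-version plugin removals. Below is a hedged sketch of how such a table is typically consumed during config validation; the real helper in the validation package has a different shape, so the names here are illustrative only.

package example

import "fmt"

// invalidPlugins mirrors the struct shown in the hunk above: plugin names that
// are no longer accepted for a given scheduler config API version.
type invalidPlugins struct {
    schemeGroupVersion string
    plugins            []string
}

// rejectRemovedPlugin is an illustrative stand-in for the validation walk:
// if a profile enables a plugin listed for its API version, reject it.
func rejectRemovedPlugin(apiVersion, pluginName string, table []invalidPlugins) error {
    for _, entry := range table {
        if entry.schemeGroupVersion != apiVersion {
            continue
        }
        for _, removed := range entry.plugins {
            if removed == pluginName {
                return fmt.Errorf("plugin %q is removed in API version %s", pluginName, apiVersion)
            }
        }
    }
    return nil
}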

View File

@@ -195,9 +195,6 @@ func TestValidateKubeSchedulerConfigurationV1(t *testing.T) {
validPlugins := validConfig.DeepCopy()
validPlugins.Profiles[0].Plugins.Score.Enabled = append(validPlugins.Profiles[0].Plugins.Score.Enabled, config.Plugin{Name: "PodTopologySpread", Weight: 2})
invalidPlugins := validConfig.DeepCopy()
invalidPlugins.Profiles[0].Plugins.Score.Enabled = append(invalidPlugins.Profiles[0].Plugins.Score.Enabled, config.Plugin{Name: "SelectorSpread"})
scenarios := map[string]struct {
config *config.KubeSchedulerConfiguration
wantErrs field.ErrorList
@@ -394,15 +391,6 @@ func TestValidateKubeSchedulerConfigurationV1(t *testing.T) {
},
},
},
"invalid-plugins": {
config: invalidPlugins,
wantErrs: field.ErrorList{
&field.Error{
Type: field.ErrorTypeInvalid,
Field: "profiles[0].plugins.score.enabled[0]",
},
},
},
"valid-plugins": {
config: validPlugins,
},

View File

@@ -36,7 +36,6 @@ const (
GCEPDLimits = "GCEPDLimits"
PodTopologySpread = "PodTopologySpread"
SchedulingGates = "SchedulingGates"
SelectorSpread = "SelectorSpread"
TaintToleration = "TaintToleration"
VolumeBinding = "VolumeBinding"
VolumeRestrictions = "VolumeRestrictions"

View File

@@ -1442,83 +1442,3 @@ func BenchmarkTestPodTopologySpreadScore(b *testing.B) {
})
}
}
// The following test makes it possible to compare PodTopologySpread.Score with
// SelectorSpread.Score by using a similar rule.
// See pkg/scheduler/framework/plugins/selectorspread/selector_spread_perf_test.go
// for the equivalent test.
var (
tests = []struct {
name string
existingPodsNum int
allNodesNum int
}{
{
name: "100nodes",
existingPodsNum: 1000,
allNodesNum: 100,
},
{
name: "1000nodes",
existingPodsNum: 10000,
allNodesNum: 1000,
},
{
name: "5000nodes",
existingPodsNum: 50000,
allNodesNum: 5000,
},
}
)
func BenchmarkTestDefaultEvenPodsSpreadPriority(b *testing.B) {
for _, tt := range tests {
b.Run(tt.name, func(b *testing.B) {
pod := st.MakePod().Name("p").Label("foo", "").Obj()
existingPods, allNodes, filteredNodes := st.MakeNodesAndPodsForEvenPodsSpread(pod.Labels, tt.existingPodsNum, tt.allNodesNum, tt.allNodesNum)
state := framework.NewCycleState()
snapshot := cache.NewSnapshot(existingPods, allNodes)
client := fake.NewSimpleClientset(
&v1.Service{Spec: v1.ServiceSpec{Selector: map[string]string{"foo": ""}}},
)
_, ctx := ktesting.NewTestContext(b)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
informerFactory := informers.NewSharedInformerFactory(client, 0)
f, err := frameworkruntime.NewFramework(ctx, nil, nil,
frameworkruntime.WithSnapshotSharedLister(snapshot),
frameworkruntime.WithInformerFactory(informerFactory))
if err != nil {
b.Fatalf("Failed creating framework runtime: %v", err)
}
pl, err := New(&config.PodTopologySpreadArgs{DefaultingType: config.SystemDefaulting}, f, feature.Features{})
if err != nil {
b.Fatalf("Failed creating plugin: %v", err)
}
p := pl.(*PodTopologySpread)
informerFactory.Start(ctx.Done())
informerFactory.WaitForCacheSync(ctx.Done())
b.ResetTimer()
for i := 0; i < b.N; i++ {
status := p.PreScore(ctx, state, pod, filteredNodes)
if !status.IsSuccess() {
b.Fatalf("unexpected error: %v", status)
}
gotList := make(framework.NodeScoreList, len(filteredNodes))
scoreNode := func(i int) {
n := filteredNodes[i]
score, _ := p.Score(ctx, state, pod, n.Name)
gotList[i] = framework.NodeScore{Name: n.Name, Score: score}
}
p.parallelizer.Until(ctx, len(filteredNodes), scoreNode, "")
status = p.NormalizeScore(ctx, state, pod, gotList)
if !status.IsSuccess() {
b.Fatal(status)
}
}
})
}
}

View File

@@ -34,7 +34,6 @@ import (
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/queuesort"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/schedulinggates"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/selectorspread"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions"
@@ -61,7 +60,6 @@ func NewInTreeRegistry() runtime.Registry {
registry := runtime.Registry{
dynamicresources.Name: runtime.FactoryAdapter(fts, dynamicresources.New),
selectorspread.Name: selectorspread.New,
imagelocality.Name: imagelocality.New,
tainttoleration.Name: tainttoleration.New,
nodename.Name: nodename.New,

View File

@@ -1,234 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package selectorspread
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
appslisters "k8s.io/client-go/listers/apps/v1"
corelisters "k8s.io/client-go/listers/core/v1"
utilnode "k8s.io/component-helpers/node/topology"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
)
// SelectorSpread is a plugin that calculates selector spread priority.
type SelectorSpread struct {
sharedLister framework.SharedLister
services corelisters.ServiceLister
replicationControllers corelisters.ReplicationControllerLister
replicaSets appslisters.ReplicaSetLister
statefulSets appslisters.StatefulSetLister
}
var _ framework.PreScorePlugin = &SelectorSpread{}
var _ framework.ScorePlugin = &SelectorSpread{}
const (
// Name is the name of the plugin used in the plugin registry and configurations.
Name = names.SelectorSpread
// preScoreStateKey is the key in CycleState to SelectorSpread pre-computed data for Scoring.
preScoreStateKey = "PreScore" + Name
// When zone information is present, give 2/3 of the weighting to zone spreading, 1/3 to node spreading
// TODO: Any way to justify this weighting?
zoneWeighting float64 = 2.0 / 3.0
)
// Name returns name of the plugin. It is used in logs, etc.
func (pl *SelectorSpread) Name() string {
return Name
}
// preScoreState computed at PreScore and used at Score.
type preScoreState struct {
selector labels.Selector
}
// Clone implements the mandatory Clone interface. We don't really copy the data since
// there is no need for that.
func (s *preScoreState) Clone() framework.StateData {
return s
}
// skipSelectorSpread returns true if the pod's TopologySpreadConstraints are specified.
// Note that this doesn't take into account default constraints defined for
// the PodTopologySpread plugin.
func skipSelectorSpread(pod *v1.Pod) bool {
return len(pod.Spec.TopologySpreadConstraints) != 0
}
// Score invoked at the Score extension point.
// The "score" returned in this function is the matching number of pods on the `nodeName`,
// it is normalized later.
func (pl *SelectorSpread) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) {
if skipSelectorSpread(pod) {
return 0, nil
}
c, err := state.Read(preScoreStateKey)
if err != nil {
return 0, framework.AsStatus(fmt.Errorf("reading %q from cycleState: %w", preScoreStateKey, err))
}
s, ok := c.(*preScoreState)
if !ok {
return 0, framework.AsStatus(fmt.Errorf("cannot convert saved state to selectorspread.preScoreState"))
}
nodeInfo, err := pl.sharedLister.NodeInfos().Get(nodeName)
if err != nil {
return 0, framework.AsStatus(fmt.Errorf("getting node %q from Snapshot: %w", nodeName, err))
}
count := countMatchingPods(pod.Namespace, s.selector, nodeInfo)
return int64(count), nil
}
// NormalizeScore invoked after scoring all nodes.
// For this plugin, it calculates the score of each node
// based on the number of existing matching pods on the node;
// where zone information is present on the nodes, it favors nodes
// in zones with fewer existing matching pods.
func (pl *SelectorSpread) NormalizeScore(ctx context.Context, state *framework.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status {
if skipSelectorSpread(pod) {
return nil
}
countsByZone := make(map[string]int64, 10)
maxCountByZone := int64(0)
maxCountByNodeName := int64(0)
for i := range scores {
if scores[i].Score > maxCountByNodeName {
maxCountByNodeName = scores[i].Score
}
nodeInfo, err := pl.sharedLister.NodeInfos().Get(scores[i].Name)
if err != nil {
return framework.AsStatus(fmt.Errorf("getting node %q from Snapshot: %w", scores[i].Name, err))
}
zoneID := utilnode.GetZoneKey(nodeInfo.Node())
if zoneID == "" {
continue
}
countsByZone[zoneID] += scores[i].Score
}
for zoneID := range countsByZone {
if countsByZone[zoneID] > maxCountByZone {
maxCountByZone = countsByZone[zoneID]
}
}
haveZones := len(countsByZone) != 0
maxCountByNodeNameFloat64 := float64(maxCountByNodeName)
maxCountByZoneFloat64 := float64(maxCountByZone)
MaxNodeScoreFloat64 := float64(framework.MaxNodeScore)
for i := range scores {
// initializing to the default/max node score of maxPriority
fScore := MaxNodeScoreFloat64
if maxCountByNodeName > 0 {
fScore = MaxNodeScoreFloat64 * (float64(maxCountByNodeName-scores[i].Score) / maxCountByNodeNameFloat64)
}
// If there is zone information present, incorporate it
if haveZones {
nodeInfo, err := pl.sharedLister.NodeInfos().Get(scores[i].Name)
if err != nil {
return framework.AsStatus(fmt.Errorf("getting node %q from Snapshot: %w", scores[i].Name, err))
}
zoneID := utilnode.GetZoneKey(nodeInfo.Node())
if zoneID != "" {
zoneScore := MaxNodeScoreFloat64
if maxCountByZone > 0 {
zoneScore = MaxNodeScoreFloat64 * (float64(maxCountByZone-countsByZone[zoneID]) / maxCountByZoneFloat64)
}
fScore = (fScore * (1.0 - zoneWeighting)) + (zoneWeighting * zoneScore)
}
}
scores[i].Score = int64(fScore)
}
return nil
}
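
For readers skimming the removed implementation: the loop above blends a per-node score and a per-zone score using zoneWeighting = 2/3. A small worked example with assumed counts (the numbers are made up, not taken from any test in this PR):

package main

import "fmt"

// Worked example of the blend in NormalizeScore above, with assumed counts:
// the node has 1 matching pod, the busiest node has 4, the node's zone has 3
// matching pods, and the busiest zone has 6. MaxNodeScore is 100.
func main() {
    const maxNodeScore = 100.0
    const zoneWeighting = 2.0 / 3.0

    nodeScore := maxNodeScore * (4.0 - 1.0) / 4.0 // 75: fewer pods than the busiest node
    zoneScore := maxNodeScore * (6.0 - 3.0) / 6.0 // 50: fewer pods than the busiest zone
    final := nodeScore*(1.0-zoneWeighting) + zoneWeighting*zoneScore
    fmt.Printf("blended score: %.0f\n", final) // 75*(1/3) + 50*(2/3) = 58 (rounded)
}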
// ScoreExtensions of the Score plugin.
func (pl *SelectorSpread) ScoreExtensions() framework.ScoreExtensions {
return pl
}
// PreScore builds and writes cycle state used by Score and NormalizeScore.
func (pl *SelectorSpread) PreScore(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodes []*v1.Node) *framework.Status {
if skipSelectorSpread(pod) {
return nil
}
selector := helper.DefaultSelector(
pod,
pl.services,
pl.replicationControllers,
pl.replicaSets,
pl.statefulSets,
)
state := &preScoreState{
selector: selector,
}
cycleState.Write(preScoreStateKey, state)
return nil
}
// New initializes a new plugin and returns it.
func New(_ runtime.Object, handle framework.Handle) (framework.Plugin, error) {
sharedLister := handle.SnapshotSharedLister()
if sharedLister == nil {
return nil, fmt.Errorf("SnapshotSharedLister is nil")
}
sharedInformerFactory := handle.SharedInformerFactory()
if sharedInformerFactory == nil {
return nil, fmt.Errorf("SharedInformerFactory is nil")
}
return &SelectorSpread{
sharedLister: sharedLister,
services: sharedInformerFactory.Core().V1().Services().Lister(),
replicationControllers: sharedInformerFactory.Core().V1().ReplicationControllers().Lister(),
replicaSets: sharedInformerFactory.Apps().V1().ReplicaSets().Lister(),
statefulSets: sharedInformerFactory.Apps().V1().StatefulSets().Lister(),
}, nil
}
// countMatchingPods counts pods based on namespace and matching all selectors
func countMatchingPods(namespace string, selector labels.Selector, nodeInfo *framework.NodeInfo) int {
if len(nodeInfo.Pods) == 0 || selector.Empty() {
return 0
}
count := 0
for _, p := range nodeInfo.Pods {
// Ignore pods being deleted for spreading purposes
// Similar to how it is done for SelectorSpreadPriority
if namespace == p.Pod.Namespace && p.Pod.DeletionTimestamp == nil {
if selector.Matches(labels.Set(p.Pod.Labels)) {
count++
}
}
}
return count
}
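
The file above is deleted outright. An operator who still wants this exact scoring behavior could, as one option (a hedged sketch, not something this PR provides), carry the plugin out of tree and register it under a custom name through an out-of-tree registry. The factory signature below matches the deleted New above as of this commit, but it has changed across releases; "MySelectorSpread" and the helper name are made up.

package example

import (
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/kubernetes/pkg/scheduler/framework"
    frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
)

// outOfTreeRegistry sketches registering a SelectorSpread-like plugin under a
// custom name instead of relying on the removed in-tree registration.
func outOfTreeRegistry(newSpreadPlugin func(runtime.Object, framework.Handle) (framework.Plugin, error)) frameworkruntime.Registry {
    return frameworkruntime.Registry{
        "MySelectorSpread": newSpreadPlugin,
    }
}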

View File

@@ -1,103 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package selectorspread
import (
"context"
"testing"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/parallelize"
"k8s.io/kubernetes/pkg/scheduler/framework/runtime"
"k8s.io/kubernetes/pkg/scheduler/internal/cache"
st "k8s.io/kubernetes/pkg/scheduler/testing"
)
var (
tests = []struct {
name string
existingPodsNum int
allNodesNum int
}{
{
name: "100nodes",
existingPodsNum: 1000,
allNodesNum: 100,
},
{
name: "1000nodes",
existingPodsNum: 10000,
allNodesNum: 1000,
},
}
)
func BenchmarkTestSelectorSpreadPriority(b *testing.B) {
for _, tt := range tests {
b.Run(tt.name, func(b *testing.B) {
pod := st.MakePod().Name("p").Label("foo", "").Obj()
existingPods, allNodes, filteredNodes := st.MakeNodesAndPodsForEvenPodsSpread(pod.Labels, tt.existingPodsNum, tt.allNodesNum, tt.allNodesNum)
snapshot := cache.NewSnapshot(existingPods, allNodes)
client := fake.NewSimpleClientset(
&v1.Service{Spec: v1.ServiceSpec{Selector: map[string]string{"foo": ""}}},
)
_, ctx := ktesting.NewTestContext(b)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
informerFactory := informers.NewSharedInformerFactory(client, 0)
_ = informerFactory.Core().V1().Services().Lister()
informerFactory.Start(ctx.Done())
caches := informerFactory.WaitForCacheSync(ctx.Done())
for _, synced := range caches {
if !synced {
b.Errorf("error waiting for informer cache sync")
}
}
fh, _ := runtime.NewFramework(ctx, nil, nil, runtime.WithSnapshotSharedLister(snapshot), runtime.WithInformerFactory(informerFactory))
pl, err := New(nil, fh)
if err != nil {
b.Fatal(err)
}
plugin := pl.(*SelectorSpread)
b.ResetTimer()
for i := 0; i < b.N; i++ {
state := framework.NewCycleState()
status := plugin.PreScore(ctx, state, pod, allNodes)
if !status.IsSuccess() {
b.Fatalf("unexpected error: %v", status)
}
gotList := make(framework.NodeScoreList, len(filteredNodes))
scoreNode := func(i int) {
n := filteredNodes[i]
score, _ := plugin.Score(ctx, state, pod, n.Name)
gotList[i] = framework.NodeScore{Name: n.Name, Score: score}
}
parallelizer := parallelize.NewParallelizer(parallelize.DefaultParallelism)
parallelizer.Until(ctx, len(filteredNodes), scoreNode, "")
status = plugin.NormalizeScore(ctx, state, pod, gotList)
if !status.IsSuccess() {
b.Fatal(status)
}
}
})
}
}

View File

@@ -1,762 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package selectorspread
import (
"context"
"fmt"
"reflect"
"sort"
"testing"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/informers"
clientsetfake "k8s.io/client-go/kubernetes/fake"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/scheduler/framework"
frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
"k8s.io/kubernetes/pkg/scheduler/internal/cache"
st "k8s.io/kubernetes/pkg/scheduler/testing"
"k8s.io/utils/pointer"
)
var (
rcKind = v1.SchemeGroupVersion.WithKind("ReplicationController")
rsKind = apps.SchemeGroupVersion.WithKind("ReplicaSet")
ssKind = apps.SchemeGroupVersion.WithKind("StatefulSet")
)
func controllerRef(name string, gvk schema.GroupVersionKind) []metav1.OwnerReference {
return []metav1.OwnerReference{
{
APIVersion: gvk.GroupVersion().String(),
Kind: gvk.Kind,
Name: name,
Controller: pointer.Bool(true),
},
}
}
func TestSelectorSpreadScore(t *testing.T) {
labels1 := map[string]string{
"foo": "bar",
"baz": "blah",
}
labels2 := map[string]string{
"bar": "foo",
"baz": "blah",
}
zone1Spec := v1.PodSpec{
NodeName: "node1",
}
zone2Spec := v1.PodSpec{
NodeName: "node2",
}
tests := []struct {
pod *v1.Pod
pods []*v1.Pod
nodes []string
rcs []*v1.ReplicationController
rss []*apps.ReplicaSet
services []*v1.Service
sss []*apps.StatefulSet
expectedList framework.NodeScoreList
name string
}{
{
pod: new(v1.Pod),
nodes: []string{"node1", "node2"},
expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: framework.MaxNodeScore}},
name: "nothing scheduled",
},
{
pod: st.MakePod().Labels(labels1).Obj(),
pods: []*v1.Pod{st.MakePod().Node("node1").Obj()},
nodes: []string{"node1", "node2"},
expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: framework.MaxNodeScore}},
name: "no services",
},
{
pod: st.MakePod().Labels(labels1).Obj(),
pods: []*v1.Pod{st.MakePod().Labels(labels2).Node("node1").Obj()},
nodes: []string{"node1", "node2"},
services: []*v1.Service{{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "s1"}, Spec: v1.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: framework.MaxNodeScore}},
name: "different services",
},
{
pod: st.MakePod().Labels(labels1).Obj(),
pods: []*v1.Pod{
st.MakePod().Labels(labels2).Node("node1").Obj(),
st.MakePod().Labels(labels1).Node("node2").Obj(),
},
nodes: []string{"node1", "node2"},
services: []*v1.Service{{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "s1"}, Spec: v1.ServiceSpec{Selector: labels1}}},
expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: 0}},
name: "two pods, one service pod",
},
{
pod: st.MakePod().Labels(labels1).Obj(),
pods: []*v1.Pod{
st.MakePod().Labels(labels2).Node("node1").Obj(),
st.MakePod().Labels(labels1).Node("node1").Namespace(metav1.NamespaceDefault).Obj(),
st.MakePod().Labels(labels1).Node("node1").Namespace("ns1").Obj(),
st.MakePod().Labels(labels1).Node("node2").Obj(),
st.MakePod().Labels(labels2).Node("node2").Obj(),
},
nodes: []string{"node1", "node2"},
services: []*v1.Service{{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "s1"}, Spec: v1.ServiceSpec{Selector: labels1}}},
expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: 0}},
name: "five pods, one service pod in no namespace",
},
{
pod: st.MakePod().Labels(labels1).Namespace(metav1.NamespaceDefault).Obj(),
pods: []*v1.Pod{
{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: metav1.NamespaceDefault}},
{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
},
nodes: []string{"node1", "node2"},
services: []*v1.Service{{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "s1"}, Spec: v1.ServiceSpec{Selector: labels1}}},
expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: 0}},
name: "four pods, one service pod in default namespace",
},
{
pod: st.MakePod().Labels(labels1).Namespace("ns1").Obj(),
pods: []*v1.Pod{
st.MakePod().Labels(labels2).Node("node1").Obj(),
st.MakePod().Labels(labels1).Node("node1").Namespace(metav1.NamespaceDefault).Obj(),
st.MakePod().Labels(labels1).Node("node1").Namespace("ns2").Obj(),
st.MakePod().Labels(labels1).Node("node2").Namespace("ns1").Obj(),
st.MakePod().Labels(labels2).Node("node2").Obj(),
},
nodes: []string{"node1", "node2"},
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: metav1.ObjectMeta{Namespace: "ns1"}}},
expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: 0}},
name: "five pods, one service pod in specific namespace",
},
{
pod: st.MakePod().Labels(labels1).Obj(),
pods: []*v1.Pod{
st.MakePod().Labels(labels2).Node("node1").Obj(),
st.MakePod().Labels(labels1).Node("node1").Obj(),
st.MakePod().Labels(labels1).Node("node2").Obj(),
},
nodes: []string{"node1", "node2"},
services: []*v1.Service{{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "s1"}, Spec: v1.ServiceSpec{Selector: labels1}}},
expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 0}},
name: "three pods, two service pods on different nodes",
},
{
pod: st.MakePod().Labels(labels1).Obj(),
pods: []*v1.Pod{
st.MakePod().Labels(labels2).Node("node1").Obj(),
st.MakePod().Labels(labels1).Node("node1").Obj(),
st.MakePod().Labels(labels1).Node("node2").Obj(),
st.MakePod().Labels(labels1).Node("node2").Obj(),
},
nodes: []string{"node1", "node2"},
services: []*v1.Service{{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "s1"}, Spec: v1.ServiceSpec{Selector: labels1}}},
expectedList: []framework.NodeScore{{Name: "node1", Score: 50}, {Name: "node2", Score: 0}},
name: "four pods, three service pods",
},
{
pod: st.MakePod().Labels(labels1).Obj(),
pods: []*v1.Pod{
st.MakePod().Labels(labels2).Node("node1").Obj(),
st.MakePod().Labels(labels1).Node("node1").Obj(),
st.MakePod().Labels(labels1).Node("node2").Obj(),
},
nodes: []string{"node1", "node2"},
services: []*v1.Service{{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "s1"}, Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 50}},
name: "service with partial pod label matches",
},
{
pod: st.MakePod().Namespace(metav1.NamespaceDefault).Labels(labels1).OwnerReference("rc1", rcKind).Obj(),
pods: []*v1.Pod{
st.MakePod().Node("node1").Namespace(metav1.NamespaceDefault).Labels(labels2).Obj(),
st.MakePod().Node("node1").Namespace(metav1.NamespaceDefault).Labels(labels1).Obj(),
st.MakePod().Node("node2").Namespace(metav1.NamespaceDefault).Labels(labels1).Obj(),
},
nodes: []string{"node1", "node2"},
rcs: []*v1.ReplicationController{
{ObjectMeta: metav1.ObjectMeta{Name: "rc1", Namespace: metav1.NamespaceDefault}, Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "rc2", Namespace: metav1.NamespaceDefault}, Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"bar": "foo"}}},
},
services: []*v1.Service{{ObjectMeta: metav1.ObjectMeta{Name: "s1", Namespace: metav1.NamespaceDefault}, Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
// "baz=blah" matches both labels1 and labels2, and "foo=bar" matches only labels1. This means that we assume we want to
// spread pod2 and pod3, and not pod1.
expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 0}},
name: "service with partial pod label matches with service and replication controller",
},
{
pod: st.MakePod().Namespace(metav1.NamespaceDefault).Labels(labels1).OwnerReference("rs1", rsKind).Obj(),
pods: []*v1.Pod{
st.MakePod().Node("node1").Namespace(metav1.NamespaceDefault).Labels(labels2).Obj(),
st.MakePod().Node("node1").Namespace(metav1.NamespaceDefault).Labels(labels1).Obj(),
st.MakePod().Node("node2").Namespace(metav1.NamespaceDefault).Labels(labels1).Obj(),
},
nodes: []string{"node1", "node2"},
services: []*v1.Service{{ObjectMeta: metav1.ObjectMeta{Name: "s1", Namespace: metav1.NamespaceDefault}, Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
rss: []*apps.ReplicaSet{
{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "rs1"}, Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}},
{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "rs2"}, Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"bar": "foo"}}}},
},
// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 0}},
name: "service with partial pod label matches with service and replica set",
},
{
pod: st.MakePod().Namespace(metav1.NamespaceDefault).Labels(labels1).OwnerReference("ss1", ssKind).Obj(),
pods: []*v1.Pod{
st.MakePod().Node("node1").Namespace(metav1.NamespaceDefault).Labels(labels2).Obj(),
st.MakePod().Node("node1").Namespace(metav1.NamespaceDefault).Labels(labels1).Obj(),
st.MakePod().Node("node2").Namespace(metav1.NamespaceDefault).Labels(labels1).Obj(),
},
nodes: []string{"node1", "node2"},
services: []*v1.Service{{ObjectMeta: metav1.ObjectMeta{Name: "s1", Namespace: metav1.NamespaceDefault}, Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
sss: []*apps.StatefulSet{
{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "ss1"}, Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}},
{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "ss2"}, Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"bar": "foo"}}}},
},
expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 0}},
name: "service with partial pod label matches with service and statefulset",
},
{
pod: st.MakePod().Namespace(metav1.NamespaceDefault).Labels(map[string]string{"foo": "bar", "bar": "foo"}).OwnerReference("rc3", rcKind).Obj(),
pods: []*v1.Pod{
st.MakePod().Namespace(metav1.NamespaceDefault).Node("node1").Labels(labels2).OwnerReference("rc2", rcKind).Obj(),
st.MakePod().Namespace(metav1.NamespaceDefault).Node("node1").Labels(labels1).OwnerReference("rc1", rcKind).Obj(),
st.MakePod().Namespace(metav1.NamespaceDefault).Node("node2").Labels(labels1).OwnerReference("rc1", rcKind).Obj(),
},
nodes: []string{"node1", "node2"},
rcs: []*v1.ReplicationController{{
ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "rc3"},
Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
services: []*v1.Service{{ObjectMeta: metav1.ObjectMeta{Name: "s1", Namespace: metav1.NamespaceDefault}, Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
// Taken together Service and Replication Controller should match no pods.
expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: framework.MaxNodeScore}},
name: "disjoined service and replication controller matches no pods",
},
{
pod: st.MakePod().Namespace(metav1.NamespaceDefault).Labels(map[string]string{"foo": "bar", "bar": "foo"}).OwnerReference("rs3", rsKind).Obj(),
pods: []*v1.Pod{
st.MakePod().Namespace(metav1.NamespaceDefault).Node("node1").Labels(labels2).OwnerReference("rs2", rsKind).Obj(),
st.MakePod().Namespace(metav1.NamespaceDefault).Node("node1").Labels(labels1).OwnerReference("rs1", rsKind).Obj(),
st.MakePod().Namespace(metav1.NamespaceDefault).Node("node2").Labels(labels1).OwnerReference("rs1", rsKind).Obj(),
},
nodes: []string{"node1", "node2"},
services: []*v1.Service{{ObjectMeta: metav1.ObjectMeta{Name: "s1", Namespace: metav1.NamespaceDefault}, Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
rss: []*apps.ReplicaSet{
{ObjectMeta: metav1.ObjectMeta{Name: "rs3", Namespace: metav1.NamespaceDefault}, Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: framework.MaxNodeScore}},
name: "disjoined service and replica set matches no pods",
},
{
pod: st.MakePod().Namespace(metav1.NamespaceDefault).Labels(map[string]string{"foo": "bar", "bar": "foo"}).OwnerReference("ss3", ssKind).Obj(),
pods: []*v1.Pod{
st.MakePod().Namespace(metav1.NamespaceDefault).Node("node1").Labels(labels2).OwnerReference("ss2", ssKind).Obj(),
st.MakePod().Namespace(metav1.NamespaceDefault).Node("node1").Labels(labels1).OwnerReference("ss1", ssKind).Obj(),
st.MakePod().Namespace(metav1.NamespaceDefault).Node("node2").Labels(labels1).OwnerReference("ss1", ssKind).Obj(),
},
nodes: []string{"node1", "node2"},
services: []*v1.Service{{ObjectMeta: metav1.ObjectMeta{Name: "s1", Namespace: metav1.NamespaceDefault}, Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
sss: []*apps.StatefulSet{
{ObjectMeta: metav1.ObjectMeta{Name: "ss3", Namespace: metav1.NamespaceDefault}, Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: framework.MaxNodeScore}},
name: "disjoined service and stateful set matches no pods",
},
{
pod: st.MakePod().Namespace(metav1.NamespaceDefault).Labels(labels1).OwnerReference("rc1", rcKind).Obj(),
pods: []*v1.Pod{
st.MakePod().Namespace(metav1.NamespaceDefault).Node("node1").Labels(labels2).OwnerReference("rc2", rcKind).Obj(),
st.MakePod().Namespace(metav1.NamespaceDefault).Node("node1").Labels(labels1).OwnerReference("rc1", rcKind).Obj(),
st.MakePod().Namespace(metav1.NamespaceDefault).Node("node2").Labels(labels1).OwnerReference("rc1", rcKind).Obj(),
},
nodes: []string{"node1", "node2"},
rcs: []*v1.ReplicationController{{ObjectMeta: metav1.ObjectMeta{Name: "rc1", Namespace: metav1.NamespaceDefault}, Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
// Both Nodes have one pod from the given RC, hence both get 0 score.
expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 0}},
name: "Replication controller with partial pod label matches",
},
{
pod: st.MakePod().Namespace(metav1.NamespaceDefault).Labels(labels1).OwnerReference("rs1", rsKind).Obj(),
pods: []*v1.Pod{
st.MakePod().Namespace(metav1.NamespaceDefault).Node("node1").Labels(labels2).OwnerReference("rs2", rsKind).Obj(),
st.MakePod().Namespace(metav1.NamespaceDefault).Node("node1").Labels(labels1).OwnerReference("rs1", rsKind).Obj(),
st.MakePod().Namespace(metav1.NamespaceDefault).Node("node2").Labels(labels1).OwnerReference("rs1", rsKind).Obj(),
},
nodes: []string{"node1", "node2"},
rss: []*apps.ReplicaSet{{ObjectMeta: metav1.ObjectMeta{Name: "rs1", Namespace: metav1.NamespaceDefault}, Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 0}},
name: "Replica set with partial pod label matches",
},
{
pod: st.MakePod().Namespace(metav1.NamespaceDefault).Labels(labels1).OwnerReference("ss1", ssKind).Obj(),
pods: []*v1.Pod{
st.MakePod().Namespace(metav1.NamespaceDefault).Node("node1").Labels(labels2).OwnerReference("ss2", ssKind).Obj(),
st.MakePod().Namespace(metav1.NamespaceDefault).Node("node1").Labels(labels1).OwnerReference("ss1", ssKind).Obj(),
st.MakePod().Namespace(metav1.NamespaceDefault).Node("node2").Labels(labels1).OwnerReference("ss1", ssKind).Obj(),
},
nodes: []string{"node1", "node2"},
sss: []*apps.StatefulSet{{ObjectMeta: metav1.ObjectMeta{Name: "ss1", Namespace: metav1.NamespaceDefault}, Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
// We use StatefulSet, instead of ReplicationController. The result should be exactly as above.
expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 0}},
name: "StatefulSet with partial pod label matches",
},
{
pod: st.MakePod().Namespace(metav1.NamespaceDefault).Labels(labels1).OwnerReference("rc3", rcKind).Obj(),
pods: []*v1.Pod{
st.MakePod().Namespace(metav1.NamespaceDefault).Node("node1").Labels(labels2).OwnerReference("rc2", rcKind).Obj(),
st.MakePod().Namespace(metav1.NamespaceDefault).Node("node1").Labels(labels1).OwnerReference("rc1", rcKind).Obj(),
st.MakePod().Namespace(metav1.NamespaceDefault).Node("node2").Labels(labels1).OwnerReference("rc1", rcKind).Obj(),
},
nodes: []string{"node1", "node2"},
rcs: []*v1.ReplicationController{{ObjectMeta: metav1.ObjectMeta{Name: "rc3", Namespace: metav1.NamespaceDefault}, Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"baz": "blah"}}}},
expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 50}},
name: "Another replication controller with partial pod label matches",
},
{
pod: st.MakePod().Namespace(metav1.NamespaceDefault).Labels(labels1).OwnerReference("rs3", rsKind).Obj(),
pods: []*v1.Pod{
st.MakePod().Namespace(metav1.NamespaceDefault).Node("node1").Labels(labels2).OwnerReference("rs2", rsKind).Obj(),
st.MakePod().Namespace(metav1.NamespaceDefault).Node("node1").Labels(labels1).OwnerReference("rs1", rsKind).Obj(),
st.MakePod().Namespace(metav1.NamespaceDefault).Node("node2").Labels(labels1).OwnerReference("rs1", rsKind).Obj(),
},
nodes: []string{"node1", "node2"},
rss: []*apps.ReplicaSet{{ObjectMeta: metav1.ObjectMeta{Name: "rs3", Namespace: metav1.NamespaceDefault}, Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"baz": "blah"}}}}},
// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 50}},
name: "Another replication set with partial pod label matches",
},
{
pod: st.MakePod().Namespace(metav1.NamespaceDefault).Labels(labels1).OwnerReference("ss3", ssKind).Obj(),
pods: []*v1.Pod{
st.MakePod().Namespace(metav1.NamespaceDefault).Node("node1").Labels(labels2).OwnerReference("ss2", ssKind).Obj(),
st.MakePod().Namespace(metav1.NamespaceDefault).Node("node1").Labels(labels1).OwnerReference("ss1", ssKind).Obj(),
st.MakePod().Namespace(metav1.NamespaceDefault).Node("node2").Labels(labels1).OwnerReference("ss1", ssKind).Obj(),
},
nodes: []string{"node1", "node2"},
sss: []*apps.StatefulSet{{ObjectMeta: metav1.ObjectMeta{Name: "ss3", Namespace: metav1.NamespaceDefault}, Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"baz": "blah"}}}}},
// We use StatefulSet, instead of ReplicationController. The result should be exactly as above.
expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 50}},
name: "Another stateful set with partial pod label matches",
},
{
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: metav1.NamespaceDefault,
Labels: labels1,
OwnerReferences: controllerRef("ss1", ssKind),
},
Spec: v1.PodSpec{
TopologySpreadConstraints: []v1.TopologySpreadConstraint{
{
MaxSkew: 1,
TopologyKey: "foo",
WhenUnsatisfiable: v1.DoNotSchedule,
},
},
},
},
pods: []*v1.Pod{
{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels2, OwnerReferences: controllerRef("ss2", ssKind)}},
{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels1, OwnerReferences: controllerRef("ss1", ssKind)}},
{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels1, OwnerReferences: controllerRef("ss1", ssKind)}},
},
nodes: []string{"node1", "node2"},
sss: []*apps.StatefulSet{{ObjectMeta: metav1.ObjectMeta{Name: "ss1", Namespace: metav1.NamespaceDefault}, Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"baz": "blah"}}}}},
expectedList: []framework.NodeScore{{Name: "node1", Score: 0}, {Name: "node2", Score: 0}},
name: "Another statefulset with TopologySpreadConstraints set in pod",
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
nodes := makeNodeList(test.nodes)
snapshot := cache.NewSnapshot(test.pods, nodes)
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
informerFactory, err := populateAndStartInformers(ctx, test.rcs, test.rss, test.services, test.sss)
if err != nil {
t.Errorf("error creating informerFactory: %+v", err)
}
fh, err := frameworkruntime.NewFramework(ctx, nil, nil, frameworkruntime.WithSnapshotSharedLister(snapshot), frameworkruntime.WithInformerFactory(informerFactory))
if err != nil {
t.Errorf("error creating new framework handle: %+v", err)
}
state := framework.NewCycleState()
pl, err := New(nil, fh)
if err != nil {
t.Fatal(err)
}
plugin := pl.(*SelectorSpread)
status := plugin.PreScore(ctx, state, test.pod, nodes)
if !status.IsSuccess() {
t.Fatalf("unexpected error: %v", status)
}
var gotList framework.NodeScoreList
for _, nodeName := range test.nodes {
score, status := plugin.Score(ctx, state, test.pod, nodeName)
if !status.IsSuccess() {
t.Errorf("unexpected error: %v", status)
}
gotList = append(gotList, framework.NodeScore{Name: nodeName, Score: score})
}
status = plugin.ScoreExtensions().NormalizeScore(ctx, state, test.pod, gotList)
if !status.IsSuccess() {
t.Errorf("unexpected error: %v", status)
}
if !reflect.DeepEqual(test.expectedList, gotList) {
t.Errorf("expected:\n\t%+v,\ngot:\n\t%+v", test.expectedList, gotList)
}
})
}
}
func buildPod(nodeName string, labels map[string]string, ownerRefs []metav1.OwnerReference) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Labels: labels, OwnerReferences: ownerRefs},
Spec: v1.PodSpec{NodeName: nodeName},
}
}
func TestZoneSelectorSpreadPriority(t *testing.T) {
labels1 := map[string]string{
"label1": "l1",
"baz": "blah",
}
labels2 := map[string]string{
"label2": "l2",
"baz": "blah",
}
const nodeMachine1Zone1 = "node1.zone1"
const nodeMachine1Zone2 = "node1.zone2"
const nodeMachine2Zone2 = "node2.zone2"
const nodeMachine1Zone3 = "node1.zone3"
const nodeMachine2Zone3 = "node2.zone3"
const nodeMachine3Zone3 = "node3.zone3"
buildNodeLabels := func(failureDomain string) map[string]string {
labels := map[string]string{
v1.LabelTopologyZone: failureDomain,
}
return labels
}
labeledNodes := map[string]map[string]string{
nodeMachine1Zone1: buildNodeLabels("zone1"),
nodeMachine1Zone2: buildNodeLabels("zone2"),
nodeMachine2Zone2: buildNodeLabels("zone2"),
nodeMachine1Zone3: buildNodeLabels("zone3"),
nodeMachine2Zone3: buildNodeLabels("zone3"),
nodeMachine3Zone3: buildNodeLabels("zone3"),
}
tests := []struct {
pod *v1.Pod
pods []*v1.Pod
rcs []*v1.ReplicationController
rss []*apps.ReplicaSet
services []*v1.Service
sss []*apps.StatefulSet
expectedList framework.NodeScoreList
name string
}{
{
pod: new(v1.Pod),
expectedList: []framework.NodeScore{
{Name: nodeMachine1Zone1, Score: framework.MaxNodeScore},
{Name: nodeMachine1Zone2, Score: framework.MaxNodeScore},
{Name: nodeMachine2Zone2, Score: framework.MaxNodeScore},
{Name: nodeMachine1Zone3, Score: framework.MaxNodeScore},
{Name: nodeMachine2Zone3, Score: framework.MaxNodeScore},
{Name: nodeMachine3Zone3, Score: framework.MaxNodeScore},
},
name: "nothing scheduled",
},
{
pod: buildPod("", labels1, nil),
pods: []*v1.Pod{buildPod(nodeMachine1Zone1, nil, nil)},
expectedList: []framework.NodeScore{
{Name: nodeMachine1Zone1, Score: framework.MaxNodeScore},
{Name: nodeMachine1Zone2, Score: framework.MaxNodeScore},
{Name: nodeMachine2Zone2, Score: framework.MaxNodeScore},
{Name: nodeMachine1Zone3, Score: framework.MaxNodeScore},
{Name: nodeMachine2Zone3, Score: framework.MaxNodeScore},
{Name: nodeMachine3Zone3, Score: framework.MaxNodeScore},
},
name: "no services",
},
{
pod: buildPod("", labels1, nil),
pods: []*v1.Pod{buildPod(nodeMachine1Zone1, labels2, nil)},
services: []*v1.Service{{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "s1"}, Spec: v1.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
expectedList: []framework.NodeScore{
{Name: nodeMachine1Zone1, Score: framework.MaxNodeScore},
{Name: nodeMachine1Zone2, Score: framework.MaxNodeScore},
{Name: nodeMachine2Zone2, Score: framework.MaxNodeScore},
{Name: nodeMachine1Zone3, Score: framework.MaxNodeScore},
{Name: nodeMachine2Zone3, Score: framework.MaxNodeScore},
{Name: nodeMachine3Zone3, Score: framework.MaxNodeScore},
},
name: "different services",
},
{
pod: buildPod("", labels1, nil),
pods: []*v1.Pod{
buildPod(nodeMachine1Zone1, labels2, nil),
buildPod(nodeMachine1Zone2, labels2, nil),
},
services: []*v1.Service{{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "s1"}, Spec: v1.ServiceSpec{Selector: labels1}}},
expectedList: []framework.NodeScore{
{Name: nodeMachine1Zone1, Score: framework.MaxNodeScore},
{Name: nodeMachine1Zone2, Score: framework.MaxNodeScore},
{Name: nodeMachine2Zone2, Score: framework.MaxNodeScore},
{Name: nodeMachine1Zone3, Score: framework.MaxNodeScore},
{Name: nodeMachine2Zone3, Score: framework.MaxNodeScore},
{Name: nodeMachine3Zone3, Score: framework.MaxNodeScore},
},
name: "two pods, 0 matching",
},
{
pod: buildPod("", labels1, nil),
pods: []*v1.Pod{
buildPod(nodeMachine1Zone1, labels2, nil),
buildPod(nodeMachine1Zone2, labels1, nil),
},
services: []*v1.Service{{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "s1"}, Spec: v1.ServiceSpec{Selector: labels1}}},
expectedList: []framework.NodeScore{
{Name: nodeMachine1Zone1, Score: framework.MaxNodeScore},
{Name: nodeMachine1Zone2, Score: 0}, // Already have pod on node
{Name: nodeMachine2Zone2, Score: 33}, // Already have pod in zone
{Name: nodeMachine1Zone3, Score: framework.MaxNodeScore},
{Name: nodeMachine2Zone3, Score: framework.MaxNodeScore},
{Name: nodeMachine3Zone3, Score: framework.MaxNodeScore},
},
name: "two pods, 1 matching (in z2)",
},
{
pod: buildPod("", labels1, nil),
pods: []*v1.Pod{
buildPod(nodeMachine1Zone1, labels2, nil),
buildPod(nodeMachine1Zone2, labels1, nil),
buildPod(nodeMachine2Zone2, labels1, nil),
buildPod(nodeMachine1Zone3, labels2, nil),
buildPod(nodeMachine2Zone3, labels1, nil),
},
services: []*v1.Service{{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "s1"}, Spec: v1.ServiceSpec{Selector: labels1}}},
expectedList: []framework.NodeScore{
{Name: nodeMachine1Zone1, Score: framework.MaxNodeScore},
{Name: nodeMachine1Zone2, Score: 0}, // Pod on node
{Name: nodeMachine2Zone2, Score: 0}, // Pod on node
{Name: nodeMachine1Zone3, Score: 66}, // Pod in zone
{Name: nodeMachine2Zone3, Score: 33}, // Pod on node
{Name: nodeMachine3Zone3, Score: 66}, // Pod in zone
},
name: "five pods, 3 matching (z2=2, z3=1)",
},
{
pod: buildPod("", labels1, nil),
pods: []*v1.Pod{
buildPod(nodeMachine1Zone1, labels1, nil),
buildPod(nodeMachine1Zone2, labels1, nil),
buildPod(nodeMachine2Zone2, labels2, nil),
buildPod(nodeMachine1Zone3, labels1, nil),
},
services: []*v1.Service{{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "s1"}, Spec: v1.ServiceSpec{Selector: labels1}}},
expectedList: []framework.NodeScore{
{Name: nodeMachine1Zone1, Score: 0}, // Pod on node
{Name: nodeMachine1Zone2, Score: 0}, // Pod on node
{Name: nodeMachine2Zone2, Score: 33}, // Pod in zone
{Name: nodeMachine1Zone3, Score: 0}, // Pod on node
{Name: nodeMachine2Zone3, Score: 33}, // Pod in zone
{Name: nodeMachine3Zone3, Score: 33}, // Pod in zone
},
name: "four pods, 3 matching (z1=1, z2=1, z3=1)",
},
{
pod: buildPod("", labels1, nil),
pods: []*v1.Pod{
buildPod(nodeMachine1Zone1, labels1, nil),
buildPod(nodeMachine1Zone2, labels1, nil),
buildPod(nodeMachine2Zone2, labels1, nil),
buildPod(nodeMachine2Zone2, labels2, nil),
buildPod(nodeMachine1Zone3, labels1, nil),
},
services: []*v1.Service{{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "s1"}, Spec: v1.ServiceSpec{Selector: labels1}}},
expectedList: []framework.NodeScore{
{Name: nodeMachine1Zone1, Score: 33}, // Pod on node
{Name: nodeMachine1Zone2, Score: 0}, // Pod on node
{Name: nodeMachine2Zone2, Score: 0}, // Pod in zone
{Name: nodeMachine1Zone3, Score: 33}, // Pod on node
{Name: nodeMachine2Zone3, Score: 66}, // Pod in zone
{Name: nodeMachine3Zone3, Score: 66}, // Pod in zone
},
name: "five pods, 4 matching (z1=1, z2=2, z3=1)",
},
{
pod: buildPod("", labels1, controllerRef("rc1", rcKind)),
pods: []*v1.Pod{
buildPod(nodeMachine1Zone3, labels1, nil),
buildPod(nodeMachine1Zone2, labels1, nil),
buildPod(nodeMachine1Zone3, labels1, nil),
},
rcs: []*v1.ReplicationController{
{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "rc1"}, Spec: v1.ReplicationControllerSpec{Selector: labels1}}},
expectedList: []framework.NodeScore{
// Note that because we put two pods on the same node (nodeMachine1Zone3),
// the values here are questionable for zone2, in particular for nodeMachine1Zone2.
// However they kind of make sense; zone1 is still most-highly favored.
// zone3 is in general least favored, and m1.z3 particularly low priority.
// We would probably prefer to see a bigger gap between putting a second
// pod on m1.z2 and putting a pod on m2.z2, but the ordering is correct.
// This is also consistent with what we have already.
{Name: nodeMachine1Zone1, Score: framework.MaxNodeScore}, // No pods in zone
{Name: nodeMachine1Zone2, Score: 50}, // Pod on node
{Name: nodeMachine2Zone2, Score: 66}, // Pod in zone
{Name: nodeMachine1Zone3, Score: 0}, // Two pods on node
{Name: nodeMachine2Zone3, Score: 33}, // Pod in zone
{Name: nodeMachine3Zone3, Score: 33}, // Pod in zone
},
name: "Replication controller spreading (z1=0, z2=1, z3=2)",
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
nodes := makeLabeledNodeList(labeledNodes)
snapshot := cache.NewSnapshot(test.pods, nodes)
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
informerFactory, err := populateAndStartInformers(ctx, test.rcs, test.rss, test.services, test.sss)
if err != nil {
t.Errorf("error creating informerFactory: %+v", err)
}
fh, err := frameworkruntime.NewFramework(ctx, nil, nil, frameworkruntime.WithSnapshotSharedLister(snapshot), frameworkruntime.WithInformerFactory(informerFactory))
if err != nil {
t.Errorf("error creating new framework handle: %+v", err)
}
pl, err := New(nil, fh)
if err != nil {
t.Fatal(err)
}
plugin := pl.(*SelectorSpread)
state := framework.NewCycleState()
status := plugin.PreScore(ctx, state, test.pod, nodes)
if !status.IsSuccess() {
t.Fatalf("unexpected error: %v", status)
}
var gotList framework.NodeScoreList
for _, n := range nodes {
nodeName := n.ObjectMeta.Name
score, status := plugin.Score(ctx, state, test.pod, nodeName)
if !status.IsSuccess() {
t.Errorf("unexpected error: %v", status)
}
gotList = append(gotList, framework.NodeScore{Name: nodeName, Score: score})
}
status = plugin.ScoreExtensions().NormalizeScore(ctx, state, test.pod, gotList)
if !status.IsSuccess() {
t.Errorf("unexpected error: %v", status)
}
sortNodeScoreList(test.expectedList)
sortNodeScoreList(gotList)
if !reflect.DeepEqual(test.expectedList, gotList) {
t.Errorf("expected:\n\t%+v,\ngot:\n\t%+v", test.expectedList, gotList)
}
})
}
}
func populateAndStartInformers(ctx context.Context, rcs []*v1.ReplicationController, rss []*apps.ReplicaSet, services []*v1.Service, sss []*apps.StatefulSet) (informers.SharedInformerFactory, error) {
objects := make([]runtime.Object, 0, len(rcs)+len(rss)+len(services)+len(sss))
for _, rc := range rcs {
objects = append(objects, rc.DeepCopyObject())
}
for _, rs := range rss {
objects = append(objects, rs.DeepCopyObject())
}
for _, service := range services {
objects = append(objects, service.DeepCopyObject())
}
for _, ss := range sss {
objects = append(objects, ss.DeepCopyObject())
}
client := clientsetfake.NewSimpleClientset(objects...)
informerFactory := informers.NewSharedInformerFactory(client, 0)
// Because we use an informer factory, we need to make requests for the specific informers we want before calling Start()
_ = informerFactory.Core().V1().Services().Lister()
_ = informerFactory.Core().V1().ReplicationControllers().Lister()
_ = informerFactory.Apps().V1().ReplicaSets().Lister()
_ = informerFactory.Apps().V1().StatefulSets().Lister()
informerFactory.Start(ctx.Done())
caches := informerFactory.WaitForCacheSync(ctx.Done())
for _, synced := range caches {
if !synced {
return nil, fmt.Errorf("error waiting for informer cache sync")
}
}
return informerFactory, nil
}
func makeLabeledNodeList(nodeMap map[string]map[string]string) []*v1.Node {
nodes := make([]*v1.Node, 0, len(nodeMap))
for nodeName, labels := range nodeMap {
nodes = append(nodes, &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName, Labels: labels}})
}
return nodes
}
func makeNodeList(nodeNames []string) []*v1.Node {
nodes := make([]*v1.Node, 0, len(nodeNames))
for _, nodeName := range nodeNames {
nodes = append(nodes, &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}})
}
return nodes
}
func sortNodeScoreList(out framework.NodeScoreList) {
sort.Slice(out, func(i, j int) bool {
if out[i].Score == out[j].Score {
return out[i].Name < out[j].Name
}
return out[i].Score < out[j].Score
})
}

View File

@@ -57,7 +57,6 @@ import (
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/queuesort"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/selectorspread"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding"
frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
@@ -2598,7 +2597,7 @@ func TestZeroRequest(t *testing.T) {
{Spec: large1}, {Spec: noResources1},
{Spec: large2}, {Spec: small2},
},
expectedScore: 250,
expectedScore: 150,
},
{
pod: &v1.Pod{Spec: small},
@@ -2608,7 +2607,7 @@
{Spec: large1}, {Spec: noResources1},
{Spec: large2}, {Spec: small2},
},
expectedScore: 250,
expectedScore: 150,
},
// The point of this test is to verify that we're not just getting the same score no matter what we schedule.
{
@@ -2619,7 +2618,7 @@
{Spec: large1}, {Spec: noResources1},
{Spec: large2}, {Spec: small2},
},
expectedScore: 230,
expectedScore: 130,
},
}
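
Why the expected scores above drop by exactly 100 (250→150, 230→130): in these TestZeroRequest cases no Service or controller selects the pods, so the SelectorSpread score plugin, registered with weight 1 in the hunk below and now unregistered, returned framework.MaxNodeScore (100) for every node; removing it subtracts 100 from each expected total. This is an inference from the diff, not something stated in the PR.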
@@ -2634,8 +2633,6 @@ func TestZeroRequest(t *testing.T) {
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterScorePlugin(noderesources.Name, frameworkruntime.FactoryAdapter(fts, noderesources.NewFit), 1),
st.RegisterScorePlugin(noderesources.BalancedAllocationName, frameworkruntime.FactoryAdapter(fts, noderesources.NewBalancedAllocation), 1),
st.RegisterScorePlugin(selectorspread.Name, selectorspread.New, 1),
st.RegisterPreScorePlugin(selectorspread.Name, selectorspread.New),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
}
ctx, cancel := context.WithCancel(context.Background())