From 9e426a6be1ced72eaf2073d3473c7fe694e1bba6 Mon Sep 17 00:00:00 2001
From: notpad
Date: Fri, 11 Oct 2019 18:35:30 +0800
Subject: [PATCH] [migration phase 1] NoDiskConflict as filter plugin

---
 .../api/compatibility/compatibility_test.go   |  37 +--
 pkg/scheduler/framework/plugins/BUILD         |   2 +
 .../framework/plugins/default_registry.go     |   7 +
 .../plugins/volumerestrictions/BUILD          |  42 ++++
 .../volumerestrictions/volume_restrictions.go |  51 ++++
 .../volume_restrictions_test.go               | 231 ++++++++++++++++++
 test/integration/scheduler/scheduler_test.go  |  18 +-
 7 files changed, 366 insertions(+), 22 deletions(-)
 create mode 100644 pkg/scheduler/framework/plugins/volumerestrictions/BUILD
 create mode 100644 pkg/scheduler/framework/plugins/volumerestrictions/volume_restrictions.go
 create mode 100644 pkg/scheduler/framework/plugins/volumerestrictions/volume_restrictions_test.go

diff --git a/pkg/scheduler/api/compatibility/compatibility_test.go b/pkg/scheduler/api/compatibility/compatibility_test.go
index 794149bbeb0..778ae64b33f 100644
--- a/pkg/scheduler/api/compatibility/compatibility_test.go
+++ b/pkg/scheduler/api/compatibility/compatibility_test.go
@@ -69,7 +69,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"MatchNodeSelector",
 				"PodFitsResources",
 				"PodFitsPorts",
-				"NoDiskConflict",
 				"TestServiceAffinity",
 				"TestLabelsPresence",
 			),
@@ -79,6 +78,11 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"TestServiceAntiAffinity",
 				"TestLabelPreference",
 			),
+			wantPlugins: map[string][]kubeschedulerconfig.Plugin{
+				"FilterPlugin": {
+					{Name: "VolumeRestrictions"},
+				},
+			},
 		},

 		// Do not change this JSON after the corresponding release has been tagged.
@@ -108,7 +112,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"MatchNodeSelector",
 				"PodFitsHostPorts",
 				"PodFitsResources",
-				"NoDiskConflict",
 				"TestServiceAffinity",
 				"TestLabelsPresence",
 			),
@@ -123,6 +126,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 			wantPlugins: map[string][]kubeschedulerconfig.Plugin{
 				"FilterPlugin": {
 					{Name: "NodeName"},
+					{Name: "VolumeRestrictions"},
 				},
 			},
 		},
@@ -160,7 +164,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"MatchNodeSelector",
 				"PodFitsResources",
 				"PodFitsHostPorts",
-				"NoDiskConflict",
 				"NoVolumeZoneConflict",
 				"MaxEBSVolumeCount",
 				"MaxGCEPDVolumeCount",
@@ -181,6 +184,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 			wantPlugins: map[string][]kubeschedulerconfig.Plugin{
 				"FilterPlugin": {
 					{Name: "NodeName"},
+					{Name: "VolumeRestrictions"},
 				},
 			},
 		},
@@ -222,7 +226,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"MatchNodeSelector",
 				"PodFitsResources",
 				"PodFitsHostPorts",
-				"NoDiskConflict",
 				"NoVolumeZoneConflict",
 				"CheckNodeMemoryPressure",
 				"MaxEBSVolumeCount",
@@ -245,6 +248,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 			wantPlugins: map[string][]kubeschedulerconfig.Plugin{
 				"FilterPlugin": {
 					{Name: "NodeName"},
+					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
 				},
 				"ScorePlugin": {{Name: "TaintToleration", Weight: 2}},
@@ -291,7 +295,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"MatchNodeSelector",
 				"PodFitsResources",
 				"PodFitsHostPorts",
-				"NoDiskConflict",
 				"NoVolumeZoneConflict",
 				"CheckNodeMemoryPressure",
 				"CheckNodeDiskPressure",
@@ -317,6 +320,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 			wantPlugins: map[string][]kubeschedulerconfig.Plugin{
 				"FilterPlugin": {
 					{Name: "NodeName"},
+					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
 				},
 				"ScorePlugin": {{Name: "TaintToleration", Weight: 2}},
@@ -372,7 +376,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"MatchNodeSelector",
 				"PodFitsResources",
 				"PodFitsHostPorts",
-				"NoDiskConflict",
 				"NoVolumeZoneConflict",
 				"CheckNodeMemoryPressure",
 				"CheckNodeDiskPressure",
@@ -398,6 +401,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 			wantPlugins: map[string][]kubeschedulerconfig.Plugin{
 				"FilterPlugin": {
 					{Name: "NodeName"},
+					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
 				},
 				"ScorePlugin": {{Name: "TaintToleration", Weight: 2}},
@@ -465,7 +469,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"MatchNodeSelector",
 				"PodFitsResources",
 				"PodFitsHostPorts",
-				"NoDiskConflict",
 				"NoVolumeZoneConflict",
 				"CheckNodeMemoryPressure",
 				"CheckNodeDiskPressure",
@@ -492,6 +495,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 			wantPlugins: map[string][]kubeschedulerconfig.Plugin{
 				"FilterPlugin": {
 					{Name: "NodeName"},
+					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
 				},
 				"ScorePlugin": {{Name: "TaintToleration", Weight: 2}},
@@ -560,7 +564,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"MatchNodeSelector",
 				"PodFitsResources",
 				"PodFitsHostPorts",
-				"NoDiskConflict",
 				"NoVolumeZoneConflict",
 				"CheckNodeMemoryPressure",
 				"CheckNodeDiskPressure",
@@ -587,6 +590,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 			wantPlugins: map[string][]kubeschedulerconfig.Plugin{
 				"FilterPlugin": {
 					{Name: "NodeName"},
+					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
 					{Name: "VolumeBinding"},
 				},
@@ -660,7 +664,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"MatchNodeSelector",
 				"PodFitsResources",
 				"PodFitsHostPorts",
-				"NoDiskConflict",
 				"NoVolumeZoneConflict",
 				"CheckNodeMemoryPressure",
 				"CheckNodeDiskPressure",
@@ -688,6 +691,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 			wantPlugins: map[string][]kubeschedulerconfig.Plugin{
 				"FilterPlugin": {
 					{Name: "NodeName"},
+					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
 					{Name: "VolumeBinding"},
 				},
@@ -773,7 +777,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"MatchNodeSelector",
 				"PodFitsResources",
 				"PodFitsHostPorts",
-				"NoDiskConflict",
 				"NoVolumeZoneConflict",
 				"CheckNodeMemoryPressure",
 				"CheckNodeDiskPressure",
@@ -802,6 +805,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 			wantPlugins: map[string][]kubeschedulerconfig.Plugin{
 				"FilterPlugin": {
 					{Name: "NodeName"},
+					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
 					{Name: "VolumeBinding"},
 				},
@@ -888,7 +892,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"MatchNodeSelector",
 				"PodFitsResources",
 				"PodFitsHostPorts",
-				"NoDiskConflict",
 				"NoVolumeZoneConflict",
 				"CheckNodeMemoryPressure",
 				"CheckNodeDiskPressure",
@@ -918,6 +921,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 			wantPlugins: map[string][]kubeschedulerconfig.Plugin{
 				"FilterPlugin": {
 					{Name: "NodeName"},
+					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
 					{Name: "VolumeBinding"},
 				},
@@ -1003,7 +1007,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"MatchNodeSelector",
 				"PodFitsResources",
 				"PodFitsHostPorts",
-				"NoDiskConflict",
 				"NoVolumeZoneConflict",
 				"CheckNodeMemoryPressure",
 				"CheckNodeDiskPressure",
@@ -1034,6 +1037,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 			wantPlugins: map[string][]kubeschedulerconfig.Plugin{
 				"FilterPlugin": {
 					{Name: "NodeName"},
+					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
 					{Name: "VolumeBinding"},
 				},
@@ -1123,7 +1127,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"MatchNodeSelector",
 				"PodFitsResources",
 				"PodFitsHostPorts",
-				"NoDiskConflict",
 				"NoVolumeZoneConflict",
 				"CheckNodeMemoryPressure",
 				"CheckNodeDiskPressure",
@@ -1154,6 +1157,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 			wantPlugins: map[string][]kubeschedulerconfig.Plugin{
 				"FilterPlugin": {
 					{Name: "NodeName"},
+					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
 					{Name: "VolumeBinding"},
 				},
@@ -1180,9 +1184,10 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 	seenPriorities := sets.NewString()
 	mandatoryPredicates := sets.NewString("CheckNodeCondition")
 	filterToPredicateMap := map[string]string{
-		"TaintToleration": "PodToleratesNodeTaints",
-		"NodeName":        "HostName",
-		"VolumeBinding":   "CheckVolumeBinding",
+		"VolumeRestrictions": "NoDiskConflict",
+		"TaintToleration":    "PodToleratesNodeTaints",
+		"NodeName":           "HostName",
+		"VolumeBinding":      "CheckVolumeBinding",
 	}
 	scoreToPriorityMap := map[string]string{
 		"TaintToleration": "TaintTolerationPriority",
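The compatibility hunks above change only expectation data: every historical policy that used to surface the NoDiskConflict predicate now expects a VolumeRestrictions entry under FilterPlugin, and the removed predicate name reappears as the plugin's mapped predicate in filterToPredicateMap. The semantics carried over are unchanged: a pod must not land on a node that already mounts the same GCE PD, AWS EBS volume, RBD image, or iSCSI target. A minimal sketch of that behavior, using only the predicate API this patch itself calls (the package-main wrapper and printed output are illustrative, not part of the change):

    package main

    import (
    	"fmt"

    	"k8s.io/api/core/v1"
    	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
    	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
    )

    func main() {
    	spec := v1.PodSpec{Volumes: []v1.Volume{{
    		VolumeSource: v1.VolumeSource{
    			GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "foo"},
    		},
    	}}}
    	// The node already runs a pod mounting the same PD, so the incoming
    	// pod conflicts and the predicate reports ErrDiskConflict.
    	nodeInfo := schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: spec})
    	fits, reasons, err := predicates.NoDiskConflict(&v1.Pod{Spec: spec}, nil, nodeInfo)
    	fmt.Println(fits, reasons, err) // false, [disk conflict reason], <nil>
    }
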
"NoVolumeZoneConflict", "CheckNodeMemoryPressure", "CheckNodeDiskPressure", @@ -1154,6 +1157,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) { wantPlugins: map[string][]kubeschedulerconfig.Plugin{ "FilterPlugin": { {Name: "NodeName"}, + {Name: "VolumeRestrictions"}, {Name: "TaintToleration"}, {Name: "VolumeBinding"}, }, @@ -1180,9 +1184,10 @@ func TestCompatibility_v1_Scheduler(t *testing.T) { seenPriorities := sets.NewString() mandatoryPredicates := sets.NewString("CheckNodeCondition") filterToPredicateMap := map[string]string{ - "TaintToleration": "PodToleratesNodeTaints", - "NodeName": "HostName", - "VolumeBinding": "CheckVolumeBinding", + "VolumeRestrictions": "NoDiskConflict", + "TaintToleration": "PodToleratesNodeTaints", + "NodeName": "HostName", + "VolumeBinding": "CheckVolumeBinding", } scoreToPriorityMap := map[string]string{ "TaintToleration": "TaintTolerationPriority", diff --git a/pkg/scheduler/framework/plugins/BUILD b/pkg/scheduler/framework/plugins/BUILD index 3a59dc4e9f4..dfd247e7e71 100644 --- a/pkg/scheduler/framework/plugins/BUILD +++ b/pkg/scheduler/framework/plugins/BUILD @@ -13,6 +13,7 @@ go_library( "//pkg/scheduler/framework/plugins/nodename:go_default_library", "//pkg/scheduler/framework/plugins/tainttoleration:go_default_library", "//pkg/scheduler/framework/plugins/volumebinding:go_default_library", + "//pkg/scheduler/framework/plugins/volumerestrictions:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library", "//pkg/scheduler/internal/cache:go_default_library", "//pkg/scheduler/volumebinder:go_default_library", @@ -38,6 +39,7 @@ filegroup( "//pkg/scheduler/framework/plugins/nodename:all-srcs", "//pkg/scheduler/framework/plugins/tainttoleration:all-srcs", "//pkg/scheduler/framework/plugins/volumebinding:all-srcs", + "//pkg/scheduler/framework/plugins/volumerestrictions:all-srcs", ], tags = ["automanaged"], visibility = ["//visibility:public"], diff --git a/pkg/scheduler/framework/plugins/default_registry.go b/pkg/scheduler/framework/plugins/default_registry.go index 0492e701c7e..de31fce44ae 100644 --- a/pkg/scheduler/framework/plugins/default_registry.go +++ b/pkg/scheduler/framework/plugins/default_registry.go @@ -29,6 +29,7 @@ import ( "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" "k8s.io/kubernetes/pkg/scheduler/volumebinder" @@ -58,6 +59,7 @@ func NewDefaultRegistry(args *RegistryArgs) framework.Registry { volumebinding.Name: func(_ *runtime.Unknown, _ framework.FrameworkHandle) (framework.Plugin, error) { return volumebinding.NewFromVolumeBinder(args.VolumeBinder), nil }, + volumerestrictions.Name: volumerestrictions.New, } } @@ -100,6 +102,11 @@ func NewDefaultConfigProducerRegistry() *ConfigProducerRegistry { plugins.Filter = appendToPluginSet(plugins.Filter, volumebinding.Name, nil) return }) + registry.RegisterPredicate(predicates.NoDiskConflictPred, + func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { + plugins.Filter = appendToPluginSet(plugins.Filter, volumerestrictions.Name, nil) + return + }) registry.RegisterPriority(priorities.TaintTolerationPriority, func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) 
{ diff --git a/pkg/scheduler/framework/plugins/volumerestrictions/BUILD b/pkg/scheduler/framework/plugins/volumerestrictions/BUILD new file mode 100644 index 00000000000..4d7d651454e --- /dev/null +++ b/pkg/scheduler/framework/plugins/volumerestrictions/BUILD @@ -0,0 +1,42 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["volume_restrictions.go"], + importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions", + visibility = ["//visibility:public"], + deps = [ + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/framework/plugins/migration:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["volume_restrictions_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + ], +) diff --git a/pkg/scheduler/framework/plugins/volumerestrictions/volume_restrictions.go b/pkg/scheduler/framework/plugins/volumerestrictions/volume_restrictions.go new file mode 100644 index 00000000000..f69e9be799c --- /dev/null +++ b/pkg/scheduler/framework/plugins/volumerestrictions/volume_restrictions.go @@ -0,0 +1,51 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package volumerestrictions + +import ( + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + "k8s.io/kubernetes/pkg/scheduler/nodeinfo" +) + +// VolumeRestrictions is a plugin that checks volume restrictions +type VolumeRestrictions struct{} + +var _ = framework.FilterPlugin(&VolumeRestrictions{}) + +// Name is the name of the plugin used in the plugin registry and configurations. +const Name = "VolumeRestrictions" + +// Name returns name of the plugin. It is used in logs, etc. +func (pl *VolumeRestrictions) Name() string { + return Name +} + +// Filter invoked at the filter extension point. 
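The plugin itself is a thin adapter: Filter delegates to the legacy predicates.NoDiskConflict and converts its failure reasons into a framework Status via the migration helper, so filtering behavior is identical to the predicate it replaces. A hedged usage sketch of the filter extension point follows; the podFits helper is hypothetical and mirrors how the tests below drive the plugin, while in the scheduler proper the framework invokes Filter once per candidate node:

    package example

    import (
    	"k8s.io/api/core/v1"
    	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions"
    	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
    	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
    )

    // podFits reports whether pod passes the VolumeRestrictions filter on the
    // node described by nodeInfo.
    func podFits(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) bool {
    	p, _ := volumerestrictions.New(nil, nil) // New ignores both arguments
    	// A nil CycleState is safe here: NoDiskConflict needs no precomputed
    	// metadata, which is why Filter passes nil metadata to the predicate.
    	status := p.(framework.FilterPlugin).Filter(nil, pod, nodeInfo)
    	return status.IsSuccess() // a nil Status counts as success
    }
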
diff --git a/pkg/scheduler/framework/plugins/volumerestrictions/volume_restrictions_test.go b/pkg/scheduler/framework/plugins/volumerestrictions/volume_restrictions_test.go
new file mode 100644
index 00000000000..77426df601d
--- /dev/null
+++ b/pkg/scheduler/framework/plugins/volumerestrictions/volume_restrictions_test.go
@@ -0,0 +1,231 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package volumerestrictions
+
+import (
+	"reflect"
+	"testing"
+
+	"k8s.io/api/core/v1"
+	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+)
+
+func TestGCEDiskConflicts(t *testing.T) {
+	volState := v1.PodSpec{
+		Volumes: []v1.Volume{
+			{
+				VolumeSource: v1.VolumeSource{
+					GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
+						PDName: "foo",
+					},
+				},
+			},
+		},
+	}
+	volState2 := v1.PodSpec{
+		Volumes: []v1.Volume{
+			{
+				VolumeSource: v1.VolumeSource{
+					GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
+						PDName: "bar",
+					},
+				},
+			},
+		},
+	}
+	errStatus := framework.NewStatus(framework.Unschedulable, predicates.ErrDiskConflict.GetReason())
+	tests := []struct {
+		pod        *v1.Pod
+		nodeInfo   *schedulernodeinfo.NodeInfo
+		isOk       bool
+		name       string
+		wantStatus *framework.Status
+	}{
+		{&v1.Pod{}, schedulernodeinfo.NewNodeInfo(), true, "nothing", nil},
+		{&v1.Pod{}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil},
+		{&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus},
+		{&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			p, _ := New(nil, nil)
+			gotStatus := p.(framework.FilterPlugin).Filter(nil, test.pod, test.nodeInfo)
+			if !reflect.DeepEqual(gotStatus, test.wantStatus) {
+				t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
+			}
+		})
+	}
+}
+
+func TestAWSDiskConflicts(t *testing.T) {
+	volState := v1.PodSpec{
+		Volumes: []v1.Volume{
+			{
+				VolumeSource: v1.VolumeSource{
+					AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
+						VolumeID: "foo",
+					},
+				},
+			},
+		},
+	}
+	volState2 := v1.PodSpec{
+		Volumes: []v1.Volume{
+			{
+				VolumeSource: v1.VolumeSource{
+					AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
+						VolumeID: "bar",
+					},
+				},
+			},
+		},
+	}
+	errStatus := framework.NewStatus(framework.Unschedulable, predicates.ErrDiskConflict.GetReason())
+	tests := []struct {
+		pod        *v1.Pod
+		nodeInfo   *schedulernodeinfo.NodeInfo
+		isOk       bool
+		name       string
+		wantStatus *framework.Status
+	}{
+		{&v1.Pod{}, schedulernodeinfo.NewNodeInfo(), true, "nothing", nil},
+		{&v1.Pod{}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil},
+		{&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus},
+		{&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			p, _ := New(nil, nil)
+			gotStatus := p.(framework.FilterPlugin).Filter(nil, test.pod, test.nodeInfo)
+			if !reflect.DeepEqual(gotStatus, test.wantStatus) {
+				t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
+			}
+		})
+	}
+}
+
+func TestRBDDiskConflicts(t *testing.T) {
+	volState := v1.PodSpec{
+		Volumes: []v1.Volume{
+			{
+				VolumeSource: v1.VolumeSource{
+					RBD: &v1.RBDVolumeSource{
+						CephMonitors: []string{"a", "b"},
+						RBDPool:      "foo",
+						RBDImage:     "bar",
+						FSType:       "ext4",
+					},
+				},
+			},
+		},
+	}
+	volState2 := v1.PodSpec{
+		Volumes: []v1.Volume{
+			{
+				VolumeSource: v1.VolumeSource{
+					RBD: &v1.RBDVolumeSource{
+						CephMonitors: []string{"c", "d"},
+						RBDPool:      "foo",
+						RBDImage:     "bar",
+						FSType:       "ext4",
+					},
+				},
+			},
+		},
+	}
+	errStatus := framework.NewStatus(framework.Unschedulable, predicates.ErrDiskConflict.GetReason())
+	tests := []struct {
+		pod        *v1.Pod
+		nodeInfo   *schedulernodeinfo.NodeInfo
+		isOk       bool
+		name       string
+		wantStatus *framework.Status
+	}{
+		{&v1.Pod{}, schedulernodeinfo.NewNodeInfo(), true, "nothing", nil},
+		{&v1.Pod{}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil},
+		{&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus},
+		{&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			p, _ := New(nil, nil)
+			gotStatus := p.(framework.FilterPlugin).Filter(nil, test.pod, test.nodeInfo)
+			if !reflect.DeepEqual(gotStatus, test.wantStatus) {
+				t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
+			}
+		})
+	}
+}
+
+func TestISCSIDiskConflicts(t *testing.T) {
+	volState := v1.PodSpec{
+		Volumes: []v1.Volume{
+			{
+				VolumeSource: v1.VolumeSource{
+					ISCSI: &v1.ISCSIVolumeSource{
+						TargetPortal: "127.0.0.1:3260",
+						IQN:          "iqn.2016-12.server:storage.target01",
+						FSType:       "ext4",
+						Lun:          0,
+					},
+				},
+			},
+		},
+	}
+	volState2 := v1.PodSpec{
+		Volumes: []v1.Volume{
+			{
+				VolumeSource: v1.VolumeSource{
+					ISCSI: &v1.ISCSIVolumeSource{
+						TargetPortal: "127.0.0.1:3260",
+						IQN:          "iqn.2017-12.server:storage.target01",
+						FSType:       "ext4",
+						Lun:          0,
+					},
+				},
+			},
+		},
+	}
+	errStatus := framework.NewStatus(framework.Unschedulable, predicates.ErrDiskConflict.GetReason())
+	tests := []struct {
+		pod        *v1.Pod
+		nodeInfo   *schedulernodeinfo.NodeInfo
+		isOk       bool
+		name       string
+		wantStatus *framework.Status
+	}{
+		{&v1.Pod{}, schedulernodeinfo.NewNodeInfo(), true, "nothing", nil},
+		{&v1.Pod{}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil},
+		{&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus},
+		{&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			p, _ := New(nil, nil)
+			gotStatus := p.(framework.FilterPlugin).Filter(nil, test.pod, test.nodeInfo)
+			if !reflect.DeepEqual(gotStatus, test.wantStatus) {
+				t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
+			}
+		})
+	}
+}
diff --git a/test/integration/scheduler/scheduler_test.go b/test/integration/scheduler/scheduler_test.go
index 4d524c26979..b3bc8d7c8a6 100644
--- a/test/integration/scheduler/scheduler_test.go
+++ b/test/integration/scheduler/scheduler_test.go
@@ -136,7 +136,6 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
 				"MaxCSIVolumeCountPred",
 				"MaxEBSVolumeCount",
 				"MaxGCEPDVolumeCount",
-				"NoDiskConflict",
 				"NoVolumeZoneConflict",
 			),
 			expectedPrioritizers: sets.NewString(
@@ -149,8 +148,12 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
 				"ImageLocalityPriority",
 			),
 			expectedPlugins: map[string][]kubeschedulerconfig.Plugin{
-				"FilterPlugin": {{Name: "TaintToleration"}, {Name: "VolumeBinding"}},
-				"ScorePlugin":  {{Name: "TaintToleration", Weight: 1}},
+				"FilterPlugin": {
+					{Name: "VolumeRestrictions"},
+					{Name: "TaintToleration"},
+					{Name: "VolumeBinding"},
+				},
+				"ScorePlugin": {{Name: "TaintToleration", Weight: 1}},
 			},
 		},
 		{
@@ -202,7 +205,6 @@ kind: Policy
 				"MaxCSIVolumeCountPred",
 				"MaxEBSVolumeCount",
 				"MaxGCEPDVolumeCount",
-				"NoDiskConflict",
 				"NoVolumeZoneConflict",
 			),
 			expectedPrioritizers: sets.NewString(
@@ -215,8 +217,12 @@ kind: Policy
 				"ImageLocalityPriority",
 			),
 			expectedPlugins: map[string][]kubeschedulerconfig.Plugin{
-				"FilterPlugin": {{Name: "TaintToleration"}, {Name: "VolumeBinding"}},
-				"ScorePlugin":  {{Name: "TaintToleration", Weight: 1}},
+				"FilterPlugin": {
+					{Name: "VolumeRestrictions"},
+					{Name: "TaintToleration"},
+					{Name: "VolumeBinding"},
+				},
+				"ScorePlugin": {{Name: "TaintToleration", Weight: 1}},
 			},
 		},
 		{
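End to end, the registration in default_registry.go means a scheduler policy that lists NoDiskConflict as a predicate now also enables the VolumeRestrictions plugin at the Filter extension point, which is what the integration test above asserts. A sketch of resolving the plugin out of the default registry; passing an empty RegistryArgs is an assumption for illustration (the real caller wires a VolumeBinder and other dependencies through it), and indexing the registry assumes framework.Registry is the plugin-name-to-factory map it was at the time:

    package main

    import (
    	"fmt"

    	"k8s.io/kubernetes/pkg/scheduler/framework/plugins"
    )

    func main() {
    	registry := plugins.NewDefaultRegistry(&plugins.RegistryArgs{})
    	factory := registry["VolumeRestrictions"] // i.e. volumerestrictions.Name
    	p, err := factory(nil, nil)               // volumerestrictions.New ignores both args
    	fmt.Println(p.Name(), err)                // VolumeRestrictions <nil>
    }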