Merge pull request #83731 from notpad/feature/migration_nodiskconflict

[migration phase 1] NoDiskConflict as filter plugin
commit a7a6ca80d8 by Kubernetes Prow Robot, 2019-10-11 21:16:52 -07:00 (committed by GitHub)
7 changed files with 366 additions and 22 deletions
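For reference, a minimal sketch of how the migrated plugin is exercised, mirroring the new unit tests further down in this diff. The disk name and the printed output are illustrative only and not part of this change:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

func main() {
	// Two pods that mount the same GCE persistent disk conflict on a node.
	// The PD name "shared-pd" is only an example value.
	pd := v1.VolumeSource{
		GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "shared-pd"},
	}
	existing := &v1.Pod{Spec: v1.PodSpec{Volumes: []v1.Volume{{VolumeSource: pd}}}}
	incoming := &v1.Pod{Spec: v1.PodSpec{Volumes: []v1.Volume{{VolumeSource: pd}}}}

	// Node already running the pod that holds the disk.
	nodeInfo := schedulernodeinfo.NewNodeInfo(existing)

	// Build the plugin the same way the tests in this PR do (no args, no handle),
	// then run the Filter extension point, which wraps the legacy NoDiskConflict predicate.
	p, _ := volumerestrictions.New(nil, nil)
	status := p.(framework.FilterPlugin).Filter(nil, incoming, nodeInfo)

	// Expect an Unschedulable status carrying the NoDiskConflict reason.
	fmt.Println(status.IsSuccess(), status.Message())
}
```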

@@ -69,7 +69,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
"MatchNodeSelector",
"PodFitsResources",
"PodFitsPorts",
"NoDiskConflict",
"TestServiceAffinity",
"TestLabelsPresence",
),
@@ -79,6 +78,11 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
"TestServiceAntiAffinity",
"TestLabelPreference",
),
wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": {
{Name: "VolumeRestrictions"},
},
},
},
// Do not change this JSON after the corresponding release has been tagged.
@@ -108,7 +112,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
"MatchNodeSelector",
"PodFitsHostPorts",
"PodFitsResources",
"NoDiskConflict",
"TestServiceAffinity",
"TestLabelsPresence",
),
@@ -123,6 +126,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": {
{Name: "NodeName"},
{Name: "VolumeRestrictions"},
},
},
},
@@ -160,7 +164,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
"MatchNodeSelector",
"PodFitsResources",
"PodFitsHostPorts",
"NoDiskConflict",
"NoVolumeZoneConflict",
"MaxEBSVolumeCount",
"MaxGCEPDVolumeCount",
@@ -181,6 +184,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": {
{Name: "NodeName"},
{Name: "VolumeRestrictions"},
},
},
},
@@ -222,7 +226,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
"MatchNodeSelector",
"PodFitsResources",
"PodFitsHostPorts",
"NoDiskConflict",
"NoVolumeZoneConflict",
"CheckNodeMemoryPressure",
"MaxEBSVolumeCount",
@@ -245,6 +248,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": {
{Name: "NodeName"},
{Name: "VolumeRestrictions"},
{Name: "TaintToleration"},
},
"ScorePlugin": {{Name: "TaintToleration", Weight: 2}},
@@ -291,7 +295,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
"MatchNodeSelector",
"PodFitsResources",
"PodFitsHostPorts",
"NoDiskConflict",
"NoVolumeZoneConflict",
"CheckNodeMemoryPressure",
"CheckNodeDiskPressure",
@@ -317,6 +320,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": {
{Name: "NodeName"},
{Name: "VolumeRestrictions"},
{Name: "TaintToleration"},
},
"ScorePlugin": {{Name: "TaintToleration", Weight: 2}},
@@ -372,7 +376,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
"MatchNodeSelector",
"PodFitsResources",
"PodFitsHostPorts",
"NoDiskConflict",
"NoVolumeZoneConflict",
"CheckNodeMemoryPressure",
"CheckNodeDiskPressure",
@@ -398,6 +401,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": {
{Name: "NodeName"},
{Name: "VolumeRestrictions"},
{Name: "TaintToleration"},
},
"ScorePlugin": {{Name: "TaintToleration", Weight: 2}},
@@ -465,7 +469,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
"MatchNodeSelector",
"PodFitsResources",
"PodFitsHostPorts",
"NoDiskConflict",
"NoVolumeZoneConflict",
"CheckNodeMemoryPressure",
"CheckNodeDiskPressure",
@@ -492,6 +495,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": {
{Name: "NodeName"},
{Name: "VolumeRestrictions"},
{Name: "TaintToleration"},
},
"ScorePlugin": {{Name: "TaintToleration", Weight: 2}},
@@ -560,7 +564,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
"MatchNodeSelector",
"PodFitsResources",
"PodFitsHostPorts",
"NoDiskConflict",
"NoVolumeZoneConflict",
"CheckNodeMemoryPressure",
"CheckNodeDiskPressure",
@@ -587,6 +590,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": {
{Name: "NodeName"},
{Name: "VolumeRestrictions"},
{Name: "TaintToleration"},
{Name: "VolumeBinding"},
},
@@ -660,7 +664,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
"MatchNodeSelector",
"PodFitsResources",
"PodFitsHostPorts",
"NoDiskConflict",
"NoVolumeZoneConflict",
"CheckNodeMemoryPressure",
"CheckNodeDiskPressure",
@@ -688,6 +691,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": {
{Name: "NodeName"},
{Name: "VolumeRestrictions"},
{Name: "TaintToleration"},
{Name: "VolumeBinding"},
},
@@ -773,7 +777,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
"MatchNodeSelector",
"PodFitsResources",
"PodFitsHostPorts",
"NoDiskConflict",
"NoVolumeZoneConflict",
"CheckNodeMemoryPressure",
"CheckNodeDiskPressure",
@@ -802,6 +805,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": {
{Name: "NodeName"},
{Name: "VolumeRestrictions"},
{Name: "TaintToleration"},
{Name: "VolumeBinding"},
},
@@ -888,7 +892,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
"MatchNodeSelector",
"PodFitsResources",
"PodFitsHostPorts",
"NoDiskConflict",
"NoVolumeZoneConflict",
"CheckNodeMemoryPressure",
"CheckNodeDiskPressure",
@@ -918,6 +921,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": {
{Name: "NodeName"},
{Name: "VolumeRestrictions"},
{Name: "TaintToleration"},
{Name: "VolumeBinding"},
},
@@ -1003,7 +1007,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
"MatchNodeSelector",
"PodFitsResources",
"PodFitsHostPorts",
"NoDiskConflict",
"NoVolumeZoneConflict",
"CheckNodeMemoryPressure",
"CheckNodeDiskPressure",
@@ -1034,6 +1037,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": {
{Name: "NodeName"},
{Name: "VolumeRestrictions"},
{Name: "TaintToleration"},
{Name: "VolumeBinding"},
},
@@ -1123,7 +1127,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
"MatchNodeSelector",
"PodFitsResources",
"PodFitsHostPorts",
"NoDiskConflict",
"NoVolumeZoneConflict",
"CheckNodeMemoryPressure",
"CheckNodeDiskPressure",
@@ -1154,6 +1157,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
wantPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": {
{Name: "NodeName"},
{Name: "VolumeRestrictions"},
{Name: "TaintToleration"},
{Name: "VolumeBinding"},
},
@@ -1180,9 +1184,10 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
seenPriorities := sets.NewString()
mandatoryPredicates := sets.NewString("CheckNodeCondition")
filterToPredicateMap := map[string]string{
"TaintToleration": "PodToleratesNodeTaints",
"NodeName": "HostName",
"VolumeBinding": "CheckVolumeBinding",
"VolumeRestrictions": "NoDiskConflict",
"TaintToleration": "PodToleratesNodeTaints",
"NodeName": "HostName",
"VolumeBinding": "CheckVolumeBinding",
}
scoreToPriorityMap := map[string]string{
"TaintToleration": "TaintTolerationPriority",

@@ -13,6 +13,7 @@ go_library(
"//pkg/scheduler/framework/plugins/nodename:go_default_library",
"//pkg/scheduler/framework/plugins/tainttoleration:go_default_library",
"//pkg/scheduler/framework/plugins/volumebinding:go_default_library",
"//pkg/scheduler/framework/plugins/volumerestrictions:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/internal/cache:go_default_library",
"//pkg/scheduler/volumebinder:go_default_library",
@@ -38,6 +39,7 @@ filegroup(
"//pkg/scheduler/framework/plugins/nodename:all-srcs",
"//pkg/scheduler/framework/plugins/tainttoleration:all-srcs",
"//pkg/scheduler/framework/plugins/volumebinding:all-srcs",
"//pkg/scheduler/framework/plugins/volumerestrictions:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],

@@ -29,6 +29,7 @@ import (
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
"k8s.io/kubernetes/pkg/scheduler/volumebinder"
@@ -58,6 +59,7 @@ func NewDefaultRegistry(args *RegistryArgs) framework.Registry {
volumebinding.Name: func(_ *runtime.Unknown, _ framework.FrameworkHandle) (framework.Plugin, error) {
return volumebinding.NewFromVolumeBinder(args.VolumeBinder), nil
},
volumerestrictions.Name: volumerestrictions.New,
}
}
@@ -100,6 +102,11 @@ func NewDefaultConfigProducerRegistry() *ConfigProducerRegistry {
plugins.Filter = appendToPluginSet(plugins.Filter, volumebinding.Name, nil)
return
})
registry.RegisterPredicate(predicates.NoDiskConflictPred,
func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
plugins.Filter = appendToPluginSet(plugins.Filter, volumerestrictions.Name, nil)
return
})
registry.RegisterPriority(priorities.TaintTolerationPriority,
func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {

@@ -0,0 +1,42 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["volume_restrictions.go"],
importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions",
visibility = ["//visibility:public"],
deps = [
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/framework/plugins/migration:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
go_test(
name = "go_default_test",
srcs = ["volume_restrictions_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
],
)

@@ -0,0 +1,51 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volumerestrictions
import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
// VolumeRestrictions is a plugin that checks volume restrictions
type VolumeRestrictions struct{}
var _ = framework.FilterPlugin(&VolumeRestrictions{})
// Name is the name of the plugin used in the plugin registry and configurations.
const Name = "VolumeRestrictions"
// Name returns name of the plugin. It is used in logs, etc.
func (pl *VolumeRestrictions) Name() string {
return Name
}
// Filter invoked at the filter extension point.
func (pl *VolumeRestrictions) Filter(_ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
// metadata is not needed for NoDiskConflict
_, reasons, err := predicates.NoDiskConflict(pod, nil, nodeInfo)
return migration.PredicateResultToFrameworkStatus(reasons, err)
}
// New initializes a new plugin and returns it.
func New(_ *runtime.Unknown, _ framework.FrameworkHandle) (framework.Plugin, error) {
return &VolumeRestrictions{}, nil
}

@@ -0,0 +1,231 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volumerestrictions
import (
"reflect"
"testing"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
func TestGCEDiskConflicts(t *testing.T) {
volState := v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: "foo",
},
},
},
},
}
volState2 := v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: "bar",
},
},
},
},
}
errStatus := framework.NewStatus(framework.Unschedulable, predicates.ErrDiskConflict.GetReason())
tests := []struct {
pod *v1.Pod
nodeInfo *schedulernodeinfo.NodeInfo
isOk bool
name string
wantStatus *framework.Status
}{
{&v1.Pod{}, schedulernodeinfo.NewNodeInfo(), true, "nothing", nil},
{&v1.Pod{}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil},
{&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus},
{&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
p, _ := New(nil, nil)
gotStatus := p.(framework.FilterPlugin).Filter(nil, test.pod, test.nodeInfo)
if !reflect.DeepEqual(gotStatus, test.wantStatus) {
t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
}
})
}
}
func TestAWSDiskConflicts(t *testing.T) {
volState := v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: "foo",
},
},
},
},
}
volState2 := v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: "bar",
},
},
},
},
}
errStatus := framework.NewStatus(framework.Unschedulable, predicates.ErrDiskConflict.GetReason())
tests := []struct {
pod *v1.Pod
nodeInfo *schedulernodeinfo.NodeInfo
isOk bool
name string
wantStatus *framework.Status
}{
{&v1.Pod{}, schedulernodeinfo.NewNodeInfo(), true, "nothing", nil},
{&v1.Pod{}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil},
{&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus},
{&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
p, _ := New(nil, nil)
gotStatus := p.(framework.FilterPlugin).Filter(nil, test.pod, test.nodeInfo)
if !reflect.DeepEqual(gotStatus, test.wantStatus) {
t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
}
})
}
}
func TestRBDDiskConflicts(t *testing.T) {
volState := v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
RBD: &v1.RBDVolumeSource{
CephMonitors: []string{"a", "b"},
RBDPool: "foo",
RBDImage: "bar",
FSType: "ext4",
},
},
},
},
}
volState2 := v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
RBD: &v1.RBDVolumeSource{
CephMonitors: []string{"c", "d"},
RBDPool: "foo",
RBDImage: "bar",
FSType: "ext4",
},
},
},
},
}
errStatus := framework.NewStatus(framework.Unschedulable, predicates.ErrDiskConflict.GetReason())
tests := []struct {
pod *v1.Pod
nodeInfo *schedulernodeinfo.NodeInfo
isOk bool
name string
wantStatus *framework.Status
}{
{&v1.Pod{}, schedulernodeinfo.NewNodeInfo(), true, "nothing", nil},
{&v1.Pod{}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil},
{&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus},
{&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
p, _ := New(nil, nil)
gotStatus := p.(framework.FilterPlugin).Filter(nil, test.pod, test.nodeInfo)
if !reflect.DeepEqual(gotStatus, test.wantStatus) {
t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
}
})
}
}
func TestISCSIDiskConflicts(t *testing.T) {
volState := v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
ISCSI: &v1.ISCSIVolumeSource{
TargetPortal: "127.0.0.1:3260",
IQN: "iqn.2016-12.server:storage.target01",
FSType: "ext4",
Lun: 0,
},
},
},
},
}
volState2 := v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
ISCSI: &v1.ISCSIVolumeSource{
TargetPortal: "127.0.0.1:3260",
IQN: "iqn.2017-12.server:storage.target01",
FSType: "ext4",
Lun: 0,
},
},
},
},
}
errStatus := framework.NewStatus(framework.Unschedulable, predicates.ErrDiskConflict.GetReason())
tests := []struct {
pod *v1.Pod
nodeInfo *schedulernodeinfo.NodeInfo
isOk bool
name string
wantStatus *framework.Status
}{
{&v1.Pod{}, schedulernodeinfo.NewNodeInfo(), true, "nothing", nil},
{&v1.Pod{}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil},
{&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus},
{&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
p, _ := New(nil, nil)
gotStatus := p.(framework.FilterPlugin).Filter(nil, test.pod, test.nodeInfo)
if !reflect.DeepEqual(gotStatus, test.wantStatus) {
t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
}
})
}
}

@@ -136,7 +136,6 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
"MaxCSIVolumeCountPred",
"MaxEBSVolumeCount",
"MaxGCEPDVolumeCount",
"NoDiskConflict",
"NoVolumeZoneConflict",
),
expectedPrioritizers: sets.NewString(
@@ -149,8 +148,12 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
"ImageLocalityPriority",
),
expectedPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": {{Name: "TaintToleration"}, {Name: "VolumeBinding"}},
"ScorePlugin": {{Name: "TaintToleration", Weight: 1}},
"FilterPlugin": {
{Name: "VolumeRestrictions"},
{Name: "TaintToleration"},
{Name: "VolumeBinding"},
},
"ScorePlugin": {{Name: "TaintToleration", Weight: 1}},
},
},
{
@@ -202,7 +205,6 @@ kind: Policy
"MaxCSIVolumeCountPred",
"MaxEBSVolumeCount",
"MaxGCEPDVolumeCount",
"NoDiskConflict",
"NoVolumeZoneConflict",
),
expectedPrioritizers: sets.NewString(
@@ -215,8 +217,12 @@ kind: Policy
"ImageLocalityPriority",
),
expectedPlugins: map[string][]kubeschedulerconfig.Plugin{
"FilterPlugin": {{Name: "TaintToleration"}, {Name: "VolumeBinding"}},
"ScorePlugin": {{Name: "TaintToleration", Weight: 1}},
"FilterPlugin": {
{Name: "VolumeRestrictions"},
{Name: "TaintToleration"},
{Name: "VolumeBinding"},
},
"ScorePlugin": {{Name: "TaintToleration", Weight: 1}},
},
},
{