diff --git a/pkg/scheduler/api/compatibility/compatibility_test.go b/pkg/scheduler/api/compatibility/compatibility_test.go
index 36bb70ed2b1..cbf916c917b 100644
--- a/pkg/scheduler/api/compatibility/compatibility_test.go
+++ b/pkg/scheduler/api/compatibility/compatibility_test.go
@@ -160,7 +160,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 			]
 		}`,
 			wantPredicates: sets.NewString(
-				"NoVolumeZoneConflict",
 				"MaxEBSVolumeCount",
 				"MaxGCEPDVolumeCount",
 				"MaxAzureDiskVolumeCount",
@@ -183,6 +182,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 					{Name: "NodeAffinity"},
 					{Name: "NodeResources"},
 					{Name: "VolumeRestrictions"},
+					{Name: "VolumeZone"},
 				},
 				"ScorePlugin": {
 					{Name: "ImageLocality", Weight: 2},
@@ -224,7 +224,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 			]
 		}`,
 			wantPredicates: sets.NewString(
-				"NoVolumeZoneConflict",
 				"CheckNodeMemoryPressure",
 				"MaxEBSVolumeCount",
 				"MaxGCEPDVolumeCount",
@@ -250,6 +249,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 					{Name: "NodeResources"},
 					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
+					{Name: "VolumeZone"},
 				},
 				"ScorePlugin": {
 					{Name: "ImageLocality", Weight: 2},
@@ -295,7 +295,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 			]
 		}`,
 			wantPredicates: sets.NewString(
-				"NoVolumeZoneConflict",
 				"CheckNodeMemoryPressure",
 				"CheckNodeDiskPressure",
 				"MaxEBSVolumeCount",
@@ -324,6 +323,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 					{Name: "NodeResources"},
 					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
+					{Name: "VolumeZone"},
 				},
 				"ScorePlugin": {
 					{Name: "ImageLocality", Weight: 2},
@@ -378,7 +378,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 			}]
 		}`,
 			wantPredicates: sets.NewString(
-				"NoVolumeZoneConflict",
 				"CheckNodeMemoryPressure",
 				"CheckNodeDiskPressure",
 				"MaxEBSVolumeCount",
@@ -407,6 +406,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 					{Name: "NodeResources"},
 					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
+					{Name: "VolumeZone"},
 				},
 				"ScorePlugin": {
 					{Name: "ImageLocality", Weight: 2},
@@ -473,7 +473,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 			}]
 		}`,
 			wantPredicates: sets.NewString(
-				"NoVolumeZoneConflict",
 				"CheckNodeMemoryPressure",
 				"CheckNodeDiskPressure",
 				"CheckNodeCondition",
@@ -503,6 +502,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 					{Name: "NodeResources"},
 					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
+					{Name: "VolumeZone"},
 				},
 				"ScorePlugin": {
 					{Name: "ImageLocality", Weight: 2},
@@ -570,7 +570,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 			}]
 		}`,
 			wantPredicates: sets.NewString(
-				"NoVolumeZoneConflict",
 				"CheckNodeMemoryPressure",
 				"CheckNodeDiskPressure",
 				"CheckNodeCondition",
@@ -601,6 +600,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
 					{Name: "VolumeBinding"},
+					{Name: "VolumeZone"},
 				},
 				"ScorePlugin": {
 					{Name: "ImageLocality", Weight: 2},
@@ -672,7 +672,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 			}]
 		}`,
 			wantPredicates: sets.NewString(
-				"NoVolumeZoneConflict",
 				"CheckNodeMemoryPressure",
 				"CheckNodeDiskPressure",
 				"CheckNodePIDPressure",
@@ -704,6 +703,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
 					{Name: "VolumeBinding"},
+					{Name: "VolumeZone"},
 				},
 				"ScorePlugin": {
 					{Name: "ImageLocality", Weight: 2},
@@ -787,7 +787,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 			}]
 		}`,
 			wantPredicates: sets.NewString(
-				"NoVolumeZoneConflict",
 				"CheckNodeMemoryPressure",
 				"CheckNodeDiskPressure",
 				"CheckNodePIDPressure",
@@ -820,6 +819,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
 					{Name: "VolumeBinding"},
+					{Name: "VolumeZone"},
 				},
 				"ScorePlugin": {
 					{Name: "ImageLocality", Weight: 2},
@@ -904,7 +904,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 			}]
 		}`,
 			wantPredicates: sets.NewString(
-				"NoVolumeZoneConflict",
 				"CheckNodeMemoryPressure",
 				"CheckNodeDiskPressure",
 				"CheckNodePIDPressure",
@@ -938,6 +937,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
 					{Name: "VolumeBinding"},
+					{Name: "VolumeZone"},
 				},
 				"ScorePlugin": {
 					{Name: "ImageLocality", Weight: 2},
@@ -1021,7 +1021,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 			}]
 		}`,
 			wantPredicates: sets.NewString(
-				"NoVolumeZoneConflict",
 				"CheckNodeMemoryPressure",
 				"CheckNodeDiskPressure",
 				"CheckNodePIDPressure",
@@ -1056,6 +1055,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
 					{Name: "VolumeBinding"},
+					{Name: "VolumeZone"},
 				},
 				"ScorePlugin": {
 					{Name: "ImageLocality", Weight: 2},
@@ -1143,7 +1143,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 			}]
 		}`,
 			wantPredicates: sets.NewString(
-				"NoVolumeZoneConflict",
 				"CheckNodeMemoryPressure",
 				"CheckNodeDiskPressure",
 				"CheckNodePIDPressure",
@@ -1178,6 +1177,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
 					{Name: "VolumeBinding"},
+					{Name: "VolumeZone"},
 				},
 				"ScorePlugin": {
 					{Name: "ImageLocality", Weight: 2},
@@ -1212,6 +1212,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 		"NodeAffinity":       "MatchNodeSelector",
 		"VolumeBinding":      "CheckVolumeBinding",
 		"VolumeRestrictions": "NoDiskConflict",
+		"VolumeZone":         "NoVolumeZoneConflict",
 	}
 	scoreToPriorityMap := map[string]string{
 		"TaintToleration": "TaintTolerationPriority",
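Every hunk above follows one pattern: the legacy "NoVolumeZoneConflict" predicate name leaves wantPredicates, a "VolumeZone" entry joins the expected FilterPlugin set, and the final hunk teaches filterToPredicateMap the old name. A minimal, self-contained sketch of that translation, with a toy map standing in for the actual test helpers:

package main

import "fmt"

// filterToPredicate mirrors the mapping the compatibility test asserts:
// each framework filter plugin answers for one legacy predicate name.
var filterToPredicate = map[string]string{
	"VolumeRestrictions": "NoDiskConflict",
	"VolumeZone":         "NoVolumeZoneConflict",
}

func main() {
	// A v1 Policy that asked for NoVolumeZoneConflict should now see the
	// VolumeZone plugin enabled instead.
	for plugin, predicate := range filterToPredicate {
		fmt.Printf("policy predicate %q -> filter plugin %q\n", predicate, plugin)
	}
}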
"CheckNodeDiskPressure", "CheckNodePIDPressure", @@ -820,6 +819,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) { {Name: "VolumeRestrictions"}, {Name: "TaintToleration"}, {Name: "VolumeBinding"}, + {Name: "VolumeZone"}, }, "ScorePlugin": { {Name: "ImageLocality", Weight: 2}, @@ -904,7 +904,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) { }] }`, wantPredicates: sets.NewString( - "NoVolumeZoneConflict", "CheckNodeMemoryPressure", "CheckNodeDiskPressure", "CheckNodePIDPressure", @@ -938,6 +937,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) { {Name: "VolumeRestrictions"}, {Name: "TaintToleration"}, {Name: "VolumeBinding"}, + {Name: "VolumeZone"}, }, "ScorePlugin": { {Name: "ImageLocality", Weight: 2}, @@ -1021,7 +1021,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) { }] }`, wantPredicates: sets.NewString( - "NoVolumeZoneConflict", "CheckNodeMemoryPressure", "CheckNodeDiskPressure", "CheckNodePIDPressure", @@ -1056,6 +1055,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) { {Name: "VolumeRestrictions"}, {Name: "TaintToleration"}, {Name: "VolumeBinding"}, + {Name: "VolumeZone"}, }, "ScorePlugin": { {Name: "ImageLocality", Weight: 2}, @@ -1143,7 +1143,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) { }] }`, wantPredicates: sets.NewString( - "NoVolumeZoneConflict", "CheckNodeMemoryPressure", "CheckNodeDiskPressure", "CheckNodePIDPressure", @@ -1178,6 +1177,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) { {Name: "VolumeRestrictions"}, {Name: "TaintToleration"}, {Name: "VolumeBinding"}, + {Name: "VolumeZone"}, }, "ScorePlugin": { {Name: "ImageLocality", Weight: 2}, @@ -1212,6 +1212,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) { "NodeAffinity": "MatchNodeSelector", "VolumeBinding": "CheckVolumeBinding", "VolumeRestrictions": "NoDiskConflict", + "VolumeZone": "NoVolumeZoneConflict", } scoreToPriorityMap := map[string]string{ "TaintToleration": "TaintTolerationPriority", diff --git a/pkg/scheduler/framework/plugins/BUILD b/pkg/scheduler/framework/plugins/BUILD index 08cbaad83af..6c9b7196457 100644 --- a/pkg/scheduler/framework/plugins/BUILD +++ b/pkg/scheduler/framework/plugins/BUILD @@ -18,6 +18,7 @@ go_library( "//pkg/scheduler/framework/plugins/tainttoleration:go_default_library", "//pkg/scheduler/framework/plugins/volumebinding:go_default_library", "//pkg/scheduler/framework/plugins/volumerestrictions:go_default_library", + "//pkg/scheduler/framework/plugins/volumezone:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library", "//pkg/scheduler/internal/cache:go_default_library", "//pkg/scheduler/volumebinder:go_default_library", @@ -48,6 +49,7 @@ filegroup( "//pkg/scheduler/framework/plugins/tainttoleration:all-srcs", "//pkg/scheduler/framework/plugins/volumebinding:all-srcs", "//pkg/scheduler/framework/plugins/volumerestrictions:all-srcs", + "//pkg/scheduler/framework/plugins/volumezone:all-srcs", ], tags = ["automanaged"], visibility = ["//visibility:public"], diff --git a/pkg/scheduler/framework/plugins/default_registry.go b/pkg/scheduler/framework/plugins/default_registry.go index c8c773ee289..69833e19cad 100644 --- a/pkg/scheduler/framework/plugins/default_registry.go +++ b/pkg/scheduler/framework/plugins/default_registry.go @@ -34,6 +34,7 @@ import ( "k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions" + "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone" 
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" "k8s.io/kubernetes/pkg/scheduler/volumebinder" @@ -57,6 +58,10 @@ type RegistryArgs struct { // This is the registry that Kubernetes default scheduler uses. A scheduler that // runs custom plugins, can pass a different Registry when initializing the scheduler. func NewDefaultRegistry(args *RegistryArgs) framework.Registry { + pvInfo := &predicates.CachedPersistentVolumeInfo{PersistentVolumeLister: args.PVLister} + pvcInfo := &predicates.CachedPersistentVolumeClaimInfo{PersistentVolumeClaimLister: args.PVCLister} + classInfo := &predicates.CachedStorageClassInfo{StorageClassLister: args.StorageClassLister} + return framework.Registry{ imagelocality.Name: imagelocality.New, tainttoleration.Name: tainttoleration.New, @@ -68,6 +73,9 @@ func NewDefaultRegistry(args *RegistryArgs) framework.Registry { return volumebinding.NewFromVolumeBinder(args.VolumeBinder), nil }, volumerestrictions.Name: volumerestrictions.New, + volumezone.Name: func(_ *runtime.Unknown, _ framework.FrameworkHandle) (framework.Plugin, error) { + return volumezone.New(pvInfo, pvcInfo, classInfo), nil + }, } } @@ -130,6 +138,11 @@ func NewDefaultConfigProducerRegistry() *ConfigProducerRegistry { plugins.Filter = appendToPluginSet(plugins.Filter, volumerestrictions.Name, nil) return }) + registry.RegisterPredicate(predicates.NoVolumeZoneConflictPred, + func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { + plugins.Filter = appendToPluginSet(plugins.Filter, volumezone.Name, nil) + return + }) registry.RegisterPriority(priorities.TaintTolerationPriority, func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) { diff --git a/pkg/scheduler/framework/plugins/volumezone/BUILD b/pkg/scheduler/framework/plugins/volumezone/BUILD new file mode 100644 index 00000000000..709809c0e38 --- /dev/null +++ b/pkg/scheduler/framework/plugins/volumezone/BUILD @@ -0,0 +1,43 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["volume_zone.go"], + importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone", + visibility = ["//visibility:public"], + deps = [ + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/framework/plugins/migration:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["volume_zone_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/scheduler/algorithm/predicates:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/nodeinfo:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/api/storage/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/pkg/scheduler/framework/plugins/volumezone/volume_zone.go b/pkg/scheduler/framework/plugins/volumezone/volume_zone.go new file mode 100644 index 
diff --git a/pkg/scheduler/framework/plugins/volumezone/volume_zone_test.go b/pkg/scheduler/framework/plugins/volumezone/volume_zone_test.go
new file mode 100644
index 00000000000..6e6442d2536
--- /dev/null
+++ b/pkg/scheduler/framework/plugins/volumezone/volume_zone_test.go
@@ -0,0 +1,349 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package volumezone
+
+import (
+	"reflect"
+	"testing"
+
+	"k8s.io/api/core/v1"
+	storagev1 "k8s.io/api/storage/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+)
+
+func createPodWithVolume(pod, pv, pvc string) *v1.Pod {
+	return &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{Name: pod, Namespace: "default"},
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					Name: pv,
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: pvc,
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
+func TestSingleZone(t *testing.T) {
+	pvInfo := predicates.FakePersistentVolumeInfo{
+		{
+			ObjectMeta: metav1.ObjectMeta{Name: "Vol_1", Labels: map[string]string{v1.LabelZoneFailureDomain: "us-west1-a"}},
+		},
+		{
+			ObjectMeta: metav1.ObjectMeta{Name: "Vol_2", Labels: map[string]string{v1.LabelZoneRegion: "us-west1-b", "uselessLabel": "none"}},
+		},
+		{
+			ObjectMeta: metav1.ObjectMeta{Name: "Vol_3", Labels: map[string]string{v1.LabelZoneRegion: "us-west1-c"}},
+		},
+	}
+
+	pvcInfo := predicates.FakePersistentVolumeClaimInfo{
+		{
+			ObjectMeta: metav1.ObjectMeta{Name: "PVC_1", Namespace: "default"},
+			Spec:       v1.PersistentVolumeClaimSpec{VolumeName: "Vol_1"},
+		},
+		{
+			ObjectMeta: metav1.ObjectMeta{Name: "PVC_2", Namespace: "default"},
+			Spec:       v1.PersistentVolumeClaimSpec{VolumeName: "Vol_2"},
+		},
+		{
+			ObjectMeta: metav1.ObjectMeta{Name: "PVC_3", Namespace: "default"},
+			Spec:       v1.PersistentVolumeClaimSpec{VolumeName: "Vol_3"},
+		},
+		{
+			ObjectMeta: metav1.ObjectMeta{Name: "PVC_4", Namespace: "default"},
+			Spec:       v1.PersistentVolumeClaimSpec{VolumeName: "Vol_not_exist"},
+		},
+	}
+
+	tests := []struct {
+		name       string
+		Pod        *v1.Pod
+		Node       *v1.Node
+		wantStatus *framework.Status
+	}{
+		{
+			name: "pod without volume",
+			Pod: &v1.Pod{
+				ObjectMeta: metav1.ObjectMeta{Name: "pod_1", Namespace: "default"},
+			},
+			Node: &v1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:   "host1",
+					Labels: map[string]string{v1.LabelZoneFailureDomain: "us-west1-a"},
+				},
+			},
+		},
+		{
+			name: "node without labels",
+			Pod:  createPodWithVolume("pod_1", "vol_1", "PVC_1"),
+			Node: &v1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "host1",
+				},
+			},
+		},
+		{
+			name: "label zone failure domain matched",
+			Pod:  createPodWithVolume("pod_1", "vol_1", "PVC_1"),
+			Node: &v1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:   "host1",
+					Labels: map[string]string{v1.LabelZoneFailureDomain: "us-west1-a", "uselessLabel": "none"},
+				},
+			},
+		},
+		{
+			name: "label zone region matched",
+			Pod:  createPodWithVolume("pod_1", "vol_1", "PVC_2"),
+			Node: &v1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:   "host1",
+					Labels: map[string]string{v1.LabelZoneRegion: "us-west1-b", "uselessLabel": "none"},
+				},
+			},
+		},
+		{
+			name: "label zone region failed match",
+			Pod:  createPodWithVolume("pod_1", "vol_1", "PVC_2"),
+			Node: &v1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:   "host1",
+					Labels: map[string]string{v1.LabelZoneRegion: "no_us-west1-b", "uselessLabel": "none"},
+				},
+			},
+			wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, predicates.ErrVolumeZoneConflict.GetReason()),
+		},
+		{
+			name: "label zone failure domain failed match",
+			Pod:  createPodWithVolume("pod_1", "vol_1", "PVC_1"),
+			Node: &v1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:   "host1",
+					Labels: map[string]string{v1.LabelZoneFailureDomain: "no_us-west1-a", "uselessLabel": "none"},
+				},
+			},
+			wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, predicates.ErrVolumeZoneConflict.GetReason()),
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			node := &schedulernodeinfo.NodeInfo{}
+			node.SetNode(test.Node)
+			p := New(pvInfo, pvcInfo, nil)
+			gotStatus := p.(framework.FilterPlugin).Filter(nil, test.Pod, node)
+			if !reflect.DeepEqual(gotStatus, test.wantStatus) {
+				t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
+			}
+		})
+	}
+}
"uselessLabel": "none"}, + }, + }, + wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, predicates.ErrVolumeZoneConflict.GetReason()), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + node := &schedulernodeinfo.NodeInfo{} + node.SetNode(test.Node) + p := New(pvInfo, pvcInfo, nil) + gotStatus := p.(framework.FilterPlugin).Filter(nil, test.Pod, node) + if !reflect.DeepEqual(gotStatus, test.wantStatus) { + t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus) + } + }) + } +} + +func TestMultiZone(t *testing.T) { + pvInfo := predicates.FakePersistentVolumeInfo{ + { + ObjectMeta: metav1.ObjectMeta{Name: "Vol_1", Labels: map[string]string{v1.LabelZoneFailureDomain: "us-west1-a"}}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "Vol_2", Labels: map[string]string{v1.LabelZoneFailureDomain: "us-west1-b", "uselessLabel": "none"}}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "Vol_3", Labels: map[string]string{v1.LabelZoneFailureDomain: "us-west1-c__us-west1-a"}}, + }, + } + + pvcInfo := predicates.FakePersistentVolumeClaimInfo{ + { + ObjectMeta: metav1.ObjectMeta{Name: "PVC_1", Namespace: "default"}, + Spec: v1.PersistentVolumeClaimSpec{VolumeName: "Vol_1"}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "PVC_2", Namespace: "default"}, + Spec: v1.PersistentVolumeClaimSpec{VolumeName: "Vol_2"}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "PVC_3", Namespace: "default"}, + Spec: v1.PersistentVolumeClaimSpec{VolumeName: "Vol_3"}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "PVC_4", Namespace: "default"}, + Spec: v1.PersistentVolumeClaimSpec{VolumeName: "Vol_not_exist"}, + }, + } + + tests := []struct { + name string + Pod *v1.Pod + Node *v1.Node + wantStatus *framework.Status + }{ + { + name: "node without labels", + Pod: createPodWithVolume("pod_1", "Vol_3", "PVC_3"), + Node: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "host1", + }, + }, + }, + { + name: "label zone failure domain matched", + Pod: createPodWithVolume("pod_1", "Vol_3", "PVC_3"), + Node: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "host1", + Labels: map[string]string{v1.LabelZoneFailureDomain: "us-west1-a", "uselessLabel": "none"}, + }, + }, + }, + { + name: "label zone failure domain failed match", + Pod: createPodWithVolume("pod_1", "vol_1", "PVC_1"), + Node: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "host1", + Labels: map[string]string{v1.LabelZoneFailureDomain: "us-west1-b", "uselessLabel": "none"}, + }, + }, + wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, predicates.ErrVolumeZoneConflict.GetReason()), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + node := &schedulernodeinfo.NodeInfo{} + node.SetNode(test.Node) + p := New(pvInfo, pvcInfo, nil) + gotStatus := p.(framework.FilterPlugin).Filter(nil, test.Pod, node) + if !reflect.DeepEqual(gotStatus, test.wantStatus) { + t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus) + } + }) + } +} + +func TestWithBinding(t *testing.T) { + var ( + modeWait = storagev1.VolumeBindingWaitForFirstConsumer + + class0 = "Class_0" + classWait = "Class_Wait" + classImmediate = "Class_Immediate" + ) + + classInfo := predicates.FakeStorageClassInfo{ + { + ObjectMeta: metav1.ObjectMeta{Name: classImmediate}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: classWait}, + VolumeBindingMode: &modeWait, + }, + } + + pvInfo := predicates.FakePersistentVolumeInfo{ + { + ObjectMeta: metav1.ObjectMeta{Name: "Vol_1", Labels: 
+
+func TestWithBinding(t *testing.T) {
+	var (
+		modeWait = storagev1.VolumeBindingWaitForFirstConsumer
+
+		class0         = "Class_0"
+		classWait      = "Class_Wait"
+		classImmediate = "Class_Immediate"
+	)
+
+	classInfo := predicates.FakeStorageClassInfo{
+		{
+			ObjectMeta: metav1.ObjectMeta{Name: classImmediate},
+		},
+		{
+			ObjectMeta:        metav1.ObjectMeta{Name: classWait},
+			VolumeBindingMode: &modeWait,
+		},
+	}
+
+	pvInfo := predicates.FakePersistentVolumeInfo{
+		{
+			ObjectMeta: metav1.ObjectMeta{Name: "Vol_1", Labels: map[string]string{v1.LabelZoneFailureDomain: "us-west1-a"}},
+		},
+	}
+
+	pvcInfo := predicates.FakePersistentVolumeClaimInfo{
+		{
+			ObjectMeta: metav1.ObjectMeta{Name: "PVC_1", Namespace: "default"},
+			Spec:       v1.PersistentVolumeClaimSpec{VolumeName: "Vol_1"},
+		},
+		{
+			ObjectMeta: metav1.ObjectMeta{Name: "PVC_NoSC", Namespace: "default"},
+			Spec:       v1.PersistentVolumeClaimSpec{StorageClassName: &class0},
+		},
+		{
+			ObjectMeta: metav1.ObjectMeta{Name: "PVC_EmptySC", Namespace: "default"},
+		},
+		{
+			ObjectMeta: metav1.ObjectMeta{Name: "PVC_WaitSC", Namespace: "default"},
+			Spec:       v1.PersistentVolumeClaimSpec{StorageClassName: &classWait},
+		},
+		{
+			ObjectMeta: metav1.ObjectMeta{Name: "PVC_ImmediateSC", Namespace: "default"},
+			Spec:       v1.PersistentVolumeClaimSpec{StorageClassName: &classImmediate},
+		},
+	}
+
+	testNode := &v1.Node{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:   "host1",
+			Labels: map[string]string{v1.LabelZoneFailureDomain: "us-west1-a", "uselessLabel": "none"},
+		},
+	}
+
+	tests := []struct {
+		name       string
+		Pod        *v1.Pod
+		Node       *v1.Node
+		wantStatus *framework.Status
+	}{
+		{
+			name: "label zone failure domain matched",
+			Pod:  createPodWithVolume("pod_1", "vol_1", "PVC_1"),
+			Node: testNode,
+		},
+		{
+			name:       "unbound volume empty storage class",
+			Pod:        createPodWithVolume("pod_1", "vol_1", "PVC_EmptySC"),
+			Node:       testNode,
+			wantStatus: framework.NewStatus(framework.Error, "PersistentVolumeClaim was not found: \"PVC_EmptySC\""),
+		},
+		{
+			name:       "unbound volume no storage class",
+			Pod:        createPodWithVolume("pod_1", "vol_1", "PVC_NoSC"),
+			Node:       testNode,
+			wantStatus: framework.NewStatus(framework.Error, "PersistentVolumeClaim was not found: \"PVC_NoSC\""),
+		},
+		{
+			name:       "unbound volume immediate binding mode",
+			Pod:        createPodWithVolume("pod_1", "vol_1", "PVC_ImmediateSC"),
+			Node:       testNode,
+			wantStatus: framework.NewStatus(framework.Error, "VolumeBindingMode not set for StorageClass \"Class_Immediate\""),
+		},
+		{
+			name: "unbound volume wait binding mode",
+			Pod:  createPodWithVolume("pod_1", "vol_1", "PVC_WaitSC"),
+			Node: testNode,
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			node := &schedulernodeinfo.NodeInfo{}
+			node.SetNode(test.Node)
+			p := New(pvInfo, pvcInfo, classInfo)
+			gotStatus := p.(framework.FilterPlugin).Filter(nil, test.Pod, node)
+			if !reflect.DeepEqual(gotStatus, test.wantStatus) {
+				t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
+			}
+		})
+	}
+}
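TestWithBinding pins the behavior for unbound claims: a claim whose StorageClass defers binding (WaitForFirstConsumer) passes through untouched, while a class with no VolumeBindingMode set produces a hard error. A simplified, self-contained sketch of just that storage-class branch, with hypothetical helper and type names; the real predicate also handles missing claims and volumes, as the other cases above exercise:

package main

import "fmt"

type bindingMode string

const waitForFirstConsumer bindingMode = "WaitForFirstConsumer"

type storageClass struct {
	name string
	mode *bindingMode // nil when VolumeBindingMode is unset
}

// deferToVolumeBinding sketches the decision the tests assert: an unbound
// claim is only tolerated when its class waits for the first consumer.
func deferToVolumeBinding(sc storageClass) (bool, error) {
	if sc.mode == nil {
		return false, fmt.Errorf("VolumeBindingMode not set for StorageClass %q", sc.name)
	}
	return *sc.mode == waitForFirstConsumer, nil
}

func main() {
	wait := waitForFirstConsumer
	fmt.Println(deferToVolumeBinding(storageClass{name: "Class_Wait", mode: &wait}))    // true <nil>
	fmt.Println(deferToVolumeBinding(storageClass{name: "Class_Immediate", mode: nil})) // false, VolumeBindingMode not set ...
}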
}, "ScorePlugin": { {Name: "ImageLocality", Weight: 1},