From 6329b17d2f6fbf834d58dbddd3ff95776c55fd30 Mon Sep 17 00:00:00 2001
From: Patrick Ohly
Date: Tue, 25 Feb 2020 11:10:27 +0100
Subject: [PATCH] volume scheduler: introduce special string type

This makes it possible to search for the special strings more easily
(https://github.com/kubernetes/kubernetes/pull/88230#discussion_r382367043).
---
 .../volume/scheduling/scheduler_binder.go     | 19 +++++++++---
 .../scheduling/scheduler_binder_fake.go       |  4 +--
 .../scheduling/scheduler_binder_test.go       | 30 +++++++++----------
 pkg/scheduler/core/generic_scheduler_test.go  |  4 +--
 .../plugins/volumebinding/volume_binding.go   |  2 +-
 .../volumebinding/volume_binding_test.go      |  8 ++---
 pkg/scheduler/scheduler_test.go               |  6 ++--
 7 files changed, 42 insertions(+), 31 deletions(-)

diff --git a/pkg/controller/volume/scheduling/scheduler_binder.go b/pkg/controller/volume/scheduling/scheduler_binder.go
index 0fb249813e0..352816764a5 100644
--- a/pkg/controller/volume/scheduling/scheduler_binder.go
+++ b/pkg/controller/volume/scheduling/scheduler_binder.go
@@ -45,11 +45,22 @@ import (
 	volumeutil "k8s.io/kubernetes/pkg/volume/util"
 )
 
+// ConflictReason is used for the special strings which explain why
+// volume binding is impossible for a node.
+type ConflictReason string
+
+// ConflictReasons contains all reasons that explain why volume binding is impossible for a node.
+type ConflictReasons []ConflictReason
+
+func (reasons ConflictReasons) Len() int           { return len(reasons) }
+func (reasons ConflictReasons) Less(i, j int) bool { return reasons[i] < reasons[j] }
+func (reasons ConflictReasons) Swap(i, j int)      { reasons[i], reasons[j] = reasons[j], reasons[i] }
+
 const (
 	// ErrReasonBindConflict is used for VolumeBindingNoMatch predicate error.
-	ErrReasonBindConflict = "node(s) didn't find available persistent volumes to bind"
+	ErrReasonBindConflict ConflictReason = "node(s) didn't find available persistent volumes to bind"
 	// ErrReasonNodeConflict is used for VolumeNodeAffinityConflict predicate error.
-	ErrReasonNodeConflict = "node(s) had volume node affinity conflict"
+	ErrReasonNodeConflict ConflictReason = "node(s) had volume node affinity conflict"
 )
 
 // InTreeToCSITranslator contains methods required to check migratable status
@@ -94,7 +105,7 @@ type SchedulerVolumeBinder interface {
 	// (currently) not usable for the pod.
 	//
 	// This function is called by the volume binding scheduler predicate and can be called in parallel
-	FindPodVolumes(pod *v1.Pod, node *v1.Node) (reasons []string, err error)
+	FindPodVolumes(pod *v1.Pod, node *v1.Node) (reasons ConflictReasons, err error)
 
 	// AssumePodVolumes will:
 	// 1. Take the PV matches for unbound PVCs and update the PV cache assuming
@@ -173,7 +184,7 @@ func (b *volumeBinder) GetBindingsCache() PodBindingCache {
 // FindPodVolumes caches the matching PVs and PVCs to provision per node in podBindingCache.
 // This method intentionally takes in a *v1.Node object instead of using volumebinder.nodeInformer.
 // That's necessary because some operations will need to pass in to the predicate fake node objects.
-func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (reasons []string, err error) {
+func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (reasons ConflictReasons, err error) {
 	podName := getPodName(pod)
 
 	// Warning: Below log needs high verbosity as it can be printed several times (#60933).
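The pattern above is small enough to try outside the scheduler. The sketch below is an illustration, not code from this patch (the package layout and main function are mine); it shows why the dedicated type helps: the compiler forces an explicit string(...) conversion wherever a plain string is required, so every use of the special values stays easy to find, and implementing sort.Interface lets a ConflictReasons slice be ordered with sort.Sort:

	package main

	import (
		"fmt"
		"sort"
	)

	// ConflictReason mirrors the typed string introduced by the patch.
	type ConflictReason string

	// ConflictReasons implements sort.Interface, as in the patch, so a
	// slice of reasons can be sorted without converting to []string.
	type ConflictReasons []ConflictReason

	func (reasons ConflictReasons) Len() int           { return len(reasons) }
	func (reasons ConflictReasons) Less(i, j int) bool { return reasons[i] < reasons[j] }
	func (reasons ConflictReasons) Swap(i, j int)      { reasons[i], reasons[j] = reasons[j], reasons[i] }

	const (
		ErrReasonBindConflict ConflictReason = "node(s) didn't find available persistent volumes to bind"
		ErrReasonNodeConflict ConflictReason = "node(s) had volume node affinity conflict"
	)

	func main() {
		reasons := ConflictReasons{ErrReasonNodeConflict, ErrReasonBindConflict}
		sort.Sort(reasons) // works because ConflictReasons implements sort.Interface
		for _, reason := range reasons {
			// A plain string is obtained only via an explicit, greppable conversion.
			fmt.Println(string(reason))
		}
	}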
diff --git a/pkg/controller/volume/scheduling/scheduler_binder_fake.go b/pkg/controller/volume/scheduling/scheduler_binder_fake.go
index fa64d53c8ad..45d66969401 100644
--- a/pkg/controller/volume/scheduling/scheduler_binder_fake.go
+++ b/pkg/controller/volume/scheduling/scheduler_binder_fake.go
@@ -21,7 +21,7 @@ import "k8s.io/api/core/v1"
 // FakeVolumeBinderConfig holds configurations for fake volume binder.
 type FakeVolumeBinderConfig struct {
 	AllBound    bool
-	FindReasons []string
+	FindReasons ConflictReasons
 	FindErr     error
 	AssumeErr   error
 	BindErr     error
@@ -43,7 +43,7 @@ type FakeVolumeBinder struct {
 }
 
 // FindPodVolumes implements SchedulerVolumeBinder.FindPodVolumes.
-func (b *FakeVolumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (reasons []string, err error) {
+func (b *FakeVolumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (reasons ConflictReasons, err error) {
 	return b.config.FindReasons, b.config.FindErr
 }
 
diff --git a/pkg/controller/volume/scheduling/scheduler_binder_test.go b/pkg/controller/volume/scheduling/scheduler_binder_test.go
index b502079027c..bbf6f033a72 100644
--- a/pkg/controller/volume/scheduling/scheduler_binder_test.go
+++ b/pkg/controller/volume/scheduling/scheduler_binder_test.go
@@ -739,7 +739,7 @@ func addProvisionAnn(pvc *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
 // reasonNames pretty-prints a list of reasons with variable names in
 // case of a test failure because that is easier to read than the full
 // strings.
-func reasonNames(reasons []string) string {
+func reasonNames(reasons ConflictReasons) string {
 	var varNames []string
 	for _, reason := range reasons {
 		switch reason {
@@ -748,16 +748,16 @@ func reasonNames(reasons []string) string {
 		case ErrReasonNodeConflict:
 			varNames = append(varNames, "ErrReasonNodeConflict")
 		default:
-			varNames = append(varNames, reason)
+			varNames = append(varNames, string(reason))
 		}
 	}
 	return fmt.Sprintf("%v", varNames)
 }
 
-func checkReasons(t *testing.T, actual, expected []string) {
+func checkReasons(t *testing.T, actual, expected ConflictReasons) {
 	equal := len(actual) == len(expected)
-	sort.Strings(actual)
-	sort.Strings(expected)
+	sort.Sort(actual)
+	sort.Sort(expected)
 	if equal {
 		for i, reason := range actual {
 			if reason != expected[i] {
@@ -785,7 +785,7 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
 		expectedBindings []*bindingInfo
 
 		// Expected return values
-		reasons    []string
+		reasons    ConflictReasons
 		shouldFail bool
 	}
 	scenarios := map[string]scenarioType{
@@ -821,7 +821,7 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
 		"unbound-pvc,pv-different-node": {
 			podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
 			pvs:     []*v1.PersistentVolume{pvNode2},
-			reasons: []string{ErrReasonBindConflict},
+			reasons: ConflictReasons{ErrReasonBindConflict},
 		},
 		"two-unbound-pvcs": {
 			podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, unboundPVC2},
@@ -837,7 +837,7 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
 			podPVCs:          []*v1.PersistentVolumeClaim{unboundPVC, unboundPVC2},
 			pvs:              []*v1.PersistentVolume{pvNode1a},
 			expectedBindings: []*bindingInfo{makeBinding(unboundPVC, pvNode1a)},
-			reasons:          []string{ErrReasonBindConflict},
+			reasons:          ConflictReasons{ErrReasonBindConflict},
 		},
 		"one-bound,one-unbound": {
 			podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, boundPVC},
@@ -847,7 +847,7 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
 		"one-bound,one-unbound,no-match": {
 			podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, boundPVC},
 			pvs:     []*v1.PersistentVolume{pvBound, pvNode2},
-			reasons: []string{ErrReasonBindConflict},
+			reasons: ConflictReasons{ErrReasonBindConflict},
 		},
 		"one-prebound,one-unbound": {
 			podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, preboundPVC},
@@ -861,7 +861,7 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
 		"immediate-bound-pvc-wrong-node": {
 			podPVCs: []*v1.PersistentVolumeClaim{immediateBoundPVC},
 			pvs:     []*v1.PersistentVolume{pvBoundImmediateNode2},
-			reasons: []string{ErrReasonNodeConflict},
+			reasons: ConflictReasons{ErrReasonNodeConflict},
 		},
 		"immediate-unbound-pvc": {
 			podPVCs: []*v1.PersistentVolumeClaim{immediateUnboundPVC},
@@ -940,7 +940,7 @@ func TestFindPodVolumesWithProvisioning(t *testing.T) {
 		expectedProvisions []*v1.PersistentVolumeClaim
 
 		// Expected return values
-		reasons    []string
+		reasons    ConflictReasons
 		shouldFail bool
 	}
 	scenarios := map[string]scenarioType{
@@ -975,11 +975,11 @@ func TestFindPodVolumesWithProvisioning(t *testing.T) {
 		},
 		"invalid-provisioner": {
 			podPVCs: []*v1.PersistentVolumeClaim{noProvisionerPVC},
-			reasons: []string{ErrReasonBindConflict},
+			reasons: ConflictReasons{ErrReasonBindConflict},
 		},
 		"volume-topology-unsatisfied": {
 			podPVCs: []*v1.PersistentVolumeClaim{topoMismatchPVC},
-			reasons: []string{ErrReasonBindConflict},
+			reasons: ConflictReasons{ErrReasonBindConflict},
 		},
 	}
 
@@ -1047,7 +1047,7 @@ func TestFindPodVolumesWithCSIMigration(t *testing.T) {
 		initCSINodes []*storagev1.CSINode
 
 		// Expected return values
-		reasons    []string
+		reasons    ConflictReasons
 		shouldFail bool
 	}
 	scenarios := map[string]scenarioType{
@@ -1073,7 +1073,7 @@ func TestFindPodVolumesWithCSIMigration(t *testing.T) {
 			pvs:          []*v1.PersistentVolume{migrationPVBound},
 			initNodes:    []*v1.Node{node1Zone2},
 			initCSINodes: []*storagev1.CSINode{csiNode1Migrated},
-			reasons:      []string{ErrReasonNodeConflict},
+			reasons:      ConflictReasons{ErrReasonNodeConflict},
 		},
 	}
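The switch from sort.Strings to sort.Sort in checkReasons is the one place the new type costs anything: sort.Strings only accepts []string, so the typed slice has to bring its own sort.Interface implementation. A standalone sketch of that comparison logic, assuming the types from the sketch above (the helper is illustrative, not the test code itself):

	// equalReasons reports whether two reason lists contain the same
	// elements, ignoring order. Both arguments are sorted in place.
	func equalReasons(actual, expected ConflictReasons) bool {
		if len(actual) != len(expected) {
			return false
		}
		sort.Sort(actual) // sort.Strings would not accept ConflictReasons
		sort.Sort(expected)
		for i, reason := range actual {
			if reason != expected[i] {
				return false
			}
		}
		return true
	}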
diff --git a/pkg/scheduler/core/generic_scheduler_test.go b/pkg/scheduler/core/generic_scheduler_test.go
index b418c312bca..c8bc97051c2 100644
--- a/pkg/scheduler/core/generic_scheduler_test.go
+++ b/pkg/scheduler/core/generic_scheduler_test.go
@@ -1999,8 +1999,8 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
 			name: "ErrVolume... errors should not be tried as it indicates that the pod is unschedulable due to no matching volumes for pod on node",
 			nodesStatuses: framework.NodeToStatusMap{
 				"machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, volumezone.ErrReasonConflict),
-				"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, volumescheduling.ErrReasonNodeConflict),
-				"machine3": framework.NewStatus(framework.UnschedulableAndUnresolvable, volumescheduling.ErrReasonBindConflict),
+				"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, string(volumescheduling.ErrReasonNodeConflict)),
+				"machine3": framework.NewStatus(framework.UnschedulableAndUnresolvable, string(volumescheduling.ErrReasonBindConflict)),
 			},
 			expected: map[string]bool{"machine4": true},
 		},
diff --git a/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go b/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go
index 2beb0287b00..1f8e254b94b 100644
--- a/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go
+++ b/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go
@@ -81,7 +81,7 @@ func (pl *VolumeBinding) Filter(ctx context.Context, cs *framework.CycleState, p
 	if len(reasons) > 0 {
 		status := framework.NewStatus(framework.UnschedulableAndUnresolvable)
 		for _, reason := range reasons {
-			status.AppendReason(reason)
+			status.AppendReason(string(reason))
 		}
 		return status
 	}
diff --git a/pkg/scheduler/framework/plugins/volumebinding/volume_binding_test.go b/pkg/scheduler/framework/plugins/volumebinding/volume_binding_test.go
index 5128e4eb564..c4f1631c794 100644
--- a/pkg/scheduler/framework/plugins/volumebinding/volume_binding_test.go
+++ b/pkg/scheduler/framework/plugins/volumebinding/volume_binding_test.go
@@ -67,18 +67,18 @@ func TestVolumeBinding(t *testing.T) {
 			pod:  &v1.Pod{Spec: volState},
 			node: &v1.Node{},
 			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
-				FindReasons: []string{volumescheduling.ErrReasonBindConflict},
+				FindReasons: []volumescheduling.ConflictReason{volumescheduling.ErrReasonBindConflict},
 			},
-			wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, volumescheduling.ErrReasonBindConflict),
+			wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, string(volumescheduling.ErrReasonBindConflict)),
 		},
 		{
 			name: "bound and unbound unsatisfied",
 			pod:  &v1.Pod{Spec: volState},
 			node: &v1.Node{},
 			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
-				FindReasons: []string{volumescheduling.ErrReasonBindConflict, volumescheduling.ErrReasonNodeConflict},
+				FindReasons: []volumescheduling.ConflictReason{volumescheduling.ErrReasonBindConflict, volumescheduling.ErrReasonNodeConflict},
 			},
-			wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, volumescheduling.ErrReasonBindConflict, volumescheduling.ErrReasonNodeConflict),
+			wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, string(volumescheduling.ErrReasonBindConflict), string(volumescheduling.ErrReasonNodeConflict)),
 		},
 		{
 			name: "unbound/found matches/bind succeeds",
diff --git a/pkg/scheduler/scheduler_test.go b/pkg/scheduler/scheduler_test.go
index ec39968faae..b21edc8c1ed 100644
--- a/pkg/scheduler/scheduler_test.go
+++ b/pkg/scheduler/scheduler_test.go
@@ -911,7 +911,7 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
 			name: "bound/invalid pv affinity",
 			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
 				AllBound:    true,
-				FindReasons: []string{volumescheduling.ErrReasonNodeConflict},
+				FindReasons: volumescheduling.ConflictReasons{volumescheduling.ErrReasonNodeConflict},
 			},
 			eventReason: "FailedScheduling",
 			expectError: makePredicateError("1 node(s) had volume node affinity conflict"),
@@ -919,7 +919,7 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
 		{
 			name: "unbound/no matches",
 			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
-				FindReasons: []string{volumescheduling.ErrReasonBindConflict},
+				FindReasons: volumescheduling.ConflictReasons{volumescheduling.ErrReasonBindConflict},
 			},
 			eventReason: "FailedScheduling",
 			expectError: makePredicateError("1 node(s) didn't find available persistent volumes to bind"),
@@ -927,7 +927,7 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
 		{
 			name: "bound and unbound unsatisfied",
 			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
-				FindReasons: []string{volumescheduling.ErrReasonBindConflict, volumescheduling.ErrReasonNodeConflict},
+				FindReasons: volumescheduling.ConflictReasons{volumescheduling.ErrReasonBindConflict, volumescheduling.ErrReasonNodeConflict},
 			},
 			eventReason: "FailedScheduling",
 			expectError: makePredicateError("1 node(s) didn't find available persistent volumes to bind, 1 node(s) had volume node affinity conflict"),
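The remaining call sites show the flip side of the typed string: APIs that genuinely want a plain string, such as framework.NewStatus and Status.AppendReason, now require an explicit conversion. A minimal sketch of that boundary, assuming the ConflictReasons type from the first sketch (the status type below is a stand-in written for illustration, not the scheduler framework's):

	// status is a stand-in for the framework status type, which stores
	// its reasons as plain strings.
	type status struct {
		reasons []string
	}

	func (s *status) AppendReason(reason string) {
		s.reasons = append(s.reasons, reason)
	}

	// reportConflicts mimics what the Filter plugin does above: typed
	// reasons cross the API boundary only through string(...) conversions.
	func reportConflicts(reasons ConflictReasons) *status {
		s := &status{}
		for _, reason := range reasons {
			s.AppendReason(string(reason)) // explicit conversion required
		}
		return s
	}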