diff --git a/pkg/controller/volume/scheduling/scheduler_binder.go b/pkg/controller/volume/scheduling/scheduler_binder.go
index 564e26eef37..0fb249813e0 100644
--- a/pkg/controller/volume/scheduling/scheduler_binder.go
+++ b/pkg/controller/volume/scheduling/scheduler_binder.go
@@ -45,6 +45,13 @@ import (
 	volumeutil "k8s.io/kubernetes/pkg/volume/util"
 )
 
+const (
+	// ErrReasonBindConflict is used for VolumeBindingNoMatch predicate error.
+	ErrReasonBindConflict = "node(s) didn't find available persistent volumes to bind"
+	// ErrReasonNodeConflict is used for VolumeNodeAffinityConflict predicate error.
+	ErrReasonNodeConflict = "node(s) had volume node affinity conflict"
+)
+
 // InTreeToCSITranslator contains methods required to check migratable status
 // and perform translations from InTree PV's to CSI
 type InTreeToCSITranslator interface {
@@ -83,11 +90,11 @@ type SchedulerVolumeBinder interface {
 	// If a PVC is bound, it checks if the PV's NodeAffinity matches the Node.
 	// Otherwise, it tries to find an available PV to bind to the PVC.
 	//
-	// It returns true if all of the Pod's PVCs have matching PVs or can be dynamic provisioned,
-	// and returns true if bound volumes satisfy the PV NodeAffinity.
+	// It returns an error when something went wrong or a list of reasons why the node is
+	// (currently) not usable for the pod.
 	//
 	// This function is called by the volume binding scheduler predicate and can be called in parallel
-	FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolumesSatisified, boundVolumesSatisfied bool, err error)
+	FindPodVolumes(pod *v1.Pod, node *v1.Node) (reasons []string, err error)
 
 	// AssumePodVolumes will:
 	// 1. Take the PV matches for unbound PVCs and update the PV cache assuming
@@ -166,15 +173,29 @@ func (b *volumeBinder) GetBindingsCache() PodBindingCache {
 // FindPodVolumes caches the matching PVs and PVCs to provision per node in podBindingCache.
 // This method intentionally takes in a *v1.Node object instead of using volumebinder.nodeInformer.
 // That's necessary because some operations will need to pass in to the predicate fake node objects.
-func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolumesSatisfied, boundVolumesSatisfied bool, err error) {
+func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (reasons []string, err error) {
 	podName := getPodName(pod)
 
 	// Warning: Below log needs high verbosity as it can be printed several times (#60933).
 	klog.V(5).Infof("FindPodVolumes for pod %q, node %q", podName, node.Name)
 
-	// Initialize to true for pods that don't have volumes
-	unboundVolumesSatisfied = true
-	boundVolumesSatisfied = true
+	// Initialize to true for pods that don't have volumes. These
+	// booleans get translated into reason strings when the function
+	// returns without an error.
+	unboundVolumesSatisfied := true
+	boundVolumesSatisfied := true
+	defer func() {
+		if err != nil {
+			return
+		}
+		if !boundVolumesSatisfied {
+			reasons = append(reasons, ErrReasonNodeConflict)
+		}
+		if !unboundVolumesSatisfied {
+			reasons = append(reasons, ErrReasonBindConflict)
+		}
+	}()
+
 	start := time.Now()
 	defer func() {
 		metrics.VolumeSchedulingStageLatency.WithLabelValues("predicate").Observe(time.Since(start).Seconds())
@@ -210,19 +231,19 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume
 	// volumes can get bound/provisioned in between calls.
 	boundClaims, claimsToBind, unboundClaimsImmediate, err := b.getPodVolumes(pod)
 	if err != nil {
-		return false, false, err
+		return nil, err
 	}
 
 	// Immediate claims should be bound
 	if len(unboundClaimsImmediate) > 0 {
-		return false, false, fmt.Errorf("pod has unbound immediate PersistentVolumeClaims")
+		return nil, fmt.Errorf("pod has unbound immediate PersistentVolumeClaims")
 	}
 
 	// Check PV node affinity on bound volumes
 	if len(boundClaims) > 0 {
 		boundVolumesSatisfied, err = b.checkBoundClaims(boundClaims, node, podName)
 		if err != nil {
-			return false, false, err
+			return nil, err
 		}
 	}
 
@@ -237,8 +258,9 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume
 		for _, claim := range claimsToBind {
 			if selectedNode, ok := claim.Annotations[pvutil.AnnSelectedNode]; ok {
 				if selectedNode != node.Name {
-					// Fast path, skip unmatched node
-					return false, boundVolumesSatisfied, nil
+					// Fast path, skip unmatched node.
+					unboundVolumesSatisfied = false
+					return
 				}
 				claimsToProvision = append(claimsToProvision, claim)
 			} else {
@@ -251,7 +273,7 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume
 		var unboundClaims []*v1.PersistentVolumeClaim
 		unboundVolumesSatisfied, matchedBindings, unboundClaims, err = b.findMatchingVolumes(pod, claimsToFindMatching, node)
 		if err != nil {
-			return false, false, err
+			return nil, err
 		}
 		claimsToProvision = append(claimsToProvision, unboundClaims...)
 	}
@@ -260,12 +282,12 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume
 		if len(claimsToProvision) > 0 {
 			unboundVolumesSatisfied, provisionedClaims, err = b.checkVolumeProvisions(pod, claimsToProvision, node)
 			if err != nil {
-				return false, false, err
+				return nil, err
 			}
 		}
 	}
 
-	return unboundVolumesSatisfied, boundVolumesSatisfied, nil
+	return
 }
 
 // AssumePodVolumes will take the cached matching PVs and PVCs to provision
diff --git a/pkg/controller/volume/scheduling/scheduler_binder_fake.go b/pkg/controller/volume/scheduling/scheduler_binder_fake.go
index bb38c0d6ba2..fa64d53c8ad 100644
--- a/pkg/controller/volume/scheduling/scheduler_binder_fake.go
+++ b/pkg/controller/volume/scheduling/scheduler_binder_fake.go
@@ -20,12 +20,11 @@ import "k8s.io/api/core/v1"
 
 // FakeVolumeBinderConfig holds configurations for fake volume binder.
 type FakeVolumeBinderConfig struct {
-	AllBound             bool
-	FindUnboundSatsified bool
-	FindBoundSatsified   bool
-	FindErr              error
-	AssumeErr            error
-	BindErr              error
+	AllBound    bool
+	FindReasons []string
+	FindErr     error
+	AssumeErr   error
+	BindErr     error
}
 
 // NewFakeVolumeBinder sets up all the caches needed for the scheduler to make
@@ -44,8 +43,8 @@ type FakeVolumeBinder struct {
 }
 
 // FindPodVolumes implements SchedulerVolumeBinder.FindPodVolumes.
-func (b *FakeVolumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolumesSatisfied, boundVolumesSatsified bool, err error) {
-	return b.config.FindUnboundSatsified, b.config.FindBoundSatsified, b.config.FindErr
+func (b *FakeVolumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (reasons []string, err error) {
+	return b.config.FindReasons, b.config.FindErr
 }
 
 // AssumePodVolumes implements SchedulerVolumeBinder.AssumePodVolumes.
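
The two files above carry the heart of the change: `FindPodVolumes` now reports conflicts as self-describing strings instead of two positional booleans, so callers no longer need to remember which boolean means what. A minimal sketch of how a caller consumes the new signature, assuming only the `SchedulerVolumeBinder` interface from the hunk above (the `nodeUsable` helper is hypothetical, not part of this patch):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	volumescheduling "k8s.io/kubernetes/pkg/controller/volume/scheduling"
)

// nodeUsable is a hypothetical helper: a nil error together with an empty
// reasons slice means the node can satisfy the pod's volume requirements.
func nodeUsable(binder volumescheduling.SchedulerVolumeBinder, pod *v1.Pod, node *v1.Node) (bool, error) {
	reasons, err := binder.FindPodVolumes(pod, node)
	if err != nil {
		// Hard failure: something went wrong while checking, independent
		// of this particular node's suitability.
		return false, err
	}
	if len(reasons) > 0 {
		// Soft failure: the node is (currently) not usable for the pod.
		// Each entry is a user-facing string such as ErrReasonNodeConflict.
		fmt.Printf("node %q rejected: %v\n", node.Name, reasons)
		return false, nil
	}
	return true, nil
}
```

Note the deferred function in `FindPodVolumes`: by translating the booleans into reason strings only when `err == nil`, the patch preserves the invariant that a non-nil error is never accompanied by a reasons list.
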
diff --git a/pkg/controller/volume/scheduling/scheduler_binder_test.go b/pkg/controller/volume/scheduling/scheduler_binder_test.go
index 777fae8e333..b502079027c 100644
--- a/pkg/controller/volume/scheduling/scheduler_binder_test.go
+++ b/pkg/controller/volume/scheduling/scheduler_binder_test.go
@@ -20,6 +20,7 @@ import (
 	"context"
 	"fmt"
 	"reflect"
+	"sort"
 	"testing"
 	"time"
 
@@ -735,6 +736,41 @@ func addProvisionAnn(pvc *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
 	return res
 }
 
+// reasonNames pretty-prints a list of reasons with variable names in
+// case of a test failure because that is easier to read than the full
+// strings.
+func reasonNames(reasons []string) string {
+	var varNames []string
+	for _, reason := range reasons {
+		switch reason {
+		case ErrReasonBindConflict:
+			varNames = append(varNames, "ErrReasonBindConflict")
+		case ErrReasonNodeConflict:
+			varNames = append(varNames, "ErrReasonNodeConflict")
+		default:
+			varNames = append(varNames, reason)
+		}
+	}
+	return fmt.Sprintf("%v", varNames)
+}
+
+func checkReasons(t *testing.T, actual, expected []string) {
+	equal := len(actual) == len(expected)
+	sort.Strings(actual)
+	sort.Strings(expected)
+	if equal {
+		for i, reason := range actual {
+			if reason != expected[i] {
+				equal = false
+				break
+			}
+		}
+	}
+	if !equal {
+		t.Errorf("expected failure reasons %s, got %s", reasonNames(expected), reasonNames(actual))
+	}
+}
+
 func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
 	type scenarioType struct {
 		// Inputs
@@ -749,39 +785,28 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
 		expectedBindings []*bindingInfo
 
 		// Expected return values
-		expectedUnbound bool
-		expectedBound   bool
-		shouldFail      bool
+		reasons    []string
+		shouldFail bool
 	}
 	scenarios := map[string]scenarioType{
 		"no-volumes": {
-			pod:             makePod(nil),
-			expectedUnbound: true,
-			expectedBound:   true,
+			pod: makePod(nil),
 		},
 		"no-pvcs": {
-			pod:             makePodWithoutPVC(),
-			expectedUnbound: true,
-			expectedBound:   true,
+			pod: makePodWithoutPVC(),
 		},
 		"pvc-not-found": {
-			cachePVCs:       []*v1.PersistentVolumeClaim{},
-			podPVCs:         []*v1.PersistentVolumeClaim{boundPVC},
-			expectedUnbound: false,
-			expectedBound:   false,
-			shouldFail:      true,
+			cachePVCs:  []*v1.PersistentVolumeClaim{},
+			podPVCs:    []*v1.PersistentVolumeClaim{boundPVC},
+			shouldFail: true,
 		},
 		"bound-pvc": {
-			podPVCs:         []*v1.PersistentVolumeClaim{boundPVC},
-			pvs:             []*v1.PersistentVolume{pvBound},
-			expectedUnbound: true,
-			expectedBound:   true,
+			podPVCs: []*v1.PersistentVolumeClaim{boundPVC},
+			pvs:     []*v1.PersistentVolume{pvBound},
 		},
 		"bound-pvc,pv-not-exists": {
-			podPVCs:         []*v1.PersistentVolumeClaim{boundPVC},
-			expectedUnbound: false,
-			expectedBound:   false,
-			shouldFail:      true,
+			podPVCs:    []*v1.PersistentVolumeClaim{boundPVC},
+			shouldFail: true,
 		},
 		"prebound-pvc": {
 			podPVCs: []*v1.PersistentVolumeClaim{preboundPVC},
@@ -792,48 +817,37 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
 			podPVCs:          []*v1.PersistentVolumeClaim{unboundPVC},
 			pvs:              []*v1.PersistentVolume{pvNode2, pvNode1a, pvNode1b},
 			expectedBindings: []*bindingInfo{makeBinding(unboundPVC, pvNode1a)},
-			expectedUnbound:  true,
-			expectedBound:    true,
 		},
 		"unbound-pvc,pv-different-node": {
-			podPVCs:         []*v1.PersistentVolumeClaim{unboundPVC},
-			pvs:             []*v1.PersistentVolume{pvNode2},
-			expectedUnbound: false,
-			expectedBound:   true,
+			podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
+			pvs:     []*v1.PersistentVolume{pvNode2},
+			reasons: []string{ErrReasonBindConflict},
 		},
 		"two-unbound-pvcs": {
 			podPVCs:          []*v1.PersistentVolumeClaim{unboundPVC, unboundPVC2},
 			pvs:              []*v1.PersistentVolume{pvNode1a, pvNode1b},
 			expectedBindings: []*bindingInfo{makeBinding(unboundPVC, pvNode1a), makeBinding(unboundPVC2, pvNode1b)},
-			expectedUnbound:  true,
-			expectedBound:    true,
 		},
 		"two-unbound-pvcs,order-by-size": {
 			podPVCs:          []*v1.PersistentVolumeClaim{unboundPVC2, unboundPVC},
 			pvs:              []*v1.PersistentVolume{pvNode1a, pvNode1b},
 			expectedBindings: []*bindingInfo{makeBinding(unboundPVC, pvNode1a), makeBinding(unboundPVC2, pvNode1b)},
-			expectedUnbound:  true,
-			expectedBound:    true,
 		},
 		"two-unbound-pvcs,partial-match": {
 			podPVCs:          []*v1.PersistentVolumeClaim{unboundPVC, unboundPVC2},
 			pvs:              []*v1.PersistentVolume{pvNode1a},
 			expectedBindings: []*bindingInfo{makeBinding(unboundPVC, pvNode1a)},
-			expectedUnbound:  false,
-			expectedBound:    true,
+			reasons:          []string{ErrReasonBindConflict},
 		},
 		"one-bound,one-unbound": {
 			podPVCs:          []*v1.PersistentVolumeClaim{unboundPVC, boundPVC},
 			pvs:              []*v1.PersistentVolume{pvBound, pvNode1a},
 			expectedBindings: []*bindingInfo{makeBinding(unboundPVC, pvNode1a)},
-			expectedUnbound:  true,
-			expectedBound:    true,
 		},
 		"one-bound,one-unbound,no-match": {
-			podPVCs:         []*v1.PersistentVolumeClaim{unboundPVC, boundPVC},
-			pvs:             []*v1.PersistentVolume{pvBound, pvNode2},
-			expectedUnbound: false,
-			expectedBound:   true,
+			podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, boundPVC},
+			pvs:     []*v1.PersistentVolume{pvBound, pvNode2},
+			reasons: []string{ErrReasonBindConflict},
 		},
 		"one-prebound,one-unbound": {
 			podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, preboundPVC},
@@ -841,35 +855,26 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
 			shouldFail: true,
 		},
 		"immediate-bound-pvc": {
-			podPVCs:         []*v1.PersistentVolumeClaim{immediateBoundPVC},
-			pvs:             []*v1.PersistentVolume{pvBoundImmediate},
-			expectedUnbound: true,
-			expectedBound:   true,
+			podPVCs: []*v1.PersistentVolumeClaim{immediateBoundPVC},
+			pvs:     []*v1.PersistentVolume{pvBoundImmediate},
 		},
 		"immediate-bound-pvc-wrong-node": {
-			podPVCs:         []*v1.PersistentVolumeClaim{immediateBoundPVC},
-			pvs:             []*v1.PersistentVolume{pvBoundImmediateNode2},
-			expectedUnbound: true,
-			expectedBound:   false,
+			podPVCs: []*v1.PersistentVolumeClaim{immediateBoundPVC},
+			pvs:     []*v1.PersistentVolume{pvBoundImmediateNode2},
+			reasons: []string{ErrReasonNodeConflict},
 		},
 		"immediate-unbound-pvc": {
-			podPVCs:         []*v1.PersistentVolumeClaim{immediateUnboundPVC},
-			expectedUnbound: false,
-			expectedBound:   false,
-			shouldFail:      true,
+			podPVCs:    []*v1.PersistentVolumeClaim{immediateUnboundPVC},
+			shouldFail: true,
 		},
 		"immediate-unbound-pvc,delayed-mode-bound": {
-			podPVCs:         []*v1.PersistentVolumeClaim{immediateUnboundPVC, boundPVC},
-			pvs:             []*v1.PersistentVolume{pvBound},
-			expectedUnbound: false,
-			expectedBound:   false,
-			shouldFail:      true,
+			podPVCs:    []*v1.PersistentVolumeClaim{immediateUnboundPVC, boundPVC},
+			pvs:        []*v1.PersistentVolume{pvBound},
+			shouldFail: true,
 		},
 		"immediate-unbound-pvc,delayed-mode-unbound": {
-			podPVCs:         []*v1.PersistentVolumeClaim{immediateUnboundPVC, unboundPVC},
-			expectedUnbound: false,
-			expectedBound:   false,
-			shouldFail:      true,
+			podPVCs:    []*v1.PersistentVolumeClaim{immediateUnboundPVC, unboundPVC},
+			shouldFail: true,
 		},
 	}
 
@@ -902,7 +907,7 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
 		}
 
 		// Execute
-		unboundSatisfied, boundSatisfied, err := testEnv.binder.FindPodVolumes(scenario.pod, testNode)
+		reasons, err := testEnv.binder.FindPodVolumes(scenario.pod, testNode)
 
 		// Validate
 		if !scenario.shouldFail && err != nil {
@@ -911,12 +916,7 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
 		if scenario.shouldFail && err == nil {
 			t.Error("returned success but expected error")
 		}
-		if boundSatisfied != scenario.expectedBound {
-			t.Errorf("expected boundSatisfied %v, got %v", scenario.expectedBound, boundSatisfied)
-		}
-		if unboundSatisfied != scenario.expectedUnbound {
-			t.Errorf("expected unboundSatisfied %v, got %v", scenario.expectedUnbound, unboundSatisfied)
-		}
+		checkReasons(t, reasons, scenario.reasons)
 
 		testEnv.validatePodCache(t, testNode.Name, scenario.pod, scenario.expectedBindings, nil)
 	}
@@ -940,61 +940,46 @@ func TestFindPodVolumesWithProvisioning(t *testing.T) {
 		expectedProvisions []*v1.PersistentVolumeClaim
 
 		// Expected return values
-		expectedUnbound bool
-		expectedBound   bool
-		shouldFail      bool
+		reasons    []string
+		shouldFail bool
 	}
 	scenarios := map[string]scenarioType{
 		"one-provisioned": {
 			podPVCs:            []*v1.PersistentVolumeClaim{provisionedPVC},
 			expectedProvisions: []*v1.PersistentVolumeClaim{provisionedPVC},
-			expectedUnbound:    true,
-			expectedBound:      true,
 		},
 		"two-unbound-pvcs,one-matched,one-provisioned": {
 			podPVCs:            []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVC},
 			pvs:                []*v1.PersistentVolume{pvNode1a},
 			expectedBindings:   []*bindingInfo{makeBinding(unboundPVC, pvNode1a)},
 			expectedProvisions: []*v1.PersistentVolumeClaim{provisionedPVC},
-			expectedUnbound:    true,
-			expectedBound:      true,
 		},
 		"one-bound,one-provisioned": {
 			podPVCs:            []*v1.PersistentVolumeClaim{boundPVC, provisionedPVC},
 			pvs:                []*v1.PersistentVolume{pvBound},
 			expectedProvisions: []*v1.PersistentVolumeClaim{provisionedPVC},
-			expectedUnbound:    true,
-			expectedBound:      true,
 		},
 		"one-binding,one-selected-node": {
 			podPVCs:            []*v1.PersistentVolumeClaim{boundPVC, selectedNodePVC},
 			pvs:                []*v1.PersistentVolume{pvBound},
 			expectedProvisions: []*v1.PersistentVolumeClaim{selectedNodePVC},
-			expectedUnbound:    true,
-			expectedBound:      true,
 		},
 		"immediate-unbound-pvc": {
-			podPVCs:         []*v1.PersistentVolumeClaim{immediateUnboundPVC},
-			expectedUnbound: false,
-			expectedBound:   false,
-			shouldFail:      true,
+			podPVCs:    []*v1.PersistentVolumeClaim{immediateUnboundPVC},
+			shouldFail: true,
 		},
 		"one-immediate-bound,one-provisioned": {
 			podPVCs:            []*v1.PersistentVolumeClaim{immediateBoundPVC, provisionedPVC},
 			pvs:                []*v1.PersistentVolume{pvBoundImmediate},
 			expectedProvisions: []*v1.PersistentVolumeClaim{provisionedPVC},
-			expectedUnbound:    true,
-			expectedBound:      true,
 		},
 		"invalid-provisioner": {
-			podPVCs:         []*v1.PersistentVolumeClaim{noProvisionerPVC},
-			expectedUnbound: false,
-			expectedBound:   true,
+			podPVCs: []*v1.PersistentVolumeClaim{noProvisionerPVC},
+			reasons: []string{ErrReasonBindConflict},
 		},
 		"volume-topology-unsatisfied": {
-			podPVCs:         []*v1.PersistentVolumeClaim{topoMismatchPVC},
-			expectedUnbound: false,
-			expectedBound:   true,
+			podPVCs: []*v1.PersistentVolumeClaim{topoMismatchPVC},
+			reasons: []string{ErrReasonBindConflict},
 		},
 	}
 
@@ -1027,7 +1012,7 @@ func TestFindPodVolumesWithProvisioning(t *testing.T) {
 		}
 
 		// Execute
-		unboundSatisfied, boundSatisfied, err := testEnv.binder.FindPodVolumes(scenario.pod, testNode)
+		reasons, err := testEnv.binder.FindPodVolumes(scenario.pod, testNode)
 
 		// Validate
 		if !scenario.shouldFail && err != nil {
@@ -1036,12 +1021,7 @@ func TestFindPodVolumesWithProvisioning(t *testing.T) {
 		if scenario.shouldFail && err == nil {
 			t.Error("returned success but expected error")
 		}
-		if boundSatisfied != scenario.expectedBound {
-			t.Errorf("expected boundSatisfied %v, got %v", scenario.expectedBound, boundSatisfied)
-		}
-		if unboundSatisfied != scenario.expectedUnbound {
-			t.Errorf("expected unboundSatisfied %v, got %v", scenario.expectedUnbound, unboundSatisfied)
-		}
+		checkReasons(t, reasons, scenario.reasons)
 
 		testEnv.validatePodCache(t, testNode.Name, scenario.pod, scenario.expectedBindings, scenario.expectedProvisions)
 	}
@@ -1067,41 +1047,33 @@ func TestFindPodVolumesWithCSIMigration(t *testing.T) {
 		initCSINodes []*storagev1.CSINode
 
 		// Expected return values
-		expectedUnbound bool
-		expectedBound   bool
-		shouldFail      bool
+		reasons    []string
+		shouldFail bool
 	}
 	scenarios := map[string]scenarioType{
 		"pvc-bound": {
-			podPVCs:         []*v1.PersistentVolumeClaim{boundMigrationPVC},
-			pvs:             []*v1.PersistentVolume{migrationPVBound},
-			initNodes:       []*v1.Node{node1Zone1},
-			initCSINodes:    []*storagev1.CSINode{csiNode1Migrated},
-			expectedBound:   true,
-			expectedUnbound: true,
+			podPVCs:      []*v1.PersistentVolumeClaim{boundMigrationPVC},
+			pvs:          []*v1.PersistentVolume{migrationPVBound},
+			initNodes:    []*v1.Node{node1Zone1},
+			initCSINodes: []*storagev1.CSINode{csiNode1Migrated},
 		},
 		"pvc-bound,csinode-not-migrated": {
-			podPVCs:         []*v1.PersistentVolumeClaim{boundMigrationPVC},
-			pvs:             []*v1.PersistentVolume{migrationPVBound},
-			initNodes:       []*v1.Node{node1Zone1},
-			initCSINodes:    []*storagev1.CSINode{csiNode1NotMigrated},
-			expectedBound:   true,
-			expectedUnbound: true,
+			podPVCs:      []*v1.PersistentVolumeClaim{boundMigrationPVC},
+			pvs:          []*v1.PersistentVolume{migrationPVBound},
+			initNodes:    []*v1.Node{node1Zone1},
+			initCSINodes: []*storagev1.CSINode{csiNode1NotMigrated},
 		},
 		"pvc-bound,missing-csinode": {
-			podPVCs:         []*v1.PersistentVolumeClaim{boundMigrationPVC},
-			pvs:             []*v1.PersistentVolume{migrationPVBound},
-			initNodes:       []*v1.Node{node1Zone1},
-			expectedBound:   true,
-			expectedUnbound: true,
+			podPVCs:   []*v1.PersistentVolumeClaim{boundMigrationPVC},
+			pvs:       []*v1.PersistentVolume{migrationPVBound},
+			initNodes: []*v1.Node{node1Zone1},
 		},
 		"pvc-bound,node-different-zone": {
-			podPVCs:         []*v1.PersistentVolumeClaim{boundMigrationPVC},
-			pvs:             []*v1.PersistentVolume{migrationPVBound},
-			initNodes:       []*v1.Node{node1Zone2},
-			initCSINodes:    []*storagev1.CSINode{csiNode1Migrated},
-			expectedBound:   false,
-			expectedUnbound: true,
+			podPVCs:      []*v1.PersistentVolumeClaim{boundMigrationPVC},
+			pvs:          []*v1.PersistentVolume{migrationPVBound},
+			initNodes:    []*v1.Node{node1Zone2},
+			initCSINodes: []*storagev1.CSINode{csiNode1Migrated},
+			reasons:      []string{ErrReasonNodeConflict},
 		},
 	}
 
@@ -1140,7 +1112,7 @@ func TestFindPodVolumesWithCSIMigration(t *testing.T) {
 		}
 
 		// Execute
-		unboundSatisfied, boundSatisfied, err := testEnv.binder.FindPodVolumes(scenario.pod, node)
+		reasons, err := testEnv.binder.FindPodVolumes(scenario.pod, node)
 
 		// Validate
 		if !scenario.shouldFail && err != nil {
@@ -1149,12 +1121,7 @@ func TestFindPodVolumesWithCSIMigration(t *testing.T) {
 		if scenario.shouldFail && err == nil {
 			t.Error("returned success but expected error")
 		}
-		if boundSatisfied != scenario.expectedBound {
-			t.Errorf("expected boundSatisfied %v, got %v", scenario.expectedBound, boundSatisfied)
-		}
-		if unboundSatisfied != scenario.expectedUnbound {
-			t.Errorf("expected unboundSatisfied %v, got %v", scenario.expectedUnbound, unboundSatisfied)
-		}
+		checkReasons(t, reasons, scenario.reasons)
 	}
 
 	for name, scenario := range scenarios {
@@ -1966,12 +1933,12 @@ func TestFindAssumeVolumes(t *testing.T) {
 
 	// Execute
 	// 1. Find matching PVs
-	unboundSatisfied, _, err := testEnv.binder.FindPodVolumes(pod, testNode)
+	reasons, err := testEnv.binder.FindPodVolumes(pod, testNode)
 	if err != nil {
 		t.Errorf("Test failed: FindPodVolumes returned error: %v", err)
 	}
-	if !unboundSatisfied {
-		t.Errorf("Test failed: couldn't find PVs for all PVCs")
+	if len(reasons) > 0 {
+		t.Errorf("Test failed: couldn't find PVs for all PVCs: %v", reasons)
 	}
 	expectedBindings := testEnv.getPodBindings(t, testNode.Name, pod)
 
@@ -1992,12 +1959,12 @@ func TestFindAssumeVolumes(t *testing.T) {
 	// This should always return the original chosen pv
 	// Run this many times in case sorting returns different orders for the two PVs.
 	for i := 0; i < 50; i++ {
-		unboundSatisfied, _, err := testEnv.binder.FindPodVolumes(pod, testNode)
+		reasons, err := testEnv.binder.FindPodVolumes(pod, testNode)
 		if err != nil {
 			t.Errorf("Test failed: FindPodVolumes returned error: %v", err)
 		}
-		if !unboundSatisfied {
-			t.Errorf("Test failed: couldn't find PVs for all PVCs")
+		if len(reasons) > 0 {
+			t.Errorf("Test failed: couldn't find PVs for all PVCs: %v", reasons)
 		}
 		testEnv.validatePodCache(t, testNode.Name, pod, expectedBindings, nil)
 	}
diff --git a/pkg/scheduler/core/generic_scheduler_test.go b/pkg/scheduler/core/generic_scheduler_test.go
index a91766efd87..b418c312bca 100644
--- a/pkg/scheduler/core/generic_scheduler_test.go
+++ b/pkg/scheduler/core/generic_scheduler_test.go
@@ -38,6 +38,7 @@ import (
 	"k8s.io/client-go/informers"
 	clientsetfake "k8s.io/client-go/kubernetes/fake"
 	extenderv1 "k8s.io/kube-scheduler/extender/v1"
+	volumescheduling "k8s.io/kubernetes/pkg/controller/volume/scheduling"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpodtopologyspread"
@@ -50,7 +51,6 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/queuesort"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration"
-	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
@@ -1999,8 +1999,8 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
 			name: "ErrVolume... errors should not be tried as it indicates that the pod is unschedulable due to no matching volumes for pod on node",
 			nodesStatuses: framework.NodeToStatusMap{
 				"machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, volumezone.ErrReasonConflict),
-				"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, volumebinding.ErrReasonNodeConflict),
-				"machine3": framework.NewStatus(framework.UnschedulableAndUnresolvable, volumebinding.ErrReasonBindConflict),
+				"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, volumescheduling.ErrReasonNodeConflict),
+				"machine3": framework.NewStatus(framework.UnschedulableAndUnresolvable, volumescheduling.ErrReasonBindConflict),
 			},
 			expected: map[string]bool{"machine4": true},
 		},
diff --git a/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go b/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go
index 772dad6f220..2beb0287b00 100644
--- a/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go
+++ b/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go
@@ -36,13 +36,6 @@ var _ framework.FilterPlugin = &VolumeBinding{}
 // Name is the name of the plugin used in Registry and configurations.
 const Name = "VolumeBinding"
 
-const (
-	// ErrReasonBindConflict is used for VolumeBindingNoMatch predicate error.
-	ErrReasonBindConflict = "node(s) didn't find available persistent volumes to bind"
-	// ErrReasonNodeConflict is used for VolumeNodeAffinityConflict predicate error.
-	ErrReasonNodeConflict = "node(s) had volume node affinity conflict"
-)
-
 // Name returns name of the plugin. It is used in logs, etc.
 func (pl *VolumeBinding) Name() string {
 	return Name
@@ -79,19 +72,16 @@ func (pl *VolumeBinding) Filter(ctx context.Context, cs *framework.CycleState, p
 		return nil
 	}
 
-	unboundSatisfied, boundSatisfied, err := pl.binder.Binder.FindPodVolumes(pod, node)
+	reasons, err := pl.binder.Binder.FindPodVolumes(pod, node)
 	if err != nil {
 		return framework.NewStatus(framework.Error, err.Error())
 	}
 
-	if !boundSatisfied || !unboundSatisfied {
+	if len(reasons) > 0 {
 		status := framework.NewStatus(framework.UnschedulableAndUnresolvable)
-		if !boundSatisfied {
-			status.AppendReason(ErrReasonNodeConflict)
-		}
-		if !unboundSatisfied {
-			status.AppendReason(ErrReasonBindConflict)
+		for _, reason := range reasons {
+			status.AppendReason(reason)
 		}
 		return status
 	}
diff --git a/pkg/scheduler/framework/plugins/volumebinding/volume_binding_test.go b/pkg/scheduler/framework/plugins/volumebinding/volume_binding_test.go
index 390e0c58392..5128e4eb564 100644
--- a/pkg/scheduler/framework/plugins/volumebinding/volume_binding_test.go
+++ b/pkg/scheduler/framework/plugins/volumebinding/volume_binding_test.go
@@ -58,9 +58,7 @@ func TestVolumeBinding(t *testing.T) {
 			pod:  &v1.Pod{Spec: volState},
 			node: &v1.Node{},
 			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
-				AllBound:             true,
-				FindUnboundSatsified: true,
-				FindBoundSatsified:   true,
+				AllBound: true,
 			},
 			wantStatus: nil,
 		},
@@ -69,31 +67,25 @@
 			pod:  &v1.Pod{Spec: volState},
 			node: &v1.Node{},
 			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
-				FindUnboundSatsified: false,
-				FindBoundSatsified:   true,
+				FindReasons: []string{volumescheduling.ErrReasonBindConflict},
 			},
-			wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReasonBindConflict),
+			wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, volumescheduling.ErrReasonBindConflict),
 		},
 		{
 			name: "bound and unbound unsatisfied",
 			pod:  &v1.Pod{Spec: volState},
 			node: &v1.Node{},
 			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
-				FindUnboundSatsified: false,
-				FindBoundSatsified:   false,
+				FindReasons: []string{volumescheduling.ErrReasonBindConflict, volumescheduling.ErrReasonNodeConflict},
 			},
-			wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReasonNodeConflict,
-				ErrReasonBindConflict),
+			wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, volumescheduling.ErrReasonBindConflict, volumescheduling.ErrReasonNodeConflict),
 		},
 		{
-			name: "unbound/found matches/bind succeeds",
-			pod:  &v1.Pod{Spec: volState},
-			node: &v1.Node{},
-			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
-				FindUnboundSatsified: true,
-				FindBoundSatsified:   true,
-			},
-			wantStatus: nil,
+			name:               "unbound/found matches/bind succeeds",
+			pod:                &v1.Pod{Spec: volState},
+			node:               &v1.Node{},
+			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{},
+			wantStatus:         nil,
 		},
 		{
 			name: "predicate error",
diff --git a/pkg/scheduler/scheduler_test.go b/pkg/scheduler/scheduler_test.go
index 761c35dc3db..ec39968faae 100644
--- a/pkg/scheduler/scheduler_test.go
+++ b/pkg/scheduler/scheduler_test.go
@@ -901,9 +901,7 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
 		{
 			name: "all bound",
 			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
-				AllBound:             true,
-				FindUnboundSatsified: true,
-				FindBoundSatsified:   true,
+				AllBound: true,
 			},
 			expectAssumeCalled: true,
 			expectPodBind:      &v1.Binding{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "foo-ns", UID: types.UID("foo")}, Target: v1.ObjectReference{Kind: "Node", Name: "machine1"}},
@@ -912,9 +910,8 @@
 		{
 			name: "bound/invalid pv affinity",
 			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
-				AllBound:             true,
-				FindUnboundSatsified: true,
-				FindBoundSatsified:   false,
+				AllBound:    true,
+				FindReasons: []string{volumescheduling.ErrReasonNodeConflict},
 			},
 			eventReason: "FailedScheduling",
 			expectError: makePredicateError("1 node(s) had volume node affinity conflict"),
@@ -922,8 +919,7 @@
 		{
 			name: "unbound/no matches",
 			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
-				FindUnboundSatsified: false,
-				FindBoundSatsified:   true,
+				FindReasons: []string{volumescheduling.ErrReasonBindConflict},
 			},
 			eventReason: "FailedScheduling",
 			expectError: makePredicateError("1 node(s) didn't find available persistent volumes to bind"),
@@ -931,18 +927,14 @@
 		{
 			name: "bound and unbound unsatisfied",
 			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
-				FindUnboundSatsified: false,
-				FindBoundSatsified:   false,
+				FindReasons: []string{volumescheduling.ErrReasonBindConflict, volumescheduling.ErrReasonNodeConflict},
 			},
 			eventReason: "FailedScheduling",
 			expectError: makePredicateError("1 node(s) didn't find available persistent volumes to bind, 1 node(s) had volume node affinity conflict"),
 		},
 		{
-			name: "unbound/found matches/bind succeeds",
-			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
-				FindUnboundSatsified: true,
-				FindBoundSatsified:   true,
-			},
+			name:               "unbound/found matches/bind succeeds",
+			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{},
 			expectAssumeCalled: true,
 			expectBindCalled:   true,
 			expectPodBind:      &v1.Binding{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "foo-ns", UID: types.UID("foo")}, Target: v1.ObjectReference{Kind: "Node", Name: "machine1"}},
@@ -959,9 +951,7 @@
 		{
 			name: "assume error",
 			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
-				FindUnboundSatsified: true,
-				FindBoundSatsified:   true,
-				AssumeErr:            assumeErr,
+				AssumeErr: assumeErr,
 			},
 			expectAssumeCalled: true,
 			eventReason:        "FailedScheduling",
@@ -970,9 +960,7 @@
 		{
 			name: "bind error",
 			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
-				FindUnboundSatsified: true,
-				FindBoundSatsified:   true,
-				BindErr:              bindErr,
+				BindErr: bindErr,
 			},
 			expectAssumeCalled: true,
 			expectBindCalled:   true,
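
With `FakeVolumeBinderConfig` reduced to `FindReasons`, test fixtures state the expected outcome directly instead of encoding it in two booleans, and the happy path needs no fields at all. A sketch of driving the fake binder outside a test table, assuming `NewFakeVolumeBinder` accepts a `*FakeVolumeBinderConfig` (the constructor's exact signature is not shown in this patch):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	volumescheduling "k8s.io/kubernetes/pkg/controller/volume/scheduling"
)

func main() {
	// Configure the fake to report a node-affinity conflict, mirroring the
	// "bound/invalid pv affinity" scheduler test case above.
	binder := volumescheduling.NewFakeVolumeBinder(&volumescheduling.FakeVolumeBinderConfig{
		FindReasons: []string{volumescheduling.ErrReasonNodeConflict},
	})

	// The fake simply echoes the configured reasons and error.
	reasons, err := binder.FindPodVolumes(&v1.Pod{}, &v1.Node{})
	fmt.Println(err)     // <nil>
	fmt.Println(reasons) // [node(s) had volume node affinity conflict]
}
```

Because the reasons are plain strings shared through the `volumescheduling` package, the `VolumeBinding` filter plugin can append them to its `Status` verbatim, and expected scheduler errors such as "1 node(s) had volume node affinity conflict" fall out of the same constants.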