volume scheduler: move reason strings into volume code

The scheduler doesn't really need to know in detail which reasons
rendered a node unusable for a pod. All it needs from the volume
binder is a list of reasons that it then can present to the user.

This seems a bit cleaner. But the main reason for the change is that
it simplifies the checking of CSI inline volumes and perhaps later
capacity checking. Both will lead to new failure reasons, which then
can be added without changing the interface.
This commit is contained in:
Patrick Ohly 2020-02-14 13:40:29 +01:00
parent c73532c4f7
commit 6eb0b034ac
7 changed files with 174 additions and 216 deletions

View File

@ -45,6 +45,13 @@ import (
volumeutil "k8s.io/kubernetes/pkg/volume/util" volumeutil "k8s.io/kubernetes/pkg/volume/util"
) )
const (
// ErrReasonBindConflict is used for VolumeBindingNoMatch predicate error.
ErrReasonBindConflict = "node(s) didn't find available persistent volumes to bind"
// ErrReasonNodeConflict is used for VolumeNodeAffinityConflict predicate error.
ErrReasonNodeConflict = "node(s) had volume node affinity conflict"
)
// InTreeToCSITranslator contains methods required to check migratable status // InTreeToCSITranslator contains methods required to check migratable status
// and perform translations from InTree PV's to CSI // and perform translations from InTree PV's to CSI
type InTreeToCSITranslator interface { type InTreeToCSITranslator interface {
@ -83,11 +90,11 @@ type SchedulerVolumeBinder interface {
// If a PVC is bound, it checks if the PV's NodeAffinity matches the Node. // If a PVC is bound, it checks if the PV's NodeAffinity matches the Node.
// Otherwise, it tries to find an available PV to bind to the PVC. // Otherwise, it tries to find an available PV to bind to the PVC.
// //
// It returns true if all of the Pod's PVCs have matching PVs or can be dynamic provisioned, // It returns an error when something went wrong or a list of reasons why the node is
// and returns true if bound volumes satisfy the PV NodeAffinity. // (currently) not usable for the pod.
// //
// This function is called by the volume binding scheduler predicate and can be called in parallel // This function is called by the volume binding scheduler predicate and can be called in parallel
FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolumesSatisified, boundVolumesSatisfied bool, err error) FindPodVolumes(pod *v1.Pod, node *v1.Node) (reasons []string, err error)
// AssumePodVolumes will: // AssumePodVolumes will:
// 1. Take the PV matches for unbound PVCs and update the PV cache assuming // 1. Take the PV matches for unbound PVCs and update the PV cache assuming
@ -166,15 +173,29 @@ func (b *volumeBinder) GetBindingsCache() PodBindingCache {
// FindPodVolumes caches the matching PVs and PVCs to provision per node in podBindingCache. // FindPodVolumes caches the matching PVs and PVCs to provision per node in podBindingCache.
// This method intentionally takes in a *v1.Node object instead of using volumebinder.nodeInformer. // This method intentionally takes in a *v1.Node object instead of using volumebinder.nodeInformer.
// That's necessary because some operations will need to pass in to the predicate fake node objects. // That's necessary because some operations will need to pass in to the predicate fake node objects.
func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolumesSatisfied, boundVolumesSatisfied bool, err error) { func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (reasons []string, err error) {
podName := getPodName(pod) podName := getPodName(pod)
// Warning: Below log needs high verbosity as it can be printed several times (#60933). // Warning: Below log needs high verbosity as it can be printed several times (#60933).
klog.V(5).Infof("FindPodVolumes for pod %q, node %q", podName, node.Name) klog.V(5).Infof("FindPodVolumes for pod %q, node %q", podName, node.Name)
// Initialize to true for pods that don't have volumes // Initialize to true for pods that don't have volumes. These
unboundVolumesSatisfied = true // booleans get translated into reason strings when the function
boundVolumesSatisfied = true // returns without an error.
unboundVolumesSatisfied := true
boundVolumesSatisfied := true
defer func() {
if err != nil {
return
}
if !boundVolumesSatisfied {
reasons = append(reasons, ErrReasonNodeConflict)
}
if !unboundVolumesSatisfied {
reasons = append(reasons, ErrReasonBindConflict)
}
}()
start := time.Now() start := time.Now()
defer func() { defer func() {
metrics.VolumeSchedulingStageLatency.WithLabelValues("predicate").Observe(time.Since(start).Seconds()) metrics.VolumeSchedulingStageLatency.WithLabelValues("predicate").Observe(time.Since(start).Seconds())
@ -210,19 +231,19 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume
// volumes can get bound/provisioned in between calls. // volumes can get bound/provisioned in between calls.
boundClaims, claimsToBind, unboundClaimsImmediate, err := b.getPodVolumes(pod) boundClaims, claimsToBind, unboundClaimsImmediate, err := b.getPodVolumes(pod)
if err != nil { if err != nil {
return false, false, err return nil, err
} }
// Immediate claims should be bound // Immediate claims should be bound
if len(unboundClaimsImmediate) > 0 { if len(unboundClaimsImmediate) > 0 {
return false, false, fmt.Errorf("pod has unbound immediate PersistentVolumeClaims") return nil, fmt.Errorf("pod has unbound immediate PersistentVolumeClaims")
} }
// Check PV node affinity on bound volumes // Check PV node affinity on bound volumes
if len(boundClaims) > 0 { if len(boundClaims) > 0 {
boundVolumesSatisfied, err = b.checkBoundClaims(boundClaims, node, podName) boundVolumesSatisfied, err = b.checkBoundClaims(boundClaims, node, podName)
if err != nil { if err != nil {
return false, false, err return nil, err
} }
} }
@ -237,8 +258,9 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume
for _, claim := range claimsToBind { for _, claim := range claimsToBind {
if selectedNode, ok := claim.Annotations[pvutil.AnnSelectedNode]; ok { if selectedNode, ok := claim.Annotations[pvutil.AnnSelectedNode]; ok {
if selectedNode != node.Name { if selectedNode != node.Name {
// Fast path, skip unmatched node // Fast path, skip unmatched node.
return false, boundVolumesSatisfied, nil unboundVolumesSatisfied = false
return
} }
claimsToProvision = append(claimsToProvision, claim) claimsToProvision = append(claimsToProvision, claim)
} else { } else {
@ -251,7 +273,7 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume
var unboundClaims []*v1.PersistentVolumeClaim var unboundClaims []*v1.PersistentVolumeClaim
unboundVolumesSatisfied, matchedBindings, unboundClaims, err = b.findMatchingVolumes(pod, claimsToFindMatching, node) unboundVolumesSatisfied, matchedBindings, unboundClaims, err = b.findMatchingVolumes(pod, claimsToFindMatching, node)
if err != nil { if err != nil {
return false, false, err return nil, err
} }
claimsToProvision = append(claimsToProvision, unboundClaims...) claimsToProvision = append(claimsToProvision, unboundClaims...)
} }
@ -260,12 +282,12 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume
if len(claimsToProvision) > 0 { if len(claimsToProvision) > 0 {
unboundVolumesSatisfied, provisionedClaims, err = b.checkVolumeProvisions(pod, claimsToProvision, node) unboundVolumesSatisfied, provisionedClaims, err = b.checkVolumeProvisions(pod, claimsToProvision, node)
if err != nil { if err != nil {
return false, false, err return nil, err
} }
} }
} }
return unboundVolumesSatisfied, boundVolumesSatisfied, nil return
} }
// AssumePodVolumes will take the cached matching PVs and PVCs to provision // AssumePodVolumes will take the cached matching PVs and PVCs to provision

View File

@ -20,12 +20,11 @@ import "k8s.io/api/core/v1"
// FakeVolumeBinderConfig holds configurations for fake volume binder. // FakeVolumeBinderConfig holds configurations for fake volume binder.
type FakeVolumeBinderConfig struct { type FakeVolumeBinderConfig struct {
AllBound bool AllBound bool
FindUnboundSatsified bool FindReasons []string
FindBoundSatsified bool FindErr error
FindErr error AssumeErr error
AssumeErr error BindErr error
BindErr error
} }
// NewFakeVolumeBinder sets up all the caches needed for the scheduler to make // NewFakeVolumeBinder sets up all the caches needed for the scheduler to make
@ -44,8 +43,8 @@ type FakeVolumeBinder struct {
} }
// FindPodVolumes implements SchedulerVolumeBinder.FindPodVolumes. // FindPodVolumes implements SchedulerVolumeBinder.FindPodVolumes.
func (b *FakeVolumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolumesSatisfied, boundVolumesSatsified bool, err error) { func (b *FakeVolumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (reasons []string, err error) {
return b.config.FindUnboundSatsified, b.config.FindBoundSatsified, b.config.FindErr return b.config.FindReasons, b.config.FindErr
} }
// AssumePodVolumes implements SchedulerVolumeBinder.AssumePodVolumes. // AssumePodVolumes implements SchedulerVolumeBinder.AssumePodVolumes.

View File

@ -20,6 +20,7 @@ import (
"context" "context"
"fmt" "fmt"
"reflect" "reflect"
"sort"
"testing" "testing"
"time" "time"
@ -735,6 +736,41 @@ func addProvisionAnn(pvc *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
return res return res
} }
// reasonNames pretty-prints a list of reasons with variable names in
// case of a test failure because that is easier to read than the full
// strings.
func reasonNames(reasons []string) string {
var varNames []string
for _, reason := range reasons {
switch reason {
case ErrReasonBindConflict:
varNames = append(varNames, "ErrReasonBindConflict")
case ErrReasonNodeConflict:
varNames = append(varNames, "ErrReasonNodeConflict")
default:
varNames = append(varNames, reason)
}
}
return fmt.Sprintf("%v", varNames)
}
func checkReasons(t *testing.T, actual, expected []string) {
equal := len(actual) == len(expected)
sort.Strings(actual)
sort.Strings(expected)
if equal {
for i, reason := range actual {
if reason != expected[i] {
equal = false
break
}
}
}
if !equal {
t.Errorf("expected failure reasons %s, got %s", reasonNames(expected), reasonNames(actual))
}
}
func TestFindPodVolumesWithoutProvisioning(t *testing.T) { func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
type scenarioType struct { type scenarioType struct {
// Inputs // Inputs
@ -749,39 +785,28 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
expectedBindings []*bindingInfo expectedBindings []*bindingInfo
// Expected return values // Expected return values
expectedUnbound bool reasons []string
expectedBound bool shouldFail bool
shouldFail bool
} }
scenarios := map[string]scenarioType{ scenarios := map[string]scenarioType{
"no-volumes": { "no-volumes": {
pod: makePod(nil), pod: makePod(nil),
expectedUnbound: true,
expectedBound: true,
}, },
"no-pvcs": { "no-pvcs": {
pod: makePodWithoutPVC(), pod: makePodWithoutPVC(),
expectedUnbound: true,
expectedBound: true,
}, },
"pvc-not-found": { "pvc-not-found": {
cachePVCs: []*v1.PersistentVolumeClaim{}, cachePVCs: []*v1.PersistentVolumeClaim{},
podPVCs: []*v1.PersistentVolumeClaim{boundPVC}, podPVCs: []*v1.PersistentVolumeClaim{boundPVC},
expectedUnbound: false, shouldFail: true,
expectedBound: false,
shouldFail: true,
}, },
"bound-pvc": { "bound-pvc": {
podPVCs: []*v1.PersistentVolumeClaim{boundPVC}, podPVCs: []*v1.PersistentVolumeClaim{boundPVC},
pvs: []*v1.PersistentVolume{pvBound}, pvs: []*v1.PersistentVolume{pvBound},
expectedUnbound: true,
expectedBound: true,
}, },
"bound-pvc,pv-not-exists": { "bound-pvc,pv-not-exists": {
podPVCs: []*v1.PersistentVolumeClaim{boundPVC}, podPVCs: []*v1.PersistentVolumeClaim{boundPVC},
expectedUnbound: false, shouldFail: true,
expectedBound: false,
shouldFail: true,
}, },
"prebound-pvc": { "prebound-pvc": {
podPVCs: []*v1.PersistentVolumeClaim{preboundPVC}, podPVCs: []*v1.PersistentVolumeClaim{preboundPVC},
@ -792,48 +817,37 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC}, podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
pvs: []*v1.PersistentVolume{pvNode2, pvNode1a, pvNode1b}, pvs: []*v1.PersistentVolume{pvNode2, pvNode1a, pvNode1b},
expectedBindings: []*bindingInfo{makeBinding(unboundPVC, pvNode1a)}, expectedBindings: []*bindingInfo{makeBinding(unboundPVC, pvNode1a)},
expectedUnbound: true,
expectedBound: true,
}, },
"unbound-pvc,pv-different-node": { "unbound-pvc,pv-different-node": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC}, podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
pvs: []*v1.PersistentVolume{pvNode2}, pvs: []*v1.PersistentVolume{pvNode2},
expectedUnbound: false, reasons: []string{ErrReasonBindConflict},
expectedBound: true,
}, },
"two-unbound-pvcs": { "two-unbound-pvcs": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, unboundPVC2}, podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, unboundPVC2},
pvs: []*v1.PersistentVolume{pvNode1a, pvNode1b}, pvs: []*v1.PersistentVolume{pvNode1a, pvNode1b},
expectedBindings: []*bindingInfo{makeBinding(unboundPVC, pvNode1a), makeBinding(unboundPVC2, pvNode1b)}, expectedBindings: []*bindingInfo{makeBinding(unboundPVC, pvNode1a), makeBinding(unboundPVC2, pvNode1b)},
expectedUnbound: true,
expectedBound: true,
}, },
"two-unbound-pvcs,order-by-size": { "two-unbound-pvcs,order-by-size": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC2, unboundPVC}, podPVCs: []*v1.PersistentVolumeClaim{unboundPVC2, unboundPVC},
pvs: []*v1.PersistentVolume{pvNode1a, pvNode1b}, pvs: []*v1.PersistentVolume{pvNode1a, pvNode1b},
expectedBindings: []*bindingInfo{makeBinding(unboundPVC, pvNode1a), makeBinding(unboundPVC2, pvNode1b)}, expectedBindings: []*bindingInfo{makeBinding(unboundPVC, pvNode1a), makeBinding(unboundPVC2, pvNode1b)},
expectedUnbound: true,
expectedBound: true,
}, },
"two-unbound-pvcs,partial-match": { "two-unbound-pvcs,partial-match": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, unboundPVC2}, podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, unboundPVC2},
pvs: []*v1.PersistentVolume{pvNode1a}, pvs: []*v1.PersistentVolume{pvNode1a},
expectedBindings: []*bindingInfo{makeBinding(unboundPVC, pvNode1a)}, expectedBindings: []*bindingInfo{makeBinding(unboundPVC, pvNode1a)},
expectedUnbound: false, reasons: []string{ErrReasonBindConflict},
expectedBound: true,
}, },
"one-bound,one-unbound": { "one-bound,one-unbound": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, boundPVC}, podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, boundPVC},
pvs: []*v1.PersistentVolume{pvBound, pvNode1a}, pvs: []*v1.PersistentVolume{pvBound, pvNode1a},
expectedBindings: []*bindingInfo{makeBinding(unboundPVC, pvNode1a)}, expectedBindings: []*bindingInfo{makeBinding(unboundPVC, pvNode1a)},
expectedUnbound: true,
expectedBound: true,
}, },
"one-bound,one-unbound,no-match": { "one-bound,one-unbound,no-match": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, boundPVC}, podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, boundPVC},
pvs: []*v1.PersistentVolume{pvBound, pvNode2}, pvs: []*v1.PersistentVolume{pvBound, pvNode2},
expectedUnbound: false, reasons: []string{ErrReasonBindConflict},
expectedBound: true,
}, },
"one-prebound,one-unbound": { "one-prebound,one-unbound": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, preboundPVC}, podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, preboundPVC},
@ -841,35 +855,26 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
shouldFail: true, shouldFail: true,
}, },
"immediate-bound-pvc": { "immediate-bound-pvc": {
podPVCs: []*v1.PersistentVolumeClaim{immediateBoundPVC}, podPVCs: []*v1.PersistentVolumeClaim{immediateBoundPVC},
pvs: []*v1.PersistentVolume{pvBoundImmediate}, pvs: []*v1.PersistentVolume{pvBoundImmediate},
expectedUnbound: true,
expectedBound: true,
}, },
"immediate-bound-pvc-wrong-node": { "immediate-bound-pvc-wrong-node": {
podPVCs: []*v1.PersistentVolumeClaim{immediateBoundPVC}, podPVCs: []*v1.PersistentVolumeClaim{immediateBoundPVC},
pvs: []*v1.PersistentVolume{pvBoundImmediateNode2}, pvs: []*v1.PersistentVolume{pvBoundImmediateNode2},
expectedUnbound: true, reasons: []string{ErrReasonNodeConflict},
expectedBound: false,
}, },
"immediate-unbound-pvc": { "immediate-unbound-pvc": {
podPVCs: []*v1.PersistentVolumeClaim{immediateUnboundPVC}, podPVCs: []*v1.PersistentVolumeClaim{immediateUnboundPVC},
expectedUnbound: false, shouldFail: true,
expectedBound: false,
shouldFail: true,
}, },
"immediate-unbound-pvc,delayed-mode-bound": { "immediate-unbound-pvc,delayed-mode-bound": {
podPVCs: []*v1.PersistentVolumeClaim{immediateUnboundPVC, boundPVC}, podPVCs: []*v1.PersistentVolumeClaim{immediateUnboundPVC, boundPVC},
pvs: []*v1.PersistentVolume{pvBound}, pvs: []*v1.PersistentVolume{pvBound},
expectedUnbound: false, shouldFail: true,
expectedBound: false,
shouldFail: true,
}, },
"immediate-unbound-pvc,delayed-mode-unbound": { "immediate-unbound-pvc,delayed-mode-unbound": {
podPVCs: []*v1.PersistentVolumeClaim{immediateUnboundPVC, unboundPVC}, podPVCs: []*v1.PersistentVolumeClaim{immediateUnboundPVC, unboundPVC},
expectedUnbound: false, shouldFail: true,
expectedBound: false,
shouldFail: true,
}, },
} }
@ -902,7 +907,7 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
} }
// Execute // Execute
unboundSatisfied, boundSatisfied, err := testEnv.binder.FindPodVolumes(scenario.pod, testNode) reasons, err := testEnv.binder.FindPodVolumes(scenario.pod, testNode)
// Validate // Validate
if !scenario.shouldFail && err != nil { if !scenario.shouldFail && err != nil {
@ -911,12 +916,7 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
if scenario.shouldFail && err == nil { if scenario.shouldFail && err == nil {
t.Error("returned success but expected error") t.Error("returned success but expected error")
} }
if boundSatisfied != scenario.expectedBound { checkReasons(t, reasons, scenario.reasons)
t.Errorf("expected boundSatisfied %v, got %v", scenario.expectedBound, boundSatisfied)
}
if unboundSatisfied != scenario.expectedUnbound {
t.Errorf("expected unboundSatisfied %v, got %v", scenario.expectedUnbound, unboundSatisfied)
}
testEnv.validatePodCache(t, testNode.Name, scenario.pod, scenario.expectedBindings, nil) testEnv.validatePodCache(t, testNode.Name, scenario.pod, scenario.expectedBindings, nil)
} }
@ -940,61 +940,46 @@ func TestFindPodVolumesWithProvisioning(t *testing.T) {
expectedProvisions []*v1.PersistentVolumeClaim expectedProvisions []*v1.PersistentVolumeClaim
// Expected return values // Expected return values
expectedUnbound bool reasons []string
expectedBound bool shouldFail bool
shouldFail bool
} }
scenarios := map[string]scenarioType{ scenarios := map[string]scenarioType{
"one-provisioned": { "one-provisioned": {
podPVCs: []*v1.PersistentVolumeClaim{provisionedPVC}, podPVCs: []*v1.PersistentVolumeClaim{provisionedPVC},
expectedProvisions: []*v1.PersistentVolumeClaim{provisionedPVC}, expectedProvisions: []*v1.PersistentVolumeClaim{provisionedPVC},
expectedUnbound: true,
expectedBound: true,
}, },
"two-unbound-pvcs,one-matched,one-provisioned": { "two-unbound-pvcs,one-matched,one-provisioned": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVC}, podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVC},
pvs: []*v1.PersistentVolume{pvNode1a}, pvs: []*v1.PersistentVolume{pvNode1a},
expectedBindings: []*bindingInfo{makeBinding(unboundPVC, pvNode1a)}, expectedBindings: []*bindingInfo{makeBinding(unboundPVC, pvNode1a)},
expectedProvisions: []*v1.PersistentVolumeClaim{provisionedPVC}, expectedProvisions: []*v1.PersistentVolumeClaim{provisionedPVC},
expectedUnbound: true,
expectedBound: true,
}, },
"one-bound,one-provisioned": { "one-bound,one-provisioned": {
podPVCs: []*v1.PersistentVolumeClaim{boundPVC, provisionedPVC}, podPVCs: []*v1.PersistentVolumeClaim{boundPVC, provisionedPVC},
pvs: []*v1.PersistentVolume{pvBound}, pvs: []*v1.PersistentVolume{pvBound},
expectedProvisions: []*v1.PersistentVolumeClaim{provisionedPVC}, expectedProvisions: []*v1.PersistentVolumeClaim{provisionedPVC},
expectedUnbound: true,
expectedBound: true,
}, },
"one-binding,one-selected-node": { "one-binding,one-selected-node": {
podPVCs: []*v1.PersistentVolumeClaim{boundPVC, selectedNodePVC}, podPVCs: []*v1.PersistentVolumeClaim{boundPVC, selectedNodePVC},
pvs: []*v1.PersistentVolume{pvBound}, pvs: []*v1.PersistentVolume{pvBound},
expectedProvisions: []*v1.PersistentVolumeClaim{selectedNodePVC}, expectedProvisions: []*v1.PersistentVolumeClaim{selectedNodePVC},
expectedUnbound: true,
expectedBound: true,
}, },
"immediate-unbound-pvc": { "immediate-unbound-pvc": {
podPVCs: []*v1.PersistentVolumeClaim{immediateUnboundPVC}, podPVCs: []*v1.PersistentVolumeClaim{immediateUnboundPVC},
expectedUnbound: false, shouldFail: true,
expectedBound: false,
shouldFail: true,
}, },
"one-immediate-bound,one-provisioned": { "one-immediate-bound,one-provisioned": {
podPVCs: []*v1.PersistentVolumeClaim{immediateBoundPVC, provisionedPVC}, podPVCs: []*v1.PersistentVolumeClaim{immediateBoundPVC, provisionedPVC},
pvs: []*v1.PersistentVolume{pvBoundImmediate}, pvs: []*v1.PersistentVolume{pvBoundImmediate},
expectedProvisions: []*v1.PersistentVolumeClaim{provisionedPVC}, expectedProvisions: []*v1.PersistentVolumeClaim{provisionedPVC},
expectedUnbound: true,
expectedBound: true,
}, },
"invalid-provisioner": { "invalid-provisioner": {
podPVCs: []*v1.PersistentVolumeClaim{noProvisionerPVC}, podPVCs: []*v1.PersistentVolumeClaim{noProvisionerPVC},
expectedUnbound: false, reasons: []string{ErrReasonBindConflict},
expectedBound: true,
}, },
"volume-topology-unsatisfied": { "volume-topology-unsatisfied": {
podPVCs: []*v1.PersistentVolumeClaim{topoMismatchPVC}, podPVCs: []*v1.PersistentVolumeClaim{topoMismatchPVC},
expectedUnbound: false, reasons: []string{ErrReasonBindConflict},
expectedBound: true,
}, },
} }
@ -1027,7 +1012,7 @@ func TestFindPodVolumesWithProvisioning(t *testing.T) {
} }
// Execute // Execute
unboundSatisfied, boundSatisfied, err := testEnv.binder.FindPodVolumes(scenario.pod, testNode) reasons, err := testEnv.binder.FindPodVolumes(scenario.pod, testNode)
// Validate // Validate
if !scenario.shouldFail && err != nil { if !scenario.shouldFail && err != nil {
@ -1036,12 +1021,7 @@ func TestFindPodVolumesWithProvisioning(t *testing.T) {
if scenario.shouldFail && err == nil { if scenario.shouldFail && err == nil {
t.Error("returned success but expected error") t.Error("returned success but expected error")
} }
if boundSatisfied != scenario.expectedBound { checkReasons(t, reasons, scenario.reasons)
t.Errorf("expected boundSatisfied %v, got %v", scenario.expectedBound, boundSatisfied)
}
if unboundSatisfied != scenario.expectedUnbound {
t.Errorf("expected unboundSatisfied %v, got %v", scenario.expectedUnbound, unboundSatisfied)
}
testEnv.validatePodCache(t, testNode.Name, scenario.pod, scenario.expectedBindings, scenario.expectedProvisions) testEnv.validatePodCache(t, testNode.Name, scenario.pod, scenario.expectedBindings, scenario.expectedProvisions)
} }
@ -1067,41 +1047,33 @@ func TestFindPodVolumesWithCSIMigration(t *testing.T) {
initCSINodes []*storagev1.CSINode initCSINodes []*storagev1.CSINode
// Expected return values // Expected return values
expectedUnbound bool reasons []string
expectedBound bool shouldFail bool
shouldFail bool
} }
scenarios := map[string]scenarioType{ scenarios := map[string]scenarioType{
"pvc-bound": { "pvc-bound": {
podPVCs: []*v1.PersistentVolumeClaim{boundMigrationPVC}, podPVCs: []*v1.PersistentVolumeClaim{boundMigrationPVC},
pvs: []*v1.PersistentVolume{migrationPVBound}, pvs: []*v1.PersistentVolume{migrationPVBound},
initNodes: []*v1.Node{node1Zone1}, initNodes: []*v1.Node{node1Zone1},
initCSINodes: []*storagev1.CSINode{csiNode1Migrated}, initCSINodes: []*storagev1.CSINode{csiNode1Migrated},
expectedBound: true,
expectedUnbound: true,
}, },
"pvc-bound,csinode-not-migrated": { "pvc-bound,csinode-not-migrated": {
podPVCs: []*v1.PersistentVolumeClaim{boundMigrationPVC}, podPVCs: []*v1.PersistentVolumeClaim{boundMigrationPVC},
pvs: []*v1.PersistentVolume{migrationPVBound}, pvs: []*v1.PersistentVolume{migrationPVBound},
initNodes: []*v1.Node{node1Zone1}, initNodes: []*v1.Node{node1Zone1},
initCSINodes: []*storagev1.CSINode{csiNode1NotMigrated}, initCSINodes: []*storagev1.CSINode{csiNode1NotMigrated},
expectedBound: true,
expectedUnbound: true,
}, },
"pvc-bound,missing-csinode": { "pvc-bound,missing-csinode": {
podPVCs: []*v1.PersistentVolumeClaim{boundMigrationPVC}, podPVCs: []*v1.PersistentVolumeClaim{boundMigrationPVC},
pvs: []*v1.PersistentVolume{migrationPVBound}, pvs: []*v1.PersistentVolume{migrationPVBound},
initNodes: []*v1.Node{node1Zone1}, initNodes: []*v1.Node{node1Zone1},
expectedBound: true,
expectedUnbound: true,
}, },
"pvc-bound,node-different-zone": { "pvc-bound,node-different-zone": {
podPVCs: []*v1.PersistentVolumeClaim{boundMigrationPVC}, podPVCs: []*v1.PersistentVolumeClaim{boundMigrationPVC},
pvs: []*v1.PersistentVolume{migrationPVBound}, pvs: []*v1.PersistentVolume{migrationPVBound},
initNodes: []*v1.Node{node1Zone2}, initNodes: []*v1.Node{node1Zone2},
initCSINodes: []*storagev1.CSINode{csiNode1Migrated}, initCSINodes: []*storagev1.CSINode{csiNode1Migrated},
expectedBound: false, reasons: []string{ErrReasonNodeConflict},
expectedUnbound: true,
}, },
} }
@ -1140,7 +1112,7 @@ func TestFindPodVolumesWithCSIMigration(t *testing.T) {
} }
// Execute // Execute
unboundSatisfied, boundSatisfied, err := testEnv.binder.FindPodVolumes(scenario.pod, node) reasons, err := testEnv.binder.FindPodVolumes(scenario.pod, node)
// Validate // Validate
if !scenario.shouldFail && err != nil { if !scenario.shouldFail && err != nil {
@ -1149,12 +1121,7 @@ func TestFindPodVolumesWithCSIMigration(t *testing.T) {
if scenario.shouldFail && err == nil { if scenario.shouldFail && err == nil {
t.Error("returned success but expected error") t.Error("returned success but expected error")
} }
if boundSatisfied != scenario.expectedBound { checkReasons(t, reasons, scenario.reasons)
t.Errorf("expected boundSatisfied %v, got %v", scenario.expectedBound, boundSatisfied)
}
if unboundSatisfied != scenario.expectedUnbound {
t.Errorf("expected unboundSatisfied %v, got %v", scenario.expectedUnbound, unboundSatisfied)
}
} }
for name, scenario := range scenarios { for name, scenario := range scenarios {
@ -1966,12 +1933,12 @@ func TestFindAssumeVolumes(t *testing.T) {
// Execute // Execute
// 1. Find matching PVs // 1. Find matching PVs
unboundSatisfied, _, err := testEnv.binder.FindPodVolumes(pod, testNode) reasons, err := testEnv.binder.FindPodVolumes(pod, testNode)
if err != nil { if err != nil {
t.Errorf("Test failed: FindPodVolumes returned error: %v", err) t.Errorf("Test failed: FindPodVolumes returned error: %v", err)
} }
if !unboundSatisfied { if len(reasons) > 0 {
t.Errorf("Test failed: couldn't find PVs for all PVCs") t.Errorf("Test failed: couldn't find PVs for all PVCs: %v", reasons)
} }
expectedBindings := testEnv.getPodBindings(t, testNode.Name, pod) expectedBindings := testEnv.getPodBindings(t, testNode.Name, pod)
@ -1992,12 +1959,12 @@ func TestFindAssumeVolumes(t *testing.T) {
// This should always return the original chosen pv // This should always return the original chosen pv
// Run this many times in case sorting returns different orders for the two PVs. // Run this many times in case sorting returns different orders for the two PVs.
for i := 0; i < 50; i++ { for i := 0; i < 50; i++ {
unboundSatisfied, _, err := testEnv.binder.FindPodVolumes(pod, testNode) reasons, err := testEnv.binder.FindPodVolumes(pod, testNode)
if err != nil { if err != nil {
t.Errorf("Test failed: FindPodVolumes returned error: %v", err) t.Errorf("Test failed: FindPodVolumes returned error: %v", err)
} }
if !unboundSatisfied { if len(reasons) > 0 {
t.Errorf("Test failed: couldn't find PVs for all PVCs") t.Errorf("Test failed: couldn't find PVs for all PVCs: %v", reasons)
} }
testEnv.validatePodCache(t, testNode.Name, pod, expectedBindings, nil) testEnv.validatePodCache(t, testNode.Name, pod, expectedBindings, nil)
} }

View File

@ -38,6 +38,7 @@ import (
"k8s.io/client-go/informers" "k8s.io/client-go/informers"
clientsetfake "k8s.io/client-go/kubernetes/fake" clientsetfake "k8s.io/client-go/kubernetes/fake"
extenderv1 "k8s.io/kube-scheduler/extender/v1" extenderv1 "k8s.io/kube-scheduler/extender/v1"
volumescheduling "k8s.io/kubernetes/pkg/controller/volume/scheduling"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config" schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpodtopologyspread" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpodtopologyspread"
@ -50,7 +51,6 @@ import (
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/queuesort" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/queuesort"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
@ -1999,8 +1999,8 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
name: "ErrVolume... errors should not be tried as it indicates that the pod is unschedulable due to no matching volumes for pod on node", name: "ErrVolume... errors should not be tried as it indicates that the pod is unschedulable due to no matching volumes for pod on node",
nodesStatuses: framework.NodeToStatusMap{ nodesStatuses: framework.NodeToStatusMap{
"machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, volumezone.ErrReasonConflict), "machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, volumezone.ErrReasonConflict),
"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, volumebinding.ErrReasonNodeConflict), "machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, volumescheduling.ErrReasonNodeConflict),
"machine3": framework.NewStatus(framework.UnschedulableAndUnresolvable, volumebinding.ErrReasonBindConflict), "machine3": framework.NewStatus(framework.UnschedulableAndUnresolvable, volumescheduling.ErrReasonBindConflict),
}, },
expected: map[string]bool{"machine4": true}, expected: map[string]bool{"machine4": true},
}, },

View File

@ -36,13 +36,6 @@ var _ framework.FilterPlugin = &VolumeBinding{}
// Name is the name of the plugin used in Registry and configurations. // Name is the name of the plugin used in Registry and configurations.
const Name = "VolumeBinding" const Name = "VolumeBinding"
const (
// ErrReasonBindConflict is used for VolumeBindingNoMatch predicate error.
ErrReasonBindConflict = "node(s) didn't find available persistent volumes to bind"
// ErrReasonNodeConflict is used for VolumeNodeAffinityConflict predicate error.
ErrReasonNodeConflict = "node(s) had volume node affinity conflict"
)
// Name returns name of the plugin. It is used in logs, etc. // Name returns name of the plugin. It is used in logs, etc.
func (pl *VolumeBinding) Name() string { func (pl *VolumeBinding) Name() string {
return Name return Name
@ -79,19 +72,16 @@ func (pl *VolumeBinding) Filter(ctx context.Context, cs *framework.CycleState, p
return nil return nil
} }
unboundSatisfied, boundSatisfied, err := pl.binder.Binder.FindPodVolumes(pod, node) reasons, err := pl.binder.Binder.FindPodVolumes(pod, node)
if err != nil { if err != nil {
return framework.NewStatus(framework.Error, err.Error()) return framework.NewStatus(framework.Error, err.Error())
} }
if !boundSatisfied || !unboundSatisfied { if len(reasons) > 0 {
status := framework.NewStatus(framework.UnschedulableAndUnresolvable) status := framework.NewStatus(framework.UnschedulableAndUnresolvable)
if !boundSatisfied { for _, reason := range reasons {
status.AppendReason(ErrReasonNodeConflict) status.AppendReason(reason)
}
if !unboundSatisfied {
status.AppendReason(ErrReasonBindConflict)
} }
return status return status
} }

View File

@ -58,9 +58,7 @@ func TestVolumeBinding(t *testing.T) {
pod: &v1.Pod{Spec: volState}, pod: &v1.Pod{Spec: volState},
node: &v1.Node{}, node: &v1.Node{},
volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{ volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
AllBound: true, AllBound: true,
FindUnboundSatsified: true,
FindBoundSatsified: true,
}, },
wantStatus: nil, wantStatus: nil,
}, },
@ -69,31 +67,25 @@ func TestVolumeBinding(t *testing.T) {
pod: &v1.Pod{Spec: volState}, pod: &v1.Pod{Spec: volState},
node: &v1.Node{}, node: &v1.Node{},
volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{ volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
FindUnboundSatsified: false, FindReasons: []string{volumescheduling.ErrReasonBindConflict},
FindBoundSatsified: true,
}, },
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReasonBindConflict), wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, volumescheduling.ErrReasonBindConflict),
}, },
{ {
name: "bound and unbound unsatisfied", name: "bound and unbound unsatisfied",
pod: &v1.Pod{Spec: volState}, pod: &v1.Pod{Spec: volState},
node: &v1.Node{}, node: &v1.Node{},
volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{ volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
FindUnboundSatsified: false, FindReasons: []string{volumescheduling.ErrReasonBindConflict, volumescheduling.ErrReasonNodeConflict},
FindBoundSatsified: false,
}, },
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReasonNodeConflict, wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, volumescheduling.ErrReasonBindConflict, volumescheduling.ErrReasonNodeConflict),
ErrReasonBindConflict),
}, },
{ {
name: "unbound/found matches/bind succeeds", name: "unbound/found matches/bind succeeds",
pod: &v1.Pod{Spec: volState}, pod: &v1.Pod{Spec: volState},
node: &v1.Node{}, node: &v1.Node{},
volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{ volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{},
FindUnboundSatsified: true, wantStatus: nil,
FindBoundSatsified: true,
},
wantStatus: nil,
}, },
{ {
name: "predicate error", name: "predicate error",

View File

@ -901,9 +901,7 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
{ {
name: "all bound", name: "all bound",
volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{ volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
AllBound: true, AllBound: true,
FindUnboundSatsified: true,
FindBoundSatsified: true,
}, },
expectAssumeCalled: true, expectAssumeCalled: true,
expectPodBind: &v1.Binding{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "foo-ns", UID: types.UID("foo")}, Target: v1.ObjectReference{Kind: "Node", Name: "machine1"}}, expectPodBind: &v1.Binding{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "foo-ns", UID: types.UID("foo")}, Target: v1.ObjectReference{Kind: "Node", Name: "machine1"}},
@ -912,9 +910,8 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
{ {
name: "bound/invalid pv affinity", name: "bound/invalid pv affinity",
volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{ volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
AllBound: true, AllBound: true,
FindUnboundSatsified: true, FindReasons: []string{volumescheduling.ErrReasonNodeConflict},
FindBoundSatsified: false,
}, },
eventReason: "FailedScheduling", eventReason: "FailedScheduling",
expectError: makePredicateError("1 node(s) had volume node affinity conflict"), expectError: makePredicateError("1 node(s) had volume node affinity conflict"),
@ -922,8 +919,7 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
{ {
name: "unbound/no matches", name: "unbound/no matches",
volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{ volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
FindUnboundSatsified: false, FindReasons: []string{volumescheduling.ErrReasonBindConflict},
FindBoundSatsified: true,
}, },
eventReason: "FailedScheduling", eventReason: "FailedScheduling",
expectError: makePredicateError("1 node(s) didn't find available persistent volumes to bind"), expectError: makePredicateError("1 node(s) didn't find available persistent volumes to bind"),
@ -931,18 +927,14 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
{ {
name: "bound and unbound unsatisfied", name: "bound and unbound unsatisfied",
volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{ volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
FindUnboundSatsified: false, FindReasons: []string{volumescheduling.ErrReasonBindConflict, volumescheduling.ErrReasonNodeConflict},
FindBoundSatsified: false,
}, },
eventReason: "FailedScheduling", eventReason: "FailedScheduling",
expectError: makePredicateError("1 node(s) didn't find available persistent volumes to bind, 1 node(s) had volume node affinity conflict"), expectError: makePredicateError("1 node(s) didn't find available persistent volumes to bind, 1 node(s) had volume node affinity conflict"),
}, },
{ {
name: "unbound/found matches/bind succeeds", name: "unbound/found matches/bind succeeds",
volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{ volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{},
FindUnboundSatsified: true,
FindBoundSatsified: true,
},
expectAssumeCalled: true, expectAssumeCalled: true,
expectBindCalled: true, expectBindCalled: true,
expectPodBind: &v1.Binding{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "foo-ns", UID: types.UID("foo")}, Target: v1.ObjectReference{Kind: "Node", Name: "machine1"}}, expectPodBind: &v1.Binding{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "foo-ns", UID: types.UID("foo")}, Target: v1.ObjectReference{Kind: "Node", Name: "machine1"}},
@ -959,9 +951,7 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
{ {
name: "assume error", name: "assume error",
volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{ volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
FindUnboundSatsified: true, AssumeErr: assumeErr,
FindBoundSatsified: true,
AssumeErr: assumeErr,
}, },
expectAssumeCalled: true, expectAssumeCalled: true,
eventReason: "FailedScheduling", eventReason: "FailedScheduling",
@ -970,9 +960,7 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
{ {
name: "bind error", name: "bind error",
volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{ volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
FindUnboundSatsified: true, BindErr: bindErr,
FindBoundSatsified: true,
BindErr: bindErr,
}, },
expectAssumeCalled: true, expectAssumeCalled: true,
expectBindCalled: true, expectBindCalled: true,