Merge pull request #88230 from pohly/volume-binder-reasons

volume binder: simplify API
Kubernetes Prow Robot, 2020-02-28 12:54:38 -08:00 (committed by GitHub)
commit cfa5f20f8e
8 changed files with 186 additions and 217 deletions

View File

@@ -45,6 +45,24 @@ import (
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
// ConflictReason is used for the special strings which explain why
// volume binding is impossible for a node.
type ConflictReason string
// ConflictReasons contains all reasons that explain why volume binding is impossible for a node.
type ConflictReasons []ConflictReason
func (reasons ConflictReasons) Len() int { return len(reasons) }
func (reasons ConflictReasons) Less(i, j int) bool { return reasons[i] < reasons[j] }
func (reasons ConflictReasons) Swap(i, j int) { reasons[i], reasons[j] = reasons[j], reasons[i] }
const (
// ErrReasonBindConflict is used for VolumeBindingNoMatch predicate error.
ErrReasonBindConflict ConflictReason = "node(s) didn't find available persistent volumes to bind"
// ErrReasonNodeConflict is used for VolumeNodeAffinityConflict predicate error.
ErrReasonNodeConflict ConflictReason = "node(s) had volume node affinity conflict"
)
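
The exported ConflictReasons type implements sort.Interface so that callers can render a deterministic, user-facing message no matter in which order conflicts were detected. A self-contained sketch of how such a consumer might look (the joinReasons helper is hypothetical, not part of this change):

package main

import (
	"fmt"
	"sort"
	"strings"
)

// Mirrors the definitions added above.
type ConflictReason string
type ConflictReasons []ConflictReason

func (reasons ConflictReasons) Len() int           { return len(reasons) }
func (reasons ConflictReasons) Less(i, j int) bool { return reasons[i] < reasons[j] }
func (reasons ConflictReasons) Swap(i, j int)      { reasons[i], reasons[j] = reasons[j], reasons[i] }

const (
	ErrReasonBindConflict ConflictReason = "node(s) didn't find available persistent volumes to bind"
	ErrReasonNodeConflict ConflictReason = "node(s) had volume node affinity conflict"
)

// joinReasons sorts the reasons for stable output, then joins them.
func joinReasons(reasons ConflictReasons) string {
	sort.Sort(reasons)
	parts := make([]string, 0, len(reasons))
	for _, reason := range reasons {
		parts = append(parts, string(reason))
	}
	return strings.Join(parts, "; ")
}

func main() {
	fmt.Println(joinReasons(ConflictReasons{ErrReasonNodeConflict, ErrReasonBindConflict}))
}
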
// InTreeToCSITranslator contains methods required to check migratable status
// and perform translations from InTree PVs to CSI
type InTreeToCSITranslator interface {
@@ -83,11 +101,11 @@ type SchedulerVolumeBinder interface {
// If a PVC is bound, it checks if the PV's NodeAffinity matches the Node.
// Otherwise, it tries to find an available PV to bind to the PVC.
//
// It returns true if all of the Pod's PVCs have matching PVs or can be dynamically provisioned,
// and returns true if bound volumes satisfy the PV NodeAffinity.
// It returns an error when something went wrong or a list of reasons why the node is
// (currently) not usable for the pod.
//
// This function is called by the volume binding scheduler predicate and can be called in parallel
FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolumesSatisified, boundVolumesSatisfied bool, err error)
FindPodVolumes(pod *v1.Pod, node *v1.Node) (reasons ConflictReasons, err error)
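
With the two result booleans collapsed into a single slice, callers can tell a real failure (err != nil) apart from a node that is merely unusable for the pod (len(reasons) > 0). A hedged sketch of a call site; nodeIsUsable is a hypothetical helper, not part of this interface:

// nodeIsUsable illustrates the new contract: an error aborts scheduling,
// while a non-empty reasons slice only disqualifies this one node.
func nodeIsUsable(binder SchedulerVolumeBinder, pod *v1.Pod, node *v1.Node) (bool, ConflictReasons, error) {
	reasons, err := binder.FindPodVolumes(pod, node)
	if err != nil {
		return false, nil, err
	}
	return len(reasons) == 0, reasons, nil
}
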
// AssumePodVolumes will:
// 1. Take the PV matches for unbound PVCs and update the PV cache assuming
@@ -166,15 +184,29 @@ func (b *volumeBinder) GetBindingsCache() PodBindingCache {
// FindPodVolumes caches the matching PVs and PVCs to provision per node in podBindingCache.
// This method intentionally takes in a *v1.Node object instead of using volumebinder.nodeInformer.
// That's necessary because some operations need to pass fake node objects to the predicate.
func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolumesSatisfied, boundVolumesSatisfied bool, err error) {
func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (reasons ConflictReasons, err error) {
podName := getPodName(pod)
// Warning: Below log needs high verbosity as it can be printed several times (#60933).
klog.V(5).Infof("FindPodVolumes for pod %q, node %q", podName, node.Name)
// Initialize to true for pods that don't have volumes
unboundVolumesSatisfied = true
boundVolumesSatisfied = true
// Initialize to true for pods that don't have volumes. These
// booleans get translated into reason strings when the function
// returns without an error.
unboundVolumesSatisfied := true
boundVolumesSatisfied := true
defer func() {
if err != nil {
return
}
if !boundVolumesSatisfied {
reasons = append(reasons, ErrReasonNodeConflict)
}
if !unboundVolumesSatisfied {
reasons = append(reasons, ErrReasonBindConflict)
}
}()
start := time.Now()
defer func() {
metrics.VolumeSchedulingStageLatency.WithLabelValues("predicate").Observe(time.Since(start).Seconds())
@@ -210,19 +242,19 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume
// volumes can get bound/provisioned in between calls.
boundClaims, claimsToBind, unboundClaimsImmediate, err := b.getPodVolumes(pod)
if err != nil {
return false, false, err
return nil, err
}
// Immediate claims should be bound
if len(unboundClaimsImmediate) > 0 {
return false, false, fmt.Errorf("pod has unbound immediate PersistentVolumeClaims")
return nil, fmt.Errorf("pod has unbound immediate PersistentVolumeClaims")
}
// Check PV node affinity on bound volumes
if len(boundClaims) > 0 {
boundVolumesSatisfied, err = b.checkBoundClaims(boundClaims, node, podName)
if err != nil {
return false, false, err
return nil, err
}
}
@@ -237,8 +269,9 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume
for _, claim := range claimsToBind {
if selectedNode, ok := claim.Annotations[pvutil.AnnSelectedNode]; ok {
if selectedNode != node.Name {
// Fast path, skip unmatched node
return false, boundVolumesSatisfied, nil
// Fast path, skip unmatched node.
unboundVolumesSatisfied = false
return
}
claimsToProvision = append(claimsToProvision, claim)
} else {
@@ -251,7 +284,7 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume
var unboundClaims []*v1.PersistentVolumeClaim
unboundVolumesSatisfied, matchedBindings, unboundClaims, err = b.findMatchingVolumes(pod, claimsToFindMatching, node)
if err != nil {
return false, false, err
return nil, err
}
claimsToProvision = append(claimsToProvision, unboundClaims...)
}
@@ -260,12 +293,12 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume
if len(claimsToProvision) > 0 {
unboundVolumesSatisfied, provisionedClaims, err = b.checkVolumeProvisions(pod, claimsToProvision, node)
if err != nil {
return false, false, err
return nil, err
}
}
}
return unboundVolumesSatisfied, boundVolumesSatisfied, nil
return
}
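
The naked returns above are safe because Go runs deferred closures after the named result values have been set, and a deferred closure may still rewrite them. Every exit path, including the early fast-path return, therefore goes through the same boolean-to-reason translation exactly once. A standalone illustration of the idiom (not taken from this change):

// classify mimics the pattern used in FindPodVolumes: the deferred
// closure inspects the named results and fills in reasons on success.
func classify(ok bool) (reasons ConflictReasons, err error) {
	defer func() {
		if err != nil {
			return // on error, report only the error
		}
		if !ok {
			reasons = append(reasons, ErrReasonBindConflict)
		}
	}()
	return // naked return; reasons is set by the deferred closure
}
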
// AssumePodVolumes will take the cached matching PVs and PVCs to provision

View File

@@ -20,12 +20,11 @@ import "k8s.io/api/core/v1"
// FakeVolumeBinderConfig holds configurations for fake volume binder.
type FakeVolumeBinderConfig struct {
AllBound bool
FindUnboundSatsified bool
FindBoundSatsified bool
FindErr error
AssumeErr error
BindErr error
AllBound bool
FindReasons ConflictReasons
FindErr error
AssumeErr error
BindErr error
}
// NewFakeVolumeBinder sets up all the caches needed for the scheduler to make
@@ -44,8 +43,8 @@ type FakeVolumeBinder struct {
}
// FindPodVolumes implements SchedulerVolumeBinder.FindPodVolumes.
func (b *FakeVolumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolumesSatisfied, boundVolumesSatsified bool, err error) {
return b.config.FindUnboundSatsified, b.config.FindBoundSatsified, b.config.FindErr
func (b *FakeVolumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (reasons ConflictReasons, err error) {
return b.config.FindReasons, b.config.FindErr
}
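
Tests now state the exact conflict reasons the plugin should observe instead of flipping two booleans. A minimal usage sketch, assuming the NewFakeVolumeBinder constructor set up earlier in this file (the test function itself is hypothetical):

func TestFakeReplaysReasons(t *testing.T) {
	binder := NewFakeVolumeBinder(&FakeVolumeBinderConfig{
		FindReasons: ConflictReasons{ErrReasonNodeConflict},
	})
	// The fake ignores its arguments and replays the configured values.
	reasons, err := binder.FindPodVolumes(nil, nil)
	if err != nil || len(reasons) != 1 || reasons[0] != ErrReasonNodeConflict {
		t.Errorf("unexpected result: %v, %v", reasons, err)
	}
}
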
// AssumePodVolumes implements SchedulerVolumeBinder.AssumePodVolumes.

View File

@@ -20,6 +20,7 @@ import (
"context"
"fmt"
"reflect"
"sort"
"testing"
"time"
@@ -735,6 +736,41 @@ func addProvisionAnn(pvc *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
return res
}
// reasonNames pretty-prints a list of reasons with variable names in
// case of a test failure because that is easier to read than the full
// strings.
func reasonNames(reasons ConflictReasons) string {
var varNames []string
for _, reason := range reasons {
switch reason {
case ErrReasonBindConflict:
varNames = append(varNames, "ErrReasonBindConflict")
case ErrReasonNodeConflict:
varNames = append(varNames, "ErrReasonNodeConflict")
default:
varNames = append(varNames, string(reason))
}
}
return fmt.Sprintf("%v", varNames)
}
func checkReasons(t *testing.T, actual, expected ConflictReasons) {
equal := len(actual) == len(expected)
sort.Sort(actual)
sort.Sort(expected)
if equal {
for i, reason := range actual {
if reason != expected[i] {
equal = false
break
}
}
}
if !equal {
t.Errorf("expected failure reasons %s, got %s", reasonNames(expected), reasonNames(actual))
}
}
func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
type scenarioType struct {
// Inputs
@@ -749,39 +785,28 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
expectedBindings []*bindingInfo
// Expected return values
expectedUnbound bool
expectedBound bool
shouldFail bool
reasons ConflictReasons
shouldFail bool
}
scenarios := map[string]scenarioType{
"no-volumes": {
pod: makePod(nil),
expectedUnbound: true,
expectedBound: true,
pod: makePod(nil),
},
"no-pvcs": {
pod: makePodWithoutPVC(),
expectedUnbound: true,
expectedBound: true,
pod: makePodWithoutPVC(),
},
"pvc-not-found": {
cachePVCs: []*v1.PersistentVolumeClaim{},
podPVCs: []*v1.PersistentVolumeClaim{boundPVC},
expectedUnbound: false,
expectedBound: false,
shouldFail: true,
cachePVCs: []*v1.PersistentVolumeClaim{},
podPVCs: []*v1.PersistentVolumeClaim{boundPVC},
shouldFail: true,
},
"bound-pvc": {
podPVCs: []*v1.PersistentVolumeClaim{boundPVC},
pvs: []*v1.PersistentVolume{pvBound},
expectedUnbound: true,
expectedBound: true,
podPVCs: []*v1.PersistentVolumeClaim{boundPVC},
pvs: []*v1.PersistentVolume{pvBound},
},
"bound-pvc,pv-not-exists": {
podPVCs: []*v1.PersistentVolumeClaim{boundPVC},
expectedUnbound: false,
expectedBound: false,
shouldFail: true,
podPVCs: []*v1.PersistentVolumeClaim{boundPVC},
shouldFail: true,
},
"prebound-pvc": {
podPVCs: []*v1.PersistentVolumeClaim{preboundPVC},
@@ -792,48 +817,37 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
pvs: []*v1.PersistentVolume{pvNode2, pvNode1a, pvNode1b},
expectedBindings: []*bindingInfo{makeBinding(unboundPVC, pvNode1a)},
expectedUnbound: true,
expectedBound: true,
},
"unbound-pvc,pv-different-node": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
pvs: []*v1.PersistentVolume{pvNode2},
expectedUnbound: false,
expectedBound: true,
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
pvs: []*v1.PersistentVolume{pvNode2},
reasons: ConflictReasons{ErrReasonBindConflict},
},
"two-unbound-pvcs": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, unboundPVC2},
pvs: []*v1.PersistentVolume{pvNode1a, pvNode1b},
expectedBindings: []*bindingInfo{makeBinding(unboundPVC, pvNode1a), makeBinding(unboundPVC2, pvNode1b)},
expectedUnbound: true,
expectedBound: true,
},
"two-unbound-pvcs,order-by-size": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC2, unboundPVC},
pvs: []*v1.PersistentVolume{pvNode1a, pvNode1b},
expectedBindings: []*bindingInfo{makeBinding(unboundPVC, pvNode1a), makeBinding(unboundPVC2, pvNode1b)},
expectedUnbound: true,
expectedBound: true,
},
"two-unbound-pvcs,partial-match": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, unboundPVC2},
pvs: []*v1.PersistentVolume{pvNode1a},
expectedBindings: []*bindingInfo{makeBinding(unboundPVC, pvNode1a)},
expectedUnbound: false,
expectedBound: true,
reasons: ConflictReasons{ErrReasonBindConflict},
},
"one-bound,one-unbound": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, boundPVC},
pvs: []*v1.PersistentVolume{pvBound, pvNode1a},
expectedBindings: []*bindingInfo{makeBinding(unboundPVC, pvNode1a)},
expectedUnbound: true,
expectedBound: true,
},
"one-bound,one-unbound,no-match": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, boundPVC},
pvs: []*v1.PersistentVolume{pvBound, pvNode2},
expectedUnbound: false,
expectedBound: true,
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, boundPVC},
pvs: []*v1.PersistentVolume{pvBound, pvNode2},
reasons: ConflictReasons{ErrReasonBindConflict},
},
"one-prebound,one-unbound": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, preboundPVC},
@@ -841,35 +855,26 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
shouldFail: true,
},
"immediate-bound-pvc": {
podPVCs: []*v1.PersistentVolumeClaim{immediateBoundPVC},
pvs: []*v1.PersistentVolume{pvBoundImmediate},
expectedUnbound: true,
expectedBound: true,
podPVCs: []*v1.PersistentVolumeClaim{immediateBoundPVC},
pvs: []*v1.PersistentVolume{pvBoundImmediate},
},
"immediate-bound-pvc-wrong-node": {
podPVCs: []*v1.PersistentVolumeClaim{immediateBoundPVC},
pvs: []*v1.PersistentVolume{pvBoundImmediateNode2},
expectedUnbound: true,
expectedBound: false,
podPVCs: []*v1.PersistentVolumeClaim{immediateBoundPVC},
pvs: []*v1.PersistentVolume{pvBoundImmediateNode2},
reasons: ConflictReasons{ErrReasonNodeConflict},
},
"immediate-unbound-pvc": {
podPVCs: []*v1.PersistentVolumeClaim{immediateUnboundPVC},
expectedUnbound: false,
expectedBound: false,
shouldFail: true,
podPVCs: []*v1.PersistentVolumeClaim{immediateUnboundPVC},
shouldFail: true,
},
"immediate-unbound-pvc,delayed-mode-bound": {
podPVCs: []*v1.PersistentVolumeClaim{immediateUnboundPVC, boundPVC},
pvs: []*v1.PersistentVolume{pvBound},
expectedUnbound: false,
expectedBound: false,
shouldFail: true,
podPVCs: []*v1.PersistentVolumeClaim{immediateUnboundPVC, boundPVC},
pvs: []*v1.PersistentVolume{pvBound},
shouldFail: true,
},
"immediate-unbound-pvc,delayed-mode-unbound": {
podPVCs: []*v1.PersistentVolumeClaim{immediateUnboundPVC, unboundPVC},
expectedUnbound: false,
expectedBound: false,
shouldFail: true,
podPVCs: []*v1.PersistentVolumeClaim{immediateUnboundPVC, unboundPVC},
shouldFail: true,
},
}
@@ -902,7 +907,7 @@ }
}
// Execute
unboundSatisfied, boundSatisfied, err := testEnv.binder.FindPodVolumes(scenario.pod, testNode)
reasons, err := testEnv.binder.FindPodVolumes(scenario.pod, testNode)
// Validate
if !scenario.shouldFail && err != nil {
@@ -911,12 +916,7 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
if scenario.shouldFail && err == nil {
t.Error("returned success but expected error")
}
if boundSatisfied != scenario.expectedBound {
t.Errorf("expected boundSatisfied %v, got %v", scenario.expectedBound, boundSatisfied)
}
if unboundSatisfied != scenario.expectedUnbound {
t.Errorf("expected unboundSatisfied %v, got %v", scenario.expectedUnbound, unboundSatisfied)
}
checkReasons(t, reasons, scenario.reasons)
testEnv.validatePodCache(t, testNode.Name, scenario.pod, scenario.expectedBindings, nil)
}
@@ -940,61 +940,46 @@ func TestFindPodVolumesWithProvisioning(t *testing.T) {
expectedProvisions []*v1.PersistentVolumeClaim
// Expected return values
expectedUnbound bool
expectedBound bool
shouldFail bool
reasons ConflictReasons
shouldFail bool
}
scenarios := map[string]scenarioType{
"one-provisioned": {
podPVCs: []*v1.PersistentVolumeClaim{provisionedPVC},
expectedProvisions: []*v1.PersistentVolumeClaim{provisionedPVC},
expectedUnbound: true,
expectedBound: true,
},
"two-unbound-pvcs,one-matched,one-provisioned": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVC},
pvs: []*v1.PersistentVolume{pvNode1a},
expectedBindings: []*bindingInfo{makeBinding(unboundPVC, pvNode1a)},
expectedProvisions: []*v1.PersistentVolumeClaim{provisionedPVC},
expectedUnbound: true,
expectedBound: true,
},
"one-bound,one-provisioned": {
podPVCs: []*v1.PersistentVolumeClaim{boundPVC, provisionedPVC},
pvs: []*v1.PersistentVolume{pvBound},
expectedProvisions: []*v1.PersistentVolumeClaim{provisionedPVC},
expectedUnbound: true,
expectedBound: true,
},
"one-binding,one-selected-node": {
podPVCs: []*v1.PersistentVolumeClaim{boundPVC, selectedNodePVC},
pvs: []*v1.PersistentVolume{pvBound},
expectedProvisions: []*v1.PersistentVolumeClaim{selectedNodePVC},
expectedUnbound: true,
expectedBound: true,
},
"immediate-unbound-pvc": {
podPVCs: []*v1.PersistentVolumeClaim{immediateUnboundPVC},
expectedUnbound: false,
expectedBound: false,
shouldFail: true,
podPVCs: []*v1.PersistentVolumeClaim{immediateUnboundPVC},
shouldFail: true,
},
"one-immediate-bound,one-provisioned": {
podPVCs: []*v1.PersistentVolumeClaim{immediateBoundPVC, provisionedPVC},
pvs: []*v1.PersistentVolume{pvBoundImmediate},
expectedProvisions: []*v1.PersistentVolumeClaim{provisionedPVC},
expectedUnbound: true,
expectedBound: true,
},
"invalid-provisioner": {
podPVCs: []*v1.PersistentVolumeClaim{noProvisionerPVC},
expectedUnbound: false,
expectedBound: true,
podPVCs: []*v1.PersistentVolumeClaim{noProvisionerPVC},
reasons: ConflictReasons{ErrReasonBindConflict},
},
"volume-topology-unsatisfied": {
podPVCs: []*v1.PersistentVolumeClaim{topoMismatchPVC},
expectedUnbound: false,
expectedBound: true,
podPVCs: []*v1.PersistentVolumeClaim{topoMismatchPVC},
reasons: ConflictReasons{ErrReasonBindConflict},
},
}
@@ -1027,7 +1012,7 @@ }
}
// Execute
unboundSatisfied, boundSatisfied, err := testEnv.binder.FindPodVolumes(scenario.pod, testNode)
reasons, err := testEnv.binder.FindPodVolumes(scenario.pod, testNode)
// Validate
if !scenario.shouldFail && err != nil {
@@ -1036,12 +1021,7 @@ func TestFindPodVolumesWithProvisioning(t *testing.T) {
if scenario.shouldFail && err == nil {
t.Error("returned success but expected error")
}
if boundSatisfied != scenario.expectedBound {
t.Errorf("expected boundSatisfied %v, got %v", scenario.expectedBound, boundSatisfied)
}
if unboundSatisfied != scenario.expectedUnbound {
t.Errorf("expected unboundSatisfied %v, got %v", scenario.expectedUnbound, unboundSatisfied)
}
checkReasons(t, reasons, scenario.reasons)
testEnv.validatePodCache(t, testNode.Name, scenario.pod, scenario.expectedBindings, scenario.expectedProvisions)
}
@@ -1067,41 +1047,33 @@ func TestFindPodVolumesWithCSIMigration(t *testing.T) {
initCSINodes []*storagev1.CSINode
// Expected return values
expectedUnbound bool
expectedBound bool
shouldFail bool
reasons ConflictReasons
shouldFail bool
}
scenarios := map[string]scenarioType{
"pvc-bound": {
podPVCs: []*v1.PersistentVolumeClaim{boundMigrationPVC},
pvs: []*v1.PersistentVolume{migrationPVBound},
initNodes: []*v1.Node{node1Zone1},
initCSINodes: []*storagev1.CSINode{csiNode1Migrated},
expectedBound: true,
expectedUnbound: true,
podPVCs: []*v1.PersistentVolumeClaim{boundMigrationPVC},
pvs: []*v1.PersistentVolume{migrationPVBound},
initNodes: []*v1.Node{node1Zone1},
initCSINodes: []*storagev1.CSINode{csiNode1Migrated},
},
"pvc-bound,csinode-not-migrated": {
podPVCs: []*v1.PersistentVolumeClaim{boundMigrationPVC},
pvs: []*v1.PersistentVolume{migrationPVBound},
initNodes: []*v1.Node{node1Zone1},
initCSINodes: []*storagev1.CSINode{csiNode1NotMigrated},
expectedBound: true,
expectedUnbound: true,
podPVCs: []*v1.PersistentVolumeClaim{boundMigrationPVC},
pvs: []*v1.PersistentVolume{migrationPVBound},
initNodes: []*v1.Node{node1Zone1},
initCSINodes: []*storagev1.CSINode{csiNode1NotMigrated},
},
"pvc-bound,missing-csinode": {
podPVCs: []*v1.PersistentVolumeClaim{boundMigrationPVC},
pvs: []*v1.PersistentVolume{migrationPVBound},
initNodes: []*v1.Node{node1Zone1},
expectedBound: true,
expectedUnbound: true,
podPVCs: []*v1.PersistentVolumeClaim{boundMigrationPVC},
pvs: []*v1.PersistentVolume{migrationPVBound},
initNodes: []*v1.Node{node1Zone1},
},
"pvc-bound,node-different-zone": {
podPVCs: []*v1.PersistentVolumeClaim{boundMigrationPVC},
pvs: []*v1.PersistentVolume{migrationPVBound},
initNodes: []*v1.Node{node1Zone2},
initCSINodes: []*storagev1.CSINode{csiNode1Migrated},
expectedBound: false,
expectedUnbound: true,
podPVCs: []*v1.PersistentVolumeClaim{boundMigrationPVC},
pvs: []*v1.PersistentVolume{migrationPVBound},
initNodes: []*v1.Node{node1Zone2},
initCSINodes: []*storagev1.CSINode{csiNode1Migrated},
reasons: ConflictReasons{ErrReasonNodeConflict},
},
}
@@ -1140,7 +1112,7 @@ }
}
// Execute
unboundSatisfied, boundSatisfied, err := testEnv.binder.FindPodVolumes(scenario.pod, node)
reasons, err := testEnv.binder.FindPodVolumes(scenario.pod, node)
// Validate
if !scenario.shouldFail && err != nil {
@@ -1149,12 +1121,7 @@ func TestFindPodVolumesWithCSIMigration(t *testing.T) {
if scenario.shouldFail && err == nil {
t.Error("returned success but expected error")
}
if boundSatisfied != scenario.expectedBound {
t.Errorf("expected boundSatisfied %v, got %v", scenario.expectedBound, boundSatisfied)
}
if unboundSatisfied != scenario.expectedUnbound {
t.Errorf("expected unboundSatisfied %v, got %v", scenario.expectedUnbound, unboundSatisfied)
}
checkReasons(t, reasons, scenario.reasons)
}
for name, scenario := range scenarios {
@@ -1966,12 +1933,12 @@ func TestFindAssumeVolumes(t *testing.T) {
// Execute
// 1. Find matching PVs
unboundSatisfied, _, err := testEnv.binder.FindPodVolumes(pod, testNode)
reasons, err := testEnv.binder.FindPodVolumes(pod, testNode)
if err != nil {
t.Errorf("Test failed: FindPodVolumes returned error: %v", err)
}
if !unboundSatisfied {
t.Errorf("Test failed: couldn't find PVs for all PVCs")
if len(reasons) > 0 {
t.Errorf("Test failed: couldn't find PVs for all PVCs: %v", reasons)
}
expectedBindings := testEnv.getPodBindings(t, testNode.Name, pod)
@@ -1992,12 +1959,12 @@ func TestFindAssumeVolumes(t *testing.T) {
// This should always return the original chosen pv
// Run this many times in case sorting returns different orders for the two PVs.
for i := 0; i < 50; i++ {
unboundSatisfied, _, err := testEnv.binder.FindPodVolumes(pod, testNode)
reasons, err := testEnv.binder.FindPodVolumes(pod, testNode)
if err != nil {
t.Errorf("Test failed: FindPodVolumes returned error: %v", err)
}
if !unboundSatisfied {
t.Errorf("Test failed: couldn't find PVs for all PVCs")
if len(reasons) > 0 {
t.Errorf("Test failed: couldn't find PVs for all PVCs: %v", reasons)
}
testEnv.validatePodCache(t, testNode.Name, pod, expectedBindings, nil)
}

View File

@@ -44,6 +44,7 @@ go_test(
embed = [":go_default_library"],
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/controller/volume/scheduling:go_default_library",
"//pkg/scheduler/apis/config:go_default_library",
"//pkg/scheduler/framework/plugins/defaultbinder:go_default_library",
"//pkg/scheduler/framework/plugins/defaultpodtopologyspread:go_default_library",
@@ -56,7 +57,6 @@ go_test(
"//pkg/scheduler/framework/plugins/podtopologyspread:go_default_library",
"//pkg/scheduler/framework/plugins/queuesort:go_default_library",
"//pkg/scheduler/framework/plugins/tainttoleration:go_default_library",
"//pkg/scheduler/framework/plugins/volumebinding:go_default_library",
"//pkg/scheduler/framework/plugins/volumerestrictions:go_default_library",
"//pkg/scheduler/framework/plugins/volumezone:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",

View File

@@ -38,6 +38,7 @@ import (
"k8s.io/client-go/informers"
clientsetfake "k8s.io/client-go/kubernetes/fake"
extenderv1 "k8s.io/kube-scheduler/extender/v1"
volumescheduling "k8s.io/kubernetes/pkg/controller/volume/scheduling"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpodtopologyspread"
@@ -50,7 +51,6 @@ import (
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/queuesort"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
@@ -1999,8 +1999,8 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
name: "ErrVolume... errors should not be tried as it indicates that the pod is unschedulable due to no matching volumes for pod on node",
nodesStatuses: framework.NodeToStatusMap{
"machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, volumezone.ErrReasonConflict),
"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, volumebinding.ErrReasonNodeConflict),
"machine3": framework.NewStatus(framework.UnschedulableAndUnresolvable, volumebinding.ErrReasonBindConflict),
"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, string(volumescheduling.ErrReasonNodeConflict)),
"machine3": framework.NewStatus(framework.UnschedulableAndUnresolvable, string(volumescheduling.ErrReasonBindConflict)),
},
expected: map[string]bool{"machine4": true},
},
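
The string(...) conversions are new because ConflictReason is a defined type; the deleted plugin-level constants were untyped string constants and could be passed to NewStatus directly. Illustrative only:

var reason volumescheduling.ConflictReason = volumescheduling.ErrReasonNodeConflict
var s string = string(reason) // explicit conversion required; plain assignment would not compile
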

View File

@@ -36,13 +36,6 @@ var _ framework.FilterPlugin = &VolumeBinding{}
// Name is the name of the plugin used in Registry and configurations.
const Name = "VolumeBinding"
const (
// ErrReasonBindConflict is used for VolumeBindingNoMatch predicate error.
ErrReasonBindConflict = "node(s) didn't find available persistent volumes to bind"
// ErrReasonNodeConflict is used for VolumeNodeAffinityConflict predicate error.
ErrReasonNodeConflict = "node(s) had volume node affinity conflict"
)
// Name returns name of the plugin. It is used in logs, etc.
func (pl *VolumeBinding) Name() string {
return Name
@@ -79,19 +72,16 @@ func (pl *VolumeBinding) Filter(ctx context.Context, cs *framework.CycleState, p
return nil
}
unboundSatisfied, boundSatisfied, err := pl.binder.Binder.FindPodVolumes(pod, node)
reasons, err := pl.binder.Binder.FindPodVolumes(pod, node)
if err != nil {
return framework.NewStatus(framework.Error, err.Error())
}
if !boundSatisfied || !unboundSatisfied {
if len(reasons) > 0 {
status := framework.NewStatus(framework.UnschedulableAndUnresolvable)
if !boundSatisfied {
status.AppendReason(ErrReasonNodeConflict)
}
if !unboundSatisfied {
status.AppendReason(ErrReasonBindConflict)
for _, reason := range reasons {
status.AppendReason(string(reason))
}
return status
}

View File

@@ -58,9 +58,7 @@ func TestVolumeBinding(t *testing.T) {
pod: &v1.Pod{Spec: volState},
node: &v1.Node{},
volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
AllBound: true,
FindUnboundSatsified: true,
FindBoundSatsified: true,
AllBound: true,
},
wantStatus: nil,
},
@@ -69,31 +67,25 @@ func TestVolumeBinding(t *testing.T) {
pod: &v1.Pod{Spec: volState},
node: &v1.Node{},
volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
FindUnboundSatsified: false,
FindBoundSatsified: true,
FindReasons: []volumescheduling.ConflictReason{volumescheduling.ErrReasonBindConflict},
},
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReasonBindConflict),
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, string(volumescheduling.ErrReasonBindConflict)),
},
{
name: "bound and unbound unsatisfied",
pod: &v1.Pod{Spec: volState},
node: &v1.Node{},
volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
FindUnboundSatsified: false,
FindBoundSatsified: false,
FindReasons: []volumescheduling.ConflictReason{volumescheduling.ErrReasonBindConflict, volumescheduling.ErrReasonNodeConflict},
},
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReasonNodeConflict,
ErrReasonBindConflict),
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, string(volumescheduling.ErrReasonBindConflict), string(volumescheduling.ErrReasonNodeConflict)),
},
{
name: "unbound/found matches/bind succeeds",
pod: &v1.Pod{Spec: volState},
node: &v1.Node{},
volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
FindUnboundSatsified: true,
FindBoundSatsified: true,
},
wantStatus: nil,
name: "unbound/found matches/bind succeeds",
pod: &v1.Pod{Spec: volState},
node: &v1.Node{},
volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{},
wantStatus: nil,
},
{
name: "predicate error",

View File

@@ -901,9 +901,7 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
{
name: "all bound",
volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
AllBound: true,
FindUnboundSatsified: true,
FindBoundSatsified: true,
AllBound: true,
},
expectAssumeCalled: true,
expectPodBind: &v1.Binding{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "foo-ns", UID: types.UID("foo")}, Target: v1.ObjectReference{Kind: "Node", Name: "machine1"}},
@@ -912,9 +910,8 @@ {
{
name: "bound/invalid pv affinity",
volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
AllBound: true,
FindUnboundSatsified: true,
FindBoundSatsified: false,
AllBound: true,
FindReasons: volumescheduling.ConflictReasons{volumescheduling.ErrReasonNodeConflict},
},
eventReason: "FailedScheduling",
expectError: makePredicateError("1 node(s) had volume node affinity conflict"),
@@ -922,8 +919,7 @@ {
{
name: "unbound/no matches",
volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
FindUnboundSatsified: false,
FindBoundSatsified: true,
FindReasons: volumescheduling.ConflictReasons{volumescheduling.ErrReasonBindConflict},
},
eventReason: "FailedScheduling",
expectError: makePredicateError("1 node(s) didn't find available persistent volumes to bind"),
@@ -931,18 +927,14 @@ {
{
name: "bound and unbound unsatisfied",
volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
FindUnboundSatsified: false,
FindBoundSatsified: false,
FindReasons: volumescheduling.ConflictReasons{volumescheduling.ErrReasonBindConflict, volumescheduling.ErrReasonNodeConflict},
},
eventReason: "FailedScheduling",
expectError: makePredicateError("1 node(s) didn't find available persistent volumes to bind, 1 node(s) had volume node affinity conflict"),
},
{
name: "unbound/found matches/bind succeeds",
volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
FindUnboundSatsified: true,
FindBoundSatsified: true,
},
name: "unbound/found matches/bind succeeds",
volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{},
expectAssumeCalled: true,
expectBindCalled: true,
expectPodBind: &v1.Binding{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "foo-ns", UID: types.UID("foo")}, Target: v1.ObjectReference{Kind: "Node", Name: "machine1"}},
@@ -959,9 +951,7 @@ {
{
name: "assume error",
volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
FindUnboundSatsified: true,
FindBoundSatsified: true,
AssumeErr: assumeErr,
AssumeErr: assumeErr,
},
expectAssumeCalled: true,
eventReason: "FailedScheduling",
@@ -970,9 +960,7 @@ {
{
name: "bind error",
volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
FindUnboundSatsified: true,
FindBoundSatsified: true,
BindErr: bindErr,
BindErr: bindErr,
},
expectAssumeCalled: true,
expectBindCalled: true,