Merge pull request #114098 from kidddddddddddddddddddddd/feat/pv_list

Optimize volumebinding by moving PV list calls to PreFilter
commit 3a5829044c
Kubernetes Prow Robot, 2022-12-22 10:11:37 -08:00 (committed by GitHub)
5 changed files with 257 additions and 72 deletions
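The change in a sentence: FindPodVolumes used to call b.pvCache.ListPVs for every (pod, node) pair evaluated in Filter; after this PR, GetPodVolumeClaims lists PVs once per pod in PreFilter, grouped by storage class in PodVolumeClaims.unboundVolumesDelayBinding, and FindPodVolumes/findMatchingVolumes reuse that map for every node. Below is a minimal, self-contained sketch of that pattern; the types and names are toys invented for illustration, not the plugin's real code.

package main

import "fmt"

// toyPV stands in for *v1.PersistentVolume.
type toyPV struct {
	name         string
	storageClass string
}

// toyPVCache stands in for the binder's PV assume-cache; lists counts how
// often ListPVs is hit so the saving is visible.
type toyPVCache struct {
	pvs   []toyPV
	lists int
}

func (c *toyPVCache) ListPVs(storageClass string) []toyPV {
	c.lists++
	var out []toyPV
	for _, pv := range c.pvs {
		if pv.storageClass == storageClass {
			out = append(out, pv)
		}
	}
	return out
}

// preFilter lists PVs once per storage class referenced by the pod's
// delayed-binding claims, the way GetPodVolumeClaims now does.
func preFilter(cache *toyPVCache, classes []string) map[string][]toyPV {
	byClass := make(map[string][]toyPV, len(classes))
	for _, sc := range classes {
		byClass[sc] = cache.ListPVs(sc)
	}
	return byClass
}

// filterNode evaluates one candidate node against the pre-listed PVs instead
// of going back to the cache, the way FindPodVolumes now receives them.
func filterNode(node string, byClass map[string][]toyPV) int {
	candidates := 0
	for _, pvs := range byClass {
		candidates += len(pvs) // stand-in for the real matching logic
	}
	return candidates
}

func main() {
	cache := &toyPVCache{pvs: []toyPV{
		{name: "pv-a", storageClass: "wait-sc"},
		{name: "pv-b", storageClass: "wait-sc"},
	}}
	byClass := preFilter(cache, []string{"wait-sc"})
	for _, node := range []string{"node-a", "node-b", "node-c"} {
		fmt.Printf("%s: %d candidate PVs\n", node, filterNode(node, byClass))
	}
	fmt.Println("ListPVs calls:", cache.lists) // 1, instead of one per node
}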

pkg/scheduler/framework/plugins/volumebinding/binder.go

@@ -147,9 +147,9 @@ type InTreeToCSITranslator interface {
 // 2. Once all the assume operations are done in e), the scheduler processes the next Pod in the scheduler queue
 // while the actual binding operation occurs in the background.
 type SchedulerVolumeBinder interface {
-	// GetPodVolumes returns a pod's PVCs separated into bound, unbound with delayed binding (including provisioning)
-	// and unbound with immediate binding (including prebound)
-	GetPodVolumes(pod *v1.Pod) (boundClaims, unboundClaimsDelayBinding, unboundClaimsImmediate []*v1.PersistentVolumeClaim, err error)
+	// GetPodVolumeClaims returns a pod's PVCs separated into bound, unbound with delayed binding (including provisioning),
+	// unbound with immediate binding (including prebound) and PVs that belong to storage classes of unbound PVCs with delayed binding.
+	GetPodVolumeClaims(pod *v1.Pod) (podVolumeClaims *PodVolumeClaims, err error)

 	// GetEligibleNodes checks the existing bound claims of the pod to determine if the list of nodes can be
 	// potentially reduced down to a subset of eligible nodes based on the bound claims which then can be used
@@ -172,7 +172,7 @@ type SchedulerVolumeBinder interface {
 	// for volumes that still need to be created.
 	//
 	// This function is called by the scheduler VolumeBinding plugin and can be called in parallel
-	FindPodVolumes(pod *v1.Pod, boundClaims, claimsToBind []*v1.PersistentVolumeClaim, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error)
+	FindPodVolumes(pod *v1.Pod, podVolumeClaims *PodVolumeClaims, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error)

 	// AssumePodVolumes will:
 	// 1. Take the PV matches for unbound PVCs and update the PV cache assuming
@@ -199,6 +199,17 @@ type SchedulerVolumeBinder interface {
 	BindPodVolumes(ctx context.Context, assumedPod *v1.Pod, podVolumes *PodVolumes) error
 }

+type PodVolumeClaims struct {
+	// boundClaims are the pod's bound PVCs.
+	boundClaims []*v1.PersistentVolumeClaim
+	// unboundClaimsDelayBinding are the pod's unbound PVCs with delayed binding (including provisioning).
+	unboundClaimsDelayBinding []*v1.PersistentVolumeClaim
+	// unboundClaimsImmediate are the pod's unbound PVCs with immediate binding (i.e., they should already be bound).
+	unboundClaimsImmediate []*v1.PersistentVolumeClaim
+	// unboundVolumesDelayBinding are PVs that belong to the storage classes of the pod's unbound PVCs with delayed binding.
+	unboundVolumesDelayBinding map[string][]*v1.PersistentVolume
+}
+
 type volumeBinder struct {
 	kubeClient clientset.Interface
@@ -263,7 +274,7 @@ func NewVolumeBinder(
 // FindPodVolumes finds the matching PVs for PVCs and nodes to provision PVs
 // for the given pod and node. If the node does not fit, conflict reasons are
 // returned.
-func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, boundClaims, claimsToBind []*v1.PersistentVolumeClaim, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) {
+func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, podVolumeClaims *PodVolumeClaims, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) {
 	podVolumes = &PodVolumes{}

 	// Warning: Below log needs high verbosity as it can be printed several times (#60933).
@@ -318,22 +329,22 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, boundClaims, claimsToBind []*
 	}()

 	// Check PV node affinity on bound volumes
-	if len(boundClaims) > 0 {
-		boundVolumesSatisfied, boundPVsFound, err = b.checkBoundClaims(boundClaims, node, pod)
+	if len(podVolumeClaims.boundClaims) > 0 {
+		boundVolumesSatisfied, boundPVsFound, err = b.checkBoundClaims(podVolumeClaims.boundClaims, node, pod)
 		if err != nil {
 			return
 		}
 	}

 	// Find matching volumes and node for unbound claims
-	if len(claimsToBind) > 0 {
+	if len(podVolumeClaims.unboundClaimsDelayBinding) > 0 {
 		var (
 			claimsToFindMatching []*v1.PersistentVolumeClaim
 			claimsToProvision    []*v1.PersistentVolumeClaim
 		)

 		// Filter out claims to provision
-		for _, claim := range claimsToBind {
+		for _, claim := range podVolumeClaims.unboundClaimsDelayBinding {
 			if selectedNode, ok := claim.Annotations[volume.AnnSelectedNode]; ok {
 				if selectedNode != node.Name {
 					// Fast path, skip unmatched node.
@@ -349,7 +360,7 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, boundClaims, claimsToBind []*
 		// Find matching volumes
 		if len(claimsToFindMatching) > 0 {
 			var unboundClaims []*v1.PersistentVolumeClaim
-			unboundVolumesSatisfied, staticBindings, unboundClaims, err = b.findMatchingVolumes(pod, claimsToFindMatching, node)
+			unboundVolumesSatisfied, staticBindings, unboundClaims, err = b.findMatchingVolumes(pod, claimsToFindMatching, podVolumeClaims.unboundVolumesDelayBinding, node)
 			if err != nil {
 				return
 			}
@@ -804,40 +815,49 @@ func (b *volumeBinder) arePodVolumesBound(pod *v1.Pod) bool {
 	return true
 }

-// GetPodVolumes returns a pod's PVCs separated into bound, unbound with delayed binding (including provisioning)
-// and unbound with immediate binding (including prebound)
-func (b *volumeBinder) GetPodVolumes(pod *v1.Pod) (boundClaims []*v1.PersistentVolumeClaim, unboundClaimsDelayBinding []*v1.PersistentVolumeClaim, unboundClaimsImmediate []*v1.PersistentVolumeClaim, err error) {
-	boundClaims = []*v1.PersistentVolumeClaim{}
-	unboundClaimsImmediate = []*v1.PersistentVolumeClaim{}
-	unboundClaimsDelayBinding = []*v1.PersistentVolumeClaim{}
+// GetPodVolumeClaims returns a pod's PVCs separated into bound, unbound with delayed binding (including provisioning),
+// unbound with immediate binding (including prebound) and PVs that belong to storage classes of unbound PVCs with delayed binding.
+func (b *volumeBinder) GetPodVolumeClaims(pod *v1.Pod) (podVolumeClaims *PodVolumeClaims, err error) {
+	podVolumeClaims = &PodVolumeClaims{
+		boundClaims:               []*v1.PersistentVolumeClaim{},
+		unboundClaimsImmediate:    []*v1.PersistentVolumeClaim{},
+		unboundClaimsDelayBinding: []*v1.PersistentVolumeClaim{},
+	}

 	for _, vol := range pod.Spec.Volumes {
 		volumeBound, pvc, err := b.isVolumeBound(pod, &vol)
 		if err != nil {
-			return nil, nil, nil, err
+			return podVolumeClaims, err
 		}
 		if pvc == nil {
 			continue
 		}
 		if volumeBound {
-			boundClaims = append(boundClaims, pvc)
+			podVolumeClaims.boundClaims = append(podVolumeClaims.boundClaims, pvc)
 		} else {
 			delayBindingMode, err := volume.IsDelayBindingMode(pvc, b.classLister)
 			if err != nil {
-				return nil, nil, nil, err
+				return podVolumeClaims, err
 			}
 			// Prebound PVCs are treated as unbound immediate binding
 			if delayBindingMode && pvc.Spec.VolumeName == "" {
 				// Scheduler path
-				unboundClaimsDelayBinding = append(unboundClaimsDelayBinding, pvc)
+				podVolumeClaims.unboundClaimsDelayBinding = append(podVolumeClaims.unboundClaimsDelayBinding, pvc)
 			} else {
 				// !delayBindingMode || pvc.Spec.VolumeName != ""
 				// Immediate binding should have already been bound
-				unboundClaimsImmediate = append(unboundClaimsImmediate, pvc)
+				podVolumeClaims.unboundClaimsImmediate = append(podVolumeClaims.unboundClaimsImmediate, pvc)
 			}
 		}
 	}
-	return boundClaims, unboundClaimsDelayBinding, unboundClaimsImmediate, nil
+
+	podVolumeClaims.unboundVolumesDelayBinding = map[string][]*v1.PersistentVolume{}
+	for _, pvc := range podVolumeClaims.unboundClaimsDelayBinding {
+		// Get storage class name from each PVC
+		storageClassName := volume.GetPersistentVolumeClaimClass(pvc)
+		podVolumeClaims.unboundVolumesDelayBinding[storageClassName] = b.pvCache.ListPVs(storageClassName)
+	}
+	return podVolumeClaims, nil
 }

 func (b *volumeBinder) checkBoundClaims(claims []*v1.PersistentVolumeClaim, node *v1.Node, pod *v1.Pod) (bool, bool, error) {
@@ -876,7 +896,7 @@ func (b *volumeBinder) checkBoundClaims(claims []*v1.PersistentVolumeClaim, node
 // findMatchingVolumes tries to find matching volumes for given claims,
 // and return unbound claims for further provision.
-func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.PersistentVolumeClaim, node *v1.Node) (foundMatches bool, bindings []*BindingInfo, unboundClaims []*v1.PersistentVolumeClaim, err error) {
+func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.PersistentVolumeClaim, unboundVolumesDelayBinding map[string][]*v1.PersistentVolume, node *v1.Node) (foundMatches bool, bindings []*BindingInfo, unboundClaims []*v1.PersistentVolumeClaim, err error) {
 	// Sort all the claims by increasing size request to get the smallest fits
 	sort.Sort(byPVCSize(claimsToBind))
@@ -887,10 +907,10 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.Persi
 	for _, pvc := range claimsToBind {
 		// Get storage class name from each PVC
 		storageClassName := volume.GetPersistentVolumeClaimClass(pvc)
-		allPVs := b.pvCache.ListPVs(storageClassName)
+		pvs := unboundVolumesDelayBinding[storageClassName]

 		// Find a matching PV
-		pv, err := volume.FindMatchingVolume(pvc, allPVs, node, chosenPVs, true)
+		pv, err := volume.FindMatchingVolume(pvc, pvs, node, chosenPVs, true)
 		if err != nil {
 			return false, nil, nil, err
 		}
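With the interface reworked, a caller fetches the pod's claims (and the PVs for their storage classes) once and then probes each candidate node with the same PodVolumeClaims. A hedged sketch of that call pattern follows; evaluateNodes is a hypothetical helper assumed to live in the volumebinding package so it can read the unexported fields (the findPodVolumes helper in binder_test.go below does the same for a single node).

package volumebinding

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// evaluateNodes is NOT part of this PR; it only illustrates the expected call
// pattern: claims are fetched once per pod, then reused for every node.
func evaluateNodes(binder SchedulerVolumeBinder, pod *v1.Pod, nodes []*v1.Node) (map[string]*PodVolumes, error) {
	podVolumeClaims, err := binder.GetPodVolumeClaims(pod)
	if err != nil {
		return nil, err
	}
	if len(podVolumeClaims.unboundClaimsImmediate) > 0 {
		return nil, fmt.Errorf("pod has unbound immediate PersistentVolumeClaims")
	}
	feasible := make(map[string]*PodVolumes, len(nodes))
	for _, node := range nodes {
		podVolumes, reasons, err := binder.FindPodVolumes(pod, podVolumeClaims, node)
		if err != nil {
			return nil, err
		}
		if len(reasons) == 0 {
			feasible[node.Name] = podVolumes
		}
	}
	return feasible, nil
}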

pkg/scheduler/framework/plugins/volumebinding/binder_test.go

@@ -845,14 +845,14 @@ func checkReasons(t *testing.T, actual, expected ConflictReasons) {
 // findPodVolumes gets and finds volumes for given pod and node
 func findPodVolumes(binder SchedulerVolumeBinder, pod *v1.Pod, node *v1.Node) (*PodVolumes, ConflictReasons, error) {
-	boundClaims, claimsToBind, unboundClaimsImmediate, err := binder.GetPodVolumes(pod)
+	podVolumeClaims, err := binder.GetPodVolumeClaims(pod)
 	if err != nil {
 		return nil, nil, err
 	}
-	if len(unboundClaimsImmediate) > 0 {
+	if len(podVolumeClaims.unboundClaimsImmediate) > 0 {
 		return nil, nil, fmt.Errorf("pod has unbound immediate PersistentVolumeClaims")
 	}
-	return binder.FindPodVolumes(pod, boundClaims, claimsToBind, node)
+	return binder.FindPodVolumes(pod, podVolumeClaims, node)
 }

 func TestFindPodVolumesWithoutProvisioning(t *testing.T) {

pkg/scheduler/framework/plugins/volumebinding/fake_binder.go

@@ -49,9 +49,9 @@ type FakeVolumeBinder struct {
 var _ SchedulerVolumeBinder = &FakeVolumeBinder{}

-// GetPodVolumes implements SchedulerVolumeBinder.GetPodVolumes.
-func (b *FakeVolumeBinder) GetPodVolumes(pod *v1.Pod) (boundClaims, unboundClaimsDelayBinding, unboundClaimsImmediate []*v1.PersistentVolumeClaim, err error) {
-	return nil, nil, nil, nil
+// GetPodVolumeClaims implements SchedulerVolumeBinder.GetPodVolumeClaims.
+func (b *FakeVolumeBinder) GetPodVolumeClaims(pod *v1.Pod) (podVolumeClaims *PodVolumeClaims, err error) {
+	return &PodVolumeClaims{}, nil
 }

 // GetEligibleNodes implements SchedulerVolumeBinder.GetEligibleNodes.
@@ -60,7 +60,7 @@ func (b *FakeVolumeBinder) GetEligibleNodes(boundClaims []*v1.PersistentVolumeCl
 }

 // FindPodVolumes implements SchedulerVolumeBinder.FindPodVolumes.
-func (b *FakeVolumeBinder) FindPodVolumes(pod *v1.Pod, _, _ []*v1.PersistentVolumeClaim, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) {
+func (b *FakeVolumeBinder) FindPodVolumes(pod *v1.Pod, _ *PodVolumeClaims, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) {
 	return nil, b.config.FindReasons, b.config.FindErr
 }

pkg/scheduler/framework/plugins/volumebinding/volume_binding.go

@@ -47,14 +47,13 @@ const (
 // framework.CycleState, in the later phases we don't need to call Write method
 // to update the value
 type stateData struct {
 	skip bool // set true if pod does not have PVCs
-	boundClaims []*v1.PersistentVolumeClaim
-	claimsToBind []*v1.PersistentVolumeClaim
-	allBound bool
+	allBound bool
 	// podVolumesByNode holds the pod's volume information found in the Filter
 	// phase for each node
 	// it's initialized in the PreFilter phase
 	podVolumesByNode map[string]*PodVolumes
+	podVolumeClaims  *PodVolumeClaims
 	sync.Mutex
 }
@@ -170,11 +169,11 @@ func (pl *VolumeBinding) PreFilter(ctx context.Context, state *framework.CycleSt
 		state.Write(stateKey, &stateData{skip: true})
 		return nil, nil
 	}
-	boundClaims, claimsToBind, unboundClaimsImmediate, err := pl.Binder.GetPodVolumes(pod)
+	podVolumeClaims, err := pl.Binder.GetPodVolumeClaims(pod)
 	if err != nil {
 		return nil, framework.AsStatus(err)
 	}
-	if len(unboundClaimsImmediate) > 0 {
+	if len(podVolumeClaims.unboundClaimsImmediate) > 0 {
 		// Return UnschedulableAndUnresolvable error if immediate claims are
 		// not bound. Pod will be moved to active/backoff queues once these
 		// claims are bound by PV controller.
@@ -184,13 +183,20 @@ func (pl *VolumeBinding) PreFilter(ctx context.Context, state *framework.CycleSt
 	}
 	// Attempt to reduce down the number of nodes to consider in subsequent scheduling stages if pod has bound claims.
 	var result *framework.PreFilterResult
-	if eligibleNodes := pl.Binder.GetEligibleNodes(boundClaims); eligibleNodes != nil {
+	if eligibleNodes := pl.Binder.GetEligibleNodes(podVolumeClaims.boundClaims); eligibleNodes != nil {
 		result = &framework.PreFilterResult{
 			NodeNames: eligibleNodes,
 		}
 	}
-	state.Write(stateKey, &stateData{boundClaims: boundClaims, claimsToBind: claimsToBind, podVolumesByNode: make(map[string]*PodVolumes)})
+	state.Write(stateKey, &stateData{
+		podVolumesByNode: make(map[string]*PodVolumes),
+		podVolumeClaims: &PodVolumeClaims{
+			boundClaims:                podVolumeClaims.boundClaims,
+			unboundClaimsDelayBinding:  podVolumeClaims.unboundClaimsDelayBinding,
+			unboundVolumesDelayBinding: podVolumeClaims.unboundVolumesDelayBinding,
+		},
+	})
 	return result, nil
 }

@@ -241,7 +247,7 @@ func (pl *VolumeBinding) Filter(ctx context.Context, cs *framework.CycleState, p
 		return nil
 	}

-	podVolumes, reasons, err := pl.Binder.FindPodVolumes(pod, state.boundClaims, state.claimsToBind, node)
+	podVolumes, reasons, err := pl.Binder.FindPodVolumes(pod, state.podVolumeClaims, node)
 	if err != nil {
 		return framework.AsStatus(err)
pkg/scheduler/framework/plugins/volumebinding/volume_binding_test.go

@@ -115,10 +115,13 @@ func TestVolumeBinding(t *testing.T) {
 				makePV("pv-a", waitSC.Name).withPhase(v1.VolumeAvailable).PersistentVolume,
 			},
 			wantStateAfterPreFilter: &stateData{
-				boundClaims: []*v1.PersistentVolumeClaim{
-					makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim,
+				podVolumeClaims: &PodVolumeClaims{
+					boundClaims: []*v1.PersistentVolumeClaim{
+						makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim,
+					},
+					unboundClaimsDelayBinding:  []*v1.PersistentVolumeClaim{},
+					unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{},
 				},
-				claimsToBind:     []*v1.PersistentVolumeClaim{},
 				podVolumesByNode: map[string]*PodVolumes{},
 			},
 			wantFilterStatus: []*framework.Status{
@@ -150,11 +153,14 @@ func TestVolumeBinding(t *testing.T) {
 				NodeNames: sets.NewString("node-a"),
 			},
 			wantStateAfterPreFilter: &stateData{
-				boundClaims: []*v1.PersistentVolumeClaim{
-					makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim,
-					makePVC("pvc-b", waitSC.Name).withBoundPV("pv-b").PersistentVolumeClaim,
+				podVolumeClaims: &PodVolumeClaims{
+					boundClaims: []*v1.PersistentVolumeClaim{
+						makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim,
+						makePVC("pvc-b", waitSC.Name).withBoundPV("pv-b").PersistentVolumeClaim,
+					},
+					unboundClaimsDelayBinding:  []*v1.PersistentVolumeClaim{},
+					unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{},
 				},
-				claimsToBind:     []*v1.PersistentVolumeClaim{},
 				podVolumesByNode: map[string]*PodVolumes{},
 			},
 			wantFilterStatus: []*framework.Status{
@@ -223,9 +229,12 @@ func TestVolumeBinding(t *testing.T) {
 				makePVC("pvc-a", waitSC.Name).PersistentVolumeClaim,
 			},
 			wantStateAfterPreFilter: &stateData{
-				boundClaims: []*v1.PersistentVolumeClaim{},
-				claimsToBind: []*v1.PersistentVolumeClaim{
-					makePVC("pvc-a", waitSC.Name).PersistentVolumeClaim,
+				podVolumeClaims: &PodVolumeClaims{
+					boundClaims: []*v1.PersistentVolumeClaim{},
+					unboundClaimsDelayBinding: []*v1.PersistentVolumeClaim{
+						makePVC("pvc-a", waitSC.Name).PersistentVolumeClaim,
+					},
+					unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{waitSC.Name: {}},
 				},
 				podVolumesByNode: map[string]*PodVolumes{},
 			},
@@ -252,11 +261,20 @@ func TestVolumeBinding(t *testing.T) {
 					withNodeAffinity(map[string][]string{"foo": {"bar"}}).PersistentVolume,
 			},
 			wantStateAfterPreFilter: &stateData{
-				boundClaims: []*v1.PersistentVolumeClaim{
-					makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim,
-				},
-				claimsToBind: []*v1.PersistentVolumeClaim{
-					makePVC("pvc-b", waitSC.Name).PersistentVolumeClaim,
+				podVolumeClaims: &PodVolumeClaims{
+					boundClaims: []*v1.PersistentVolumeClaim{
+						makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim,
+					},
+					unboundClaimsDelayBinding: []*v1.PersistentVolumeClaim{
+						makePVC("pvc-b", waitSC.Name).PersistentVolumeClaim,
+					},
+					unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{
+						waitSC.Name: {
+							makePV("pv-a", waitSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withNodeAffinity(map[string][]string{"foo": {"bar"}}).PersistentVolume,
+						},
+					},
 				},
 				podVolumesByNode: map[string]*PodVolumes{},
 			},
@@ -292,10 +310,13 @@ func TestVolumeBinding(t *testing.T) {
 			},
 			wantPreFilterStatus: nil,
 			wantStateAfterPreFilter: &stateData{
-				boundClaims: []*v1.PersistentVolumeClaim{
-					makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim,
+				podVolumeClaims: &PodVolumeClaims{
+					boundClaims: []*v1.PersistentVolumeClaim{
+						makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim,
+					},
+					unboundClaimsDelayBinding:  []*v1.PersistentVolumeClaim{},
+					unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{},
 				},
-				claimsToBind:     []*v1.PersistentVolumeClaim{},
 				podVolumesByNode: map[string]*PodVolumes{},
 			},
 			wantFilterStatus: []*framework.Status{
@@ -356,9 +377,31 @@ func TestVolumeBinding(t *testing.T) {
 			},
 			wantPreFilterStatus: nil,
 			wantStateAfterPreFilter: &stateData{
-				boundClaims: []*v1.PersistentVolumeClaim{},
-				claimsToBind: []*v1.PersistentVolumeClaim{
-					makePVC("pvc-a", waitSC.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
+				podVolumeClaims: &PodVolumeClaims{
+					boundClaims: []*v1.PersistentVolumeClaim{},
+					unboundClaimsDelayBinding: []*v1.PersistentVolumeClaim{
+						makePVC("pvc-a", waitSC.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
+					},
+					unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{
+						waitSC.Name: {
+							makePV("pv-a-0", waitSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("200Gi")).
+								withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-a"}}).PersistentVolume,
+							makePV("pv-a-1", waitSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("200Gi")).
+								withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-a"}}).PersistentVolume,
+							makePV("pv-b-0", waitSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("100Gi")).
+								withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
+							makePV("pv-b-1", waitSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("100Gi")).
+								withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
+						},
+					},
 				},
 				podVolumesByNode: map[string]*PodVolumes{},
 			},
@@ -424,10 +467,50 @@ func TestVolumeBinding(t *testing.T) {
 			},
 			wantPreFilterStatus: nil,
 			wantStateAfterPreFilter: &stateData{
-				boundClaims: []*v1.PersistentVolumeClaim{},
-				claimsToBind: []*v1.PersistentVolumeClaim{
-					makePVC("pvc-0", waitSC.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
-					makePVC("pvc-1", waitHDDSC.Name).withRequestStorage(resource.MustParse("100Gi")).PersistentVolumeClaim,
+				podVolumeClaims: &PodVolumeClaims{
+					boundClaims: []*v1.PersistentVolumeClaim{},
+					unboundClaimsDelayBinding: []*v1.PersistentVolumeClaim{
+						makePVC("pvc-0", waitSC.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
+						makePVC("pvc-1", waitHDDSC.Name).withRequestStorage(resource.MustParse("100Gi")).PersistentVolumeClaim,
+					},
+					unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{
+						waitHDDSC.Name: {
+							makePV("pv-a-2", waitHDDSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("200Gi")).
+								withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-a"}}).PersistentVolume,
+							makePV("pv-a-3", waitHDDSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("200Gi")).
+								withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-a"}}).PersistentVolume,
+							makePV("pv-b-2", waitHDDSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("100Gi")).
+								withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
+							makePV("pv-b-3", waitHDDSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("100Gi")).
+								withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
+						},
+						waitSC.Name: {
+							makePV("pv-a-0", waitSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("200Gi")).
+								withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-a"}}).PersistentVolume,
+							makePV("pv-a-1", waitSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("200Gi")).
+								withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-a"}}).PersistentVolume,
+							makePV("pv-b-0", waitSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("100Gi")).
+								withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
+							makePV("pv-b-1", waitSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("100Gi")).
+								withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
+						},
+					},
 				},
 				podVolumesByNode: map[string]*PodVolumes{},
 			},
@@ -503,9 +586,43 @@ func TestVolumeBinding(t *testing.T) {
 			},
 			wantPreFilterStatus: nil,
 			wantStateAfterPreFilter: &stateData{
-				boundClaims: []*v1.PersistentVolumeClaim{},
-				claimsToBind: []*v1.PersistentVolumeClaim{
-					makePVC("pvc-a", waitSC.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
+				podVolumeClaims: &PodVolumeClaims{
+					boundClaims: []*v1.PersistentVolumeClaim{},
+					unboundClaimsDelayBinding: []*v1.PersistentVolumeClaim{
+						makePVC("pvc-a", waitSC.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
+					},
+					unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{
+						waitSC.Name: {
+							makePV("pv-a-0", waitSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("200Gi")).
+								withNodeAffinity(map[string][]string{
+									"topology.kubernetes.io/region": {"region-a"},
+									"topology.kubernetes.io/zone":   {"zone-a"},
+								}).PersistentVolume,
+							makePV("pv-a-1", waitSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("200Gi")).
+								withNodeAffinity(map[string][]string{
+									"topology.kubernetes.io/region": {"region-a"},
+									"topology.kubernetes.io/zone":   {"zone-a"},
+								}).PersistentVolume,
+							makePV("pv-b-0", waitSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("100Gi")).
+								withNodeAffinity(map[string][]string{
+									"topology.kubernetes.io/region": {"region-b"},
+									"topology.kubernetes.io/zone":   {"zone-b"},
+								}).PersistentVolume,
+							makePV("pv-b-1", waitSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("100Gi")).
+								withNodeAffinity(map[string][]string{
+									"topology.kubernetes.io/region": {"region-b"},
+									"topology.kubernetes.io/zone":   {"zone-b"},
+								}).PersistentVolume,
+						},
+					},
 				},
 				podVolumesByNode: map[string]*PodVolumes{},
 			},
@@ -604,9 +721,44 @@ func TestVolumeBinding(t *testing.T) {
 			},
 			wantPreFilterStatus: nil,
 			wantStateAfterPreFilter: &stateData{
-				boundClaims: []*v1.PersistentVolumeClaim{},
-				claimsToBind: []*v1.PersistentVolumeClaim{
-					makePVC("pvc-a", waitSC.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
+				podVolumeClaims: &PodVolumeClaims{
+					boundClaims: []*v1.PersistentVolumeClaim{},
+					unboundClaimsDelayBinding: []*v1.PersistentVolumeClaim{
+						makePVC("pvc-a", waitSC.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
+					},
+					unboundClaimsImmediate: nil,
+					unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{
+						waitSC.Name: {
+							makePV("pv-a-0", waitSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("200Gi")).
+								withNodeAffinity(map[string][]string{
+									"topology.kubernetes.io/region": {"region-a"},
+									"topology.kubernetes.io/zone":   {"zone-a"},
+								}).PersistentVolume,
+							makePV("pv-a-1", waitSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("200Gi")).
+								withNodeAffinity(map[string][]string{
+									"topology.kubernetes.io/region": {"region-a"},
+									"topology.kubernetes.io/zone":   {"zone-a"},
+								}).PersistentVolume,
+							makePV("pv-b-0", waitSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("100Gi")).
+								withNodeAffinity(map[string][]string{
+									"topology.kubernetes.io/region": {"region-b"},
+									"topology.kubernetes.io/zone":   {"zone-b"},
+								}).PersistentVolume,
+							makePV("pv-b-1", waitSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("100Gi")).
+								withNodeAffinity(map[string][]string{
+									"topology.kubernetes.io/region": {"region-b"},
+									"topology.kubernetes.io/zone":   {"zone-b"},
+								}).PersistentVolume,
+						},
+					},
 				},
 				podVolumesByNode: map[string]*PodVolumes{},
 			},
@@ -708,7 +860,14 @@ func TestVolumeBinding(t *testing.T) {
 			}
 			stateCmpOpts := []cmp.Option{
 				cmp.AllowUnexported(stateData{}),
+				cmp.AllowUnexported(PodVolumeClaims{}),
 				cmpopts.IgnoreFields(stateData{}, "Mutex"),
+				cmpopts.SortSlices(func(a *v1.PersistentVolume, b *v1.PersistentVolume) bool {
+					return a.Name < b.Name
+				}),
+				cmpopts.SortSlices(func(a v1.NodeSelectorRequirement, b v1.NodeSelectorRequirement) bool {
+					return a.Key < b.Key
+				}),
 			}
 			if diff := cmp.Diff(item.wantStateAfterPreFilter, got, stateCmpOpts...); diff != "" {
 				t.Errorf("state got after prefilter does not match (-want,+got):\n%s", diff)