Merge pull request #114098 from kidddddddddddddddddddddd/feat/pv_list

Optimize volumebinding by moving PV list calls to PreFilter
Authored by Kubernetes Prow Robot on 2022-12-22 10:11:37 -08:00; committed by GitHub
commit 3a5829044c
5 changed files with 257 additions and 72 deletions
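In short, this change moves the pvCache.ListPVs calls out of the per-node Filter path and into PreFilter, which runs once per scheduling cycle: PreFilter lists the PVs for each storage class referenced by the pod's delay-binding PVCs and carries them through the cycle state, so Filter only performs map lookups. A minimal sketch of the before/after call pattern, using simplified stand-in types rather than the real scheduler types:

package main

import "fmt"

// pv is a simplified stand-in for *v1.PersistentVolume.
type pv struct{ name, class string }

// listPVs mimics the PV assume cache's ListPVs(storageClassName) call.
func listPVs(cache map[string][]pv, class string) []pv { return cache[class] }

func main() {
	cache := map[string][]pv{"wait-sc": {{"pv-a", "wait-sc"}, {"pv-b", "wait-sc"}}}
	nodes := []string{"node-a", "node-b", "node-c"}
	classes := []string{"wait-sc"}

	// Before: each Filter call listed PVs again, i.e. len(nodes) list calls.
	for range nodes {
		for _, c := range classes {
			_ = listPVs(cache, c)
		}
	}

	// After: PreFilter lists once per storage class; Filter reuses the snapshot.
	prefetched := make(map[string][]pv, len(classes))
	for _, c := range classes {
		prefetched[c] = listPVs(cache, c)
	}
	for _, n := range nodes {
		fmt.Printf("%s: %d candidate PVs\n", n, len(prefetched["wait-sc"]))
	}
}

With many nodes and few storage classes, this turns O(nodes × claims) list calls into O(claims).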


@@ -147,9 +147,9 @@ type InTreeToCSITranslator interface {
 // 2. Once all the assume operations are done in e), the scheduler processes the next Pod in the scheduler queue
 // while the actual binding operation occurs in the background.
 type SchedulerVolumeBinder interface {
-	// GetPodVolumes returns a pod's PVCs separated into bound, unbound with delayed binding (including provisioning)
-	// and unbound with immediate binding (including prebound)
-	GetPodVolumes(pod *v1.Pod) (boundClaims, unboundClaimsDelayBinding, unboundClaimsImmediate []*v1.PersistentVolumeClaim, err error)
+	// GetPodVolumeClaims returns a pod's PVCs separated into bound, unbound with delayed binding (including provisioning),
+	// unbound with immediate binding (including prebound) and PVs that belong to storage classes of unbound PVCs with delayed binding.
+	GetPodVolumeClaims(pod *v1.Pod) (podVolumeClaims *PodVolumeClaims, err error)
 	// GetEligibleNodes checks the existing bound claims of the pod to determine if the list of nodes can be
 	// potentially reduced down to a subset of eligible nodes based on the bound claims which then can be used
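For orientation, the interface above is driven by the VolumeBinding plugin roughly in the order sketched below. The stand-in types and the feasibleNodes helper are illustrative, not the plugin's actual code:

package sketch

// Trimmed stand-ins for the binder types, for illustration only.
type Pod struct{ Name string }
type Node struct{ Name string }
type PodVolumeClaims struct{}
type PodVolumes struct{}
type ConflictReasons []string

type SchedulerVolumeBinder interface {
	GetPodVolumeClaims(pod *Pod) (*PodVolumeClaims, error)
	FindPodVolumes(pod *Pod, claims *PodVolumeClaims, node *Node) (*PodVolumes, ConflictReasons, error)
}

// feasibleNodes shows the call order after this change: claims (and their PV
// lists) are gathered once, then every candidate node is checked against the
// same PodVolumeClaims value.
func feasibleNodes(b SchedulerVolumeBinder, pod *Pod, nodes []*Node) ([]*Node, error) {
	claims, err := b.GetPodVolumeClaims(pod)
	if err != nil {
		return nil, err
	}
	var fits []*Node
	for _, n := range nodes {
		_, reasons, err := b.FindPodVolumes(pod, claims, n)
		if err != nil {
			return nil, err
		}
		if len(reasons) == 0 {
			fits = append(fits, n)
		}
	}
	return fits, nil
}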
@@ -172,7 +172,7 @@ type SchedulerVolumeBinder interface {
 	// for volumes that still need to be created.
 	//
 	// This function is called by the scheduler VolumeBinding plugin and can be called in parallel
-	FindPodVolumes(pod *v1.Pod, boundClaims, claimsToBind []*v1.PersistentVolumeClaim, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error)
+	FindPodVolumes(pod *v1.Pod, podVolumeClaims *PodVolumeClaims, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error)

 	// AssumePodVolumes will:
 	// 1. Take the PV matches for unbound PVCs and update the PV cache assuming
@@ -199,6 +199,17 @@ type SchedulerVolumeBinder interface {
 	BindPodVolumes(ctx context.Context, assumedPod *v1.Pod, podVolumes *PodVolumes) error
 }

+type PodVolumeClaims struct {
+	// boundClaims are the pod's bound PVCs.
+	boundClaims []*v1.PersistentVolumeClaim
+	// unboundClaimsDelayBinding are the pod's unbound PVCs with delayed binding (including provisioning).
+	unboundClaimsDelayBinding []*v1.PersistentVolumeClaim
+	// unboundClaimsImmediate are the pod's unbound PVCs with immediate binding (i.e., they should already be bound).
+	unboundClaimsImmediate []*v1.PersistentVolumeClaim
+	// unboundVolumesDelayBinding are PVs that belong to storage classes of the pod's unbound PVCs with delayed binding.
+	unboundVolumesDelayBinding map[string][]*v1.PersistentVolume
+}

 type volumeBinder struct {
 	kubeClient clientset.Interface
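unboundVolumesDelayBinding is keyed by storage-class name, so the later per-claim lookup is a single map access, and a class with no PVs simply yields a nil slice. A toy illustration using the real API types from k8s.io/api/core/v1:

package sketch

import v1 "k8s.io/api/core/v1"

// candidatesFor returns the prefetched PVs for one storage class. A missing
// key yields nil, which downstream matching treats as "no candidates".
func candidatesFor(unboundVolumesDelayBinding map[string][]*v1.PersistentVolume, storageClassName string) []*v1.PersistentVolume {
	return unboundVolumesDelayBinding[storageClassName]
}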
@@ -263,7 +274,7 @@ func NewVolumeBinder(
 // FindPodVolumes finds the matching PVs for PVCs and nodes to provision PVs
 // for the given pod and node. If the node does not fit, conflict reasons are
 // returned.
-func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, boundClaims, claimsToBind []*v1.PersistentVolumeClaim, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) {
+func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, podVolumeClaims *PodVolumeClaims, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) {
 	podVolumes = &PodVolumes{}

 	// Warning: Below log needs high verbosity as it can be printed several times (#60933).
@@ -318,22 +329,22 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, boundClaims, claimsToBind []*
 	}()

 	// Check PV node affinity on bound volumes
-	if len(boundClaims) > 0 {
-		boundVolumesSatisfied, boundPVsFound, err = b.checkBoundClaims(boundClaims, node, pod)
+	if len(podVolumeClaims.boundClaims) > 0 {
+		boundVolumesSatisfied, boundPVsFound, err = b.checkBoundClaims(podVolumeClaims.boundClaims, node, pod)
 		if err != nil {
 			return
 		}
 	}

 	// Find matching volumes and node for unbound claims
-	if len(claimsToBind) > 0 {
+	if len(podVolumeClaims.unboundClaimsDelayBinding) > 0 {
 		var (
 			claimsToFindMatching []*v1.PersistentVolumeClaim
 			claimsToProvision    []*v1.PersistentVolumeClaim
 		)

 		// Filter out claims to provision
-		for _, claim := range claimsToBind {
+		for _, claim := range podVolumeClaims.unboundClaimsDelayBinding {
 			if selectedNode, ok := claim.Annotations[volume.AnnSelectedNode]; ok {
 				if selectedNode != node.Name {
 					// Fast path, skip unmatched node.
@@ -349,7 +360,7 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, boundClaims, claimsToBind []*
 		// Find matching volumes
 		if len(claimsToFindMatching) > 0 {
 			var unboundClaims []*v1.PersistentVolumeClaim
-			unboundVolumesSatisfied, staticBindings, unboundClaims, err = b.findMatchingVolumes(pod, claimsToFindMatching, node)
+			unboundVolumesSatisfied, staticBindings, unboundClaims, err = b.findMatchingVolumes(pod, claimsToFindMatching, podVolumeClaims.unboundVolumesDelayBinding, node)
 			if err != nil {
 				return
 			}
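The loop above separates delay-binding claims into ones that still need a matching PV and ones handed to dynamic provisioning, and it bails out early when a provisioner has already pinned a claim to a different node. A hedged sketch of that triage; the annotation key is written out literally here (the real code refers to it as volume.AnnSelectedNode):

package sketch

import v1 "k8s.io/api/core/v1"

// annSelectedNode is the annotation set on a PVC once a node has been picked
// for provisioning.
const annSelectedNode = "volume.kubernetes.io/selected-node"

// splitClaims mirrors the triage above: claims already pinned to this node go
// to provisioning, unpinned claims go to PV matching, and a claim pinned to a
// different node rules the node out immediately.
func splitClaims(claims []*v1.PersistentVolumeClaim, nodeName string) (toMatch, toProvision []*v1.PersistentVolumeClaim, nodeFits bool) {
	for _, claim := range claims {
		if selected, ok := claim.Annotations[annSelectedNode]; ok {
			if selected != nodeName {
				return nil, nil, false // fast path: pinned elsewhere
			}
			toProvision = append(toProvision, claim)
			continue
		}
		toMatch = append(toMatch, claim)
	}
	return toMatch, toProvision, true
}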
@@ -804,40 +815,49 @@ func (b *volumeBinder) arePodVolumesBound(pod *v1.Pod) bool {
 	return true
 }

-// GetPodVolumes returns a pod's PVCs separated into bound, unbound with delayed binding (including provisioning)
-// and unbound with immediate binding (including prebound)
-func (b *volumeBinder) GetPodVolumes(pod *v1.Pod) (boundClaims []*v1.PersistentVolumeClaim, unboundClaimsDelayBinding []*v1.PersistentVolumeClaim, unboundClaimsImmediate []*v1.PersistentVolumeClaim, err error) {
-	boundClaims = []*v1.PersistentVolumeClaim{}
-	unboundClaimsImmediate = []*v1.PersistentVolumeClaim{}
-	unboundClaimsDelayBinding = []*v1.PersistentVolumeClaim{}
+// GetPodVolumeClaims returns a pod's PVCs separated into bound, unbound with delayed binding (including provisioning),
+// unbound with immediate binding (including prebound) and PVs that belong to storage classes of unbound PVCs with delayed binding.
+func (b *volumeBinder) GetPodVolumeClaims(pod *v1.Pod) (podVolumeClaims *PodVolumeClaims, err error) {
+	podVolumeClaims = &PodVolumeClaims{
+		boundClaims:               []*v1.PersistentVolumeClaim{},
+		unboundClaimsImmediate:    []*v1.PersistentVolumeClaim{},
+		unboundClaimsDelayBinding: []*v1.PersistentVolumeClaim{},
+	}

 	for _, vol := range pod.Spec.Volumes {
 		volumeBound, pvc, err := b.isVolumeBound(pod, &vol)
 		if err != nil {
-			return nil, nil, nil, err
+			return podVolumeClaims, err
 		}
 		if pvc == nil {
 			continue
 		}
 		if volumeBound {
-			boundClaims = append(boundClaims, pvc)
+			podVolumeClaims.boundClaims = append(podVolumeClaims.boundClaims, pvc)
 		} else {
 			delayBindingMode, err := volume.IsDelayBindingMode(pvc, b.classLister)
 			if err != nil {
-				return nil, nil, nil, err
+				return podVolumeClaims, err
 			}
 			// Prebound PVCs are treated as unbound immediate binding
 			if delayBindingMode && pvc.Spec.VolumeName == "" {
 				// Scheduler path
-				unboundClaimsDelayBinding = append(unboundClaimsDelayBinding, pvc)
+				podVolumeClaims.unboundClaimsDelayBinding = append(podVolumeClaims.unboundClaimsDelayBinding, pvc)
 			} else {
 				// !delayBindingMode || pvc.Spec.VolumeName != ""
 				// Immediate binding should have already been bound
-				unboundClaimsImmediate = append(unboundClaimsImmediate, pvc)
+				podVolumeClaims.unboundClaimsImmediate = append(podVolumeClaims.unboundClaimsImmediate, pvc)
 			}
 		}
 	}
-	return boundClaims, unboundClaimsDelayBinding, unboundClaimsImmediate, nil
+
+	podVolumeClaims.unboundVolumesDelayBinding = map[string][]*v1.PersistentVolume{}
+	for _, pvc := range podVolumeClaims.unboundClaimsDelayBinding {
+		// Get storage class name from each PVC
+		storageClassName := volume.GetPersistentVolumeClaimClass(pvc)
+		podVolumeClaims.unboundVolumesDelayBinding[storageClassName] = b.pvCache.ListPVs(storageClassName)
+	}
+	return podVolumeClaims, nil
 }
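Note that the final loop calls ListPVs once per delay-binding claim, so two claims sharing a storage class fetch the same list twice; the second call just overwrites the same map entry, so the result is unchanged. A hedged variant that skips classes already fetched; pvLister is a hypothetical stand-in for the PV assume cache:

package sketch

import v1 "k8s.io/api/core/v1"

// pvLister is a hypothetical stand-in for the binder's PV assume cache.
type pvLister interface {
	ListPVs(storageClassName string) []*v1.PersistentVolume
}

// prefetchPVsByClass lists PVs once per distinct storage class. The loop in
// GetPodVolumeClaims builds the same map, just with a possible repeated
// (idempotent) ListPVs call when two claims share a class.
func prefetchPVsByClass(lister pvLister, classes []string) map[string][]*v1.PersistentVolume {
	out := make(map[string][]*v1.PersistentVolume, len(classes))
	for _, class := range classes {
		if _, done := out[class]; done {
			continue // already fetched for an earlier claim
		}
		out[class] = lister.ListPVs(class)
	}
	return out
}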
 func (b *volumeBinder) checkBoundClaims(claims []*v1.PersistentVolumeClaim, node *v1.Node, pod *v1.Pod) (bool, bool, error) {
@@ -876,7 +896,7 @@ func (b *volumeBinder) checkBoundClaims(claims []*v1.PersistentVolumeClaim, node

 // findMatchingVolumes tries to find matching volumes for given claims,
 // and return unbound claims for further provision.
-func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.PersistentVolumeClaim, node *v1.Node) (foundMatches bool, bindings []*BindingInfo, unboundClaims []*v1.PersistentVolumeClaim, err error) {
+func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.PersistentVolumeClaim, unboundVolumesDelayBinding map[string][]*v1.PersistentVolume, node *v1.Node) (foundMatches bool, bindings []*BindingInfo, unboundClaims []*v1.PersistentVolumeClaim, err error) {
 	// Sort all the claims by increasing size request to get the smallest fits
 	sort.Sort(byPVCSize(claimsToBind))
@@ -887,10 +907,10 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.Persi
 	for _, pvc := range claimsToBind {
 		// Get storage class name from each PVC
 		storageClassName := volume.GetPersistentVolumeClaimClass(pvc)
-		allPVs := b.pvCache.ListPVs(storageClassName)
+		pvs := unboundVolumesDelayBinding[storageClassName]

 		// Find a matching PV
-		pv, err := volume.FindMatchingVolume(pvc, allPVs, node, chosenPVs, true)
+		pv, err := volume.FindMatchingVolume(pvc, pvs, node, chosenPVs, true)
 		if err != nil {
 			return false, nil, nil, err
 		}
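With the PV lists precomputed, findMatchingVolumes is pure in-memory work: claims are sorted smallest-first so small requests don't consume large volumes, and each match is recorded in chosenPVs so one PV cannot satisfy two claims. A small sketch of the smallest-fit ordering, mirroring what byPVCSize does:

package sketch

import (
	"sort"

	v1 "k8s.io/api/core/v1"
)

// sortClaimsBySize orders claims by increasing requested storage so the
// smallest request is matched first, as byPVCSize does in the binder.
func sortClaimsBySize(claims []*v1.PersistentVolumeClaim) {
	sort.Slice(claims, func(i, j int) bool {
		a := claims[i].Spec.Resources.Requests[v1.ResourceStorage]
		b := claims[j].Spec.Resources.Requests[v1.ResourceStorage]
		return a.Cmp(b) < 0
	})
}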


@@ -845,14 +845,14 @@ func checkReasons(t *testing.T, actual, expected ConflictReasons) {

 // findPodVolumes gets and finds volumes for given pod and node
 func findPodVolumes(binder SchedulerVolumeBinder, pod *v1.Pod, node *v1.Node) (*PodVolumes, ConflictReasons, error) {
-	boundClaims, claimsToBind, unboundClaimsImmediate, err := binder.GetPodVolumes(pod)
+	podVolumeClaims, err := binder.GetPodVolumeClaims(pod)
 	if err != nil {
 		return nil, nil, err
 	}
-	if len(unboundClaimsImmediate) > 0 {
+	if len(podVolumeClaims.unboundClaimsImmediate) > 0 {
 		return nil, nil, fmt.Errorf("pod has unbound immediate PersistentVolumeClaims")
 	}
-	return binder.FindPodVolumes(pod, boundClaims, claimsToBind, node)
+	return binder.FindPodVolumes(pod, podVolumeClaims, node)
 }

 func TestFindPodVolumesWithoutProvisioning(t *testing.T) {


@@ -49,9 +49,9 @@ type FakeVolumeBinder struct {

 var _ SchedulerVolumeBinder = &FakeVolumeBinder{}

-// GetPodVolumes implements SchedulerVolumeBinder.GetPodVolumes.
-func (b *FakeVolumeBinder) GetPodVolumes(pod *v1.Pod) (boundClaims, unboundClaimsDelayBinding, unboundClaimsImmediate []*v1.PersistentVolumeClaim, err error) {
-	return nil, nil, nil, nil
+// GetPodVolumeClaims implements SchedulerVolumeBinder.GetPodVolumeClaims.
+func (b *FakeVolumeBinder) GetPodVolumeClaims(pod *v1.Pod) (podVolumeClaims *PodVolumeClaims, err error) {
+	return &PodVolumeClaims{}, nil
 }

 // GetEligibleNodes implements SchedulerVolumeBinder.GetEligibleNodes.
@@ -60,7 +60,7 @@ func (b *FakeVolumeBinder) GetEligibleNodes(boundClaims []*v1.PersistentVolumeCl
 }

 // FindPodVolumes implements SchedulerVolumeBinder.FindPodVolumes.
-func (b *FakeVolumeBinder) FindPodVolumes(pod *v1.Pod, _, _ []*v1.PersistentVolumeClaim, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) {
+func (b *FakeVolumeBinder) FindPodVolumes(pod *v1.Pod, _ *PodVolumeClaims, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) {
 	return nil, b.config.FindReasons, b.config.FindErr
 }
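A fake binder returning canned values keeps plugin tests independent of a real PV cache. The pattern, reduced to its essentials with local stand-in types (not the scheduler's):

package sketch

// fakeConfig carries the canned results a test wants FindPodVolumes to
// report, loosely shaped after FakeVolumeBinder's config.
type fakeConfig struct {
	FindReasons []string
	FindErr     error
}

type fakeBinder struct{ config fakeConfig }

// FindPodVolumes ignores its inputs and replays the configured outcome.
func (b *fakeBinder) FindPodVolumes() ([]string, error) {
	return b.config.FindReasons, b.config.FindErr
}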


@@ -48,13 +48,12 @@ const (
 // to update the value
 type stateData struct {
 	skip bool // set true if pod does not have PVCs
-	boundClaims  []*v1.PersistentVolumeClaim
-	claimsToBind []*v1.PersistentVolumeClaim
 	allBound bool
 	// podVolumesByNode holds the pod's volume information found in the Filter
 	// phase for each node
 	// it's initialized in the PreFilter phase
 	podVolumesByNode map[string]*PodVolumes
+	podVolumeClaims  *PodVolumeClaims
 	sync.Mutex
 }
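stateData is written once in PreFilter and then consulted by Filter, which the framework may invoke for many nodes in parallel; in this design podVolumeClaims is treated as read-only after PreFilter, while the embedded Mutex guards the per-node podVolumesByNode map. A simplified sketch of that access pattern (stand-in types, hypothetical helper name):

package sketch

import "sync"

type PodVolumes struct{}
type PodVolumeClaims struct{}

// stateData's concurrency contract, in miniature: podVolumeClaims is
// read-only once PreFilter has written it, while podVolumesByNode is mutated
// per node and therefore guarded by the embedded mutex.
type stateData struct {
	podVolumeClaims  *PodVolumeClaims
	podVolumesByNode map[string]*PodVolumes
	sync.Mutex
}

func (s *stateData) recordNodeResult(nodeName string, v *PodVolumes) {
	s.Lock()
	defer s.Unlock()
	s.podVolumesByNode[nodeName] = v
}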
@@ -170,11 +169,11 @@ func (pl *VolumeBinding) PreFilter(ctx context.Context, state *framework.CycleSt
 		state.Write(stateKey, &stateData{skip: true})
 		return nil, nil
 	}
-	boundClaims, claimsToBind, unboundClaimsImmediate, err := pl.Binder.GetPodVolumes(pod)
+	podVolumeClaims, err := pl.Binder.GetPodVolumeClaims(pod)
 	if err != nil {
 		return nil, framework.AsStatus(err)
 	}
-	if len(unboundClaimsImmediate) > 0 {
+	if len(podVolumeClaims.unboundClaimsImmediate) > 0 {
 		// Return UnschedulableAndUnresolvable error if immediate claims are
 		// not bound. Pod will be moved to active/backoff queues once these
 		// claims are bound by PV controller.
@@ -184,13 +183,20 @@ func (pl *VolumeBinding) PreFilter(ctx context.Context, state *framework.CycleSt
 	}
 	// Attempt to reduce down the number of nodes to consider in subsequent scheduling stages if pod has bound claims.
 	var result *framework.PreFilterResult
-	if eligibleNodes := pl.Binder.GetEligibleNodes(boundClaims); eligibleNodes != nil {
+	if eligibleNodes := pl.Binder.GetEligibleNodes(podVolumeClaims.boundClaims); eligibleNodes != nil {
 		result = &framework.PreFilterResult{
 			NodeNames: eligibleNodes,
 		}
 	}
-	state.Write(stateKey, &stateData{boundClaims: boundClaims, claimsToBind: claimsToBind, podVolumesByNode: make(map[string]*PodVolumes)})
+	state.Write(stateKey, &stateData{
+		podVolumesByNode: make(map[string]*PodVolumes),
+		podVolumeClaims: &PodVolumeClaims{
+			boundClaims:                podVolumeClaims.boundClaims,
+			unboundClaimsDelayBinding:  podVolumeClaims.unboundClaimsDelayBinding,
+			unboundVolumesDelayBinding: podVolumeClaims.unboundVolumesDelayBinding,
+		},
+	})
 	return result, nil
 }
@@ -241,7 +247,7 @@ func (pl *VolumeBinding) Filter(ctx context.Context, cs *framework.CycleState, p
 		return nil
 	}

-	podVolumes, reasons, err := pl.Binder.FindPodVolumes(pod, state.boundClaims, state.claimsToBind, node)
+	podVolumes, reasons, err := pl.Binder.FindPodVolumes(pod, state.podVolumeClaims, node)
 	if err != nil {
 		return framework.AsStatus(err)

@@ -115,10 +115,13 @@ func TestVolumeBinding(t *testing.T) {
 				makePV("pv-a", waitSC.Name).withPhase(v1.VolumeAvailable).PersistentVolume,
 			},
 			wantStateAfterPreFilter: &stateData{
+				podVolumeClaims: &PodVolumeClaims{
 					boundClaims: []*v1.PersistentVolumeClaim{
 						makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim,
 					},
-					claimsToBind:               []*v1.PersistentVolumeClaim{},
+					unboundClaimsDelayBinding:  []*v1.PersistentVolumeClaim{},
+					unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{},
+				},
 				podVolumesByNode: map[string]*PodVolumes{},
 			},
 			wantFilterStatus: []*framework.Status{
@@ -150,11 +153,14 @@ func TestVolumeBinding(t *testing.T) {
 				NodeNames: sets.NewString("node-a"),
 			},
 			wantStateAfterPreFilter: &stateData{
+				podVolumeClaims: &PodVolumeClaims{
 					boundClaims: []*v1.PersistentVolumeClaim{
 						makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim,
 						makePVC("pvc-b", waitSC.Name).withBoundPV("pv-b").PersistentVolumeClaim,
 					},
-					claimsToBind:               []*v1.PersistentVolumeClaim{},
+					unboundClaimsDelayBinding:  []*v1.PersistentVolumeClaim{},
+					unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{},
+				},
 				podVolumesByNode: map[string]*PodVolumes{},
 			},
 			wantFilterStatus: []*framework.Status{
@@ -223,10 +229,13 @@ func TestVolumeBinding(t *testing.T) {
 				makePVC("pvc-a", waitSC.Name).PersistentVolumeClaim,
 			},
 			wantStateAfterPreFilter: &stateData{
+				podVolumeClaims: &PodVolumeClaims{
 					boundClaims: []*v1.PersistentVolumeClaim{},
-					claimsToBind: []*v1.PersistentVolumeClaim{
+					unboundClaimsDelayBinding: []*v1.PersistentVolumeClaim{
 						makePVC("pvc-a", waitSC.Name).PersistentVolumeClaim,
 					},
+					unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{waitSC.Name: {}},
+				},
 				podVolumesByNode: map[string]*PodVolumes{},
 			},
 			wantFilterStatus: []*framework.Status{
@@ -252,12 +261,21 @@ func TestVolumeBinding(t *testing.T) {
 					withNodeAffinity(map[string][]string{"foo": {"bar"}}).PersistentVolume,
 			},
 			wantStateAfterPreFilter: &stateData{
+				podVolumeClaims: &PodVolumeClaims{
 					boundClaims: []*v1.PersistentVolumeClaim{
 						makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim,
 					},
-					claimsToBind: []*v1.PersistentVolumeClaim{
+					unboundClaimsDelayBinding: []*v1.PersistentVolumeClaim{
 						makePVC("pvc-b", waitSC.Name).PersistentVolumeClaim,
 					},
+					unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{
+						waitSC.Name: {
+							makePV("pv-a", waitSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withNodeAffinity(map[string][]string{"foo": {"bar"}}).PersistentVolume,
+						},
+					},
+				},
 				podVolumesByNode: map[string]*PodVolumes{},
 			},
 			wantFilterStatus: []*framework.Status{
@@ -292,10 +310,13 @@ func TestVolumeBinding(t *testing.T) {
 			},
 			wantPreFilterStatus: nil,
 			wantStateAfterPreFilter: &stateData{
+				podVolumeClaims: &PodVolumeClaims{
 					boundClaims: []*v1.PersistentVolumeClaim{
 						makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim,
 					},
-					claimsToBind:               []*v1.PersistentVolumeClaim{},
+					unboundClaimsDelayBinding:  []*v1.PersistentVolumeClaim{},
+					unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{},
+				},
 				podVolumesByNode: map[string]*PodVolumes{},
 			},
 			wantFilterStatus: []*framework.Status{
@@ -356,10 +377,32 @@ func TestVolumeBinding(t *testing.T) {
 			},
 			wantPreFilterStatus: nil,
 			wantStateAfterPreFilter: &stateData{
+				podVolumeClaims: &PodVolumeClaims{
 					boundClaims: []*v1.PersistentVolumeClaim{},
-					claimsToBind: []*v1.PersistentVolumeClaim{
+					unboundClaimsDelayBinding: []*v1.PersistentVolumeClaim{
 						makePVC("pvc-a", waitSC.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
 					},
+					unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{
+						waitSC.Name: {
+							makePV("pv-a-0", waitSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("200Gi")).
+								withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-a"}}).PersistentVolume,
+							makePV("pv-a-1", waitSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("200Gi")).
+								withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-a"}}).PersistentVolume,
+							makePV("pv-b-0", waitSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("100Gi")).
+								withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
+							makePV("pv-b-1", waitSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("100Gi")).
+								withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
+						},
+					},
+				},
 				podVolumesByNode: map[string]*PodVolumes{},
 			},
 			wantFilterStatus: []*framework.Status{
@@ -424,11 +467,51 @@ func TestVolumeBinding(t *testing.T) {
 			},
 			wantPreFilterStatus: nil,
 			wantStateAfterPreFilter: &stateData{
+				podVolumeClaims: &PodVolumeClaims{
 					boundClaims: []*v1.PersistentVolumeClaim{},
-					claimsToBind: []*v1.PersistentVolumeClaim{
+					unboundClaimsDelayBinding: []*v1.PersistentVolumeClaim{
 						makePVC("pvc-0", waitSC.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
 						makePVC("pvc-1", waitHDDSC.Name).withRequestStorage(resource.MustParse("100Gi")).PersistentVolumeClaim,
 					},
+					unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{
+						waitHDDSC.Name: {
+							makePV("pv-a-2", waitHDDSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("200Gi")).
+								withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-a"}}).PersistentVolume,
+							makePV("pv-a-3", waitHDDSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("200Gi")).
+								withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-a"}}).PersistentVolume,
+							makePV("pv-b-2", waitHDDSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("100Gi")).
+								withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
+							makePV("pv-b-3", waitHDDSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("100Gi")).
+								withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
+						},
+						waitSC.Name: {
+							makePV("pv-a-0", waitSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("200Gi")).
+								withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-a"}}).PersistentVolume,
+							makePV("pv-a-1", waitSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("200Gi")).
+								withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-a"}}).PersistentVolume,
+							makePV("pv-b-0", waitSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("100Gi")).
+								withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
+							makePV("pv-b-1", waitSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("100Gi")).
+								withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
+						},
+					},
+				},
 				podVolumesByNode: map[string]*PodVolumes{},
 			},
 			wantFilterStatus: []*framework.Status{
@@ -503,10 +586,44 @@ func TestVolumeBinding(t *testing.T) {
 			},
 			wantPreFilterStatus: nil,
 			wantStateAfterPreFilter: &stateData{
+				podVolumeClaims: &PodVolumeClaims{
 					boundClaims: []*v1.PersistentVolumeClaim{},
-					claimsToBind: []*v1.PersistentVolumeClaim{
+					unboundClaimsDelayBinding: []*v1.PersistentVolumeClaim{
 						makePVC("pvc-a", waitSC.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
 					},
+					unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{
+						waitSC.Name: {
+							makePV("pv-a-0", waitSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("200Gi")).
+								withNodeAffinity(map[string][]string{
+									"topology.kubernetes.io/region": {"region-a"},
+									"topology.kubernetes.io/zone":   {"zone-a"},
+								}).PersistentVolume,
+							makePV("pv-a-1", waitSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("200Gi")).
+								withNodeAffinity(map[string][]string{
+									"topology.kubernetes.io/region": {"region-a"},
+									"topology.kubernetes.io/zone":   {"zone-a"},
+								}).PersistentVolume,
+							makePV("pv-b-0", waitSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("100Gi")).
+								withNodeAffinity(map[string][]string{
+									"topology.kubernetes.io/region": {"region-b"},
+									"topology.kubernetes.io/zone":   {"zone-b"},
+								}).PersistentVolume,
+							makePV("pv-b-1", waitSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("100Gi")).
+								withNodeAffinity(map[string][]string{
+									"topology.kubernetes.io/region": {"region-b"},
+									"topology.kubernetes.io/zone":   {"zone-b"},
+								}).PersistentVolume,
+						},
+					},
+				},
 				podVolumesByNode: map[string]*PodVolumes{},
 			},
 			wantFilterStatus: []*framework.Status{
@@ -604,10 +721,45 @@ func TestVolumeBinding(t *testing.T) {
 			},
 			wantPreFilterStatus: nil,
 			wantStateAfterPreFilter: &stateData{
+				podVolumeClaims: &PodVolumeClaims{
 					boundClaims: []*v1.PersistentVolumeClaim{},
-					claimsToBind: []*v1.PersistentVolumeClaim{
+					unboundClaimsDelayBinding: []*v1.PersistentVolumeClaim{
 						makePVC("pvc-a", waitSC.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
 					},
+					unboundClaimsImmediate: nil,
+					unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{
+						waitSC.Name: {
+							makePV("pv-a-0", waitSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("200Gi")).
+								withNodeAffinity(map[string][]string{
+									"topology.kubernetes.io/region": {"region-a"},
+									"topology.kubernetes.io/zone":   {"zone-a"},
+								}).PersistentVolume,
+							makePV("pv-a-1", waitSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("200Gi")).
+								withNodeAffinity(map[string][]string{
+									"topology.kubernetes.io/region": {"region-a"},
+									"topology.kubernetes.io/zone":   {"zone-a"},
+								}).PersistentVolume,
+							makePV("pv-b-0", waitSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("100Gi")).
+								withNodeAffinity(map[string][]string{
+									"topology.kubernetes.io/region": {"region-b"},
+									"topology.kubernetes.io/zone":   {"zone-b"},
+								}).PersistentVolume,
+							makePV("pv-b-1", waitSC.Name).
+								withPhase(v1.VolumeAvailable).
+								withCapacity(resource.MustParse("100Gi")).
+								withNodeAffinity(map[string][]string{
+									"topology.kubernetes.io/region": {"region-b"},
+									"topology.kubernetes.io/zone":   {"zone-b"},
+								}).PersistentVolume,
+						},
+					},
+				},
 				podVolumesByNode: map[string]*PodVolumes{},
 			},
 			wantFilterStatus: []*framework.Status{
@@ -708,7 +860,14 @@ func TestVolumeBinding(t *testing.T) {
 			}

 			stateCmpOpts := []cmp.Option{
 				cmp.AllowUnexported(stateData{}),
+				cmp.AllowUnexported(PodVolumeClaims{}),
 				cmpopts.IgnoreFields(stateData{}, "Mutex"),
+				cmpopts.SortSlices(func(a *v1.PersistentVolume, b *v1.PersistentVolume) bool {
+					return a.Name < b.Name
+				}),
+				cmpopts.SortSlices(func(a v1.NodeSelectorRequirement, b v1.NodeSelectorRequirement) bool {
+					return a.Key < b.Key
+				}),
 			}
 			if diff := cmp.Diff(item.wantStateAfterPreFilter, got, stateCmpOpts...); diff != "" {
 				t.Errorf("state got after prefilter does not match (-want,+got):\n%s", diff)