Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-25 12:43:23 +00:00
Merge pull request #109877 from yibozhuang/local-pv-prefilter-result
scheduler volumebinding: leverage PreFilterResult for bound local PVs
This change is contained in commit c2b5457dfa.
@@ -45,6 +45,7 @@ import (
    v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
    "k8s.io/kubernetes/pkg/features"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/metrics"
    "k8s.io/kubernetes/pkg/volume/util"
)

// ConflictReason is used for the special strings which explain why
@@ -126,6 +127,8 @@ type InTreeToCSITranslator interface {
// 1. The scheduler takes a Pod off the scheduler queue and processes it serially:
//    a. Invokes all pre-filter plugins for the pod. GetPodVolumes() is invoked
//       here; pod volume information is saved in the current scheduling cycle state for later use.
//       If the pod has bound immediate PVCs, GetEligibleNodes() is invoked to potentially reduce
//       the list of eligible nodes based on the bound PVs' NodeAffinity (if any).
//    b. Invokes all filter plugins, parallelized across nodes. FindPodVolumes() is invoked here.
//    c. Invokes all score plugins. Future/TBD
//    d. Selects the best node for the Pod.
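
To make the control flow concrete, here is a minimal, self-contained sketch of how a pre-filter result can narrow the node set before the per-node filter calls run. The types and functions below are illustrative stand-ins, not the scheduler framework's real API:

package main

import "fmt"

// nodeSet is a stand-in for the framework's PreFilterResult.NodeNames.
type nodeSet map[string]struct{}

// preFilter mimics the reduction step: each bound local PV contributes the
// node names it is pinned to, and the pod may only run on their intersection.
// A nil return means "no reduction possible, consider all nodes".
func preFilter(boundPVNodeNames [][]string) nodeSet {
    var eligible nodeSet
    for _, names := range boundPVNodeNames {
        s := nodeSet{}
        for _, n := range names {
            s[n] = struct{}{}
        }
        if eligible == nil {
            eligible = s
            continue
        }
        for n := range eligible {
            if _, ok := s[n]; !ok {
                delete(eligible, n) // intersection
            }
        }
    }
    return eligible
}

func main() {
    allNodes := []string{"node1", "node2", "node3"}
    eligible := preFilter([][]string{{"node1", "node2"}, {"node1"}})
    for _, n := range allNodes {
        if eligible != nil {
            if _, ok := eligible[n]; !ok {
                continue // node skipped before Filter ever runs
            }
        }
        fmt.Println("Filter runs for", n) // prints only node1
    }
}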
@@ -148,6 +151,14 @@ type SchedulerVolumeBinder interface {
    // and unbound with immediate binding (including prebound)
    GetPodVolumes(pod *v1.Pod) (boundClaims, unboundClaimsDelayBinding, unboundClaimsImmediate []*v1.PersistentVolumeClaim, err error)

    // GetEligibleNodes checks the existing bound claims of the pod to determine whether the list of
    // nodes can be reduced to a subset of eligible nodes based on the bound claims, which can then
    // be used in subsequent scheduling stages.
    //
    // If eligibleNodes is 'nil', it indicates that no such reduction can be made
    // and all nodes should be considered.
    GetEligibleNodes(boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.String)

    // FindPodVolumes checks if all of a Pod's PVCs can be satisfied by the
    // node and returns the pod's volume information.
    //
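
The nil-versus-empty distinction in this contract matters: nil means "no reduction, evaluate every node", while an empty set means "no node can satisfy all bound local PVs". A small caller-side sketch of that contract; the interpret helper is illustrative, not part of the interface:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/sets"
)

// interpret mirrors the documented contract of GetEligibleNodes.
func interpret(eligible sets.String) string {
    switch {
    case eligible == nil:
        return "no reduction possible, consider all nodes"
    case eligible.Len() == 0:
        return "no node satisfies all bound local PVs"
    default:
        return fmt.Sprintf("evaluate only %v", eligible.List())
    }
}

func main() {
    fmt.Println(interpret(nil))
    fmt.Println(interpret(sets.NewString()))
    fmt.Println(interpret(sets.NewString("node1", "node2")))
}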
@@ -208,6 +219,8 @@ type volumeBinder struct {
    csiStorageCapacityLister storagelisters.CSIStorageCapacityLister
}

var _ SchedulerVolumeBinder = &volumeBinder{}

// CapacityCheck contains additional parameters for NewVolumeBinder that
// are only needed when checking volume sizes against available storage
// capacity is desired.
@@ -248,7 +261,7 @@ func NewVolumeBinder(
}

// FindPodVolumes finds the matching PVs for PVCs and nodes to provision PVs
-// for the given pod and node. If the node does not fit, confilict reasons are
+// for the given pod and node. If the node does not fit, conflict reasons are
// returned.
func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, boundClaims, claimsToBind []*v1.PersistentVolumeClaim, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) {
    podVolumes = &PodVolumes{}
@@ -356,6 +369,55 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, boundClaims, claimsToBind []*v1.PersistentVolumeClaim, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) {
    return
}

// GetEligibleNodes checks the existing bound claims of the pod to determine whether the list of
// nodes can be reduced to a subset of eligible nodes based on the bound claims, which can then
// be used in subsequent scheduling stages.
//
// Returning 'nil' for eligibleNodes indicates that no such reduction can be made and all nodes
// should be considered.
func (b *volumeBinder) GetEligibleNodes(boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.String) {
    if len(boundClaims) == 0 {
        return
    }

    var errs []error
    for _, pvc := range boundClaims {
        pvName := pvc.Spec.VolumeName
        pv, err := b.pvCache.GetPV(pvName)
        if err != nil {
            errs = append(errs, err)
            continue
        }

        // if the PersistentVolume is local and has node affinity matching specific node(s),
        // add them to the eligible nodes
        nodeNames := util.GetLocalPersistentVolumeNodeNames(pv)
        if len(nodeNames) != 0 {
            // on the first list of eligible nodes found for a local PersistentVolume,
            // insert it into the eligible node set.
            if eligibleNodes == nil {
                eligibleNodes = sets.NewString(nodeNames...)
            } else {
                // for each subsequent local PersistentVolume, take the intersection
                // with the existing eligible nodes; e.g. if PV1 has node affinity to
                // node1 and PV2 has node affinity to node2, then the eligible node
                // list should be empty.
                eligibleNodes = eligibleNodes.Intersection(sets.NewString(nodeNames...))
            }
        }
    }

    if len(errs) > 0 {
        klog.V(4).InfoS("GetEligibleNodes: one or more errors occurred while finding eligible nodes", "error", errs)
        return nil
    }

    if eligibleNodes != nil {
        klog.V(4).InfoS("GetEligibleNodes: reduced the eligible nodes", "nodes", eligibleNodes)
    }
    return
}

// AssumePodVolumes will take the matching PVs and PVCs to provision in pod's
// volume information for the chosen node, and:
// 1. Update the pvCache with the new prebound PV.

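The intersection step above is what makes two local PVs pinned to different nodes produce an empty (rather than nil) set, which marks the pod as unschedulable on volume grounds. A standalone rerun of just that set arithmetic, with made-up node names:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/sets"
)

func main() {
    // Node names advertised by each bound local PV's node affinity.
    pvNodeNames := [][]string{
        {"node1"}, // PV1 lives on node1
        {"node2"}, // PV2 lives on node2
    }

    var eligible sets.String
    for _, names := range pvNodeNames {
        if eligible == nil {
            eligible = sets.NewString(names...)
            continue
        }
        eligible = eligible.Intersection(sets.NewString(names...))
    }
    fmt.Println(eligible.List()) // [] -> no single node can host both PVs
}
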
@@ -20,6 +20,7 @@ import (
    "context"
    "fmt"
    "os"
    "reflect"
    "sort"
    "testing"
    "time"
@@ -31,6 +32,7 @@ import (
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/apimachinery/pkg/watch"
    "k8s.io/client-go/informers"
@@ -58,6 +60,9 @@ var (
    boundPVCNode1a      = makeTestPVC("unbound-pvc", "1G", "", pvcBound, "pv-node1a", "1", &waitClass)
    immediateUnboundPVC = makeTestPVC("immediate-unbound-pvc", "1G", "", pvcUnbound, "", "1", &immediateClass)
    immediateBoundPVC   = makeTestPVC("immediate-bound-pvc", "1G", "", pvcBound, "pv-bound-immediate", "1", &immediateClass)
    localPreboundPVC1a  = makeTestPVC("local-prebound-pvc-1a", "1G", "", pvcPrebound, "local-pv-node1a", "1", &waitClass)
    localPreboundPVC1b  = makeTestPVC("local-prebound-pvc-1b", "1G", "", pvcPrebound, "local-pv-node1b", "1", &waitClass)
    localPreboundPVC2a  = makeTestPVC("local-prebound-pvc-2a", "1G", "", pvcPrebound, "local-pv-node2a", "1", &waitClass)

    // PVCs for dynamic provisioning
    provisionedPVC = makeTestPVC("provisioned-pvc", "1Gi", "", pvcUnbound, "", "1", &waitClassWithProvisioner)
@@ -89,6 +94,9 @@ var (
    pvNode1bBoundHigherVersion = makeTestPV("pv-node1b", "node1", "10G", "2", unboundPVC2, waitClass)
    pvBoundImmediate           = makeTestPV("pv-bound-immediate", "node1", "1G", "1", immediateBoundPVC, immediateClass)
    pvBoundImmediateNode2      = makeTestPV("pv-bound-immediate", "node2", "1G", "1", immediateBoundPVC, immediateClass)
    localPVNode1a              = makeLocalPV("local-pv-node1a", "node1", "5G", "1", nil, waitClass)
    localPVNode1b              = makeLocalPV("local-pv-node1b", "node1", "10G", "1", nil, waitClass)
    localPVNode2a              = makeLocalPV("local-pv-node2a", "node2", "5G", "1", nil, waitClass)

    // PVs for CSI migration
    migrationPVBound = makeTestPVForCSIMigration(zone1Labels, boundMigrationPVC, true)
@@ -718,6 +726,12 @@ func makeTestPVForCSIMigration(labels map[string]string, pvc *v1.PersistentVolumeClaim
    return pv
}

func makeLocalPV(name, node, capacity, version string, boundToPVC *v1.PersistentVolumeClaim, className string) *v1.PersistentVolume {
    pv := makeTestPV(name, node, capacity, version, boundToPVC, className)
    pv.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0].Key = v1.LabelHostname
    return pv
}

func pvcSetSelectedNode(pvc *v1.PersistentVolumeClaim, node string) *v1.PersistentVolumeClaim {
    newPVC := pvc.DeepCopy()
    metav1.SetMetaDataAnnotation(&newPVC.ObjectMeta, volume.AnnSelectedNode, node)
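
makeLocalPV piggybacks on makeTestPV and only rewrites the node affinity key to the hostname label. For reference, a directly constructed local PV with the same shape might look like this (object and node names are made up):

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    pv := &v1.PersistentVolume{
        ObjectMeta: metav1.ObjectMeta{Name: "local-pv-node1a"},
        Spec: v1.PersistentVolumeSpec{
            NodeAffinity: &v1.VolumeNodeAffinity{
                Required: &v1.NodeSelector{
                    NodeSelectorTerms: []v1.NodeSelectorTerm{{
                        MatchExpressions: []v1.NodeSelectorRequirement{{
                            Key:      v1.LabelHostname, // "kubernetes.io/hostname"
                            Operator: v1.NodeSelectorOpIn,
                            Values:   []string{"node1"},
                        }},
                    }},
                },
            },
        },
    }
    fmt.Println(pv.Name, "is pinned to", pv.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0].Values)
}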
@@ -2318,3 +2332,129 @@ func TestCapacity(t *testing.T) {
        })
    }
}

func TestGetEligibleNodes(t *testing.T) {
    type scenarioType struct {
        // Inputs
        pvcs  []*v1.PersistentVolumeClaim
        pvs   []*v1.PersistentVolume
        nodes []*v1.Node

        // Expected return values
        eligibleNodes sets.String
    }

    scenarios := map[string]scenarioType{
        "no-bound-claims": {},
        "no-nodes-found": {
            pvcs: []*v1.PersistentVolumeClaim{
                preboundPVC,
                preboundPVCNode1a,
            },
        },
        "pv-not-found": {
            pvcs: []*v1.PersistentVolumeClaim{
                preboundPVC,
                preboundPVCNode1a,
            },
            nodes: []*v1.Node{
                node1,
            },
        },
        "node-affinity-mismatch": {
            pvcs: []*v1.PersistentVolumeClaim{
                preboundPVC,
                preboundPVCNode1a,
            },
            pvs: []*v1.PersistentVolume{
                pvNode1a,
            },
            nodes: []*v1.Node{
                node1,
                node2,
            },
        },
        "local-pv-with-node-affinity": {
            pvcs: []*v1.PersistentVolumeClaim{
                localPreboundPVC1a,
                localPreboundPVC1b,
            },
            pvs: []*v1.PersistentVolume{
                localPVNode1a,
                localPVNode1b,
            },
            nodes: []*v1.Node{
                node1,
                node2,
            },
            eligibleNodes: sets.NewString("node1"),
        },
        "multi-local-pv-with-different-nodes": {
            pvcs: []*v1.PersistentVolumeClaim{
                localPreboundPVC1a,
                localPreboundPVC1b,
                localPreboundPVC2a,
            },
            pvs: []*v1.PersistentVolume{
                localPVNode1a,
                localPVNode1b,
                localPVNode2a,
            },
            nodes: []*v1.Node{
                node1,
                node2,
            },
            eligibleNodes: sets.NewString(),
        },
        "local-and-non-local-pv": {
            pvcs: []*v1.PersistentVolumeClaim{
                localPreboundPVC1a,
                localPreboundPVC1b,
                preboundPVC,
                immediateBoundPVC,
            },
            pvs: []*v1.PersistentVolume{
                localPVNode1a,
                localPVNode1b,
                pvNode1a,
                pvBoundImmediate,
                pvBoundImmediateNode2,
            },
            nodes: []*v1.Node{
                node1,
                node2,
            },
            eligibleNodes: sets.NewString("node1"),
        },
    }

    run := func(t *testing.T, scenario scenarioType) {
        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        // Setup
        testEnv := newTestBinder(t, ctx.Done())
        testEnv.initVolumes(scenario.pvs, scenario.pvs)

        testEnv.initNodes(scenario.nodes)
        testEnv.initClaims(scenario.pvcs, scenario.pvcs)

        // Execute
        eligibleNodes := testEnv.binder.GetEligibleNodes(scenario.pvcs)

        // Validate
        if compDiff := cmp.Diff(scenario.eligibleNodes, eligibleNodes, cmp.Comparer(func(a, b sets.String) bool {
            return reflect.DeepEqual(a, b)
        })); compDiff != "" {
            t.Errorf("Unexpected eligible nodes (-want +got):\n%s", compDiff)
        }
    }

    for name, scenario := range scenarios {
        t.Run(name, func(t *testing.T) { run(t, scenario) })
    }
}

@@ -20,6 +20,7 @@ import (
    "context"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/util/sets"
)

// FakeVolumeBinderConfig holds configurations for fake volume binder.
@@ -46,11 +47,18 @@ type FakeVolumeBinder struct {
    BindCalled bool
}

var _ SchedulerVolumeBinder = &FakeVolumeBinder{}

// GetPodVolumes implements SchedulerVolumeBinder.GetPodVolumes.
func (b *FakeVolumeBinder) GetPodVolumes(pod *v1.Pod) (boundClaims, unboundClaimsDelayBinding, unboundClaimsImmediate []*v1.PersistentVolumeClaim, err error) {
    return nil, nil, nil, nil
}

// GetEligibleNodes implements SchedulerVolumeBinder.GetEligibleNodes.
func (b *FakeVolumeBinder) GetEligibleNodes(boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.String) {
    return nil
}

// FindPodVolumes implements SchedulerVolumeBinder.FindPodVolumes.
func (b *FakeVolumeBinder) FindPodVolumes(pod *v1.Pod, _, _ []*v1.PersistentVolumeClaim, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) {
    return nil, b.config.FindReasons, b.config.FindErr

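The fake always returns nil, i.e. "no reduction", so existing plugin tests keep seeing all nodes. If a test ever needed to exercise the reduction path through a stub, a configurable variant could look like the following; fakeBinderWithNodes is hypothetical and not part of the package:

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/util/sets"
)

// fakeBinderWithNodes is a hypothetical test double whose GetEligibleNodes
// returns a canned node set instead of always nil.
type fakeBinderWithNodes struct {
    eligible sets.String
}

func (f *fakeBinderWithNodes) GetEligibleNodes(boundClaims []*v1.PersistentVolumeClaim) sets.String {
    return f.eligible
}

func main() {
    f := &fakeBinderWithNodes{eligible: sets.NewString("node1")}
    fmt.Println(f.GetEligibleNodes(nil).List()) // [node1]
}
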
@@ -182,8 +182,16 @@ func (pl *VolumeBinding) PreFilter(ctx context.Context, state *framework.CycleState
        status.AppendReason("pod has unbound immediate PersistentVolumeClaims")
        return nil, status
    }
+   // Attempt to reduce the number of nodes to consider in subsequent scheduling stages if the pod has bound claims.
+   var result *framework.PreFilterResult
+   if eligibleNodes := pl.Binder.GetEligibleNodes(boundClaims); eligibleNodes != nil {
+       result = &framework.PreFilterResult{
+           NodeNames: eligibleNodes,
+       }
+   }
+
    state.Write(stateKey, &stateData{boundClaims: boundClaims, claimsToBind: claimsToBind, podVolumesByNode: make(map[string]*PodVolumes)})
-   return nil, nil
+   return result, nil
}

// PreFilterExtensions returns prefilter extensions, pod add and remove.

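The key convention here is that a nil *framework.PreFilterResult tells the framework to evaluate every node, so the plugin only allocates a result when GetEligibleNodes actually produced a reduction. A simplified stand-in; the preFilterResult type below mocks the framework type and is not the real one:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/sets"
)

// preFilterResult mocks framework.PreFilterResult for illustration.
type preFilterResult struct {
    NodeNames sets.String
}

// buildResult wraps the node set only when a reduction was computed;
// returning nil means "evaluate every node".
func buildResult(eligibleNodes sets.String) *preFilterResult {
    if eligibleNodes == nil {
        return nil
    }
    return &preFilterResult{NodeNames: eligibleNodes}
}

func main() {
    fmt.Println(buildResult(nil))                                       // <nil> -> all nodes
    fmt.Println(buildResult(sets.NewString("node-a")).NodeNames.List()) // [node-a]
}
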
@@ -27,6 +27,7 @@ import (
    storagev1 "k8s.io/api/storage/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/informers"
    "k8s.io/client-go/kubernetes/fake"
@@ -79,6 +80,7 @@ func TestVolumeBinding(t *testing.T) {
        pvs                     []*v1.PersistentVolume
        fts                     feature.Features
        args                    *config.VolumeBindingArgs
        wantPreFilterResult     *framework.PreFilterResult
        wantPreFilterStatus     *framework.Status
        wantStateAfterPreFilter *stateData
        wantFilterStatus        []*framework.Status
@@ -126,6 +128,42 @@ func TestVolumeBinding(t *testing.T) {
                0,
            },
        },
        {
            name: "all bound with local volumes",
            pod:  makePod("pod-a").withPVCVolume("pvc-a", "volume-a").withPVCVolume("pvc-b", "volume-b").Pod,
            nodes: []*v1.Node{
                makeNode("node-a").Node,
            },
            pvcs: []*v1.PersistentVolumeClaim{
                makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim,
                makePVC("pvc-b", waitSC.Name).withBoundPV("pv-b").PersistentVolumeClaim,
            },
            pvs: []*v1.PersistentVolume{
                makePV("pv-a", waitSC.Name).withPhase(v1.VolumeBound).withNodeAffinity(map[string][]string{
                    v1.LabelHostname: {"node-a"},
                }).PersistentVolume,
                makePV("pv-b", waitSC.Name).withPhase(v1.VolumeBound).withNodeAffinity(map[string][]string{
                    v1.LabelHostname: {"node-a"},
                }).PersistentVolume,
            },
            wantPreFilterResult: &framework.PreFilterResult{
                NodeNames: sets.NewString("node-a"),
            },
            wantStateAfterPreFilter: &stateData{
                boundClaims: []*v1.PersistentVolumeClaim{
                    makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim,
                    makePVC("pvc-b", waitSC.Name).withBoundPV("pv-b").PersistentVolumeClaim,
                },
                claimsToBind:     []*v1.PersistentVolumeClaim{},
                podVolumesByNode: map[string]*PodVolumes{},
            },
            wantFilterStatus: []*framework.Status{
                nil,
            },
            wantScores: []int64{
                0,
            },
        },
        {
            name: "PVC does not exist",
            pod:  makePod("pod-a").withPVCVolume("pvc-a", "").Pod,
@@ -654,8 +692,10 @@ func TestVolumeBinding(t *testing.T) {
            state := framework.NewCycleState()

            t.Logf("Verify: call PreFilter and check status")
-           _, gotPreFilterStatus := p.PreFilter(ctx, state, item.pod)
+           gotPreFilterResult, gotPreFilterStatus := p.PreFilter(ctx, state, item.pod)
            assert.Equal(t, item.wantPreFilterStatus, gotPreFilterStatus)
+           assert.Equal(t, item.wantPreFilterResult, gotPreFilterResult)

            if !gotPreFilterStatus.IsSuccess() {
                // scheduler framework will skip Filter if PreFilter fails
                return

@@ -576,6 +576,44 @@ func IsLocalEphemeralVolume(volume v1.Volume) bool {
        volume.ConfigMap != nil
}

// GetLocalPersistentVolumeNodeNames returns the node affinity node name(s) for
// local PersistentVolumes. nil is returned if the PV does not have any
// specific node affinity node selector terms and match expressions.
// A PersistentVolume with node affinity has selector terms and match expressions
// of the form:
//
//  nodeAffinity:
//    required:
//      nodeSelectorTerms:
//      - matchExpressions:
//        - key: kubernetes.io/hostname
//          operator: In
//          values:
//          - <node1>
//          - <node2>
func GetLocalPersistentVolumeNodeNames(pv *v1.PersistentVolume) []string {
    if pv == nil || pv.Spec.NodeAffinity == nil || pv.Spec.NodeAffinity.Required == nil {
        return nil
    }

    var result sets.Set[string]
    for _, term := range pv.Spec.NodeAffinity.Required.NodeSelectorTerms {
        var nodes sets.Set[string]
        for _, matchExpr := range term.MatchExpressions {
            if matchExpr.Key == v1.LabelHostname && matchExpr.Operator == v1.NodeSelectorOpIn {
                if nodes == nil {
                    nodes = sets.New(matchExpr.Values...)
                } else {
                    nodes = nodes.Intersection(sets.New(matchExpr.Values...))
                }
            }
        }
        result = result.Union(nodes)
    }

    return sets.List(result)
}

// GetPodVolumeNames returns names of volumes that are used in a pod,
// either as filesystem mount or raw block device, together with list
// of all SELinux contexts of all containers that use the volumes.

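Within one selector term, multiple hostname match expressions are ANDed (intersection); separate selector terms are ORed (union across terms). A standalone rerun of that logic on plain string slices, with made-up values, shows both behaviors at once:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/sets"
)

func main() {
    // term1 holds two hostname expressions -> intersection: {node2}
    term1 := [][]string{{"node1", "node2"}, {"node2", "node3"}}
    // term2 holds one hostname expression -> {node1}
    term2 := [][]string{{"node1"}}

    var result sets.String
    for _, term := range [][][]string{term1, term2} {
        var nodes sets.String
        for _, values := range term {
            if nodes == nil {
                nodes = sets.NewString(values...)
            } else {
                nodes = nodes.Intersection(sets.NewString(values...))
            }
        }
        result = result.Union(nodes) // union across terms
    }
    fmt.Println(result.List()) // [node1 node2]
}
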
@@ -24,6 +24,7 @@ import (
    "strings"
    "testing"

    "github.com/google/go-cmp/cmp"
    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -942,3 +943,304 @@ func TestGetPodVolumeNames(t *testing.T) {
        })
    }
}

func TestGetPersistentVolumeNodeNames(t *testing.T) {
    tests := []struct {
        name              string
        pv                *v1.PersistentVolume
        expectedNodeNames []string
    }{
        {
            name: "nil PV",
            pv:   nil,
        },
        {
            name: "PV missing node affinity",
            pv: &v1.PersistentVolume{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "foo",
                },
            },
        },
        {
            name: "PV node affinity missing required",
            pv: &v1.PersistentVolume{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "foo",
                },
                Spec: v1.PersistentVolumeSpec{
                    NodeAffinity: &v1.VolumeNodeAffinity{},
                },
            },
        },
        {
            name: "PV node affinity required zero selector terms",
            pv: &v1.PersistentVolume{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "foo",
                },
                Spec: v1.PersistentVolumeSpec{
                    NodeAffinity: &v1.VolumeNodeAffinity{
                        Required: &v1.NodeSelector{
                            NodeSelectorTerms: []v1.NodeSelectorTerm{},
                        },
                    },
                },
            },
            expectedNodeNames: []string{},
        },
        {
            name: "PV node affinity required zero match expressions",
            pv: &v1.PersistentVolume{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "foo",
                },
                Spec: v1.PersistentVolumeSpec{
                    NodeAffinity: &v1.VolumeNodeAffinity{
                        Required: &v1.NodeSelector{
                            NodeSelectorTerms: []v1.NodeSelectorTerm{
                                {
                                    MatchExpressions: []v1.NodeSelectorRequirement{},
                                },
                            },
                        },
                    },
                },
            },
            expectedNodeNames: []string{},
        },
        {
            name: "PV node affinity required multiple match expressions",
            pv: &v1.PersistentVolume{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "foo",
                },
                Spec: v1.PersistentVolumeSpec{
                    NodeAffinity: &v1.VolumeNodeAffinity{
                        Required: &v1.NodeSelector{
                            NodeSelectorTerms: []v1.NodeSelectorTerm{
                                {
                                    MatchExpressions: []v1.NodeSelectorRequirement{
                                        {
                                            Key:      "foo",
                                            Operator: v1.NodeSelectorOpIn,
                                        },
                                        {
                                            Key:      "bar",
                                            Operator: v1.NodeSelectorOpIn,
                                        },
                                    },
                                },
                            },
                        },
                    },
                },
            },
            expectedNodeNames: []string{},
        },
        {
            name: "PV node affinity required single match expression with no values",
            pv: &v1.PersistentVolume{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "foo",
                },
                Spec: v1.PersistentVolumeSpec{
                    NodeAffinity: &v1.VolumeNodeAffinity{
                        Required: &v1.NodeSelector{
                            NodeSelectorTerms: []v1.NodeSelectorTerm{
                                {
                                    MatchExpressions: []v1.NodeSelectorRequirement{
                                        {
                                            Key:      v1.LabelHostname,
                                            Operator: v1.NodeSelectorOpIn,
                                            Values:   []string{},
                                        },
                                    },
                                },
                            },
                        },
                    },
                },
            },
            expectedNodeNames: []string{},
        },
        {
            name: "PV node affinity required single match expression with single node",
            pv: &v1.PersistentVolume{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "foo",
                },
                Spec: v1.PersistentVolumeSpec{
                    NodeAffinity: &v1.VolumeNodeAffinity{
                        Required: &v1.NodeSelector{
                            NodeSelectorTerms: []v1.NodeSelectorTerm{
                                {
                                    MatchExpressions: []v1.NodeSelectorRequirement{
                                        {
                                            Key:      v1.LabelHostname,
                                            Operator: v1.NodeSelectorOpIn,
                                            Values: []string{
                                                "node1",
                                            },
                                        },
                                    },
                                },
                            },
                        },
                    },
                },
            },
            expectedNodeNames: []string{
                "node1",
            },
        },
        {
            name: "PV node affinity required single match expression with multiple nodes",
            pv: &v1.PersistentVolume{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "foo",
                },
                Spec: v1.PersistentVolumeSpec{
                    NodeAffinity: &v1.VolumeNodeAffinity{
                        Required: &v1.NodeSelector{
                            NodeSelectorTerms: []v1.NodeSelectorTerm{
                                {
                                    MatchExpressions: []v1.NodeSelectorRequirement{
                                        {
                                            Key:      v1.LabelHostname,
                                            Operator: v1.NodeSelectorOpIn,
                                            Values: []string{
                                                "node1",
                                                "node2",
                                            },
                                        },
                                    },
                                },
                            },
                        },
                    },
                },
            },
            expectedNodeNames: []string{
                "node1",
                "node2",
            },
        },
        {
            name: "PV node affinity required multiple match expressions with multiple nodes",
            pv: &v1.PersistentVolume{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "foo",
                },
                Spec: v1.PersistentVolumeSpec{
                    NodeAffinity: &v1.VolumeNodeAffinity{
                        Required: &v1.NodeSelector{
                            NodeSelectorTerms: []v1.NodeSelectorTerm{
                                {
                                    MatchExpressions: []v1.NodeSelectorRequirement{
                                        {
                                            Key:      "bar",
                                            Operator: v1.NodeSelectorOpIn,
                                            Values: []string{
                                                "node1",
                                                "node2",
                                            },
                                        },
                                        {
                                            Key:      v1.LabelHostname,
                                            Operator: v1.NodeSelectorOpIn,
                                            Values: []string{
                                                "node3",
                                                "node4",
                                            },
                                        },
                                    },
                                },
                            },
                        },
                    },
                },
            },
            expectedNodeNames: []string{
                "node3",
                "node4",
            },
        },
        {
            name: "PV node affinity required multiple node selectors multiple match expressions with multiple nodes",
            pv: &v1.PersistentVolume{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "foo",
                },
                Spec: v1.PersistentVolumeSpec{
                    NodeAffinity: &v1.VolumeNodeAffinity{
                        Required: &v1.NodeSelector{
                            NodeSelectorTerms: []v1.NodeSelectorTerm{
                                {
                                    MatchExpressions: []v1.NodeSelectorRequirement{
                                        {
                                            Key:      v1.LabelHostname,
                                            Operator: v1.NodeSelectorOpIn,
                                            Values: []string{
                                                "node1",
                                                "node2",
                                            },
                                        },
                                        {
                                            Key:      v1.LabelHostname,
                                            Operator: v1.NodeSelectorOpIn,
                                            Values: []string{
                                                "node2",
                                                "node3",
                                            },
                                        },
                                    },
                                },
                                {
                                    MatchExpressions: []v1.NodeSelectorRequirement{
                                        {
                                            Key:      v1.LabelHostname,
                                            Operator: v1.NodeSelectorOpIn,
                                            Values: []string{
                                                "node1",
                                            },
                                        },
                                    },
                                },
                            },
                        },
                    },
                },
            },
            expectedNodeNames: []string{
                "node1",
                "node2",
            },
        },
    }

    for _, test := range tests {
        t.Run(test.name, func(t *testing.T) {
            nodeNames := GetLocalPersistentVolumeNodeNames(test.pv)
            if diff := cmp.Diff(test.expectedNodeNames, nodeNames); diff != "" {
                t.Errorf("Unexpected nodeNames (-want, +got):\n%s", diff)
            }
        })
    }
}

@@ -675,7 +675,7 @@ func TestPVAffinityConflict(t *testing.T) {
    if _, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil {
        t.Fatalf("Failed to create Pod %q: %v", pod.Name, err)
    }
-   // Give time to shceduler to attempt to schedule pod
+   // Give time to scheduler to attempt to schedule pod
    if err := waitForPodUnschedulable(config.client, pod); err != nil {
        t.Errorf("Failed as Pod %s was not unschedulable: %v", pod.Name, err)
    }
@@ -690,8 +690,8 @@ func TestPVAffinityConflict(t *testing.T) {
    if strings.Compare(p.Status.Conditions[0].Reason, "Unschedulable") != 0 {
        t.Fatalf("Failed as Pod %s reason was: %s but expected: Unschedulable", podName, p.Status.Conditions[0].Reason)
    }
-   if !strings.Contains(p.Status.Conditions[0].Message, "node(s) didn't match Pod's node affinity") || !strings.Contains(p.Status.Conditions[0].Message, "node(s) had volume node affinity conflict") {
-       t.Fatalf("Failed as Pod's %s failure message does not contain expected message: node(s) didn't match Pod's node affinity, node(s) had volume node affinity conflict. Got message %q", podName, p.Status.Conditions[0].Message)
+   if !strings.Contains(p.Status.Conditions[0].Message, "node(s) didn't match Pod's node affinity") {
+       t.Fatalf("Failed as Pod's %s failure message does not contain expected message: node(s) didn't match Pod's node affinity. Got message %q", podName, p.Status.Conditions[0].Message)
    }
    // Deleting test pod
    if err := config.client.CoreV1().Pods(config.ns).Delete(context.TODO(), podName, metav1.DeleteOptions{}); err != nil {
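
The relaxed assertion appears to follow from the new PreFilter behavior: a node that conflicts with a bound local PV's node affinity can now be excluded before the Filter stage ever runs for it, so the per-node "volume node affinity conflict" reason no longer necessarily appears in the pod's status message.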