Merge pull request #127584 from AxeZhan/automated-cherry-pick-of-#125398-upstream-release-1.31

Automated cherry pick of #125398: [scheduler] When the hostname and nodename of a node do not match, ensure that pods carrying PVs with nodeAffinity are scheduled correctly.
Authored by Kubernetes Prow Robot on 2024-11-12 09:12:53 +00:00; committed by GitHub
commit dd898b82d0, signed with GPG Key ID B5690EEEBB952194 (no known key found for this signature in database)
8 changed files with 196 additions and 649 deletions
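Background for the change: the volumebinding plugin's PreFilter stage used GetEligibleNodes() to pre-reduce the candidate nodes to the names listed in a bound local PV's kubernetes.io/hostname node-affinity values. The scheduler framework matches a PreFilterResult's NodeNames against Node object names, so on clusters where a node's hostname label differs from its object name, the reduction produced a set of hostnames that matched no node name and the pod stayed unschedulable. This commit removes the pre-reduction; PV node affinity is still enforced per node by the Filter stage, which evaluates the selector against node labels. A minimal sketch of the mismatch, mirroring the fixtures of the new regression test below (names are illustrative):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// A node whose object name and hostname label disagree.
	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{
		Name:   "node1",                                              // what PreFilterResult.NodeNames is matched against
		Labels: map[string]string{"kubernetes.io/hostname": "host1"}, // what PV node affinity is matched against
	}}

	// A PV pinned to that node through its hostname label.
	pv := &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{
		NodeAffinity: &v1.VolumeNodeAffinity{Required: &v1.NodeSelector{
			NodeSelectorTerms: []v1.NodeSelectorTerm{{
				MatchExpressions: []v1.NodeSelectorRequirement{{
					Key:      "kubernetes.io/hostname",
					Operator: v1.NodeSelectorOpIn,
					Values:   []string{"host1"},
				}},
			}},
		}},
	}}

	// Before this fix, GetEligibleNodes derived {"host1"} from the PV and the
	// framework intersected it with node names ({"node1"}), leaving no candidates.
	fmt.Println(node.Name, pv.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0].Values)
}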

View File

@@ -48,7 +48,6 @@ import (
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/metrics"
"k8s.io/kubernetes/pkg/scheduler/util/assumecache"
"k8s.io/kubernetes/pkg/volume/util"
)
// ConflictReason is used for the special strings which explain why
@@ -130,8 +129,6 @@ type InTreeToCSITranslator interface {
// 1. The scheduler takes a Pod off the scheduler queue and processes it serially:
// a. Invokes all pre-filter plugins for the pod. GetPodVolumeClaims() is invoked
// here, pod volume information will be saved in current scheduling cycle state for later use.
// If pod has bound immediate PVCs, GetEligibleNodes() is invoked to potentially reduce
// down the list of eligible nodes based on the bound PV's NodeAffinity (if any).
// b. Invokes all filter plugins, parallelized across nodes. FindPodVolumes() is invoked here.
// c. Invokes all score plugins. Future/TBD
// d. Selects the best node for the Pod.
@@ -154,14 +151,6 @@ type SchedulerVolumeBinder interface {
// unbound with immediate binding (including prebound) and PVs that belong to storage classes of unbound PVCs with delayed binding.
GetPodVolumeClaims(logger klog.Logger, pod *v1.Pod) (podVolumeClaims *PodVolumeClaims, err error)
// GetEligibleNodes checks the existing bound claims of the pod to determine if the list of nodes can be
// potentially reduced down to a subset of eligible nodes based on the bound claims which then can be used
// in subsequent scheduling stages.
//
// If eligibleNodes is 'nil', then it indicates that such eligible node reduction cannot be made
// and all nodes should be considered.
GetEligibleNodes(logger klog.Logger, boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.Set[string])
// FindPodVolumes checks if all of a Pod's PVCs can be satisfied by the
// node and returns pod's volumes information.
//
@@ -384,55 +373,6 @@ func (b *volumeBinder) FindPodVolumes(logger klog.Logger, pod *v1.Pod, podVolume
return
}
// GetEligibleNodes checks the existing bound claims of the pod to determine if the list of nodes can be
// potentially reduced down to a subset of eligible nodes based on the bound claims which then can be used
// in subsequent scheduling stages.
//
// Returning 'nil' for eligibleNodes indicates that such eligible node reduction cannot be made and all nodes
// should be considered.
func (b *volumeBinder) GetEligibleNodes(logger klog.Logger, boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.Set[string]) {
if len(boundClaims) == 0 {
return
}
var errs []error
for _, pvc := range boundClaims {
pvName := pvc.Spec.VolumeName
pv, err := b.pvCache.GetPV(pvName)
if err != nil {
errs = append(errs, err)
continue
}
// if the PersistentVolume is local and has node affinity matching specific node(s),
// add them to the eligible nodes
nodeNames := util.GetLocalPersistentVolumeNodeNames(pv)
if len(nodeNames) != 0 {
// on the first found list of eligible nodes for the local PersistentVolume,
// insert to the eligible node set.
if eligibleNodes == nil {
eligibleNodes = sets.New(nodeNames...)
} else {
// for subsequent finding of eligible nodes for the local PersistentVolume,
// take the intersection of the nodes with the existing eligible nodes
// for cases if PV1 has node affinity to node1 and PV2 has node affinity to node2,
// then the eligible node list should be empty.
eligibleNodes = eligibleNodes.Intersection(sets.New(nodeNames...))
}
}
}
if len(errs) > 0 {
logger.V(4).Info("GetEligibleNodes: one or more error occurred finding eligible nodes", "error", errs)
return nil
}
if eligibleNodes != nil {
logger.V(4).Info("GetEligibleNodes: reduced down eligible nodes", "nodes", eligibleNodes)
}
return
}
// AssumePodVolumes will take the matching PVs and PVCs to provision in pod's
// volume information for the chosen node, and:
// 1. Update the pvCache with the new prebound PV.
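The eligible-node computation removed above is plain set algebra: the first local PV's node names seed the set, and each further local PV intersects into it, so two local PVs pinned to different nodes leave an empty set (and an unschedulable pod). A standalone sketch using the same k8s.io/apimachinery sets package:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	pv1Nodes := sets.New("node1") // hostname-affinity values of the first local PV
	pv2Nodes := sets.New("node2") // hostname-affinity values of the second local PV

	// Each additional local PV narrows the eligible set by intersection.
	eligible := pv1Nodes.Intersection(pv2Nodes)
	fmt.Println(eligible.Len()) // 0: no single node can host both PVs
}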

View File

@@ -20,7 +20,6 @@ import (
"context"
"fmt"
"os"
"reflect"
"sort"
"testing"
"time"
@@ -32,7 +31,6 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/informers"
@@ -63,9 +61,6 @@ var (
boundPVCNode1a = makeTestPVC("unbound-pvc", "1G", "", pvcBound, "pv-node1a", "1", &waitClass)
immediateUnboundPVC = makeTestPVC("immediate-unbound-pvc", "1G", "", pvcUnbound, "", "1", &immediateClass)
immediateBoundPVC = makeTestPVC("immediate-bound-pvc", "1G", "", pvcBound, "pv-bound-immediate", "1", &immediateClass)
localPreboundPVC1a = makeTestPVC("local-prebound-pvc-1a", "1G", "", pvcPrebound, "local-pv-node1a", "1", &waitClass)
localPreboundPVC1b = makeTestPVC("local-prebound-pvc-1b", "1G", "", pvcPrebound, "local-pv-node1b", "1", &waitClass)
localPreboundPVC2a = makeTestPVC("local-prebound-pvc-2a", "1G", "", pvcPrebound, "local-pv-node2a", "1", &waitClass)
// PVCs for dynamic provisioning
provisionedPVC = makeTestPVC("provisioned-pvc", "1Gi", "", pvcUnbound, "", "1", &waitClassWithProvisioner)
@@ -97,9 +92,6 @@ var (
pvNode1bBoundHigherVersion = makeTestPV("pv-node1b", "node1", "10G", "2", unboundPVC2, waitClass)
pvBoundImmediate = makeTestPV("pv-bound-immediate", "node1", "1G", "1", immediateBoundPVC, immediateClass)
pvBoundImmediateNode2 = makeTestPV("pv-bound-immediate", "node2", "1G", "1", immediateBoundPVC, immediateClass)
localPVNode1a = makeLocalPV("local-pv-node1a", "node1", "5G", "1", nil, waitClass)
localPVNode1b = makeLocalPV("local-pv-node1b", "node1", "10G", "1", nil, waitClass)
localPVNode2a = makeLocalPV("local-pv-node2a", "node2", "5G", "1", nil, waitClass)
// PVs for CSI migration
migrationPVBound = makeTestPVForCSIMigration(zone1Labels, boundMigrationPVC, true)
@@ -709,12 +701,6 @@ func makeTestPVForCSIMigration(labels map[string]string, pvc *v1.PersistentVolum
return pv
}
func makeLocalPV(name, node, capacity, version string, boundToPVC *v1.PersistentVolumeClaim, className string) *v1.PersistentVolume {
pv := makeTestPV(name, node, capacity, version, boundToPVC, className)
pv.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0].Key = v1.LabelHostname
return pv
}
func pvcSetSelectedNode(pvc *v1.PersistentVolumeClaim, node string) *v1.PersistentVolumeClaim {
newPVC := pvc.DeepCopy()
metav1.SetMetaDataAnnotation(&newPVC.ObjectMeta, volume.AnnSelectedNode, node)
@@ -2326,130 +2312,3 @@ func TestCapacity(t *testing.T) {
})
}
}
func TestGetEligibleNodes(t *testing.T) {
type scenarioType struct {
// Inputs
pvcs []*v1.PersistentVolumeClaim
pvs []*v1.PersistentVolume
nodes []*v1.Node
// Expected return values
eligibleNodes sets.Set[string]
}
scenarios := map[string]scenarioType{
"no-bound-claims": {},
"no-nodes-found": {
pvcs: []*v1.PersistentVolumeClaim{
preboundPVC,
preboundPVCNode1a,
},
},
"pv-not-found": {
pvcs: []*v1.PersistentVolumeClaim{
preboundPVC,
preboundPVCNode1a,
},
nodes: []*v1.Node{
node1,
},
},
"node-affinity-mismatch": {
pvcs: []*v1.PersistentVolumeClaim{
preboundPVC,
preboundPVCNode1a,
},
pvs: []*v1.PersistentVolume{
pvNode1a,
},
nodes: []*v1.Node{
node1,
node2,
},
},
"local-pv-with-node-affinity": {
pvcs: []*v1.PersistentVolumeClaim{
localPreboundPVC1a,
localPreboundPVC1b,
},
pvs: []*v1.PersistentVolume{
localPVNode1a,
localPVNode1b,
},
nodes: []*v1.Node{
node1,
node2,
},
eligibleNodes: sets.New("node1"),
},
"multi-local-pv-with-different-nodes": {
pvcs: []*v1.PersistentVolumeClaim{
localPreboundPVC1a,
localPreboundPVC1b,
localPreboundPVC2a,
},
pvs: []*v1.PersistentVolume{
localPVNode1a,
localPVNode1b,
localPVNode2a,
},
nodes: []*v1.Node{
node1,
node2,
},
eligibleNodes: sets.New[string](),
},
"local-and-non-local-pv": {
pvcs: []*v1.PersistentVolumeClaim{
localPreboundPVC1a,
localPreboundPVC1b,
preboundPVC,
immediateBoundPVC,
},
pvs: []*v1.PersistentVolume{
localPVNode1a,
localPVNode1b,
pvNode1a,
pvBoundImmediate,
pvBoundImmediateNode2,
},
nodes: []*v1.Node{
node1,
node2,
},
eligibleNodes: sets.New("node1"),
},
}
run := func(t *testing.T, scenario scenarioType) {
logger, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
// Setup
testEnv := newTestBinder(t, ctx)
testEnv.initVolumes(scenario.pvs, scenario.pvs)
testEnv.initNodes(scenario.nodes)
testEnv.initClaims(scenario.pvcs, scenario.pvcs)
// Execute
eligibleNodes := testEnv.binder.GetEligibleNodes(logger, scenario.pvcs)
// Validate
if reflect.DeepEqual(scenario.eligibleNodes, eligibleNodes) {
fmt.Println("foo")
}
if compDiff := cmp.Diff(scenario.eligibleNodes, eligibleNodes, cmp.Comparer(func(a, b sets.Set[string]) bool {
return reflect.DeepEqual(a, b)
})); compDiff != "" {
t.Errorf("Unexpected eligible nodes (-want +got):\n%s", compDiff)
}
}
for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) { run(t, scenario) })
}
}
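A note on the validation in the deleted test: the cmp.Comparer pins the comparison to reflect.DeepEqual semantics, under which a nil sets.Set[string] and an allocated empty set are distinct values. That distinction carries meaning in GetEligibleNodes's contract — nil means "no reduction possible, consider all nodes", while an empty set means "reduction happened and nothing fits". A small illustration:

package main

import (
	"fmt"
	"reflect"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	var noReduction sets.Set[string]   // nil: every node stays eligible
	zeroEligible := sets.New[string]() // empty: no node satisfies all bound claims

	fmt.Println(reflect.DeepEqual(noReduction, zeroEligible)) // false: nil differs from empty map
	fmt.Println(noReduction.Len(), zeroEligible.Len())        // 0 0: yet both have zero members
}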

View File

@@ -20,7 +20,6 @@ import (
"context"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog/v2"
)
@@ -55,11 +54,6 @@ func (b *FakeVolumeBinder) GetPodVolumeClaims(_ klog.Logger, pod *v1.Pod) (podVo
return &PodVolumeClaims{}, nil
}
// GetEligibleNodes implements SchedulerVolumeBinder.GetEligibleNodes.
func (b *FakeVolumeBinder) GetEligibleNodes(_ klog.Logger, boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.Set[string]) {
return nil
}
// FindPodVolumes implements SchedulerVolumeBinder.FindPodVolumes.
func (b *FakeVolumeBinder) FindPodVolumes(_ klog.Logger, pod *v1.Pod, _ *PodVolumeClaims, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) {
return nil, b.config.FindReasons, b.config.FindErr
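Dropping the stub keeps FakeVolumeBinder aligned with the narrowed SchedulerVolumeBinder interface. A conventional compile-time guard for that property would look like the following (illustrative; not part of this commit):

// Fails to compile if FakeVolumeBinder ever drifts from the interface.
var _ SchedulerVolumeBinder = &FakeVolumeBinder{}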

View File

@@ -340,14 +340,6 @@ func (pl *VolumeBinding) PreFilter(ctx context.Context, state *framework.CycleSt
status.AppendReason("pod has unbound immediate PersistentVolumeClaims")
return nil, status
}
// Attempt to reduce down the number of nodes to consider in subsequent scheduling stages if pod has bound claims.
var result *framework.PreFilterResult
if eligibleNodes := pl.Binder.GetEligibleNodes(logger, podVolumeClaims.boundClaims); eligibleNodes != nil {
result = &framework.PreFilterResult{
NodeNames: eligibleNodes,
}
}
state.Write(stateKey, &stateData{
podVolumesByNode: make(map[string]*PodVolumes),
podVolumeClaims: &PodVolumeClaims{
@@ -356,7 +348,7 @@ func (pl *VolumeBinding) PreFilter(ctx context.Context, state *framework.CycleSt
unboundVolumesDelayBinding: podVolumeClaims.unboundVolumesDelayBinding,
},
})
return result, nil
return nil, nil
}
// PreFilterExtensions returns prefilter extensions, pod add and remove.
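With the reduction gone, PreFilter always returns a nil *framework.PreFilterResult, which the framework treats as "every node remains a candidate"; a non-nil result restricts later stages to nodes whose object name appears in NodeNames — precisely the name-versus-hostname trap this PR removes. A simplified sketch of that framework contract (not the verbatim implementation):

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/scheduler/framework"
)

// nodesToFilter sketches how the framework narrows nodes after PreFilter.
func nodesToFilter(result *framework.PreFilterResult, all []*v1.Node) []*v1.Node {
	if result.AllNodes() { // nil-safe: a nil result keeps every node
		return all
	}
	kept := make([]*v1.Node, 0, len(all))
	for _, n := range all {
		if result.NodeNames.Has(n.Name) { // matched by object name, not by hostname label
			kept = append(kept, n)
		}
	}
	return kept
}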

View File

@@ -27,7 +27,6 @@ import (
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/klog/v2/ktesting"
@@ -127,43 +126,6 @@ func TestVolumeBinding(t *testing.T) {
},
wantPreScoreStatus: framework.NewStatus(framework.Skip),
},
{
name: "all bound with local volumes",
pod: makePod("pod-a").withPVCVolume("pvc-a", "volume-a").withPVCVolume("pvc-b", "volume-b").Pod,
nodes: []*v1.Node{
makeNode("node-a").Node,
},
pvcs: []*v1.PersistentVolumeClaim{
makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim,
makePVC("pvc-b", waitSC.Name).withBoundPV("pv-b").PersistentVolumeClaim,
},
pvs: []*v1.PersistentVolume{
makePV("pv-a", waitSC.Name).withPhase(v1.VolumeBound).withNodeAffinity(map[string][]string{
v1.LabelHostname: {"node-a"},
}).PersistentVolume,
makePV("pv-b", waitSC.Name).withPhase(v1.VolumeBound).withNodeAffinity(map[string][]string{
v1.LabelHostname: {"node-a"},
}).PersistentVolume,
},
wantPreFilterResult: &framework.PreFilterResult{
NodeNames: sets.New("node-a"),
},
wantStateAfterPreFilter: &stateData{
podVolumeClaims: &PodVolumeClaims{
boundClaims: []*v1.PersistentVolumeClaim{
makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim,
makePVC("pvc-b", waitSC.Name).withBoundPV("pv-b").PersistentVolumeClaim,
},
unboundClaimsDelayBinding: []*v1.PersistentVolumeClaim{},
unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{},
},
podVolumesByNode: map[string]*PodVolumes{},
},
wantFilterStatus: []*framework.Status{
nil,
},
wantPreScoreStatus: framework.NewStatus(framework.Skip),
},
{
name: "PVC does not exist",
pod: makePod("pod-a").withPVCVolume("pvc-a", "").Pod,

View File

@@ -1805,8 +1805,9 @@ func TestSchedulerSchedulePod(t *testing.T) {
name string
registerPlugins []tf.RegisterPluginFunc
extenders []tf.FakeExtender
nodes []string
nodes []*v1.Node
pvcs []v1.PersistentVolumeClaim
pvs []v1.PersistentVolume
pod *v1.Pod
pods []*v1.Pod
wantNodes sets.Set[string]
@@ -1819,9 +1820,12 @@
tf.RegisterFilterPlugin("FalseFilter", tf.NewFalseFilterPlugin),
tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"node1", "node2"},
pod: st.MakePod().Name("2").UID("2").Obj(),
name: "test 1",
nodes: []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"kubernetes.io/hostname": "node1"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{"kubernetes.io/hostname": "node2"}}},
},
pod: st.MakePod().Name("2").UID("2").Obj(),
name: "test 1",
wErr: &framework.FitError{
Pod: st.MakePod().Name("2").UID("2").Obj(),
NumAllNodes: 2,
@@ -1841,7 +1845,10 @@
tf.RegisterScorePlugin("EqualPrioritizerPlugin", tf.NewEqualPrioritizerPlugin(), 1),
tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"node1", "node2"},
nodes: []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"kubernetes.io/hostname": "node1"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{"kubernetes.io/hostname": "node2"}}},
},
pod: st.MakePod().Name("ignore").UID("ignore").Obj(),
wantNodes: sets.New("node1", "node2"),
name: "test 2",
@@ -1854,7 +1861,10 @@
tf.RegisterFilterPlugin("MatchFilter", tf.NewMatchFilterPlugin),
tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"node1", "node2"},
nodes: []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"kubernetes.io/hostname": "node1"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{"kubernetes.io/hostname": "node2"}}},
},
pod: st.MakePod().Name("node2").UID("node2").Obj(),
wantNodes: sets.New("node2"),
name: "test 3",
@@ -1867,7 +1877,11 @@
tf.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1),
tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"3", "2", "1"},
nodes: []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: "3", Labels: map[string]string{"kubernetes.io/hostname": "3"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "2", Labels: map[string]string{"kubernetes.io/hostname": "2"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "1", Labels: map[string]string{"kubernetes.io/hostname": "1"}}},
},
pod: st.MakePod().Name("ignore").UID("ignore").Obj(),
wantNodes: sets.New("3"),
name: "test 4",
@@ -1880,7 +1894,11 @@
tf.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1),
tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"3", "2", "1"},
nodes: []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: "3", Labels: map[string]string{"kubernetes.io/hostname": "3"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "2", Labels: map[string]string{"kubernetes.io/hostname": "2"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "1", Labels: map[string]string{"kubernetes.io/hostname": "1"}}},
},
pod: st.MakePod().Name("2").UID("2").Obj(),
wantNodes: sets.New("2"),
name: "test 5",
@@ -1894,7 +1912,11 @@
tf.RegisterScorePlugin("ReverseNumericMap", newReverseNumericMapPlugin(), 2),
tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"3", "2", "1"},
nodes: []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: "3", Labels: map[string]string{"kubernetes.io/hostname": "3"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "2", Labels: map[string]string{"kubernetes.io/hostname": "2"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "1", Labels: map[string]string{"kubernetes.io/hostname": "1"}}},
},
pod: st.MakePod().Name("2").UID("2").Obj(),
wantNodes: sets.New("1"),
name: "test 6",
@@ -1908,9 +1930,13 @@
tf.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1),
tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"3", "2", "1"},
pod: st.MakePod().Name("2").UID("2").Obj(),
name: "test 7",
nodes: []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: "3", Labels: map[string]string{"kubernetes.io/hostname": "3"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "2", Labels: map[string]string{"kubernetes.io/hostname": "2"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "1", Labels: map[string]string{"kubernetes.io/hostname": "1"}}},
},
pod: st.MakePod().Name("2").UID("2").Obj(),
name: "test 7",
wErr: &framework.FitError{
Pod: st.MakePod().Name("2").UID("2").Obj(),
NumAllNodes: 3,
@@ -1935,9 +1961,12 @@
pods: []*v1.Pod{
st.MakePod().Name("2").UID("2").Node("2").Phase(v1.PodRunning).Obj(),
},
pod: st.MakePod().Name("2").UID("2").Obj(),
nodes: []string{"1", "2"},
name: "test 8",
pod: st.MakePod().Name("2").UID("2").Obj(),
nodes: []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: "1", Labels: map[string]string{"kubernetes.io/hostname": "1"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "2", Labels: map[string]string{"kubernetes.io/hostname": "2"}}},
},
name: "test 8",
wErr: &framework.FitError{
Pod: st.MakePod().Name("2").UID("2").Obj(),
NumAllNodes: 2,
@@ -1959,13 +1988,19 @@
tf.RegisterScorePlugin("EqualPrioritizerPlugin", tf.NewEqualPrioritizerPlugin(), 1),
tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"node1", "node2"},
nodes: []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"kubernetes.io/hostname": "node1"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{"kubernetes.io/hostname": "node2"}}},
},
pvcs: []v1.PersistentVolumeClaim{
{
ObjectMeta: metav1.ObjectMeta{Name: "existingPVC", UID: types.UID("existingPVC"), Namespace: v1.NamespaceDefault},
Spec: v1.PersistentVolumeClaimSpec{VolumeName: "existingPV"},
},
},
pvs: []v1.PersistentVolume{
{ObjectMeta: metav1.ObjectMeta{Name: "existingPV"}},
},
pod: st.MakePod().Name("ignore").UID("ignore").Namespace(v1.NamespaceDefault).PVC("existingPVC").Obj(),
wantNodes: sets.New("node1", "node2"),
name: "existing PVC",
@@ -1979,9 +2014,12 @@
tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin),
tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"node1", "node2"},
pod: st.MakePod().Name("ignore").UID("ignore").PVC("unknownPVC").Obj(),
name: "unknown PVC",
nodes: []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"kubernetes.io/hostname": "node1"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{"kubernetes.io/hostname": "node2"}}},
},
pod: st.MakePod().Name("ignore").UID("ignore").PVC("unknownPVC").Obj(),
name: "unknown PVC",
wErr: &framework.FitError{
Pod: st.MakePod().Name("ignore").UID("ignore").PVC("unknownPVC").Obj(),
NumAllNodes: 2,
@@ -2003,10 +2041,13 @@
tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin),
tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"node1", "node2"},
pvcs: []v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "existingPVC", UID: types.UID("existingPVC"), Namespace: v1.NamespaceDefault, DeletionTimestamp: &metav1.Time{}}}},
pod: st.MakePod().Name("ignore").UID("ignore").Namespace(v1.NamespaceDefault).PVC("existingPVC").Obj(),
name: "deleted PVC",
nodes: []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"kubernetes.io/hostname": "node1"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{"kubernetes.io/hostname": "node2"}}},
},
pvcs: []v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "existingPVC", UID: types.UID("existingPVC"), Namespace: v1.NamespaceDefault, DeletionTimestamp: &metav1.Time{}}}},
pod: st.MakePod().Name("ignore").UID("ignore").Namespace(v1.NamespaceDefault).PVC("existingPVC").Obj(),
name: "deleted PVC",
wErr: &framework.FitError{
Pod: st.MakePod().Name("ignore").UID("ignore").Namespace(v1.NamespaceDefault).PVC("existingPVC").Obj(),
NumAllNodes: 2,
@@ -2028,10 +2069,13 @@
tf.RegisterScorePlugin("TrueMap", newTrueMapPlugin(), 2),
tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"2", "1"},
pod: st.MakePod().Name("2").Obj(),
name: "test error with priority map",
wErr: fmt.Errorf("running Score plugins: %w", fmt.Errorf(`plugin "FalseMap" failed with: %w`, errPrioritize)),
nodes: []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: "2", Labels: map[string]string{"kubernetes.io/hostname": "2"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "1", Labels: map[string]string{"kubernetes.io/hostname": "1"}}},
},
pod: st.MakePod().Name("2").Obj(),
name: "test error with priority map",
wErr: fmt.Errorf("running Score plugins: %w", fmt.Errorf(`plugin "FalseMap" failed with: %w`, errPrioritize)),
},
{
name: "test podtopologyspread plugin - 2 nodes with maxskew=1",
@@ -2045,8 +2089,11 @@
),
tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"node1", "node2"},
pod: st.MakePod().Name("p").UID("p").Label("foo", "").SpreadConstraint(1, "hostname", v1.DoNotSchedule, &metav1.LabelSelector{
nodes: []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"kubernetes.io/hostname": "node1"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{"kubernetes.io/hostname": "node2"}}},
},
pod: st.MakePod().Name("p").UID("p").Label("foo", "").SpreadConstraint(1, "kubernetes.io/hostname", v1.DoNotSchedule, &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "foo",
@@ -2073,8 +2120,12 @@
tf.RegisterScorePlugin("EqualPrioritizerPlugin", tf.NewEqualPrioritizerPlugin(), 1),
tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"node1", "node2", "node3"},
pod: st.MakePod().Name("p").UID("p").Label("foo", "").SpreadConstraint(2, "hostname", v1.DoNotSchedule, &metav1.LabelSelector{
nodes: []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"kubernetes.io/hostname": "node1"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{"kubernetes.io/hostname": "node2"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: map[string]string{"kubernetes.io/hostname": "node3"}}},
},
pod: st.MakePod().Name("p").UID("p").Label("foo", "").SpreadConstraint(2, "kubernetes.io/hostname", v1.DoNotSchedule, &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "foo",
@@ -2101,7 +2152,9 @@
tf.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1),
tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"3"},
nodes: []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: "3", Labels: map[string]string{"kubernetes.io/hostname": "3"}}},
},
pod: st.MakePod().Name("test-filter").UID("test-filter").Obj(),
wantNodes: nil,
wErr: &framework.FitError{
@@ -2132,7 +2185,11 @@
Predicates: []tf.FitPredicate{tf.FalsePredicateExtender},
},
},
nodes: []string{"1", "2", "3"},
nodes: []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: "1", Labels: map[string]string{"kubernetes.io/hostname": "1"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "2", Labels: map[string]string{"kubernetes.io/hostname": "2"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "3", Labels: map[string]string{"kubernetes.io/hostname": "3"}}},
},
pod: st.MakePod().Name("test-filter").UID("test-filter").Obj(),
wantNodes: nil,
wErr: &framework.FitError{
@@ -2159,7 +2216,9 @@
tf.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1),
tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"3"},
nodes: []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: "3", Labels: map[string]string{"kubernetes.io/hostname": "3"}}},
},
pod: st.MakePod().Name("test-filter").UID("test-filter").Obj(),
wantNodes: nil,
wErr: &framework.FitError{
@@ -2184,7 +2243,10 @@
tf.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1),
tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"1", "2"},
nodes: []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: "1", Labels: map[string]string{"kubernetes.io/hostname": "1"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "2", Labels: map[string]string{"kubernetes.io/hostname": "2"}}},
},
pod: st.MakePod().Name("test-filter").UID("test-filter").Obj(),
wantNodes: nil,
wErr: nil,
@@ -2199,7 +2261,10 @@
),
tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"1", "2"},
nodes: []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: "1", Labels: map[string]string{"kubernetes.io/hostname": "1"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "2", Labels: map[string]string{"kubernetes.io/hostname": "2"}}},
},
pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
wantNodes: nil,
wErr: &framework.FitError{
@@ -2225,7 +2290,10 @@
),
tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"1", "2"},
nodes: []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: "1", Labels: map[string]string{"kubernetes.io/hostname": "1"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "2", Labels: map[string]string{"kubernetes.io/hostname": "2"}}},
},
pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
wantNodes: nil,
wErr: fmt.Errorf(`running PreFilter plugin "FakePreFilter": %w`, errors.New("injected error status")),
@@ -2248,7 +2316,11 @@
),
tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"node1", "node2", "node3"},
nodes: []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"kubernetes.io/hostname": "node1"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{"kubernetes.io/hostname": "node2"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: map[string]string{"kubernetes.io/hostname": "node3"}}},
},
pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
wantNodes: sets.New("node2"),
// since this case has no score plugin, we'll only try to find one node in Filter stage
@@ -2272,8 +2344,12 @@
),
tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"node1", "node2", "node3"},
pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
nodes: []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"kubernetes.io/hostname": "node1"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{"kubernetes.io/hostname": "node2"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: map[string]string{"kubernetes.io/hostname": "node3"}}},
},
pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
wErr: &framework.FitError{
Pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
NumAllNodes: 3,
@@ -2302,8 +2378,10 @@
),
tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"node1"},
pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
nodes: []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"kubernetes.io/hostname": "node1"}}},
},
pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
wErr: &framework.FitError{
Pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
NumAllNodes: 1,
@@ -2330,8 +2408,11 @@
),
tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"node1", "node2"},
pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
nodes: []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"kubernetes.io/hostname": "node1"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{"kubernetes.io/hostname": "node2"}}},
},
pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
wErr: &framework.FitError{
Pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
NumAllNodes: 2,
@@ -2376,7 +2457,11 @@
tf.RegisterScorePlugin("EqualPrioritizerPlugin", tf.NewEqualPrioritizerPlugin(), 1),
tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"node1", "node2", "node3"},
nodes: []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"kubernetes.io/hostname": "node1"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{"kubernetes.io/hostname": "node2"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: map[string]string{"kubernetes.io/hostname": "node3"}}},
},
pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
wantNodes: sets.New("node2", "node3"),
wantEvaluatedNodes: ptr.To[int32](3),
@@ -2392,7 +2477,10 @@
framework.NewStatus(framework.Error, "this score function shouldn't be executed because this plugin returned Skip in the PreScore"),
), "PreScore", "Score"),
},
nodes: []string{"node1", "node2"},
nodes: []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"kubernetes.io/hostname": "node1"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{"kubernetes.io/hostname": "node2"}}},
},
pod: st.MakePod().Name("ignore").UID("ignore").Obj(),
wantNodes: sets.New("node1", "node2"),
},
@@ -2403,7 +2491,11 @@
tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin),
tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"node1", "node2", "node3"},
nodes: []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"kubernetes.io/hostname": "node1"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{"kubernetes.io/hostname": "node2"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: map[string]string{"kubernetes.io/hostname": "node3"}}},
},
pod: st.MakePod().Name("pod1").UID("pod1").Obj(),
wantNodes: sets.New("node1", "node2", "node3"),
wantEvaluatedNodes: ptr.To[int32](1),
@@ -2418,7 +2510,11 @@
),
tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"node1", "node2", "node3"},
nodes: []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"kubernetes.io/hostname": "node1"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{"kubernetes.io/hostname": "node2"}}},
{ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: map[string]string{"kubernetes.io/hostname": "node3"}}},
},
pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
wantNodes: sets.New("node1", "node2"),
// since this case has no score plugin, we'll only try to find one node in Filter stage
@@ -2437,7 +2533,9 @@
tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin),
tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"1", "2"},
nodes: []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: "1", Labels: map[string]string{"kubernetes.io/hostname": "1"}}}, {ObjectMeta: metav1.ObjectMeta{Name: "2", Labels: map[string]string{"kubernetes.io/hostname": "2"}}},
},
pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
wantNodes: nil,
wErr: &framework.FitError{
@@ -2449,6 +2547,50 @@
},
},
},
{
registerPlugins: []tf.RegisterPluginFunc{
tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
tf.RegisterPreFilterPlugin(volumebinding.Name, frameworkruntime.FactoryAdapter(fts, volumebinding.New)),
tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin),
tf.RegisterScorePlugin("EqualPrioritizerPlugin", tf.NewEqualPrioritizerPlugin(), 1),
tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"kubernetes.io/hostname": "host1"}}},
},
pvcs: []v1.PersistentVolumeClaim{
{
ObjectMeta: metav1.ObjectMeta{Name: "PVC1", UID: types.UID("PVC1"), Namespace: v1.NamespaceDefault},
Spec: v1.PersistentVolumeClaimSpec{VolumeName: "PV1"},
},
},
pvs: []v1.PersistentVolume{
{
ObjectMeta: metav1.ObjectMeta{Name: "PV1", UID: types.UID("PV1")},
Spec: v1.PersistentVolumeSpec{
NodeAffinity: &v1.VolumeNodeAffinity{
Required: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "kubernetes.io/hostname",
Operator: v1.NodeSelectorOpIn,
Values: []string{"host1"},
},
},
},
},
},
},
},
},
},
pod: st.MakePod().Name("pod1").UID("pod1").Namespace(v1.NamespaceDefault).PVC("PVC1").Obj(),
wantNodes: sets.New("node1"),
name: "hostname and nodename of the node do not match",
wErr: nil,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
@@ -2461,8 +2603,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
cache.AddPod(logger, pod)
}
var nodes []*v1.Node
for _, name := range test.nodes {
node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: name, Labels: map[string]string{"hostname": name}}}
for _, node := range test.nodes {
nodes = append(nodes, node)
cache.AddNode(logger, node)
}
@@ -2472,10 +2613,9 @@ func TestSchedulerSchedulePod(t *testing.T) {
for _, pvc := range test.pvcs {
metav1.SetMetaDataAnnotation(&pvc.ObjectMeta, volume.AnnBindCompleted, "true")
cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, &pvc, metav1.CreateOptions{})
if pvName := pvc.Spec.VolumeName; pvName != "" {
pv := v1.PersistentVolume{ObjectMeta: metav1.ObjectMeta{Name: pvName}}
cs.CoreV1().PersistentVolumes().Create(ctx, &pv, metav1.CreateOptions{})
}
}
for _, pv := range test.pvs {
_, _ = cs.CoreV1().PersistentVolumes().Create(ctx, &pv, metav1.CreateOptions{})
}
snapshot := internalcache.NewSnapshot(test.pods, nodes)
fwk, err := tf.NewFramework(
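The test table now supplies complete *v1.Node objects — an object name plus a kubernetes.io/hostname label that may deliberately differ — and explicit v1.PersistentVolume objects, instead of synthesizing label-matching nodes and bare PVs inside the harness. That is what lets the new "hostname and nodename of the node do not match" case state the regression directly: node1 carries hostname label host1, PV1 requires hostname In {host1}, and scheduling must still land on node1. Since most cases repeat the same node literal, a small helper along these lines could cut the repetition (hypothetical; not part of this commit):

// makeNamedNode builds a node with an explicit hostname label — a
// hypothetical convenience helper, not in the upstream change.
func makeNamedNode(name, hostname string) *v1.Node {
	return &v1.Node{ObjectMeta: metav1.ObjectMeta{
		Name:   name,
		Labels: map[string]string{"kubernetes.io/hostname": hostname},
	}}
}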

View File

@@ -511,44 +511,6 @@ func IsLocalEphemeralVolume(volume v1.Volume) bool {
volume.ConfigMap != nil
}
// GetLocalPersistentVolumeNodeNames returns the node affinity node name(s) for
// local PersistentVolumes. nil is returned if the PV does not have any
// specific node affinity node selector terms and match expressions.
// PersistentVolume with node affinity has select and match expressions
// in the form of:
//
// nodeAffinity:
// required:
// nodeSelectorTerms:
// - matchExpressions:
// - key: kubernetes.io/hostname
// operator: In
// values:
// - <node1>
// - <node2>
func GetLocalPersistentVolumeNodeNames(pv *v1.PersistentVolume) []string {
if pv == nil || pv.Spec.NodeAffinity == nil || pv.Spec.NodeAffinity.Required == nil {
return nil
}
var result sets.Set[string]
for _, term := range pv.Spec.NodeAffinity.Required.NodeSelectorTerms {
var nodes sets.Set[string]
for _, matchExpr := range term.MatchExpressions {
if matchExpr.Key == v1.LabelHostname && matchExpr.Operator == v1.NodeSelectorOpIn {
if nodes == nil {
nodes = sets.New(matchExpr.Values...)
} else {
nodes = nodes.Intersection(sets.New(matchExpr.Values...))
}
}
}
result = result.Union(nodes)
}
return sets.List(result)
}
// GetPodVolumeNames returns names of volumes that are used in a pod,
// either as filesystem mount or raw block device, together with list
// of all SELinux contexts of all containers that use the volumes.
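For reference, the helper deleted above ANDs the kubernetes.io/hostname In expressions inside a single node-selector term (set intersection) and ORs across terms (set union). Worked through the last test case deleted below: term one intersects {node1, node2} with {node2, node3}, leaving {node2}; term two contributes {node1}; the union is {node1, node2}. A standalone sketch of that computation:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Match expressions within one term are ANDed: intersect their value sets.
	term1 := sets.New("node1", "node2").Intersection(sets.New("node2", "node3")) // {node2}
	// Terms are ORed: union the per-term results.
	term2 := sets.New("node1")
	fmt.Println(sets.List(term1.Union(term2))) // [node1 node2]
}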

View File

@@ -22,7 +22,6 @@ import (
"runtime"
"testing"
"github.com/google/go-cmp/cmp"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -962,304 +961,3 @@ func TestGetPodVolumeNames(t *testing.T) {
})
}
}
func TestGetPersistentVolumeNodeNames(t *testing.T) {
tests := []struct {
name string
pv *v1.PersistentVolume
expectedNodeNames []string
}{
{
name: "nil PV",
pv: nil,
},
{
name: "PV missing node affinity",
pv: &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
},
},
},
{
name: "PV node affinity missing required",
pv: &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
},
Spec: v1.PersistentVolumeSpec{
NodeAffinity: &v1.VolumeNodeAffinity{},
},
},
},
{
name: "PV node affinity required zero selector terms",
pv: &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
},
Spec: v1.PersistentVolumeSpec{
NodeAffinity: &v1.VolumeNodeAffinity{
Required: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{},
},
},
},
},
expectedNodeNames: []string{},
},
{
name: "PV node affinity required zero match expressions",
pv: &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
},
Spec: v1.PersistentVolumeSpec{
NodeAffinity: &v1.VolumeNodeAffinity{
Required: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{},
},
},
},
},
},
},
expectedNodeNames: []string{},
},
{
name: "PV node affinity required multiple match expressions",
pv: &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
},
Spec: v1.PersistentVolumeSpec{
NodeAffinity: &v1.VolumeNodeAffinity{
Required: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "foo",
Operator: v1.NodeSelectorOpIn,
},
{
Key: "bar",
Operator: v1.NodeSelectorOpIn,
},
},
},
},
},
},
},
},
expectedNodeNames: []string{},
},
{
name: "PV node affinity required single match expression with no values",
pv: &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
},
Spec: v1.PersistentVolumeSpec{
NodeAffinity: &v1.VolumeNodeAffinity{
Required: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: v1.LabelHostname,
Operator: v1.NodeSelectorOpIn,
Values: []string{},
},
},
},
},
},
},
},
},
expectedNodeNames: []string{},
},
{
name: "PV node affinity required single match expression with single node",
pv: &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
},
Spec: v1.PersistentVolumeSpec{
NodeAffinity: &v1.VolumeNodeAffinity{
Required: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: v1.LabelHostname,
Operator: v1.NodeSelectorOpIn,
Values: []string{
"node1",
},
},
},
},
},
},
},
},
},
expectedNodeNames: []string{
"node1",
},
},
{
name: "PV node affinity required single match expression with multiple nodes",
pv: &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
},
Spec: v1.PersistentVolumeSpec{
NodeAffinity: &v1.VolumeNodeAffinity{
Required: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: v1.LabelHostname,
Operator: v1.NodeSelectorOpIn,
Values: []string{
"node1",
"node2",
},
},
},
},
},
},
},
},
},
expectedNodeNames: []string{
"node1",
"node2",
},
},
{
name: "PV node affinity required multiple match expressions with multiple nodes",
pv: &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
},
Spec: v1.PersistentVolumeSpec{
NodeAffinity: &v1.VolumeNodeAffinity{
Required: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "bar",
Operator: v1.NodeSelectorOpIn,
Values: []string{
"node1",
"node2",
},
},
{
Key: v1.LabelHostname,
Operator: v1.NodeSelectorOpIn,
Values: []string{
"node3",
"node4",
},
},
},
},
},
},
},
},
},
expectedNodeNames: []string{
"node3",
"node4",
},
},
{
name: "PV node affinity required multiple node selectors multiple match expressions with multiple nodes",
pv: &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
},
Spec: v1.PersistentVolumeSpec{
NodeAffinity: &v1.VolumeNodeAffinity{
Required: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: v1.LabelHostname,
Operator: v1.NodeSelectorOpIn,
Values: []string{
"node1",
"node2",
},
},
{
Key: v1.LabelHostname,
Operator: v1.NodeSelectorOpIn,
Values: []string{
"node2",
"node3",
},
},
},
},
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: v1.LabelHostname,
Operator: v1.NodeSelectorOpIn,
Values: []string{
"node1",
},
},
},
},
},
},
},
},
},
expectedNodeNames: []string{
"node1",
"node2",
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
nodeNames := GetLocalPersistentVolumeNodeNames(test.pv)
if diff := cmp.Diff(test.expectedNodeNames, nodeNames); diff != "" {
t.Errorf("Unexpected nodeNames (-want, +got):\n%s", diff)
}
})
}
}