Add unit tests for CSI predicate

Hemant Kumar 2019-06-04 14:27:05 -04:00 committed by Fabio Bertinatto
parent 00b0ab86af
commit 6abc04d059
3 changed files with 395 additions and 102 deletions


@@ -67,12 +67,15 @@ go_test(
         "//pkg/volume/util:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/api/storage/v1:go_default_library",
+        "//staging/src/k8s.io/api/storage/v1beta1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
         "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
+        "//staging/src/k8s.io/csi-translation-lib/plugins:go_default_library",
+        "//vendor/k8s.io/utils/pointer:go_default_library",
     ],
 )
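The three new test dependencies line up with what the new cases exercise: storage/v1beta1 supplies the CSINode API, csi-translation-lib/plugins supplies the in-tree and CSI driver name constants, and utils/pointer builds the *int32 attach limits. As orientation, a minimal, illustrative sketch (not part of this commit) of how the translation-lib constants pair up; constant names are taken from the k8s.io/csi-translation-lib/plugins package as vendored here (GCEPDInTreePluginName is from the same package, though not referenced in this commit):

	package main

	import (
		"fmt"

		csilibplugins "k8s.io/csi-translation-lib/plugins"
	)

	func main() {
		// Each migratable in-tree plugin name pairs with a CSI driver name;
		// the tests below use exactly these constants.
		fmt.Println(csilibplugins.AWSEBSInTreePluginName, "->", csilibplugins.AWSEBSDriverName) // kubernetes.io/aws-ebs -> ebs.csi.aws.com
		fmt.Println(csilibplugins.GCEPDInTreePluginName, "->", csilibplugins.GCEPDDriverName)   // kubernetes.io/gce-pd -> pd.csi.storage.gke.io
	}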


@@ -19,58 +19,34 @@ package predicates
 import (
 	"fmt"
 	"reflect"
+	"strings"
 	"testing"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/sets"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
+	csilibplugins "k8s.io/csi-translation-lib/plugins"
 	"k8s.io/kubernetes/pkg/features"
+	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )

+const (
+	ebsCSIDriverName         = csilibplugins.AWSEBSDriverName
+	gceCSIDriverName         = csilibplugins.GCEPDDriverName
+	hostpathInTreePluginName = "kubernetes.io/hostpath"
+)
+
 func TestCSIVolumeCountPredicate(t *testing.T) {
-	// for pods with CSI pvcs
-	oneVolPod := &v1.Pod{
-		Spec: v1.PodSpec{
-			Volumes: []v1.Volume{
-				{
-					VolumeSource: v1.VolumeSource{
-						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
-							ClaimName: "csi-ebs-0",
-						},
-					},
-				},
-			},
-		},
-	}
-	twoVolPod := &v1.Pod{
-		Spec: v1.PodSpec{
-			Volumes: []v1.Volume{
-				{
-					VolumeSource: v1.VolumeSource{
-						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
-							ClaimName: "cs-ebs-1",
-						},
-					},
-				},
-				{
-					VolumeSource: v1.VolumeSource{
-						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
-							ClaimName: "csi-ebs-2",
-						},
-					},
-				},
-			},
-		},
-	}
 	runningPod := &v1.Pod{
 		Spec: v1.PodSpec{
 			Volumes: []v1.Volume{
 				{
 					VolumeSource: v1.VolumeSource{
 						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
-							ClaimName: "csi-ebs-3",
+							ClaimName: "csi-ebs.csi.aws.com-3",
 						},
 					},
 				},
@@ -140,14 +116,95 @@ func TestCSIVolumeCountPredicate(t *testing.T) {
 				{
 					VolumeSource: v1.VolumeSource{
 						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
-							ClaimName: "cs-gce-1",
+							ClaimName: "csi-pd.csi.storage.gke.io-1",
 						},
 					},
 				},
 				{
 					VolumeSource: v1.VolumeSource{
 						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
-							ClaimName: "csi-gce-2",
+							ClaimName: "csi-pd.csi.storage.gke.io-2",
+						},
+					},
+				},
+			},
+		},
+	}
+
+	// In-tree volumes
+	inTreeOneVolPod := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "csi-kubernetes.io/aws-ebs-0",
+						},
+					},
+				},
+			},
+		},
+	}
+	inTreeTwoVolPod := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "csi-kubernetes.io/aws-ebs-1",
+						},
+					},
+				},
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "csi-kubernetes.io/aws-ebs-2",
+						},
+					},
+				},
+			},
+		},
+	}
+
+	// pods with matching csi driver names
+	csiEBSOneVolPod := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "csi-ebs.csi.aws.com-0",
+						},
+					},
+				},
+			},
+		},
+	}
+	csiEBSTwoVolPod := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "csi-ebs.csi.aws.com-1",
+						},
+					},
+				},
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "csi-ebs.csi.aws.com-2",
+						},
+					},
+				},
+			},
+		},
+	}
+	inTreeNonMigratableOneVolPod := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "csi-kubernetes.io/hostpath-0",
 						},
 					},
 				},
@@ -156,112 +213,260 @@ func TestCSIVolumeCountPredicate(t *testing.T) {
 	}

 	tests := []struct {
 		newPod       *v1.Pod
 		existingPods []*v1.Pod
 		filterName   string
 		maxVols      int
 		driverNames  []string
 		fits         bool
 		test         string
+		migrationEnabled      bool
+		limitSource           string
+		expectedFailureReason *PredicateFailureError
 	}{
 		{
-			newPod:       oneVolPod,
-			existingPods: []*v1.Pod{runningPod, twoVolPod},
+			newPod:       csiEBSOneVolPod,
+			existingPods: []*v1.Pod{runningPod, csiEBSTwoVolPod},
 			filterName:   "csi",
 			maxVols:      4,
-			driverNames:  []string{"ebs"},
+			driverNames:  []string{ebsCSIDriverName},
 			fits:         true,
-			test:         "fits when node capacity >= new pods CSI volume",
+			test:         "fits when node volume limit >= new pods CSI volume",
+			limitSource:  "node",
 		},
 		{
-			newPod:       oneVolPod,
-			existingPods: []*v1.Pod{runningPod, twoVolPod},
+			newPod:       csiEBSOneVolPod,
+			existingPods: []*v1.Pod{runningPod, csiEBSTwoVolPod},
 			filterName:   "csi",
 			maxVols:      2,
-			driverNames:  []string{"ebs"},
+			driverNames:  []string{ebsCSIDriverName},
 			fits:         false,
-			test:         "doesn't when node capacity <= pods CSI volume",
+			test:         "doesn't when node volume limit <= pods CSI volume",
+			limitSource:  "node",
 		},
+		{
+			newPod:       csiEBSOneVolPod,
+			existingPods: []*v1.Pod{runningPod, csiEBSTwoVolPod},
+			filterName:   "csi",
+			maxVols:      2,
+			driverNames:  []string{ebsCSIDriverName},
+			fits:         true,
+			test:         "should when driver does not support volume limits",
+			limitSource:  "csinode-with-no-limit",
+		},
 		// should count pending PVCs
 		{
-			newPod:       oneVolPod,
-			existingPods: []*v1.Pod{pendingVolumePod, twoVolPod},
+			newPod:       csiEBSOneVolPod,
+			existingPods: []*v1.Pod{pendingVolumePod, csiEBSTwoVolPod},
 			filterName:   "csi",
 			maxVols:      2,
-			driverNames:  []string{"ebs"},
+			driverNames:  []string{ebsCSIDriverName},
 			fits:         false,
-			test:         "count pending PVCs towards capacity <= pods CSI volume",
+			test:         "count pending PVCs towards volume limit <= pods CSI volume",
+			limitSource:  "node",
 		},
 		// two same pending PVCs should be counted as 1
 		{
-			newPod:       oneVolPod,
-			existingPods: []*v1.Pod{pendingVolumePod, unboundPVCPod2, twoVolPod},
+			newPod:       csiEBSOneVolPod,
+			existingPods: []*v1.Pod{pendingVolumePod, unboundPVCPod2, csiEBSTwoVolPod},
 			filterName:   "csi",
-			maxVols:      3,
-			driverNames:  []string{"ebs"},
+			maxVols:      4,
+			driverNames:  []string{ebsCSIDriverName},
 			fits:         true,
-			test:         "count multiple pending pvcs towards capacity >= pods CSI volume",
+			test:         "count multiple pending pvcs towards volume limit >= pods CSI volume",
+			limitSource:  "node",
 		},
 		// should count PVCs with invalid PV name but valid SC
 		{
-			newPod:       oneVolPod,
-			existingPods: []*v1.Pod{missingPVPod, twoVolPod},
+			newPod:       csiEBSOneVolPod,
+			existingPods: []*v1.Pod{missingPVPod, csiEBSTwoVolPod},
 			filterName:   "csi",
 			maxVols:      2,
-			driverNames:  []string{"ebs"},
+			driverNames:  []string{ebsCSIDriverName},
 			fits:         false,
 			test:         "should count PVCs with invalid PV name but valid SC",
+			limitSource:  "node",
 		},
 		// don't count a volume which has storageclass missing
 		{
-			newPod:       oneVolPod,
+			newPod:       csiEBSOneVolPod,
 			existingPods: []*v1.Pod{runningPod, noSCPVCPod},
 			filterName:   "csi",
 			maxVols:      2,
-			driverNames:  []string{"ebs"},
+			driverNames:  []string{ebsCSIDriverName},
 			fits:         true,
-			test:         "don't count pvcs with missing SC towards capacity",
+			test:         "don't count pvcs with missing SC towards volume limit",
+			limitSource:  "node",
 		},
 		// don't count multiple volume types
 		{
-			newPod:       oneVolPod,
-			existingPods: []*v1.Pod{gceTwoVolPod, twoVolPod},
+			newPod:       csiEBSOneVolPod,
+			existingPods: []*v1.Pod{gceTwoVolPod, csiEBSTwoVolPod},
 			filterName:   "csi",
 			maxVols:      2,
-			driverNames:  []string{"ebs", "gce"},
-			fits:         true,
-			test:         "don't count pvcs with different type towards capacity",
+			driverNames:  []string{ebsCSIDriverName, gceCSIDriverName},
+			fits:         false,
+			test:         "count pvcs with the same type towards volume limit",
+			limitSource:  "node",
 		},
 		{
 			newPod:       gceTwoVolPod,
-			existingPods: []*v1.Pod{twoVolPod, runningPod},
+			existingPods: []*v1.Pod{csiEBSTwoVolPod, runningPod},
 			filterName:   "csi",
 			maxVols:      2,
-			driverNames:  []string{"ebs", "gce"},
+			driverNames:  []string{ebsCSIDriverName, gceCSIDriverName},
 			fits:         true,
-			test:         "don't count pvcs with different type towards capacity",
+			test:         "don't count pvcs with different type towards volume limit",
+			limitSource:  "node",
 		},
+		// Tests for in-tree volume migration
+		{
+			newPod:           inTreeOneVolPod,
+			existingPods:     []*v1.Pod{inTreeTwoVolPod},
+			filterName:       "csi",
+			maxVols:          2,
+			driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
+			fits:             false,
+			migrationEnabled: true,
+			limitSource:      "csinode",
+			test:             "should count in-tree volumes if migration is enabled",
+		},
+		{
+			newPod:           pendingVolumePod,
+			existingPods:     []*v1.Pod{inTreeTwoVolPod},
+			filterName:       "csi",
+			maxVols:          2,
+			driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
+			fits:             false,
+			migrationEnabled: true,
+			limitSource:      "csinode",
+			test:             "should count unbound in-tree volumes if migration is enabled",
+		},
+		{
+			newPod:           inTreeOneVolPod,
+			existingPods:     []*v1.Pod{inTreeTwoVolPod},
+			filterName:       "csi",
+			maxVols:          2,
+			driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
+			fits:             true,
+			migrationEnabled: false,
+			limitSource:      "csinode",
+			test:             "should not count in-tree volume if migration is disabled",
+		},
+		{
+			newPod:           inTreeOneVolPod,
+			existingPods:     []*v1.Pod{inTreeTwoVolPod},
+			filterName:       "csi",
+			maxVols:          2,
+			driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
+			fits:             true,
+			migrationEnabled: true,
+			limitSource:      "csinode-with-no-limit",
+			test:             "should not limit pod if volume used does not report limits",
+		},
+		{
+			newPod:           inTreeOneVolPod,
+			existingPods:     []*v1.Pod{inTreeTwoVolPod},
+			filterName:       "csi",
+			maxVols:          2,
+			driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
+			fits:             true,
+			migrationEnabled: false,
+			limitSource:      "csinode-with-no-limit",
+			test:             "should not limit in-tree pod if migration is disabled",
+		},
+		{
+			newPod:           inTreeNonMigratableOneVolPod,
+			existingPods:     []*v1.Pod{csiEBSTwoVolPod},
+			filterName:       "csi",
+			maxVols:          2,
+			driverNames:      []string{hostpathInTreePluginName, ebsCSIDriverName},
+			fits:             true,
+			migrationEnabled: true,
+			limitSource:      "csinode",
+			test:             "should not count non-migratable in-tree volumes",
+		},
+		// mixed volumes
+		{
+			newPod:           inTreeOneVolPod,
+			existingPods:     []*v1.Pod{csiEBSTwoVolPod},
+			filterName:       "csi",
+			maxVols:          2,
+			driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
+			fits:             false,
+			migrationEnabled: true,
+			limitSource:      "csinode",
+			test:             "should count in-tree and csi volumes if migration is enabled (when scheduling in-tree volumes)",
+		},
+		{
+			newPod:           csiEBSOneVolPod,
+			existingPods:     []*v1.Pod{inTreeTwoVolPod},
+			filterName:       "csi",
+			maxVols:          2,
+			driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
+			fits:             false,
+			migrationEnabled: true,
+			limitSource:      "csinode",
+			test:             "should count in-tree and csi volumes if migration is enabled (when scheduling csi volumes)",
+		},
+		{
+			newPod:           csiEBSOneVolPod,
+			existingPods:     []*v1.Pod{csiEBSTwoVolPod, inTreeTwoVolPod},
+			filterName:       "csi",
+			maxVols:          3,
+			driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
+			fits:             true,
+			migrationEnabled: false,
+			limitSource:      "csinode",
+			test:             "should not count in-tree and count csi volumes if migration is disabled (when scheduling csi volumes)",
+		},
+		{
+			newPod:           inTreeOneVolPod,
+			existingPods:     []*v1.Pod{csiEBSTwoVolPod},
+			filterName:       "csi",
+			maxVols:          2,
+			driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
+			fits:             true,
+			migrationEnabled: false,
+			limitSource:      "csinode",
+			test:             "should not count in-tree and count csi volumes if migration is disabled (when scheduling in-tree volumes)",
+		},
 	}
 	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()
-	expectedFailureReasons := []PredicateFailureReason{ErrMaxVolumeCountExceeded}
 	// running attachable predicate tests with feature gate and limit present on nodes
 	for _, test := range tests {
-		node := getNodeWithPodAndVolumeLimits(test.existingPods, int64(test.maxVols), test.driverNames...)
-		pred := NewCSIMaxVolumeLimitPredicate(getFakeCSIPVInfo(test.filterName, test.driverNames...),
-			getFakeCSIPVCInfo(test.filterName, "csi-sc", test.driverNames...),
-			getFakeCSIStorageClassInfo("csi-sc", test.driverNames[0]))
-		fits, reasons, err := pred(test.newPod, GetPredicateMetadata(test.newPod, nil), node)
-		if err != nil {
-			t.Errorf("Using allocatable [%s]%s: unexpected error: %v", test.filterName, test.test, err)
-		}
-		if !fits && !reflect.DeepEqual(reasons, expectedFailureReasons) {
-			t.Errorf("Using allocatable [%s]%s: unexpected failure reasons: %v, want: %v", test.filterName, test.test, reasons, expectedFailureReasons)
-		}
-		if fits != test.fits {
-			t.Errorf("Using allocatable [%s]%s: expected %v, got %v", test.filterName, test.test, test.fits, fits)
-		}
+		t.Run(test.test, func(t *testing.T) {
+			node := getNodeWithPodAndVolumeLimits(test.limitSource, test.existingPods, int64(test.maxVols), test.driverNames...)
+			if test.migrationEnabled {
+				defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIMigration, true)()
+				defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIMigrationAWS, true)()
+				enableMigrationOnNode(node, csilibplugins.AWSEBSInTreePluginName)
+			} else {
+				defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIMigration, false)()
+				defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIMigrationAWS, false)()
+			}

+			expectedFailureReasons := []PredicateFailureReason{ErrMaxVolumeCountExceeded}
+			if test.expectedFailureReason != nil {
+				expectedFailureReasons = []PredicateFailureReason{test.expectedFailureReason}
+			}

+			pred := NewCSIMaxVolumeLimitPredicate(getFakeCSIPVInfo(test.filterName, test.driverNames...),
+				getFakeCSIPVCInfo(test.filterName, "csi-sc", test.driverNames...),
+				getFakeCSIStorageClassInfo("csi-sc", test.driverNames[0]))

+			fits, reasons, err := pred(test.newPod, GetPredicateMetadata(test.newPod, nil), node)
+			if err != nil {
+				t.Errorf("Using allocatable [%s]%s: unexpected error: %v", test.filterName, test.test, err)
+			}
+			if !fits && !reflect.DeepEqual(expectedFailureReasons, reasons) {
+				t.Errorf("Using allocatable [%s]%s: unexpected failure reasons: %v, want: %v", test.filterName, test.test, reasons, expectedFailureReasons)
+			}
+			if fits != test.fits {
+				t.Errorf("Using allocatable [%s]%s: expected %v, got %v", test.filterName, test.test, test.fits, fits)
+			}
+		})
 	}
 }
@@ -281,6 +486,28 @@ func getFakeCSIPVInfo(volumeName string, driverNames ...string) FakePersistentVo
 				},
 			},
 		}
+
+		switch driver {
+		case csilibplugins.AWSEBSInTreePluginName:
+			pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
+				AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
+					VolumeID: volumeHandle,
+				},
+			}
+		case hostpathInTreePluginName:
+			pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
+				HostPath: &v1.HostPathVolumeSource{
+					Path: "/tmp",
+				},
+			}
+		default:
+			pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
+				CSI: &v1.CSIPersistentVolumeSource{
+					Driver:       driver,
+					VolumeHandle: volumeHandle,
+				},
+			}
+		}
+
 		pvInfos = append(pvInfos, pv)
 	}
@@ -317,6 +544,22 @@ func getFakeCSIPVCInfo(volumeName, scName string, driverNames ...string) FakePer
 	return pvcInfos
 }

+func enableMigrationOnNode(nodeInfo *schedulernodeinfo.NodeInfo, pluginName string) {
+	csiNode := nodeInfo.CSINode()
+	nodeInfoAnnotations := csiNode.GetAnnotations()
+	if nodeInfoAnnotations == nil {
+		nodeInfoAnnotations = map[string]string{}
+	}
+
+	newAnnotationSet := sets.NewString()
+	newAnnotationSet.Insert(pluginName)
+	nas := strings.Join(newAnnotationSet.List(), ",")
+	nodeInfoAnnotations[v1.MigratedPluginsAnnotationKey] = nas
+
+	csiNode.Annotations = nodeInfoAnnotations
+	nodeInfo.SetCSINode(csiNode)
+}
+
 func getFakeCSIStorageClassInfo(scName, provisionerName string) FakeStorageClassInfo {
 	return FakeStorageClassInfo{
 		{
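The enableMigrationOnNode helper above drives the migration scenarios: it stamps the fake node's CSINode object with the comma-separated migrated-plugins annotation (the value behind v1.MigratedPluginsAnnotationKey from k8s.io/api/core/v1), which is how the predicate learns that in-tree volumes of that plugin should be counted against the CSI driver's limit. A hedged sketch of the inverse check a consumer of that annotation might perform; isPluginMigratedOnNode and the storagev1beta1 alias are hypothetical, not part of this commit:

	import (
		"strings"

		v1 "k8s.io/api/core/v1"
		storagev1beta1 "k8s.io/api/storage/v1beta1"
	)

	// isPluginMigratedOnNode (hypothetical) reports whether pluginName appears
	// in the comma-separated migrated-plugins annotation written above.
	func isPluginMigratedOnNode(csiNode *storagev1beta1.CSINode, pluginName string) bool {
		migrated := csiNode.GetAnnotations()[v1.MigratedPluginsAnnotationKey]
		for _, name := range strings.Split(migrated, ",") {
			if name == pluginName {
				return true
			}
		}
		return false
	}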


@@ -24,6 +24,7 @@ import (
 	"testing"

 	"k8s.io/api/core/v1"
+	"k8s.io/api/storage/v1beta1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
@@ -31,6 +32,7 @@ import (
 	"k8s.io/kubernetes/pkg/features"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 	volumeutil "k8s.io/kubernetes/pkg/volume/util"
+	utilpointer "k8s.io/utils/pointer"
 )

 func onePVCPod(filterName string) *v1.Pod {
@@ -806,7 +808,7 @@ func TestVolumeCountConflicts(t *testing.T) {
 	// running attachable predicate tests with feature gate and limit present on nodes
 	for _, test := range tests {
-		node := getNodeWithPodAndVolumeLimits(test.existingPods, int64(test.maxVols), test.filterName)
+		node := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName)
 		pred := NewMaxPDVolumeCountPredicate(test.filterName, getFakePVInfo(test.filterName), getFakePVCInfo(test.filterName))
 		fits, reasons, err := pred(test.newPod, GetPredicateMetadata(test.newPod, nil), node)
 		if err != nil {
@@ -937,18 +939,63 @@ func TestMaxVolumeFuncM4(t *testing.T) {
 	}
 }

-func getNodeWithPodAndVolumeLimits(pods []*v1.Pod, limit int64, driverNames ...string) *schedulernodeinfo.NodeInfo {
+func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int64, driverNames ...string) *schedulernodeinfo.NodeInfo {
 	nodeInfo := schedulernodeinfo.NewNodeInfo(pods...)
-	node := &v1.Node{
-		ObjectMeta: metav1.ObjectMeta{Name: "node-for-max-pd-test-1"},
-		Status: v1.NodeStatus{
-			Allocatable: v1.ResourceList{},
-		},
-	}
-	for _, driver := range driverNames {
-		node.Status.Allocatable[getVolumeLimitKey(driver)] = *resource.NewQuantity(limit, resource.DecimalSI)
-	}
-	nodeInfo.SetNode(node)
+	addLimitToNode := func() {
+		node := &v1.Node{
+			ObjectMeta: metav1.ObjectMeta{Name: "node-for-max-pd-test-1"},
+			Status: v1.NodeStatus{
+				Allocatable: v1.ResourceList{},
+			},
+		}
+		for _, driver := range driverNames {
+			node.Status.Allocatable[getVolumeLimitKey(driver)] = *resource.NewQuantity(limit, resource.DecimalSI)
+		}
+		nodeInfo.SetNode(node)
+	}
+
+	createCSINode := func() *v1beta1.CSINode {
+		return &v1beta1.CSINode{
+			ObjectMeta: metav1.ObjectMeta{Name: "csi-node-for-max-pd-test-1"},
+			Spec: v1beta1.CSINodeSpec{
+				Drivers: []v1beta1.CSINodeDriver{},
+			},
+		}
+	}
+
+	addLimitToCSINode := func(addLimits bool) {
+		csiNode := createCSINode()
+		for _, driver := range driverNames {
+			driver := v1beta1.CSINodeDriver{
+				Name:   driver,
+				NodeID: "node-for-max-pd-test-1",
+			}
+			if addLimits {
+				driver.Allocatable = &v1beta1.VolumeNodeResources{
+					Count: utilpointer.Int32Ptr(int32(limit)),
+				}
+			}
+			csiNode.Spec.Drivers = append(csiNode.Spec.Drivers, driver)
+		}
+		nodeInfo.SetCSINode(csiNode)
+	}
+
+	switch limitSource {
+	case "node":
+		addLimitToNode()
+	case "csinode":
+		addLimitToCSINode(true)
+	case "both":
+		addLimitToNode()
+		addLimitToCSINode(true)
+	case "csinode-with-no-limit":
+		addLimitToCSINode(false)
+	case "no-csi-driver":
+		csiNode := createCSINode()
+		nodeInfo.SetCSINode(csiNode)
+	default:
+		return nodeInfo
+	}
 	return nodeInfo
 }
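With the new limitSource parameter, callers of getNodeWithPodAndVolumeLimits pick where the attach limit is published. The call shapes below are illustrative, assuming an existingPods slice like the ones in the tests above:

	// Limit published on the Node's allocatable resources (the legacy path).
	node := getNodeWithPodAndVolumeLimits("node", existingPods, 2, ebsCSIDriverName)

	// Limit published per driver on the CSINode object.
	node = getNodeWithPodAndVolumeLimits("csinode", existingPods, 2, ebsCSIDriverName)

	// CSINode lists the driver but reports no limit, so the predicate
	// should not restrict the pod.
	node = getNodeWithPodAndVolumeLimits("csinode-with-no-limit", existingPods, 2, ebsCSIDriverName)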