Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-22 19:31:44 +00:00

Add unit tests for CSI predicate

parent 00b0ab86af
commit 6abc04d059
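The new tests below are table-driven: each case picks a new pod, the pods already on the node, a volume limit and where that limit comes from, and the CSI migration setting, then asserts whether the CSI max-volume predicate lets the pod fit. A minimal sketch of that pattern, where the fits helper is a hypothetical stand-in for the real predicate call (the actual tests invoke NewCSIMaxVolumeLimitPredicate against a node built per case):

package predicates

import "testing"

// fits is a hypothetical stand-in for invoking the real predicate.
func fits(attached, limit int) bool {
	return attached < limit
}

func TestVolumeLimitPattern(t *testing.T) {
	tests := []struct {
		name     string
		attached int
		maxVols  int
		wantFits bool
	}{
		{name: "under the limit", attached: 3, maxVols: 4, wantFits: true},
		{name: "at the limit", attached: 2, maxVols: 2, wantFits: false},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			if got := fits(test.attached, test.maxVols); got != test.wantFits {
				t.Errorf("expected fits=%v, got %v", test.wantFits, got)
			}
		})
	}
}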
@@ -67,12 +67,15 @@ go_test(
        "//pkg/volume/util:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/storage/v1:go_default_library",
+       "//staging/src/k8s.io/api/storage/v1beta1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
+       "//staging/src/k8s.io/csi-translation-lib/plugins:go_default_library",
+       "//vendor/k8s.io/utils/pointer:go_default_library",
    ],
)
@@ -19,58 +19,34 @@ package predicates

 import (
 	"fmt"
 	"reflect"
 	"strings"
 	"testing"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/sets"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
+	csilibplugins "k8s.io/csi-translation-lib/plugins"
 	"k8s.io/kubernetes/pkg/features"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )

+const (
+	ebsCSIDriverName = csilibplugins.AWSEBSDriverName
+	gceCSIDriverName = csilibplugins.GCEPDDriverName
+
+	hostpathInTreePluginName = "kubernetes.io/hostpath"
+)
+
 func TestCSIVolumeCountPredicate(t *testing.T) {
 	// for pods with CSI pvcs
-	oneVolPod := &v1.Pod{
-		Spec: v1.PodSpec{
-			Volumes: []v1.Volume{
-				{
-					VolumeSource: v1.VolumeSource{
-						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
-							ClaimName: "csi-ebs-0",
-						},
-					},
-				},
-			},
-		},
-	}
-	twoVolPod := &v1.Pod{
-		Spec: v1.PodSpec{
-			Volumes: []v1.Volume{
-				{
-					VolumeSource: v1.VolumeSource{
-						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
-							ClaimName: "cs-ebs-1",
-						},
-					},
-				},
-				{
-					VolumeSource: v1.VolumeSource{
-						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
-							ClaimName: "csi-ebs-2",
-						},
-					},
-				},
-			},
-		},
-	}

 	runningPod := &v1.Pod{
 		Spec: v1.PodSpec{
 			Volumes: []v1.Volume{
 				{
 					VolumeSource: v1.VolumeSource{
 						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
-							ClaimName: "csi-ebs-3",
+							ClaimName: "csi-ebs.csi.aws.com-3",
 						},
 					},
 				},
@@ -140,14 +116,95 @@ func TestCSIVolumeCountPredicate(t *testing.T) {
 				{
 					VolumeSource: v1.VolumeSource{
 						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
-							ClaimName: "cs-gce-1",
+							ClaimName: "csi-pd.csi.storage.gke.io-1",
 						},
 					},
 				},
 				{
 					VolumeSource: v1.VolumeSource{
 						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
-							ClaimName: "csi-gce-2",
+							ClaimName: "csi-pd.csi.storage.gke.io-2",
 						},
 					},
 				},
 			},
 		},
 	}
+	// In-tree volumes
+	inTreeOneVolPod := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "csi-kubernetes.io/aws-ebs-0",
+						},
+					},
+				},
+			},
+		},
+	}
+	inTreeTwoVolPod := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "csi-kubernetes.io/aws-ebs-1",
+						},
+					},
+				},
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "csi-kubernetes.io/aws-ebs-2",
+						},
+					},
+				},
+			},
+		},
+	}
+	// pods with matching csi driver names
+	csiEBSOneVolPod := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "csi-ebs.csi.aws.com-0",
+						},
+					},
+				},
+			},
+		},
+	}
+	csiEBSTwoVolPod := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "csi-ebs.csi.aws.com-1",
+						},
+					},
+				},
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "csi-ebs.csi.aws.com-2",
+						},
+					},
+				},
+			},
+		},
+	}
+	inTreeNonMigratableOneVolPod := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "csi-kubernetes.io/hostpath-0",
+						},
+					},
+				},
@@ -156,112 +213,260 @@ func TestCSIVolumeCountPredicate(t *testing.T) {
 	}

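+	// Each case feeds the predicate a new pod, the pods already on the node,
+	// a volume limit and where that limit comes from (limitSource), plus the
+	// CSI migration setting to exercise in-tree to CSI translation.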
 	tests := []struct {
-		newPod       *v1.Pod
-		existingPods []*v1.Pod
-		filterName   string
-		maxVols      int
-		driverNames  []string
-		fits         bool
-		test         string
+		newPod                *v1.Pod
+		existingPods          []*v1.Pod
+		filterName            string
+		maxVols               int
+		driverNames           []string
+		fits                  bool
+		test                  string
+		migrationEnabled      bool
+		limitSource           string
+		expectedFailureReason *PredicateFailureError
 	}{
 		{
-			newPod:       oneVolPod,
-			existingPods: []*v1.Pod{runningPod, twoVolPod},
+			newPod:       csiEBSOneVolPod,
+			existingPods: []*v1.Pod{runningPod, csiEBSTwoVolPod},
 			filterName:   "csi",
 			maxVols:      4,
-			driverNames:  []string{"ebs"},
+			driverNames:  []string{ebsCSIDriverName},
 			fits:         true,
-			test:         "fits when node capacity >= new pods CSI volume",
+			test:         "fits when node volume limit >= new pods CSI volume",
+			limitSource:  "node",
 		},
 		{
-			newPod:       oneVolPod,
-			existingPods: []*v1.Pod{runningPod, twoVolPod},
+			newPod:       csiEBSOneVolPod,
+			existingPods: []*v1.Pod{runningPod, csiEBSTwoVolPod},
 			filterName:   "csi",
 			maxVols:      2,
-			driverNames:  []string{"ebs"},
+			driverNames:  []string{ebsCSIDriverName},
 			fits:         false,
-			test:         "doesn't when node capacity <= pods CSI volume",
+			test:         "doesn't when node volume limit <= pods CSI volume",
+			limitSource:  "node",
 		},
+		{
+			newPod:       csiEBSOneVolPod,
+			existingPods: []*v1.Pod{runningPod, csiEBSTwoVolPod},
+			filterName:   "csi",
+			maxVols:      2,
+			driverNames:  []string{ebsCSIDriverName},
+			fits:         true,
+			test:         "should when driver does not support volume limits",
+			limitSource:  "csinode-with-no-limit",
+		},
 		// should count pending PVCs
 		{
-			newPod:       oneVolPod,
-			existingPods: []*v1.Pod{pendingVolumePod, twoVolPod},
+			newPod:       csiEBSOneVolPod,
+			existingPods: []*v1.Pod{pendingVolumePod, csiEBSTwoVolPod},
 			filterName:   "csi",
 			maxVols:      2,
-			driverNames:  []string{"ebs"},
+			driverNames:  []string{ebsCSIDriverName},
 			fits:         false,
-			test:         "count pending PVCs towards capacity <= pods CSI volume",
+			test:         "count pending PVCs towards volume limit <= pods CSI volume",
+			limitSource:  "node",
 		},
 		// two same pending PVCs should be counted as 1
 		{
-			newPod:       oneVolPod,
-			existingPods: []*v1.Pod{pendingVolumePod, unboundPVCPod2, twoVolPod},
+			newPod:       csiEBSOneVolPod,
+			existingPods: []*v1.Pod{pendingVolumePod, unboundPVCPod2, csiEBSTwoVolPod},
 			filterName:   "csi",
-			maxVols:      3,
-			driverNames:  []string{"ebs"},
+			maxVols:      4,
+			driverNames:  []string{ebsCSIDriverName},
 			fits:         true,
-			test:         "count multiple pending pvcs towards capacity >= pods CSI volume",
+			test:         "count multiple pending pvcs towards volume limit >= pods CSI volume",
+			limitSource:  "node",
 		},
 		// should count PVCs with invalid PV name but valid SC
 		{
-			newPod:       oneVolPod,
-			existingPods: []*v1.Pod{missingPVPod, twoVolPod},
+			newPod:       csiEBSOneVolPod,
+			existingPods: []*v1.Pod{missingPVPod, csiEBSTwoVolPod},
 			filterName:   "csi",
 			maxVols:      2,
-			driverNames:  []string{"ebs"},
+			driverNames:  []string{ebsCSIDriverName},
 			fits:         false,
 			test:         "should count PVCs with invalid PV name but valid SC",
+			limitSource:  "node",
 		},
 		// don't count a volume which has storageclass missing
 		{
-			newPod:       oneVolPod,
+			newPod:       csiEBSOneVolPod,
 			existingPods: []*v1.Pod{runningPod, noSCPVCPod},
 			filterName:   "csi",
 			maxVols:      2,
-			driverNames:  []string{"ebs"},
+			driverNames:  []string{ebsCSIDriverName},
 			fits:         true,
-			test:         "don't count pvcs with missing SC towards capacity",
+			test:         "don't count pvcs with missing SC towards volume limit",
+			limitSource:  "node",
 		},
 		// don't count multiple volume types
 		{
-			newPod:       oneVolPod,
-			existingPods: []*v1.Pod{gceTwoVolPod, twoVolPod},
+			newPod:       csiEBSOneVolPod,
+			existingPods: []*v1.Pod{gceTwoVolPod, csiEBSTwoVolPod},
 			filterName:   "csi",
 			maxVols:      2,
-			driverNames:  []string{"ebs", "gce"},
-			fits:         true,
-			test:         "don't count pvcs with different type towards capacity",
+			driverNames:  []string{ebsCSIDriverName, gceCSIDriverName},
+			fits:         false,
+			test:         "count pvcs with the same type towards volume limit",
+			limitSource:  "node",
 		},
 		{
 			newPod:       gceTwoVolPod,
-			existingPods: []*v1.Pod{twoVolPod, runningPod},
+			existingPods: []*v1.Pod{csiEBSTwoVolPod, runningPod},
 			filterName:   "csi",
 			maxVols:      2,
-			driverNames:  []string{"ebs", "gce"},
+			driverNames:  []string{ebsCSIDriverName, gceCSIDriverName},
 			fits:         true,
-			test:         "don't count pvcs with different type towards capacity",
+			test:         "don't count pvcs with different type towards volume limit",
+			limitSource:  "node",
 		},
+		// Tests for in-tree volume migration
+		{
+			newPod:           inTreeOneVolPod,
+			existingPods:     []*v1.Pod{inTreeTwoVolPod},
+			filterName:       "csi",
+			maxVols:          2,
+			driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
+			fits:             false,
+			migrationEnabled: true,
+			limitSource:      "csinode",
+			test:             "should count in-tree volumes if migration is enabled",
+		},
+		{
+			newPod:           pendingVolumePod,
+			existingPods:     []*v1.Pod{inTreeTwoVolPod},
+			filterName:       "csi",
+			maxVols:          2,
+			driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
+			fits:             false,
+			migrationEnabled: true,
+			limitSource:      "csinode",
+			test:             "should count unbound in-tree volumes if migration is enabled",
+		},
+		{
+			newPod:           inTreeOneVolPod,
+			existingPods:     []*v1.Pod{inTreeTwoVolPod},
+			filterName:       "csi",
+			maxVols:          2,
+			driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
+			fits:             true,
+			migrationEnabled: false,
+			limitSource:      "csinode",
+			test:             "should not count in-tree volume if migration is disabled",
+		},
+		{
+			newPod:           inTreeOneVolPod,
+			existingPods:     []*v1.Pod{inTreeTwoVolPod},
+			filterName:       "csi",
+			maxVols:          2,
+			driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
+			fits:             true,
+			migrationEnabled: true,
+			limitSource:      "csinode-with-no-limit",
+			test:             "should not limit pod if volume used does not report limits",
+		},
+		{
+			newPod:           inTreeOneVolPod,
+			existingPods:     []*v1.Pod{inTreeTwoVolPod},
+			filterName:       "csi",
+			maxVols:          2,
+			driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
+			fits:             true,
+			migrationEnabled: false,
+			limitSource:      "csinode-with-no-limit",
+			test:             "should not limit in-tree pod if migration is disabled",
+		},
+		{
+			newPod:           inTreeNonMigratableOneVolPod,
+			existingPods:     []*v1.Pod{csiEBSTwoVolPod},
+			filterName:       "csi",
+			maxVols:          2,
+			driverNames:      []string{hostpathInTreePluginName, ebsCSIDriverName},
+			fits:             true,
+			migrationEnabled: true,
+			limitSource:      "csinode",
+			test:             "should not count non-migratable in-tree volumes",
+		},
+		// mixed volumes
+		{
+			newPod:           inTreeOneVolPod,
+			existingPods:     []*v1.Pod{csiEBSTwoVolPod},
+			filterName:       "csi",
+			maxVols:          2,
+			driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
+			fits:             false,
+			migrationEnabled: true,
+			limitSource:      "csinode",
+			test:             "should count in-tree and csi volumes if migration is enabled (when scheduling in-tree volumes)",
+		},
+		{
+			newPod:           csiEBSOneVolPod,
+			existingPods:     []*v1.Pod{inTreeTwoVolPod},
+			filterName:       "csi",
+			maxVols:          2,
+			driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
+			fits:             false,
+			migrationEnabled: true,
+			limitSource:      "csinode",
+			test:             "should count in-tree and csi volumes if migration is enabled (when scheduling csi volumes)",
+		},
+		{
+			newPod:           csiEBSOneVolPod,
+			existingPods:     []*v1.Pod{csiEBSTwoVolPod, inTreeTwoVolPod},
+			filterName:       "csi",
+			maxVols:          3,
+			driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
+			fits:             true,
+			migrationEnabled: false,
+			limitSource:      "csinode",
+			test:             "should not count in-tree and count csi volumes if migration is disabled (when scheduling csi volumes)",
+		},
+		{
+			newPod:           inTreeOneVolPod,
+			existingPods:     []*v1.Pod{csiEBSTwoVolPod},
+			filterName:       "csi",
+			maxVols:          2,
+			driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
+			fits:             true,
+			migrationEnabled: false,
+			limitSource:      "csinode",
+			test:             "should not count in-tree and count csi volumes if migration is disabled (when scheduling in-tree volumes)",
+		},
 	}

 	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()
-	expectedFailureReasons := []PredicateFailureReason{ErrMaxVolumeCountExceeded}
 	// running attachable predicate tests with feature gate and limit present on nodes
 	for _, test := range tests {
-		node := getNodeWithPodAndVolumeLimits(test.existingPods, int64(test.maxVols), test.driverNames...)
-		pred := NewCSIMaxVolumeLimitPredicate(getFakeCSIPVInfo(test.filterName, test.driverNames...),
-			getFakeCSIPVCInfo(test.filterName, "csi-sc", test.driverNames...),
-			getFakeCSIStorageClassInfo("csi-sc", test.driverNames[0]))
-		fits, reasons, err := pred(test.newPod, GetPredicateMetadata(test.newPod, nil), node)
-		if err != nil {
-			t.Errorf("Using allocatable [%s]%s: unexpected error: %v", test.filterName, test.test, err)
-		}
-		if !fits && !reflect.DeepEqual(reasons, expectedFailureReasons) {
-			t.Errorf("Using allocatable [%s]%s: unexpected failure reasons: %v, want: %v", test.filterName, test.test, reasons, expectedFailureReasons)
-		}
-		if fits != test.fits {
-			t.Errorf("Using allocatable [%s]%s: expected %v, got %v", test.filterName, test.test, test.fits, fits)
-		}
+		t.Run(test.test, func(t *testing.T) {
+			node := getNodeWithPodAndVolumeLimits(test.limitSource, test.existingPods, int64(test.maxVols), test.driverNames...)
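+			// Toggle the CSI migration feature gates per case; the node's
+			// CSINode object must also advertise the plugin as migrated for
+			// in-tree volumes to be counted against the CSI driver's limit.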
+			if test.migrationEnabled {
+				defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIMigration, true)()
+				defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIMigrationAWS, true)()
+				enableMigrationOnNode(node, csilibplugins.AWSEBSInTreePluginName)
+			} else {
+				defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIMigration, false)()
+				defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIMigrationAWS, false)()
+			}
+
+			expectedFailureReasons := []PredicateFailureReason{ErrMaxVolumeCountExceeded}
+			if test.expectedFailureReason != nil {
+				expectedFailureReasons = []PredicateFailureReason{test.expectedFailureReason}
+			}
+
+			pred := NewCSIMaxVolumeLimitPredicate(getFakeCSIPVInfo(test.filterName, test.driverNames...),
+				getFakeCSIPVCInfo(test.filterName, "csi-sc", test.driverNames...),
+				getFakeCSIStorageClassInfo("csi-sc", test.driverNames[0]))
+
+			fits, reasons, err := pred(test.newPod, GetPredicateMetadata(test.newPod, nil), node)
+			if err != nil {
+				t.Errorf("Using allocatable [%s]%s: unexpected error: %v", test.filterName, test.test, err)
+			}
+			if !fits && !reflect.DeepEqual(expectedFailureReasons, reasons) {
+				t.Errorf("Using allocatable [%s]%s: unexpected failure reasons: %v, want: %v", test.filterName, test.test, reasons, expectedFailureReasons)
+			}
+			if fits != test.fits {
+				t.Errorf("Using allocatable [%s]%s: expected %v, got %v", test.filterName, test.test, test.fits, fits)
+			}
+		})
 	}
 }

@@ -281,6 +486,28 @@ func getFakeCSIPVInfo(volumeName string, driverNames ...string) FakePersistentVolumeInfo {
 				},
 			},
 		}
+
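+		// Give the PV a source matching the driver under test: in-tree AWS
+		// EBS and hostpath sources feed the migration cases, everything else
+		// gets a plain CSI source.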
+		switch driver {
+		case csilibplugins.AWSEBSInTreePluginName:
+			pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
+				AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
+					VolumeID: volumeHandle,
+				},
+			}
+		case hostpathInTreePluginName:
+			pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
+				HostPath: &v1.HostPathVolumeSource{
+					Path: "/tmp",
+				},
+			}
+		default:
+			pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
+				CSI: &v1.CSIPersistentVolumeSource{
+					Driver:       driver,
+					VolumeHandle: volumeHandle,
+				},
+			}
+		}
 		pvInfos = append(pvInfos, pv)
 	}

@@ -317,6 +544,22 @@ func getFakeCSIPVCInfo(volumeName, scName string, driverNames ...string) FakePersistentVolumeClaimInfo {
 	return pvcInfos
 }

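+// enableMigrationOnNode marks pluginName as migrated on the test node's
+// CSINode object, mirroring what the kubelet advertises when CSI migration
+// is enabled for that plugin.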
+func enableMigrationOnNode(nodeInfo *schedulernodeinfo.NodeInfo, pluginName string) {
+	csiNode := nodeInfo.CSINode()
+	nodeInfoAnnotations := csiNode.GetAnnotations()
+	if nodeInfoAnnotations == nil {
+		nodeInfoAnnotations = map[string]string{}
+	}
+
+	newAnnotationSet := sets.NewString()
+	newAnnotationSet.Insert(pluginName)
+	nas := strings.Join(newAnnotationSet.List(), ",")
+	nodeInfoAnnotations[v1.MigratedPluginsAnnotationKey] = nas
+
+	csiNode.Annotations = nodeInfoAnnotations
+	nodeInfo.SetCSINode(csiNode)
+}
+
 func getFakeCSIStorageClassInfo(scName, provisionerName string) FakeStorageClassInfo {
 	return FakeStorageClassInfo{
 		{
@@ -24,6 +24,7 @@ import (
 	"testing"

 	"k8s.io/api/core/v1"
+	"k8s.io/api/storage/v1beta1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
@@ -31,6 +32,7 @@ import (
 	"k8s.io/kubernetes/pkg/features"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 	volumeutil "k8s.io/kubernetes/pkg/volume/util"
+	utilpointer "k8s.io/utils/pointer"
 )

 func onePVCPod(filterName string) *v1.Pod {
@@ -806,7 +808,7 @@ func TestVolumeCountConflicts(t *testing.T) {

 	// running attachable predicate tests with feature gate and limit present on nodes
 	for _, test := range tests {
-		node := getNodeWithPodAndVolumeLimits(test.existingPods, int64(test.maxVols), test.filterName)
+		node := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName)
 		pred := NewMaxPDVolumeCountPredicate(test.filterName, getFakePVInfo(test.filterName), getFakePVCInfo(test.filterName))
 		fits, reasons, err := pred(test.newPod, GetPredicateMetadata(test.newPod, nil), node)
 		if err != nil {
@@ -937,18 +939,63 @@ func TestMaxVolumeFuncM4(t *testing.T) {
 	}
 }

-func getNodeWithPodAndVolumeLimits(pods []*v1.Pod, limit int64, driverNames ...string) *schedulernodeinfo.NodeInfo {
+func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int64, driverNames ...string) *schedulernodeinfo.NodeInfo {
 	nodeInfo := schedulernodeinfo.NewNodeInfo(pods...)
-	node := &v1.Node{
-		ObjectMeta: metav1.ObjectMeta{Name: "node-for-max-pd-test-1"},
-		Status: v1.NodeStatus{
-			Allocatable: v1.ResourceList{},
-		},
-	}
-	for _, driver := range driverNames {
-		node.Status.Allocatable[getVolumeLimitKey(driver)] = *resource.NewQuantity(limit, resource.DecimalSI)
-	}
+	addLimitToNode := func() {
+		node := &v1.Node{
+			ObjectMeta: metav1.ObjectMeta{Name: "node-for-max-pd-test-1"},
+			Status: v1.NodeStatus{
+				Allocatable: v1.ResourceList{},
+			},
+		}
+		for _, driver := range driverNames {
+			node.Status.Allocatable[getVolumeLimitKey(driver)] = *resource.NewQuantity(limit, resource.DecimalSI)
+		}
+		nodeInfo.SetNode(node)
+	}
+
+	createCSINode := func() *v1beta1.CSINode {
+		return &v1beta1.CSINode{
+			ObjectMeta: metav1.ObjectMeta{Name: "csi-node-for-max-pd-test-1"},
+			Spec: v1beta1.CSINodeSpec{
+				Drivers: []v1beta1.CSINodeDriver{},
+			},
+		}
+	}
+
+	addLimitToCSINode := func(addLimits bool) {
+		csiNode := createCSINode()
+		for _, driver := range driverNames {
+			driver := v1beta1.CSINodeDriver{
+				Name:   driver,
+				NodeID: "node-for-max-pd-test-1",
+			}
+			if addLimits {
+				driver.Allocatable = &v1beta1.VolumeNodeResources{
+					Count: utilpointer.Int32Ptr(int32(limit)),
+				}
+			}
+			csiNode.Spec.Drivers = append(csiNode.Spec.Drivers, driver)
+		}
+
+		nodeInfo.SetCSINode(csiNode)
+	}
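+
+	// limitSource controls where the volume limit lives: "node" uses
+	// Node.Status.Allocatable, "csinode" puts it on the CSINode driver's
+	// Allocatable count, "both" sets both, "csinode-with-no-limit" registers
+	// drivers without limits, and "no-csi-driver" installs an empty CSINode.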
+	switch limitSource {
+	case "node":
+		addLimitToNode()
+	case "csinode":
+		addLimitToCSINode(true)
+	case "both":
+		addLimitToNode()
+		addLimitToCSINode(true)
+	case "csinode-with-no-limit":
+		addLimitToCSINode(false)
+	case "no-csi-driver":
+		csiNode := createCSINode()
+		nodeInfo.SetCSINode(csiNode)
+	default:
+		return nodeInfo
+	}
-	nodeInfo.SetNode(node)
 	return nodeInfo
 }