From 320ac4e27705601591929a28d71d9ed08f20b8e5 Mon Sep 17 00:00:00 2001 From: draveness Date: Fri, 27 Dec 2019 16:53:28 +0800 Subject: [PATCH] feat(scheduling): implement azure, cinder, ebs and gce as filter plugin --- pkg/scheduler/algorithm/predicates/BUILD | 12 - .../algorithm/predicates/predicates.go | 414 ----- .../algorithm/predicates/predicates_test.go | 42 - pkg/scheduler/algorithm/predicates/utils.go | 60 - .../framework/plugins/nodevolumelimits/BUILD | 12 +- .../plugins/nodevolumelimits/azure.go | 62 - .../plugins/nodevolumelimits/azure_test.go | 367 ---- .../plugins/nodevolumelimits/cinder.go | 61 - .../plugins/nodevolumelimits/cinder_test.go | 96 -- .../plugins/nodevolumelimits/csi_test.go | 8 +- .../nodevolumelimits/csinode_helper.go | 32 - .../framework/plugins/nodevolumelimits/ebs.go | 62 - .../plugins/nodevolumelimits/ebs_test.go | 561 ------ .../framework/plugins/nodevolumelimits/gce.go | 62 - .../plugins/nodevolumelimits/gce_test.go | 367 ---- .../plugins/nodevolumelimits/non_csi.go | 520 ++++++ .../plugins/nodevolumelimits/non_csi_test.go} | 1522 ++++++++++------- 17 files changed, 1387 insertions(+), 2873 deletions(-) delete mode 100644 pkg/scheduler/framework/plugins/nodevolumelimits/azure.go delete mode 100644 pkg/scheduler/framework/plugins/nodevolumelimits/azure_test.go delete mode 100644 pkg/scheduler/framework/plugins/nodevolumelimits/cinder.go delete mode 100644 pkg/scheduler/framework/plugins/nodevolumelimits/cinder_test.go delete mode 100644 pkg/scheduler/framework/plugins/nodevolumelimits/csinode_helper.go delete mode 100644 pkg/scheduler/framework/plugins/nodevolumelimits/ebs.go delete mode 100644 pkg/scheduler/framework/plugins/nodevolumelimits/ebs_test.go delete mode 100644 pkg/scheduler/framework/plugins/nodevolumelimits/gce.go delete mode 100644 pkg/scheduler/framework/plugins/nodevolumelimits/gce_test.go create mode 100644 pkg/scheduler/framework/plugins/nodevolumelimits/non_csi.go rename pkg/scheduler/{algorithm/predicates/max_attachable_volume_predicate_test.go => framework/plugins/nodevolumelimits/non_csi_test.go} (52%) diff --git a/pkg/scheduler/algorithm/predicates/BUILD b/pkg/scheduler/algorithm/predicates/BUILD index 7311e96a56d..8bc39153544 100644 --- a/pkg/scheduler/algorithm/predicates/BUILD +++ b/pkg/scheduler/algorithm/predicates/BUILD @@ -21,17 +21,11 @@ go_library( "//pkg/scheduler/algorithm:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", "//pkg/scheduler/util:go_default_library", - "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/api/storage/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", - "//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library", - "//staging/src/k8s.io/csi-translation-lib/plugins:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], ) @@ -39,7 +33,6 @@ go_library( go_test( name = "go_default_test", srcs = [ - "max_attachable_volume_predicate_test.go", "predicates_test.go", "utils_test.go", ], @@ -48,19 +41,14 @@ go_test( "//pkg/apis/core:go_default_library", "//pkg/apis/core/v1/helper:go_default_library", 
"//pkg/features:go_default_library", - "//pkg/scheduler/listers/fake:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", - "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/api/storage/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library", - "//staging/src/k8s.io/csi-translation-lib/plugins:go_default_library", - "//vendor/k8s.io/utils/pointer:go_default_library", ], ) diff --git a/pkg/scheduler/algorithm/predicates/predicates.go b/pkg/scheduler/algorithm/predicates/predicates.go index ffcc517d0bc..902f0644f5a 100644 --- a/pkg/scheduler/algorithm/predicates/predicates.go +++ b/pkg/scheduler/algorithm/predicates/predicates.go @@ -18,28 +18,19 @@ package predicates import ( "fmt" - "os" - "regexp" - "strconv" "k8s.io/klog" v1 "k8s.io/api/core/v1" - storage "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/util/rand" "k8s.io/apimachinery/pkg/util/sets" utilfeature "k8s.io/apiserver/pkg/util/feature" - corelisters "k8s.io/client-go/listers/core/v1" - storagelisters "k8s.io/client-go/listers/storage/v1" - csilibplugins "k8s.io/csi-translation-lib/plugins" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/scheduler/algorithm" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedutil "k8s.io/kubernetes/pkg/scheduler/util" - volumeutil "k8s.io/kubernetes/pkg/volume/util" ) const ( @@ -92,25 +83,8 @@ const ( // EvenPodsSpreadPred defines the name of predicate EvenPodsSpread. EvenPodsSpreadPred = "EvenPodsSpread" - // DefaultMaxGCEPDVolumes defines the maximum number of PD Volumes for GCE. - // GCE instances can have up to 16 PD volumes attached. - DefaultMaxGCEPDVolumes = 16 - // DefaultMaxAzureDiskVolumes defines the maximum number of PD Volumes for Azure. - // Larger Azure VMs can actually have much more disks attached. - // TODO We should determine the max based on VM size - DefaultMaxAzureDiskVolumes = 16 - // KubeMaxPDVols defines the maximum number of PD Volumes per kubelet. KubeMaxPDVols = "KUBE_MAX_PD_VOLS" - - // EBSVolumeFilterType defines the filter name for EBSVolumeFilter. - EBSVolumeFilterType = "EBS" - // GCEPDVolumeFilterType defines the filter name for GCEPDVolumeFilter. - GCEPDVolumeFilterType = "GCE" - // AzureDiskVolumeFilterType defines the filter name for AzureDiskVolumeFilter. - AzureDiskVolumeFilterType = "AzureDisk" - // CinderVolumeFilterType defines the filter name for CinderVolumeFilter. - CinderVolumeFilterType = "Cinder" ) // IMPORTANT NOTE for predicate developers: @@ -145,394 +119,6 @@ func Ordering() []string { // The failure information is given by the error. type FitPredicate func(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) -// MaxPDVolumeCountChecker contains information to check the max number of volumes for a predicate. 
-type MaxPDVolumeCountChecker struct { - filter VolumeFilter - volumeLimitKey v1.ResourceName - maxVolumeFunc func(node *v1.Node) int - csiNodeLister storagelisters.CSINodeLister - pvLister corelisters.PersistentVolumeLister - pvcLister corelisters.PersistentVolumeClaimLister - scLister storagelisters.StorageClassLister - - // The string below is generated randomly during the struct's initialization. - // It is used to prefix volumeID generated inside the predicate() method to - // avoid conflicts with any real volume. - randomVolumeIDPrefix string -} - -// VolumeFilter contains information on how to filter PD Volumes when checking PD Volume caps. -type VolumeFilter struct { - // Filter normal volumes - FilterVolume func(vol *v1.Volume) (id string, relevant bool) - FilterPersistentVolume func(pv *v1.PersistentVolume) (id string, relevant bool) - // MatchProvisioner evaluates if the StorageClass provisioner matches the running predicate - MatchProvisioner func(sc *storage.StorageClass) (relevant bool) - // IsMigrated returns a boolean specifying whether the plugin is migrated to a CSI driver - IsMigrated func(csiNode *storage.CSINode) bool -} - -// NewMaxPDVolumeCountPredicate creates a predicate which evaluates whether a pod can fit based on the -// number of volumes which match a filter that it requests, and those that are already present. -// -// DEPRECATED -// All cloudprovider specific predicates defined here are deprecated in favour of CSI volume limit -// predicate - MaxCSIVolumeCountPred. -// -// The predicate looks for both volumes used directly, as well as PVC volumes that are backed by relevant volume -// types, counts the number of unique volumes, and rejects the new pod if it would place the total count over -// the maximum. -func NewMaxPDVolumeCountPredicate(filterName string, csiNodeLister storagelisters.CSINodeLister, scLister storagelisters.StorageClassLister, - pvLister corelisters.PersistentVolumeLister, pvcLister corelisters.PersistentVolumeClaimLister) FitPredicate { - var filter VolumeFilter - var volumeLimitKey v1.ResourceName - - switch filterName { - - case EBSVolumeFilterType: - filter = EBSVolumeFilter - volumeLimitKey = v1.ResourceName(volumeutil.EBSVolumeLimitKey) - case GCEPDVolumeFilterType: - filter = GCEPDVolumeFilter - volumeLimitKey = v1.ResourceName(volumeutil.GCEVolumeLimitKey) - case AzureDiskVolumeFilterType: - filter = AzureDiskVolumeFilter - volumeLimitKey = v1.ResourceName(volumeutil.AzureVolumeLimitKey) - case CinderVolumeFilterType: - filter = CinderVolumeFilter - volumeLimitKey = v1.ResourceName(volumeutil.CinderVolumeLimitKey) - default: - klog.Fatalf("Wrong filterName, Only Support %v %v %v ", EBSVolumeFilterType, - GCEPDVolumeFilterType, AzureDiskVolumeFilterType) - return nil - - } - c := &MaxPDVolumeCountChecker{ - filter: filter, - volumeLimitKey: volumeLimitKey, - maxVolumeFunc: getMaxVolumeFunc(filterName), - csiNodeLister: csiNodeLister, - pvLister: pvLister, - pvcLister: pvcLister, - scLister: scLister, - randomVolumeIDPrefix: rand.String(32), - } - - return c.predicate -} - -func getMaxVolumeFunc(filterName string) func(node *v1.Node) int { - return func(node *v1.Node) int { - maxVolumesFromEnv := getMaxVolLimitFromEnv() - if maxVolumesFromEnv > 0 { - return maxVolumesFromEnv - } - - var nodeInstanceType string - for k, v := range node.ObjectMeta.Labels { - if k == v1.LabelInstanceType || k == v1.LabelInstanceTypeStable { - nodeInstanceType = v - break - } - } - switch filterName { - case EBSVolumeFilterType: - return 
getMaxEBSVolume(nodeInstanceType) - case GCEPDVolumeFilterType: - return DefaultMaxGCEPDVolumes - case AzureDiskVolumeFilterType: - return DefaultMaxAzureDiskVolumes - case CinderVolumeFilterType: - return volumeutil.DefaultMaxCinderVolumes - default: - return -1 - } - } -} - -func getMaxEBSVolume(nodeInstanceType string) int { - if ok, _ := regexp.MatchString(volumeutil.EBSNitroLimitRegex, nodeInstanceType); ok { - return volumeutil.DefaultMaxEBSNitroVolumeLimit - } - return volumeutil.DefaultMaxEBSVolumes -} - -// getMaxVolLimitFromEnv checks the max PD volumes environment variable, otherwise returning a default value. -func getMaxVolLimitFromEnv() int { - if rawMaxVols := os.Getenv(KubeMaxPDVols); rawMaxVols != "" { - if parsedMaxVols, err := strconv.Atoi(rawMaxVols); err != nil { - klog.Errorf("Unable to parse maximum PD volumes value, using default: %v", err) - } else if parsedMaxVols <= 0 { - klog.Errorf("Maximum PD volumes must be a positive value, using default") - } else { - return parsedMaxVols - } - } - - return -1 -} - -func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace string, filteredVolumes map[string]bool) error { - for i := range volumes { - vol := &volumes[i] - if id, ok := c.filter.FilterVolume(vol); ok { - filteredVolumes[id] = true - } else if vol.PersistentVolumeClaim != nil { - pvcName := vol.PersistentVolumeClaim.ClaimName - if pvcName == "" { - return fmt.Errorf("PersistentVolumeClaim had no name") - } - - // Until we know real ID of the volume use namespace/pvcName as substitute - // with a random prefix (calculated and stored inside 'c' during initialization) - // to avoid conflicts with existing volume IDs. - pvID := fmt.Sprintf("%s-%s/%s", c.randomVolumeIDPrefix, namespace, pvcName) - - pvc, err := c.pvcLister.PersistentVolumeClaims(namespace).Get(pvcName) - if err != nil || pvc == nil { - // If the PVC is invalid, we don't count the volume because - // there's no guarantee that it belongs to the running predicate. - klog.V(4).Infof("Unable to look up PVC info for %s/%s, assuming PVC doesn't match predicate when counting limits: %v", namespace, pvcName, err) - continue - } - - pvName := pvc.Spec.VolumeName - if pvName == "" { - // PVC is not bound. It was either deleted and created again or - // it was forcefully unbound by admin. The pod can still use the - // original PV where it was bound to, so we count the volume if - // it belongs to the running predicate. - if c.matchProvisioner(pvc) { - klog.V(4).Infof("PVC %s/%s is not bound, assuming PVC matches predicate when counting limits", namespace, pvcName) - filteredVolumes[pvID] = true - } - continue - } - - pv, err := c.pvLister.Get(pvName) - if err != nil || pv == nil { - // If the PV is invalid and PVC belongs to the running predicate, - // log the error and count the PV towards the PV limit. - if c.matchProvisioner(pvc) { - klog.V(4).Infof("Unable to look up PV info for %s/%s/%s, assuming PV matches predicate when counting limits: %v", namespace, pvcName, pvName, err) - filteredVolumes[pvID] = true - } - continue - } - - if id, ok := c.filter.FilterPersistentVolume(pv); ok { - filteredVolumes[id] = true - } - } - } - - return nil -} - -// matchProvisioner helps identify if the given PVC belongs to the running predicate. 
-func (c *MaxPDVolumeCountChecker) matchProvisioner(pvc *v1.PersistentVolumeClaim) bool { - if pvc.Spec.StorageClassName == nil { - return false - } - - storageClass, err := c.scLister.Get(*pvc.Spec.StorageClassName) - if err != nil || storageClass == nil { - return false - } - - return c.filter.MatchProvisioner(storageClass) -} - -func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) { - // If a pod doesn't have any volume attached to it, the predicate will always be true. - // Thus we make a fast path for it, to avoid unnecessary computations in this case. - if len(pod.Spec.Volumes) == 0 { - return true, nil, nil - } - - newVolumes := make(map[string]bool) - if err := c.filterVolumes(pod.Spec.Volumes, pod.Namespace, newVolumes); err != nil { - return false, nil, err - } - - // quick return - if len(newVolumes) == 0 { - return true, nil, nil - } - - node := nodeInfo.Node() - if node == nil { - return false, nil, fmt.Errorf("node not found") - } - - var ( - csiNode *storage.CSINode - err error - ) - if c.csiNodeLister != nil { - csiNode, err = c.csiNodeLister.Get(node.Name) - if err != nil { - // we don't fail here because the CSINode object is only necessary - // for determining whether the migration is enabled or not - klog.V(5).Infof("Could not get a CSINode object for the node: %v", err) - } - } - - // if a plugin has been migrated to a CSI driver, defer to the CSI predicate - if c.filter.IsMigrated(csiNode) { - return true, nil, nil - } - - // count unique volumes - existingVolumes := make(map[string]bool) - for _, existingPod := range nodeInfo.Pods() { - if err := c.filterVolumes(existingPod.Spec.Volumes, existingPod.Namespace, existingVolumes); err != nil { - return false, nil, err - } - } - numExistingVolumes := len(existingVolumes) - - // filter out already-mounted volumes - for k := range existingVolumes { - if _, ok := newVolumes[k]; ok { - delete(newVolumes, k) - } - } - - numNewVolumes := len(newVolumes) - maxAttachLimit := c.maxVolumeFunc(node) - - volumeLimits := nodeInfo.VolumeLimits() - if maxAttachLimitFromAllocatable, ok := volumeLimits[c.volumeLimitKey]; ok { - maxAttachLimit = int(maxAttachLimitFromAllocatable) - } - - if numExistingVolumes+numNewVolumes > maxAttachLimit { - // violates MaxEBSVolumeCount or MaxGCEPDVolumeCount - return false, []PredicateFailureReason{ErrMaxVolumeCountExceeded}, nil - } - if nodeInfo != nil && nodeInfo.TransientInfo != nil && utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) { - nodeInfo.TransientInfo.TransientLock.Lock() - defer nodeInfo.TransientInfo.TransientLock.Unlock() - nodeInfo.TransientInfo.TransNodeInfo.AllocatableVolumesCount = maxAttachLimit - numExistingVolumes - nodeInfo.TransientInfo.TransNodeInfo.RequestedVolumes = numNewVolumes - } - return true, nil, nil -} - -// EBSVolumeFilter is a VolumeFilter for filtering AWS ElasticBlockStore Volumes. 
-var EBSVolumeFilter = VolumeFilter{ - FilterVolume: func(vol *v1.Volume) (string, bool) { - if vol.AWSElasticBlockStore != nil { - return vol.AWSElasticBlockStore.VolumeID, true - } - return "", false - }, - - FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) { - if pv.Spec.AWSElasticBlockStore != nil { - return pv.Spec.AWSElasticBlockStore.VolumeID, true - } - return "", false - }, - - MatchProvisioner: func(sc *storage.StorageClass) (relevant bool) { - if sc.Provisioner == csilibplugins.AWSEBSInTreePluginName { - return true - } - return false - }, - - IsMigrated: func(csiNode *storage.CSINode) bool { - return isCSIMigrationOn(csiNode, csilibplugins.AWSEBSInTreePluginName) - }, -} - -// GCEPDVolumeFilter is a VolumeFilter for filtering GCE PersistentDisk Volumes. -var GCEPDVolumeFilter = VolumeFilter{ - FilterVolume: func(vol *v1.Volume) (string, bool) { - if vol.GCEPersistentDisk != nil { - return vol.GCEPersistentDisk.PDName, true - } - return "", false - }, - - FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) { - if pv.Spec.GCEPersistentDisk != nil { - return pv.Spec.GCEPersistentDisk.PDName, true - } - return "", false - }, - - MatchProvisioner: func(sc *storage.StorageClass) (relevant bool) { - if sc.Provisioner == csilibplugins.GCEPDInTreePluginName { - return true - } - return false - }, - - IsMigrated: func(csiNode *storage.CSINode) bool { - return isCSIMigrationOn(csiNode, csilibplugins.GCEPDInTreePluginName) - }, -} - -// AzureDiskVolumeFilter is a VolumeFilter for filtering Azure Disk Volumes. -var AzureDiskVolumeFilter = VolumeFilter{ - FilterVolume: func(vol *v1.Volume) (string, bool) { - if vol.AzureDisk != nil { - return vol.AzureDisk.DiskName, true - } - return "", false - }, - - FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) { - if pv.Spec.AzureDisk != nil { - return pv.Spec.AzureDisk.DiskName, true - } - return "", false - }, - - MatchProvisioner: func(sc *storage.StorageClass) (relevant bool) { - if sc.Provisioner == csilibplugins.AzureDiskInTreePluginName { - return true - } - return false - }, - - IsMigrated: func(csiNode *storage.CSINode) bool { - return isCSIMigrationOn(csiNode, csilibplugins.AzureDiskInTreePluginName) - }, -} - -// CinderVolumeFilter is a VolumeFilter for filtering Cinder Volumes. -// It will be deprecated once Openstack cloudprovider has been removed from in-tree. -var CinderVolumeFilter = VolumeFilter{ - FilterVolume: func(vol *v1.Volume) (string, bool) { - if vol.Cinder != nil { - return vol.Cinder.VolumeID, true - } - return "", false - }, - - FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) { - if pv.Spec.Cinder != nil { - return pv.Spec.Cinder.VolumeID, true - } - return "", false - }, - - MatchProvisioner: func(sc *storage.StorageClass) (relevant bool) { - if sc.Provisioner == csilibplugins.CinderInTreePluginName { - return true - } - return false - }, - - IsMigrated: func(csiNode *storage.CSINode) bool { - return isCSIMigrationOn(csiNode, csilibplugins.CinderInTreePluginName) - }, -} - // GetResourceRequest returns a *schedulernodeinfo.Resource that covers the largest // width in each resource dimension. Because init-containers run sequentially, we collect // the max in each dimension iteratively. 
In contrast, we sum the resource vectors for diff --git a/pkg/scheduler/algorithm/predicates/predicates_test.go b/pkg/scheduler/algorithm/predicates/predicates_test.go index 0c0c8917818..1b4185d6326 100644 --- a/pkg/scheduler/algorithm/predicates/predicates_test.go +++ b/pkg/scheduler/algorithm/predicates/predicates_test.go @@ -17,7 +17,6 @@ limitations under the License. package predicates import ( - "os" "reflect" "strconv" "strings" @@ -1719,44 +1718,3 @@ func TestPodToleratesTaints(t *testing.T) { }) } } - -func TestGetMaxVols(t *testing.T) { - previousValue := os.Getenv(KubeMaxPDVols) - - tests := []struct { - rawMaxVols string - expected int - name string - }{ - { - rawMaxVols: "invalid", - expected: -1, - name: "Unable to parse maximum PD volumes value, using default value", - }, - { - rawMaxVols: "-2", - expected: -1, - name: "Maximum PD volumes must be a positive value, using default value", - }, - { - rawMaxVols: "40", - expected: 40, - name: "Parse maximum PD volumes value from env", - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - os.Setenv(KubeMaxPDVols, test.rawMaxVols) - result := getMaxVolLimitFromEnv() - if result != test.expected { - t.Errorf("expected %v got %v", test.expected, result) - } - }) - } - - os.Unsetenv(KubeMaxPDVols) - if previousValue != "" { - os.Setenv(KubeMaxPDVols, previousValue) - } -} diff --git a/pkg/scheduler/algorithm/predicates/utils.go b/pkg/scheduler/algorithm/predicates/utils.go index d74a4a106d4..7ff192ee388 100644 --- a/pkg/scheduler/algorithm/predicates/utils.go +++ b/pkg/scheduler/algorithm/predicates/utils.go @@ -17,15 +17,8 @@ limitations under the License. package predicates import ( - "strings" - v1 "k8s.io/api/core/v1" - storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/util/sets" - utilfeature "k8s.io/apiserver/pkg/util/feature" - csilibplugins "k8s.io/csi-translation-lib/plugins" - "k8s.io/kubernetes/pkg/features" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) @@ -94,56 +87,3 @@ func SetPredicatesOrderingDuringTest(value []string) func() { predicatesOrdering = origVal } } - -// isCSIMigrationOn returns a boolean value indicating whether -// the CSI migration has been enabled for a particular storage plugin. -func isCSIMigrationOn(csiNode *storagev1.CSINode, pluginName string) bool { - if csiNode == nil || len(pluginName) == 0 { - return false - } - - // In-tree storage to CSI driver migration feature should be enabled, - // along with the plugin-specific one - if !utilfeature.DefaultFeatureGate.Enabled(features.CSIMigration) { - return false - } - - switch pluginName { - case csilibplugins.AWSEBSInTreePluginName: - if !utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationAWS) { - return false - } - case csilibplugins.GCEPDInTreePluginName: - if !utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationGCE) { - return false - } - case csilibplugins.AzureDiskInTreePluginName: - if !utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationAzureDisk) { - return false - } - case csilibplugins.CinderInTreePluginName: - if !utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationOpenStack) { - return false - } - default: - return false - } - - // The plugin name should be listed in the CSINode object annotation. - // This indicates that the plugin has been migrated to a CSI driver in the node. 
- csiNodeAnn := csiNode.GetAnnotations() - if csiNodeAnn == nil { - return false - } - - var mpaSet sets.String - mpa := csiNodeAnn[v1.MigratedPluginsAnnotationKey] - if len(mpa) == 0 { - mpaSet = sets.NewString() - } else { - tok := strings.Split(mpa, ",") - mpaSet = sets.NewString(tok...) - } - - return mpaSet.Has(pluginName) -} diff --git a/pkg/scheduler/framework/plugins/nodevolumelimits/BUILD b/pkg/scheduler/framework/plugins/nodevolumelimits/BUILD index 9cfa99ef33c..442032a6b80 100644 --- a/pkg/scheduler/framework/plugins/nodevolumelimits/BUILD +++ b/pkg/scheduler/framework/plugins/nodevolumelimits/BUILD @@ -3,12 +3,8 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = [ - "azure.go", - "cinder.go", "csi.go", - "csinode_helper.go", - "ebs.go", - "gce.go", + "non_csi.go", "utils.go", ], importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits", @@ -17,7 +13,6 @@ go_library( "//pkg/apis/core/v1/helper:go_default_library", "//pkg/features:go_default_library", "//pkg/scheduler/algorithm/predicates:go_default_library", - "//pkg/scheduler/framework/plugins/migration:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", "//pkg/volume/util:go_default_library", @@ -39,11 +34,8 @@ go_library( go_test( name = "go_default_test", srcs = [ - "azure_test.go", - "cinder_test.go", "csi_test.go", - "ebs_test.go", - "gce_test.go", + "non_csi_test.go", ], embed = [":go_default_library"], deps = [ diff --git a/pkg/scheduler/framework/plugins/nodevolumelimits/azure.go b/pkg/scheduler/framework/plugins/nodevolumelimits/azure.go deleted file mode 100644 index 2417bc7e0b1..00000000000 --- a/pkg/scheduler/framework/plugins/nodevolumelimits/azure.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nodevolumelimits - -import ( - "context" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration" - framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - "k8s.io/kubernetes/pkg/scheduler/nodeinfo" -) - -// AzureDiskLimits is a plugin that checks node volume limits. -type AzureDiskLimits struct { - predicate predicates.FitPredicate -} - -var _ framework.FilterPlugin = &AzureDiskLimits{} - -// AzureDiskName is the name of the plugin used in the plugin registry and configurations. -const AzureDiskName = "AzureDiskLimits" - -// Name returns name of the plugin. It is used in logs, etc. -func (pl *AzureDiskLimits) Name() string { - return AzureDiskName -} - -// Filter invoked at the filter extension point. 
-func (pl *AzureDiskLimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status { - // metadata is not needed - _, reasons, err := pl.predicate(pod, nil, nodeInfo) - return migration.PredicateResultToFrameworkStatus(reasons, err) -} - -// NewAzureDisk returns function that initializes a new plugin and returns it. -func NewAzureDisk(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin, error) { - informerFactory := handle.SharedInformerFactory() - pvLister := informerFactory.Core().V1().PersistentVolumes().Lister() - pvcLister := informerFactory.Core().V1().PersistentVolumeClaims().Lister() - scLister := informerFactory.Storage().V1().StorageClasses().Lister() - - return &AzureDiskLimits{ - predicate: predicates.NewMaxPDVolumeCountPredicate(predicates.AzureDiskVolumeFilterType, getCSINodeListerIfEnabled(informerFactory), scLister, pvLister, pvcLister), - }, nil -} diff --git a/pkg/scheduler/framework/plugins/nodevolumelimits/azure_test.go b/pkg/scheduler/framework/plugins/nodevolumelimits/azure_test.go deleted file mode 100644 index 575b9d07f35..00000000000 --- a/pkg/scheduler/framework/plugins/nodevolumelimits/azure_test.go +++ /dev/null @@ -1,367 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package nodevolumelimits - -import ( - "context" - "reflect" - "testing" - - v1 "k8s.io/api/core/v1" - "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" - framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" -) - -func TestAzureDiskLimits(t *testing.T) { - oneVolPod := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "ovp"}, - }, - }, - }, - }, - } - twoVolPod := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp1"}, - }, - }, - { - VolumeSource: v1.VolumeSource{ - AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp2"}, - }, - }, - }, - }, - } - splitVolsPod := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - HostPath: &v1.HostPathVolumeSource{}, - }, - }, - { - VolumeSource: v1.VolumeSource{ - AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "svp"}, - }, - }, - }, - }, - } - nonApplicablePod := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - HostPath: &v1.HostPathVolumeSource{}, - }, - }, - }, - }, - } - deletedPVCPod := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "deletedPVC", - }, - }, - }, - }, - }, - } - twoDeletedPVCPod := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "deletedPVC", - }, - }, - }, - { - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "anotherDeletedPVC", - }, - }, - }, - }, - }, - } - deletedPVPod := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "pvcWithDeletedPV", - }, - }, - }, - }, - }, - } - // deletedPVPod2 is a different pod than deletedPVPod but using the same PVC - deletedPVPod2 := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "pvcWithDeletedPV", - }, - }, - }, - }, - }, - } - // anotherDeletedPVPod is a different pod than deletedPVPod and uses another PVC - anotherDeletedPVPod := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "anotherPVCWithDeletedPV", - }, - }, - }, - }, - }, - } - emptyPod := &v1.Pod{ - Spec: v1.PodSpec{}, - } - unboundPVCPod := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "unboundPVC", - }, - }, - }, - }, - }, - } - // Different pod than unboundPVCPod, but using the same unbound PVC - unboundPVCPod2 := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "unboundPVC", - }, - }, - }, - }, - }, - } - - // pod with unbound PVC that's different to unboundPVC - anotherUnboundPVCPod := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - 
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "anotherUnboundPVC", - }, - }, - }, - }, - }, - } - - tests := []struct { - newPod *v1.Pod - existingPods []*v1.Pod - filterName string - driverName string - maxVols int - test string - wantStatus *framework.Status - }{ - { - newPod: oneVolPod, - existingPods: []*v1.Pod{twoVolPod, oneVolPod}, - filterName: predicates.AzureDiskVolumeFilterType, - maxVols: 4, - test: "fits when node capacity >= new pod's AzureDisk volumes", - }, - { - newPod: twoVolPod, - existingPods: []*v1.Pod{oneVolPod}, - filterName: predicates.AzureDiskVolumeFilterType, - maxVols: 2, - test: "fit when node capacity < new pod's AzureDisk volumes", - }, - { - newPod: splitVolsPod, - existingPods: []*v1.Pod{twoVolPod}, - filterName: predicates.AzureDiskVolumeFilterType, - maxVols: 3, - test: "new pod's count ignores non-AzureDisk volumes", - }, - { - newPod: twoVolPod, - existingPods: []*v1.Pod{splitVolsPod, nonApplicablePod, emptyPod}, - filterName: predicates.AzureDiskVolumeFilterType, - maxVols: 3, - test: "existing pods' counts ignore non-AzureDisk volumes", - }, - { - newPod: onePVCPod(predicates.AzureDiskVolumeFilterType), - existingPods: []*v1.Pod{splitVolsPod, nonApplicablePod, emptyPod}, - filterName: predicates.AzureDiskVolumeFilterType, - maxVols: 3, - test: "new pod's count considers PVCs backed by AzureDisk volumes", - }, - { - newPod: splitPVCPod(predicates.AzureDiskVolumeFilterType), - existingPods: []*v1.Pod{splitVolsPod, oneVolPod}, - filterName: predicates.AzureDiskVolumeFilterType, - maxVols: 3, - test: "new pod's count ignores PVCs not backed by AzureDisk volumes", - }, - { - newPod: twoVolPod, - existingPods: []*v1.Pod{oneVolPod, onePVCPod(predicates.AzureDiskVolumeFilterType)}, - filterName: predicates.AzureDiskVolumeFilterType, - maxVols: 3, - test: "existing pods' counts considers PVCs backed by AzureDisk volumes", - }, - { - newPod: twoVolPod, - existingPods: []*v1.Pod{oneVolPod, twoVolPod, onePVCPod(predicates.AzureDiskVolumeFilterType)}, - filterName: predicates.AzureDiskVolumeFilterType, - maxVols: 4, - test: "already-mounted AzureDisk volumes are always ok to allow", - }, - { - newPod: splitVolsPod, - existingPods: []*v1.Pod{oneVolPod, oneVolPod, onePVCPod(predicates.AzureDiskVolumeFilterType)}, - filterName: predicates.AzureDiskVolumeFilterType, - maxVols: 3, - test: "the same AzureDisk volumes are not counted multiple times", - }, - { - newPod: onePVCPod(predicates.AzureDiskVolumeFilterType), - existingPods: []*v1.Pod{oneVolPod, deletedPVCPod}, - filterName: predicates.AzureDiskVolumeFilterType, - maxVols: 2, - test: "pod with missing PVC is counted towards the PV limit", - }, - { - newPod: onePVCPod(predicates.AzureDiskVolumeFilterType), - existingPods: []*v1.Pod{oneVolPod, deletedPVCPod}, - filterName: predicates.AzureDiskVolumeFilterType, - maxVols: 3, - test: "pod with missing PVC is counted towards the PV limit", - }, - { - newPod: onePVCPod(predicates.AzureDiskVolumeFilterType), - existingPods: []*v1.Pod{oneVolPod, twoDeletedPVCPod}, - filterName: predicates.AzureDiskVolumeFilterType, - maxVols: 3, - test: "pod with missing two PVCs is counted towards the PV limit twice", - }, - { - newPod: onePVCPod(predicates.AzureDiskVolumeFilterType), - existingPods: []*v1.Pod{oneVolPod, deletedPVPod}, - filterName: predicates.AzureDiskVolumeFilterType, - maxVols: 2, - test: "pod with missing PV is counted towards the PV limit", - }, - { - newPod: onePVCPod(predicates.AzureDiskVolumeFilterType), - existingPods: 
[]*v1.Pod{oneVolPod, deletedPVPod}, - filterName: predicates.AzureDiskVolumeFilterType, - maxVols: 3, - test: "pod with missing PV is counted towards the PV limit", - }, - { - newPod: deletedPVPod2, - existingPods: []*v1.Pod{oneVolPod, deletedPVPod}, - filterName: predicates.AzureDiskVolumeFilterType, - maxVols: 2, - test: "two pods missing the same PV are counted towards the PV limit only once", - }, - { - newPod: anotherDeletedPVPod, - existingPods: []*v1.Pod{oneVolPod, deletedPVPod}, - filterName: predicates.AzureDiskVolumeFilterType, - maxVols: 2, - test: "two pods missing different PVs are counted towards the PV limit twice", - }, - { - newPod: onePVCPod(predicates.AzureDiskVolumeFilterType), - existingPods: []*v1.Pod{oneVolPod, unboundPVCPod}, - filterName: predicates.AzureDiskVolumeFilterType, - maxVols: 2, - test: "pod with unbound PVC is counted towards the PV limit", - }, - { - newPod: onePVCPod(predicates.AzureDiskVolumeFilterType), - existingPods: []*v1.Pod{oneVolPod, unboundPVCPod}, - filterName: predicates.AzureDiskVolumeFilterType, - maxVols: 3, - test: "pod with unbound PVC is counted towards the PV limit", - }, - { - newPod: unboundPVCPod2, - existingPods: []*v1.Pod{oneVolPod, unboundPVCPod}, - filterName: predicates.AzureDiskVolumeFilterType, - maxVols: 2, - test: "the same unbound PVC in multiple pods is counted towards the PV limit only once", - }, - { - newPod: anotherUnboundPVCPod, - existingPods: []*v1.Pod{oneVolPod, unboundPVCPod}, - filterName: predicates.AzureDiskVolumeFilterType, - maxVols: 2, - test: "two different unbound PVCs are counted towards the PV limit as two volumes", - }, - } - - for _, test := range tests { - t.Run(test.test, func(t *testing.T) { - node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName) - p := &AzureDiskLimits{ - predicate: predicates.NewMaxPDVolumeCountPredicate(test.filterName, getFakeCSINodeLister(csiNode), getFakeCSIStorageClassLister(test.filterName, test.driverName), getFakePVLister(test.filterName), getFakePVCLister(test.filterName)), - } - gotStatus := p.Filter(context.Background(), nil, test.newPod, node) - if !reflect.DeepEqual(gotStatus, test.wantStatus) { - t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus) - } - }) - } -} diff --git a/pkg/scheduler/framework/plugins/nodevolumelimits/cinder.go b/pkg/scheduler/framework/plugins/nodevolumelimits/cinder.go deleted file mode 100644 index 6b584ad8046..00000000000 --- a/pkg/scheduler/framework/plugins/nodevolumelimits/cinder.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package nodevolumelimits - -import ( - "context" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration" - framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - "k8s.io/kubernetes/pkg/scheduler/nodeinfo" -) - -// CinderLimits is a plugin that checks node volume limits. -type CinderLimits struct { - predicate predicates.FitPredicate -} - -var _ framework.FilterPlugin = &CinderLimits{} - -// CinderName is the name of the plugin used in the plugin registry and configurations. -const CinderName = "CinderLimits" - -// Name returns name of the plugin. It is used in logs, etc. -func (pl *CinderLimits) Name() string { - return CinderName -} - -// Filter invoked at the filter extension point. -func (pl *CinderLimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status { - // metadata is not needed - _, reasons, err := pl.predicate(pod, nil, nodeInfo) - return migration.PredicateResultToFrameworkStatus(reasons, err) -} - -// NewCinder returns function that initializes a new plugin and returns it. -func NewCinder(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin, error) { - informerFactory := handle.SharedInformerFactory() - pvLister := informerFactory.Core().V1().PersistentVolumes().Lister() - pvcLister := informerFactory.Core().V1().PersistentVolumeClaims().Lister() - scLister := informerFactory.Storage().V1().StorageClasses().Lister() - return &CinderLimits{ - predicate: predicates.NewMaxPDVolumeCountPredicate(predicates.CinderVolumeFilterType, getCSINodeListerIfEnabled(informerFactory), scLister, pvLister, pvcLister), - }, nil -} diff --git a/pkg/scheduler/framework/plugins/nodevolumelimits/cinder_test.go b/pkg/scheduler/framework/plugins/nodevolumelimits/cinder_test.go deleted file mode 100644 index 20ebb14de82..00000000000 --- a/pkg/scheduler/framework/plugins/nodevolumelimits/cinder_test.go +++ /dev/null @@ -1,96 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package nodevolumelimits - -import ( - "context" - "reflect" - "testing" - - v1 "k8s.io/api/core/v1" - "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" - framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" -) - -func TestCinderLimits(t *testing.T) { - twoVolCinderPod := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - Cinder: &v1.CinderVolumeSource{VolumeID: "tvp1"}, - }, - }, - { - VolumeSource: v1.VolumeSource{ - Cinder: &v1.CinderVolumeSource{VolumeID: "tvp2"}, - }, - }, - }, - }, - } - oneVolCinderPod := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - Cinder: &v1.CinderVolumeSource{VolumeID: "ovp"}, - }, - }, - }, - }, - } - - tests := []struct { - newPod *v1.Pod - existingPods []*v1.Pod - filterName string - driverName string - maxVols int - test string - wantStatus *framework.Status - }{ - { - newPod: oneVolCinderPod, - existingPods: []*v1.Pod{twoVolCinderPod}, - filterName: predicates.CinderVolumeFilterType, - maxVols: 4, - test: "fits when node capacity >= new pod's Cinder volumes", - }, - { - newPod: oneVolCinderPod, - existingPods: []*v1.Pod{twoVolCinderPod}, - filterName: predicates.CinderVolumeFilterType, - maxVols: 2, - test: "not fit when node capacity < new pod's Cinder volumes", - wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()), - }, - } - - for _, test := range tests { - t.Run(test.test, func(t *testing.T) { - node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName) - p := &CinderLimits{ - predicate: predicates.NewMaxPDVolumeCountPredicate(test.filterName, getFakeCSINodeLister(csiNode), getFakeCSIStorageClassLister(test.filterName, test.driverName), getFakePVLister(test.filterName), getFakePVCLister(test.filterName)), - } - gotStatus := p.Filter(context.Background(), nil, test.newPod, node) - if !reflect.DeepEqual(gotStatus, test.wantStatus) { - t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus) - } - }) - } -} diff --git a/pkg/scheduler/framework/plugins/nodevolumelimits/csi_test.go b/pkg/scheduler/framework/plugins/nodevolumelimits/csi_test.go index 438893d21ce..5aa4f18487a 100644 --- a/pkg/scheduler/framework/plugins/nodevolumelimits/csi_test.go +++ b/pkg/scheduler/framework/plugins/nodevolumelimits/csi_test.go @@ -52,13 +52,13 @@ const ( // getVolumeLimitKey returns a ResourceName by filter type func getVolumeLimitKey(filterType string) v1.ResourceName { switch filterType { - case predicates.EBSVolumeFilterType: + case ebsVolumeFilterType: return v1.ResourceName(volumeutil.EBSVolumeLimitKey) - case predicates.GCEPDVolumeFilterType: + case gcePDVolumeFilterType: return v1.ResourceName(volumeutil.GCEVolumeLimitKey) - case predicates.AzureDiskVolumeFilterType: + case azureDiskVolumeFilterType: return v1.ResourceName(volumeutil.AzureVolumeLimitKey) - case predicates.CinderVolumeFilterType: + case cinderVolumeFilterType: return v1.ResourceName(volumeutil.CinderVolumeLimitKey) default: return v1.ResourceName(volumeutil.GetCSIAttachLimitKey(filterType)) diff --git a/pkg/scheduler/framework/plugins/nodevolumelimits/csinode_helper.go b/pkg/scheduler/framework/plugins/nodevolumelimits/csinode_helper.go deleted file mode 100644 index 196aa3db34d..00000000000 --- a/pkg/scheduler/framework/plugins/nodevolumelimits/csinode_helper.go +++ /dev/null @@ -1,32 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nodevolumelimits - -import ( - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/client-go/informers" - storagelisters "k8s.io/client-go/listers/storage/v1" - kubefeatures "k8s.io/kubernetes/pkg/features" -) - -// getCSINodeListerIfEnabled returns the CSINode lister or nil if the feature is disabled -func getCSINodeListerIfEnabled(factory informers.SharedInformerFactory) storagelisters.CSINodeLister { - if !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.CSINodeInfo) { - return nil - } - return factory.Storage().V1().CSINodes().Lister() -} diff --git a/pkg/scheduler/framework/plugins/nodevolumelimits/ebs.go b/pkg/scheduler/framework/plugins/nodevolumelimits/ebs.go deleted file mode 100644 index b80f884abeb..00000000000 --- a/pkg/scheduler/framework/plugins/nodevolumelimits/ebs.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nodevolumelimits - -import ( - "context" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration" - framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - "k8s.io/kubernetes/pkg/scheduler/nodeinfo" -) - -// EBSLimits is a plugin that checks node volume limits. -type EBSLimits struct { - predicate predicates.FitPredicate -} - -var _ framework.FilterPlugin = &EBSLimits{} - -// EBSName is the name of the plugin used in the plugin registry and configurations. -const EBSName = "EBSLimits" - -// Name returns name of the plugin. It is used in logs, etc. -func (pl *EBSLimits) Name() string { - return EBSName -} - -// Filter invoked at the filter extension point. -func (pl *EBSLimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status { - // metadata is not needed - _, reasons, err := pl.predicate(pod, nil, nodeInfo) - return migration.PredicateResultToFrameworkStatus(reasons, err) -} - -// NewEBS returns function that initializes a new plugin and returns it. 
-func NewEBS(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin, error) { - informerFactory := handle.SharedInformerFactory() - pvLister := informerFactory.Core().V1().PersistentVolumes().Lister() - pvcLister := informerFactory.Core().V1().PersistentVolumeClaims().Lister() - scLister := informerFactory.Storage().V1().StorageClasses().Lister() - - return &EBSLimits{ - predicate: predicates.NewMaxPDVolumeCountPredicate(predicates.EBSVolumeFilterType, getCSINodeListerIfEnabled(informerFactory), scLister, pvLister, pvcLister), - }, nil -} diff --git a/pkg/scheduler/framework/plugins/nodevolumelimits/ebs_test.go b/pkg/scheduler/framework/plugins/nodevolumelimits/ebs_test.go deleted file mode 100644 index c9aa0d89557..00000000000 --- a/pkg/scheduler/framework/plugins/nodevolumelimits/ebs_test.go +++ /dev/null @@ -1,561 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nodevolumelimits - -import ( - "context" - "reflect" - "strings" - "testing" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - csilibplugins "k8s.io/csi-translation-lib/plugins" - "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" - framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake" - utilpointer "k8s.io/utils/pointer" -) - -func onePVCPod(filterName string) *v1.Pod { - return &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "some" + filterName + "Vol", - }, - }, - }, - }, - }, - } -} - -func splitPVCPod(filterName string) *v1.Pod { - return &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "someNon" + filterName + "Vol", - }, - }, - }, - { - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "some" + filterName + "Vol", - }, - }, - }, - }, - }, - } -} - -func TestEBSLimits(t *testing.T) { - oneVolPod := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "ovp"}, - }, - }, - }, - }, - } - twoVolPod := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp1"}, - }, - }, - { - VolumeSource: v1.VolumeSource{ - AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp2"}, - }, - }, - }, - }, - } - unboundPVCwithInvalidSCPod := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "unboundPVCwithInvalidSCPod", - }, - }, - }, - }, - }, - } - unboundPVCwithDefaultSCPod := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: 
v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "unboundPVCwithDefaultSCPod", - }, - }, - }, - }, - }, - } - splitVolsPod := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - HostPath: &v1.HostPathVolumeSource{}, - }, - }, - { - VolumeSource: v1.VolumeSource{ - AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "svp"}, - }, - }, - }, - }, - } - nonApplicablePod := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - HostPath: &v1.HostPathVolumeSource{}, - }, - }, - }, - }, - } - deletedPVCPod := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "deletedPVC", - }, - }, - }, - }, - }, - } - twoDeletedPVCPod := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "deletedPVC", - }, - }, - }, - { - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "anotherDeletedPVC", - }, - }, - }, - }, - }, - } - deletedPVPod := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "pvcWithDeletedPV", - }, - }, - }, - }, - }, - } - // deletedPVPod2 is a different pod than deletedPVPod but using the same PVC - deletedPVPod2 := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "pvcWithDeletedPV", - }, - }, - }, - }, - }, - } - // anotherDeletedPVPod is a different pod than deletedPVPod and uses another PVC - anotherDeletedPVPod := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "anotherPVCWithDeletedPV", - }, - }, - }, - }, - }, - } - emptyPod := &v1.Pod{ - Spec: v1.PodSpec{}, - } - unboundPVCPod := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "unboundPVC", - }, - }, - }, - }, - }, - } - // Different pod than unboundPVCPod, but using the same unbound PVC - unboundPVCPod2 := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "unboundPVC", - }, - }, - }, - }, - }, - } - - // pod with unbound PVC that's different to unboundPVC - anotherUnboundPVCPod := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "anotherUnboundPVC", - }, - }, - }, - }, - }, - } - - tests := []struct { - newPod *v1.Pod - existingPods []*v1.Pod - filterName string - driverName string - maxVols int - test string - wantStatus *framework.Status - }{ - { - newPod: oneVolPod, - existingPods: []*v1.Pod{twoVolPod, oneVolPod}, - filterName: predicates.EBSVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 4, - test: "fits when node capacity >= new pod's EBS volumes", - }, - { - newPod: twoVolPod, - existingPods: []*v1.Pod{oneVolPod}, - filterName: predicates.EBSVolumeFilterType, - driverName: 
csilibplugins.AWSEBSInTreePluginName, - maxVols: 2, - test: "doesn't fit when node capacity < new pod's EBS volumes", - wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()), - }, - { - newPod: splitVolsPod, - existingPods: []*v1.Pod{twoVolPod}, - filterName: predicates.EBSVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 3, - test: "new pod's count ignores non-EBS volumes", - }, - { - newPod: twoVolPod, - existingPods: []*v1.Pod{splitVolsPod, nonApplicablePod, emptyPod}, - filterName: predicates.EBSVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 3, - test: "existing pods' counts ignore non-EBS volumes", - }, - { - newPod: onePVCPod(predicates.EBSVolumeFilterType), - existingPods: []*v1.Pod{splitVolsPod, nonApplicablePod, emptyPod}, - filterName: predicates.EBSVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 3, - test: "new pod's count considers PVCs backed by EBS volumes", - }, - { - newPod: splitPVCPod(predicates.EBSVolumeFilterType), - existingPods: []*v1.Pod{splitVolsPod, oneVolPod}, - filterName: predicates.EBSVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 3, - test: "new pod's count ignores PVCs not backed by EBS volumes", - }, - { - newPod: twoVolPod, - existingPods: []*v1.Pod{oneVolPod, onePVCPod(predicates.EBSVolumeFilterType)}, - filterName: predicates.EBSVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 3, - test: "existing pods' counts considers PVCs backed by EBS volumes", - wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()), - }, - { - newPod: twoVolPod, - existingPods: []*v1.Pod{oneVolPod, twoVolPod, onePVCPod(predicates.EBSVolumeFilterType)}, - filterName: predicates.EBSVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 4, - test: "already-mounted EBS volumes are always ok to allow", - }, - { - newPod: splitVolsPod, - existingPods: []*v1.Pod{oneVolPod, oneVolPod, onePVCPod(predicates.EBSVolumeFilterType)}, - filterName: predicates.EBSVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 3, - test: "the same EBS volumes are not counted multiple times", - }, - { - newPod: onePVCPod(predicates.EBSVolumeFilterType), - existingPods: []*v1.Pod{oneVolPod, deletedPVCPod}, - filterName: predicates.EBSVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 1, - test: "missing PVC is not counted towards the PV limit", - wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()), - }, - { - newPod: onePVCPod(predicates.EBSVolumeFilterType), - existingPods: []*v1.Pod{oneVolPod, deletedPVCPod}, - filterName: predicates.EBSVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 2, - test: "missing PVC is not counted towards the PV limit", - }, - { - newPod: onePVCPod(predicates.EBSVolumeFilterType), - existingPods: []*v1.Pod{oneVolPod, twoDeletedPVCPod}, - filterName: predicates.EBSVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 2, - test: "two missing PVCs are not counted towards the PV limit twice", - }, - { - newPod: unboundPVCwithInvalidSCPod, - existingPods: []*v1.Pod{oneVolPod}, - filterName: predicates.EBSVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 1, - test: "unbound PVC with invalid SC is not 
counted towards the PV limit", - }, - { - newPod: unboundPVCwithDefaultSCPod, - existingPods: []*v1.Pod{oneVolPod}, - filterName: predicates.EBSVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 1, - test: "unbound PVC from different provisioner is not counted towards the PV limit", - }, - - { - newPod: onePVCPod(predicates.EBSVolumeFilterType), - existingPods: []*v1.Pod{oneVolPod, deletedPVPod}, - filterName: predicates.EBSVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 2, - test: "pod with missing PV is counted towards the PV limit", - wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()), - }, - { - newPod: onePVCPod(predicates.EBSVolumeFilterType), - existingPods: []*v1.Pod{oneVolPod, deletedPVPod}, - filterName: predicates.EBSVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 3, - test: "pod with missing PV is counted towards the PV limit", - }, - { - newPod: deletedPVPod2, - existingPods: []*v1.Pod{oneVolPod, deletedPVPod}, - filterName: predicates.EBSVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 2, - test: "two pods missing the same PV are counted towards the PV limit only once", - }, - { - newPod: anotherDeletedPVPod, - existingPods: []*v1.Pod{oneVolPod, deletedPVPod}, - filterName: predicates.EBSVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 2, - test: "two pods missing different PVs are counted towards the PV limit twice", - wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()), - }, - { - newPod: onePVCPod(predicates.EBSVolumeFilterType), - existingPods: []*v1.Pod{oneVolPod, unboundPVCPod}, - filterName: predicates.EBSVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 2, - test: "pod with unbound PVC is counted towards the PV limit", - wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()), - }, - { - newPod: onePVCPod(predicates.EBSVolumeFilterType), - existingPods: []*v1.Pod{oneVolPod, unboundPVCPod}, - filterName: predicates.EBSVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 3, - test: "pod with unbound PVC is counted towards the PV limit", - }, - { - newPod: unboundPVCPod2, - existingPods: []*v1.Pod{oneVolPod, unboundPVCPod}, - filterName: predicates.EBSVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 2, - test: "the same unbound PVC in multiple pods is counted towards the PV limit only once", - }, - { - newPod: anotherUnboundPVCPod, - existingPods: []*v1.Pod{oneVolPod, unboundPVCPod}, - filterName: predicates.EBSVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 2, - test: "two different unbound PVCs are counted towards the PV limit as two volumes", - wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()), - }, - } - - for _, test := range tests { - t.Run(test.test, func(t *testing.T) { - node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName) - p := &EBSLimits{ - predicate: predicates.NewMaxPDVolumeCountPredicate(test.filterName, getFakeCSINodeLister(csiNode), getFakeCSIStorageClassLister(test.filterName, test.driverName), getFakePVLister(test.filterName), getFakePVCLister(test.filterName)), - } - gotStatus := 
p.Filter(context.Background(), nil, test.newPod, node) - if !reflect.DeepEqual(gotStatus, test.wantStatus) { - t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus) - } - }) - } -} - -func getFakePVCLister(filterName string) fakelisters.PersistentVolumeClaimLister { - return fakelisters.PersistentVolumeClaimLister{ - { - ObjectMeta: metav1.ObjectMeta{Name: "some" + filterName + "Vol"}, - Spec: v1.PersistentVolumeClaimSpec{ - VolumeName: "some" + filterName + "Vol", - StorageClassName: &filterName, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{Name: "someNon" + filterName + "Vol"}, - Spec: v1.PersistentVolumeClaimSpec{ - VolumeName: "someNon" + filterName + "Vol", - StorageClassName: &filterName, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{Name: "pvcWithDeletedPV"}, - Spec: v1.PersistentVolumeClaimSpec{ - VolumeName: "pvcWithDeletedPV", - StorageClassName: &filterName, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{Name: "anotherPVCWithDeletedPV"}, - Spec: v1.PersistentVolumeClaimSpec{ - VolumeName: "anotherPVCWithDeletedPV", - StorageClassName: &filterName, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{Name: "unboundPVC"}, - Spec: v1.PersistentVolumeClaimSpec{ - VolumeName: "", - StorageClassName: &filterName, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{Name: "anotherUnboundPVC"}, - Spec: v1.PersistentVolumeClaimSpec{ - VolumeName: "", - StorageClassName: &filterName, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{Name: "unboundPVCwithDefaultSCPod"}, - Spec: v1.PersistentVolumeClaimSpec{ - VolumeName: "", - StorageClassName: utilpointer.StringPtr("standard-sc"), - }, - }, - { - ObjectMeta: metav1.ObjectMeta{Name: "unboundPVCwithInvalidSCPod"}, - Spec: v1.PersistentVolumeClaimSpec{ - VolumeName: "", - StorageClassName: utilpointer.StringPtr("invalid-sc"), - }, - }, - } -} - -func getFakePVLister(filterName string) fakelisters.PersistentVolumeLister { - return fakelisters.PersistentVolumeLister{ - { - ObjectMeta: metav1.ObjectMeta{Name: "some" + filterName + "Vol"}, - Spec: v1.PersistentVolumeSpec{ - PersistentVolumeSource: v1.PersistentVolumeSource{ - AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: strings.ToLower(filterName) + "Vol"}, - }, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{Name: "someNon" + filterName + "Vol"}, - Spec: v1.PersistentVolumeSpec{ - PersistentVolumeSource: v1.PersistentVolumeSource{}, - }, - }, - } -} diff --git a/pkg/scheduler/framework/plugins/nodevolumelimits/gce.go b/pkg/scheduler/framework/plugins/nodevolumelimits/gce.go deleted file mode 100644 index a5730d97784..00000000000 --- a/pkg/scheduler/framework/plugins/nodevolumelimits/gce.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package nodevolumelimits - -import ( - "context" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration" - framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - "k8s.io/kubernetes/pkg/scheduler/nodeinfo" -) - -// GCEPDLimits is a plugin that checks node volume limits. -type GCEPDLimits struct { - predicate predicates.FitPredicate -} - -var _ framework.FilterPlugin = &GCEPDLimits{} - -// GCEPDName is the name of the plugin used in the plugin registry and configurations. -const GCEPDName = "GCEPDLimits" - -// Name returns name of the plugin. It is used in logs, etc. -func (pl *GCEPDLimits) Name() string { - return GCEPDName -} - -// Filter invoked at the filter extension point. -func (pl *GCEPDLimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status { - // metadata is not needed - _, reasons, err := pl.predicate(pod, nil, nodeInfo) - return migration.PredicateResultToFrameworkStatus(reasons, err) -} - -// NewGCEPD returns function that initializes a new plugin and returns it. -func NewGCEPD(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin, error) { - informerFactory := handle.SharedInformerFactory() - pvLister := informerFactory.Core().V1().PersistentVolumes().Lister() - pvcLister := informerFactory.Core().V1().PersistentVolumeClaims().Lister() - scLister := informerFactory.Storage().V1().StorageClasses().Lister() - - return &GCEPDLimits{ - predicate: predicates.NewMaxPDVolumeCountPredicate(predicates.GCEPDVolumeFilterType, getCSINodeListerIfEnabled(informerFactory), scLister, pvLister, pvcLister), - }, nil -} diff --git a/pkg/scheduler/framework/plugins/nodevolumelimits/gce_test.go b/pkg/scheduler/framework/plugins/nodevolumelimits/gce_test.go deleted file mode 100644 index 957d3cfd485..00000000000 --- a/pkg/scheduler/framework/plugins/nodevolumelimits/gce_test.go +++ /dev/null @@ -1,367 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package nodevolumelimits - -import ( - "context" - "reflect" - "testing" - - v1 "k8s.io/api/core/v1" - "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" - framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" -) - -func TestGCEPDLimits(t *testing.T) { - oneVolPod := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "ovp"}, - }, - }, - }, - }, - } - twoVolPod := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp1"}, - }, - }, - { - VolumeSource: v1.VolumeSource{ - AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp2"}, - }, - }, - }, - }, - } - splitVolsPod := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - HostPath: &v1.HostPathVolumeSource{}, - }, - }, - { - VolumeSource: v1.VolumeSource{ - AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "svp"}, - }, - }, - }, - }, - } - nonApplicablePod := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - HostPath: &v1.HostPathVolumeSource{}, - }, - }, - }, - }, - } - deletedPVCPod := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "deletedPVC", - }, - }, - }, - }, - }, - } - twoDeletedPVCPod := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "deletedPVC", - }, - }, - }, - { - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "anotherDeletedPVC", - }, - }, - }, - }, - }, - } - deletedPVPod := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "pvcWithDeletedPV", - }, - }, - }, - }, - }, - } - // deletedPVPod2 is a different pod than deletedPVPod but using the same PVC - deletedPVPod2 := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "pvcWithDeletedPV", - }, - }, - }, - }, - }, - } - // anotherDeletedPVPod is a different pod than deletedPVPod and uses another PVC - anotherDeletedPVPod := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "anotherPVCWithDeletedPV", - }, - }, - }, - }, - }, - } - emptyPod := &v1.Pod{ - Spec: v1.PodSpec{}, - } - unboundPVCPod := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "unboundPVC", - }, - }, - }, - }, - }, - } - // Different pod than unboundPVCPod, but using the same unbound PVC - unboundPVCPod2 := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "unboundPVC", - }, - }, - }, - }, - }, - } - - // pod with unbound PVC that's different to unboundPVC - anotherUnboundPVCPod := &v1.Pod{ - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - { - VolumeSource: v1.VolumeSource{ - 
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "anotherUnboundPVC", - }, - }, - }, - }, - }, - } - - tests := []struct { - newPod *v1.Pod - existingPods []*v1.Pod - filterName string - driverName string - maxVols int - test string - wantStatus *framework.Status - }{ - { - newPod: oneVolPod, - existingPods: []*v1.Pod{twoVolPod, oneVolPod}, - filterName: predicates.GCEPDVolumeFilterType, - maxVols: 4, - test: "fits when node capacity >= new pod's GCE volumes", - }, - { - newPod: twoVolPod, - existingPods: []*v1.Pod{oneVolPod}, - filterName: predicates.GCEPDVolumeFilterType, - maxVols: 2, - test: "fit when node capacity < new pod's GCE volumes", - }, - { - newPod: splitVolsPod, - existingPods: []*v1.Pod{twoVolPod}, - filterName: predicates.GCEPDVolumeFilterType, - maxVols: 3, - test: "new pod's count ignores non-GCE volumes", - }, - { - newPod: twoVolPod, - existingPods: []*v1.Pod{splitVolsPod, nonApplicablePod, emptyPod}, - filterName: predicates.GCEPDVolumeFilterType, - maxVols: 3, - test: "existing pods' counts ignore non-GCE volumes", - }, - { - newPod: onePVCPod(predicates.GCEPDVolumeFilterType), - existingPods: []*v1.Pod{splitVolsPod, nonApplicablePod, emptyPod}, - filterName: predicates.GCEPDVolumeFilterType, - maxVols: 3, - test: "new pod's count considers PVCs backed by GCE volumes", - }, - { - newPod: splitPVCPod(predicates.GCEPDVolumeFilterType), - existingPods: []*v1.Pod{splitVolsPod, oneVolPod}, - filterName: predicates.GCEPDVolumeFilterType, - maxVols: 3, - test: "new pod's count ignores PVCs not backed by GCE volumes", - }, - { - newPod: twoVolPod, - existingPods: []*v1.Pod{oneVolPod, onePVCPod(predicates.GCEPDVolumeFilterType)}, - filterName: predicates.GCEPDVolumeFilterType, - maxVols: 3, - test: "existing pods' counts considers PVCs backed by GCE volumes", - }, - { - newPod: twoVolPod, - existingPods: []*v1.Pod{oneVolPod, twoVolPod, onePVCPod(predicates.GCEPDVolumeFilterType)}, - filterName: predicates.GCEPDVolumeFilterType, - maxVols: 4, - test: "already-mounted EBS volumes are always ok to allow", - }, - { - newPod: splitVolsPod, - existingPods: []*v1.Pod{oneVolPod, oneVolPod, onePVCPod(predicates.GCEPDVolumeFilterType)}, - filterName: predicates.GCEPDVolumeFilterType, - maxVols: 3, - test: "the same GCE volumes are not counted multiple times", - }, - { - newPod: onePVCPod(predicates.GCEPDVolumeFilterType), - existingPods: []*v1.Pod{oneVolPod, deletedPVCPod}, - filterName: predicates.GCEPDVolumeFilterType, - maxVols: 2, - test: "pod with missing PVC is counted towards the PV limit", - }, - { - newPod: onePVCPod(predicates.GCEPDVolumeFilterType), - existingPods: []*v1.Pod{oneVolPod, deletedPVCPod}, - filterName: predicates.GCEPDVolumeFilterType, - maxVols: 3, - test: "pod with missing PVC is counted towards the PV limit", - }, - { - newPod: onePVCPod(predicates.GCEPDVolumeFilterType), - existingPods: []*v1.Pod{oneVolPod, twoDeletedPVCPod}, - filterName: predicates.GCEPDVolumeFilterType, - maxVols: 3, - test: "pod with missing two PVCs is counted towards the PV limit twice", - }, - { - newPod: onePVCPod(predicates.GCEPDVolumeFilterType), - existingPods: []*v1.Pod{oneVolPod, deletedPVPod}, - filterName: predicates.GCEPDVolumeFilterType, - maxVols: 2, - test: "pod with missing PV is counted towards the PV limit", - }, - { - newPod: onePVCPod(predicates.GCEPDVolumeFilterType), - existingPods: []*v1.Pod{oneVolPod, deletedPVPod}, - filterName: predicates.GCEPDVolumeFilterType, - maxVols: 3, - test: "pod with missing PV is counted towards the PV limit", 
- }, - { - newPod: deletedPVPod2, - existingPods: []*v1.Pod{oneVolPod, deletedPVPod}, - filterName: predicates.GCEPDVolumeFilterType, - maxVols: 2, - test: "two pods missing the same PV are counted towards the PV limit only once", - }, - { - newPod: anotherDeletedPVPod, - existingPods: []*v1.Pod{oneVolPod, deletedPVPod}, - filterName: predicates.GCEPDVolumeFilterType, - maxVols: 2, - test: "two pods missing different PVs are counted towards the PV limit twice", - }, - { - newPod: onePVCPod(predicates.GCEPDVolumeFilterType), - existingPods: []*v1.Pod{oneVolPod, unboundPVCPod}, - filterName: predicates.GCEPDVolumeFilterType, - maxVols: 2, - test: "pod with unbound PVC is counted towards the PV limit", - }, - { - newPod: onePVCPod(predicates.GCEPDVolumeFilterType), - existingPods: []*v1.Pod{oneVolPod, unboundPVCPod}, - filterName: predicates.GCEPDVolumeFilterType, - maxVols: 3, - test: "pod with unbound PVC is counted towards the PV limit", - }, - { - newPod: unboundPVCPod2, - existingPods: []*v1.Pod{oneVolPod, unboundPVCPod}, - filterName: predicates.GCEPDVolumeFilterType, - maxVols: 2, - test: "the same unbound PVC in multiple pods is counted towards the PV limit only once", - }, - { - newPod: anotherUnboundPVCPod, - existingPods: []*v1.Pod{oneVolPod, unboundPVCPod}, - filterName: predicates.GCEPDVolumeFilterType, - maxVols: 2, - test: "two different unbound PVCs are counted towards the PV limit as two volumes", - }, - } - - for _, test := range tests { - t.Run(test.test, func(t *testing.T) { - node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName) - p := &GCEPDLimits{ - predicate: predicates.NewMaxPDVolumeCountPredicate(test.filterName, getFakeCSINodeLister(csiNode), getFakeCSIStorageClassLister(test.filterName, test.driverName), getFakePVLister(test.filterName), getFakePVCLister(test.filterName)), - } - gotStatus := p.Filter(context.Background(), nil, test.newPod, node) - if !reflect.DeepEqual(gotStatus, test.wantStatus) { - t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus) - } - }) - } -} diff --git a/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi.go b/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi.go new file mode 100644 index 00000000000..c25fc9945ae --- /dev/null +++ b/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi.go @@ -0,0 +1,520 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package nodevolumelimits
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"regexp"
+	"strconv"
+
+	v1 "k8s.io/api/core/v1"
+	storage "k8s.io/api/storage/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/rand"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	"k8s.io/client-go/informers"
+	corelisters "k8s.io/client-go/listers/core/v1"
+	storagelisters "k8s.io/client-go/listers/storage/v1"
+	csilibplugins "k8s.io/csi-translation-lib/plugins"
+	"k8s.io/klog"
+	kubefeatures "k8s.io/kubernetes/pkg/features"
+	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+	"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+	volumeutil "k8s.io/kubernetes/pkg/volume/util"
+)
+
+const (
+	// defaultMaxGCEPDVolumes defines the maximum number of PD Volumes for GCE.
+	// GCE instances can have up to 16 PD volumes attached.
+	defaultMaxGCEPDVolumes = 16
+	// defaultMaxAzureDiskVolumes defines the maximum number of disk volumes for Azure.
+	// Larger Azure VMs can actually have many more disks attached.
+	// TODO We should determine the max based on VM size
+	defaultMaxAzureDiskVolumes = 16
+
+	// ebsVolumeFilterType defines the filter name for ebsVolumeFilter.
+	ebsVolumeFilterType = "EBS"
+	// gcePDVolumeFilterType defines the filter name for gcePDVolumeFilter.
+	gcePDVolumeFilterType = "GCE"
+	// azureDiskVolumeFilterType defines the filter name for azureDiskVolumeFilter.
+	azureDiskVolumeFilterType = "AzureDisk"
+	// cinderVolumeFilterType defines the filter name for cinderVolumeFilter.
+	cinderVolumeFilterType = "Cinder"
+)
+
+// AzureDiskName is the name of the plugin used in the plugin registry and configurations.
+const AzureDiskName = "AzureDiskLimits"
+
+// NewAzureDisk initializes a new plugin and returns it.
+func NewAzureDisk(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin, error) {
+	informerFactory := handle.SharedInformerFactory()
+	return newNonCSILimitsWithInformerFactory(azureDiskVolumeFilterType, informerFactory), nil
+}
+
+// CinderName is the name of the plugin used in the plugin registry and configurations.
+const CinderName = "CinderLimits"
+
+// NewCinder initializes a new plugin and returns it.
+func NewCinder(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin, error) {
+	informerFactory := handle.SharedInformerFactory()
+	return newNonCSILimitsWithInformerFactory(cinderVolumeFilterType, informerFactory), nil
+}
+
+// EBSName is the name of the plugin used in the plugin registry and configurations.
+const EBSName = "EBSLimits"
+
+// NewEBS initializes a new plugin and returns it.
+func NewEBS(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin, error) {
+	informerFactory := handle.SharedInformerFactory()
+	return newNonCSILimitsWithInformerFactory(ebsVolumeFilterType, informerFactory), nil
+}
+
+// GCEPDName is the name of the plugin used in the plugin registry and configurations.
+const GCEPDName = "GCEPDLimits"
+
+// NewGCEPD initializes a new plugin and returns it.
+func NewGCEPD(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin, error) {
+	informerFactory := handle.SharedInformerFactory()
+	return newNonCSILimitsWithInformerFactory(gcePDVolumeFilterType, informerFactory), nil
+}
+
+// nonCSILimits contains information to check the max number of volumes for a plugin.
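+// A single implementation backs the AzureDiskLimits, CinderLimits, EBSLimits and GCEPDLimits
+// plugins registered above; only the VolumeFilter, the limit key and the default limit differ.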
+type nonCSILimits struct {
+	name           string
+	filter         VolumeFilter
+	volumeLimitKey v1.ResourceName
+	maxVolumeFunc  func(node *v1.Node) int
+	csiNodeLister  storagelisters.CSINodeLister
+	pvLister       corelisters.PersistentVolumeLister
+	pvcLister      corelisters.PersistentVolumeClaimLister
+	scLister       storagelisters.StorageClassLister
+
+	// The string below is generated randomly during the struct's initialization.
+	// It is used to prefix the volume IDs generated inside filterVolumes() to
+	// avoid conflicts with any real volume.
+	randomVolumeIDPrefix string
+}
+
+var _ framework.FilterPlugin = &nonCSILimits{}
+
+// newNonCSILimitsWithInformerFactory returns a plugin for the given filter name, with its
+// listers wired up from the informer factory.
+func newNonCSILimitsWithInformerFactory(
+	filterName string,
+	informerFactory informers.SharedInformerFactory,
+) framework.Plugin {
+	pvLister := informerFactory.Core().V1().PersistentVolumes().Lister()
+	pvcLister := informerFactory.Core().V1().PersistentVolumeClaims().Lister()
+	scLister := informerFactory.Storage().V1().StorageClasses().Lister()
+
+	return newNonCSILimits(filterName, getCSINodeListerIfEnabled(informerFactory), scLister, pvLister, pvcLister)
+}
+
+// newNonCSILimits creates a plugin which evaluates whether a pod can fit based on the
+// number of volumes it requests that match a filter, and the number of matching volumes
+// that are already present on the node.
+//
+// DEPRECATED
+// All cloudprovider specific predicates defined here are deprecated in favour of the CSI
+// volume limit predicate - MaxCSIVolumeCountPred.
+//
+// The predicate looks at both volumes used directly and PVC volumes that are backed by a relevant
+// volume type, counts the number of unique volumes, and rejects the new pod if it would place the
+// total count over the maximum.
+func newNonCSILimits(
+	filterName string,
+	csiNodeLister storagelisters.CSINodeLister,
+	scLister storagelisters.StorageClassLister,
+	pvLister corelisters.PersistentVolumeLister,
+	pvcLister corelisters.PersistentVolumeClaimLister,
+) framework.Plugin {
+	var filter VolumeFilter
+	var volumeLimitKey v1.ResourceName
+	var name string
+
+	switch filterName {
+	case ebsVolumeFilterType:
+		name = EBSName
+		filter = ebsVolumeFilter
+		volumeLimitKey = v1.ResourceName(volumeutil.EBSVolumeLimitKey)
+	case gcePDVolumeFilterType:
+		name = GCEPDName
+		filter = gcePDVolumeFilter
+		volumeLimitKey = v1.ResourceName(volumeutil.GCEVolumeLimitKey)
+	case azureDiskVolumeFilterType:
+		name = AzureDiskName
+		filter = azureDiskVolumeFilter
+		volumeLimitKey = v1.ResourceName(volumeutil.AzureVolumeLimitKey)
+	case cinderVolumeFilterType:
+		name = CinderName
+		filter = cinderVolumeFilter
+		volumeLimitKey = v1.ResourceName(volumeutil.CinderVolumeLimitKey)
+	default:
+		klog.Fatalf("Wrong filterName, only support %v, %v, %v and %v", ebsVolumeFilterType,
+			gcePDVolumeFilterType, azureDiskVolumeFilterType, cinderVolumeFilterType)
+		return nil
+	}
+	pl := &nonCSILimits{
+		name:                 name,
+		filter:               filter,
+		volumeLimitKey:       volumeLimitKey,
+		maxVolumeFunc:        getMaxVolumeFunc(filterName),
+		csiNodeLister:        csiNodeLister,
+		pvLister:             pvLister,
+		pvcLister:            pvcLister,
+		scLister:             scLister,
+		randomVolumeIDPrefix: rand.String(32),
+	}
+
+	return pl
+}
+
+// Name returns the name of the plugin. It is used in logs, etc.
+func (pl *nonCSILimits) Name() string {
+	return pl.name
+}
+
+// Filter invoked at the filter extension point.
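+// It sums the unique volumes of the filtered type requested by the pod with those already
+// present on the node, and returns Unschedulable when the sum would exceed the node's attach
+// limit (taken from node allocatable when reported, otherwise from maxVolumeFunc).
+// For example, with a limit of 2, a node whose pods already mount volumes {a, b} rejects a
+// new pod mounting {b, c}: b is already attached and not double-counted, but 2 existing + 1
+// new volume exceeds the limit.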
+func (pl *nonCSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
+	// If a pod doesn't have any volume attached to it, the predicate will always be true.
+	// Thus we make a fast path for it, to avoid unnecessary computations in this case.
+	if len(pod.Spec.Volumes) == 0 {
+		return nil
+	}
+
+	newVolumes := make(map[string]bool)
+	if err := pl.filterVolumes(pod.Spec.Volumes, pod.Namespace, newVolumes); err != nil {
+		return framework.NewStatus(framework.Error, err.Error())
+	}
+
+	// quick return: none of the pod's volumes are relevant to this filter
+	if len(newVolumes) == 0 {
+		return nil
+	}
+
+	node := nodeInfo.Node()
+	if node == nil {
+		return framework.NewStatus(framework.Error, "node not found")
+	}
+
+	var csiNode *storage.CSINode
+	var err error
+	if pl.csiNodeLister != nil {
+		csiNode, err = pl.csiNodeLister.Get(node.Name)
+		if err != nil {
+			// we don't fail here because the CSINode object is only necessary
+			// for determining whether the migration is enabled or not
+			klog.V(5).Infof("Could not get a CSINode object for the node: %v", err)
+		}
+	}
+
+	// if a plugin has been migrated to a CSI driver, defer to the CSI predicate
+	if pl.filter.IsMigrated(csiNode) {
+		return nil
+	}
+
+	// count unique volumes
+	existingVolumes := make(map[string]bool)
+	for _, existingPod := range nodeInfo.Pods() {
+		if err := pl.filterVolumes(existingPod.Spec.Volumes, existingPod.Namespace, existingVolumes); err != nil {
+			return framework.NewStatus(framework.Error, err.Error())
+		}
+	}
+	numExistingVolumes := len(existingVolumes)
+
+	// filter out already-mounted volumes
+	for k := range existingVolumes {
+		if _, ok := newVolumes[k]; ok {
+			delete(newVolumes, k)
+		}
+	}
+
+	numNewVolumes := len(newVolumes)
+	maxAttachLimit := pl.maxVolumeFunc(node)
+	volumeLimits := nodeInfo.VolumeLimits()
+	if maxAttachLimitFromAllocatable, ok := volumeLimits[pl.volumeLimitKey]; ok {
+		maxAttachLimit = int(maxAttachLimitFromAllocatable)
+	}
+
+	if numExistingVolumes+numNewVolumes > maxAttachLimit {
+		// violates MaxEBSVolumeCount or MaxGCEPDVolumeCount
+		return framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason())
+	}
+	if nodeInfo != nil && nodeInfo.TransientInfo != nil && utilfeature.DefaultFeatureGate.Enabled(kubefeatures.BalanceAttachedNodeVolumes) {
+		nodeInfo.TransientInfo.TransientLock.Lock()
+		defer nodeInfo.TransientInfo.TransientLock.Unlock()
+		nodeInfo.TransientInfo.TransNodeInfo.AllocatableVolumesCount = maxAttachLimit - numExistingVolumes
+		nodeInfo.TransientInfo.TransNodeInfo.RequestedVolumes = numNewVolumes
+	}
+	return nil
+}
+
+func (pl *nonCSILimits) filterVolumes(volumes []v1.Volume, namespace string, filteredVolumes map[string]bool) error {
+	for i := range volumes {
+		vol := &volumes[i]
+		if id, ok := pl.filter.FilterVolume(vol); ok {
+			filteredVolumes[id] = true
+		} else if vol.PersistentVolumeClaim != nil {
+			pvcName := vol.PersistentVolumeClaim.ClaimName
+			if pvcName == "" {
+				return fmt.Errorf("PersistentVolumeClaim had no name")
+			}
+
+			// Until we know the real ID of the volume, use the namespace/pvcName as a substitute
+			// with a random prefix (calculated and stored inside 'pl' during initialization)
+			// to avoid conflicts with existing volume IDs.
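+			// For example, PVC "data" in namespace "ns1" produces an ID like
+			// "<32-char-random-prefix>-ns1/data" until its real volume ID is known.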
+ pvID := fmt.Sprintf("%s-%s/%s", pl.randomVolumeIDPrefix, namespace, pvcName) + + pvc, err := pl.pvcLister.PersistentVolumeClaims(namespace).Get(pvcName) + if err != nil || pvc == nil { + // If the PVC is invalid, we don't count the volume because + // there's no guarantee that it belongs to the running predicate. + klog.V(4).Infof("Unable to look up PVC info for %s/%s, assuming PVC doesn't match predicate when counting limits: %v", namespace, pvcName, err) + continue + } + + pvName := pvc.Spec.VolumeName + if pvName == "" { + // PVC is not bound. It was either deleted and created again or + // it was forcefully unbound by admin. The pod can still use the + // original PV where it was bound to, so we count the volume if + // it belongs to the running predicate. + if pl.matchProvisioner(pvc) { + klog.V(4).Infof("PVC %s/%s is not bound, assuming PVC matches predicate when counting limits", namespace, pvcName) + filteredVolumes[pvID] = true + } + continue + } + + pv, err := pl.pvLister.Get(pvName) + if err != nil || pv == nil { + // If the PV is invalid and PVC belongs to the running predicate, + // log the error and count the PV towards the PV limit. + if pl.matchProvisioner(pvc) { + klog.V(4).Infof("Unable to look up PV info for %s/%s/%s, assuming PV matches predicate when counting limits: %v", namespace, pvcName, pvName, err) + filteredVolumes[pvID] = true + } + continue + } + + if id, ok := pl.filter.FilterPersistentVolume(pv); ok { + filteredVolumes[id] = true + } + } + } + + return nil +} + +// matchProvisioner helps identify if the given PVC belongs to the running predicate. +func (pl *nonCSILimits) matchProvisioner(pvc *v1.PersistentVolumeClaim) bool { + if pvc.Spec.StorageClassName == nil { + return false + } + + storageClass, err := pl.scLister.Get(*pvc.Spec.StorageClassName) + if err != nil || storageClass == nil { + return false + } + + return pl.filter.MatchProvisioner(storageClass) +} + +// getMaxVolLimitFromEnv checks the max PD volumes environment variable, otherwise returning a default value. +func getMaxVolLimitFromEnv() int { + if rawMaxVols := os.Getenv(predicates.KubeMaxPDVols); rawMaxVols != "" { + if parsedMaxVols, err := strconv.Atoi(rawMaxVols); err != nil { + klog.Errorf("Unable to parse maximum PD volumes value, using default: %v", err) + } else if parsedMaxVols <= 0 { + klog.Errorf("Maximum PD volumes must be a positive value, using default") + } else { + return parsedMaxVols + } + } + + return -1 +} + +// VolumeFilter contains information on how to filter PD Volumes when checking PD Volume caps. +type VolumeFilter struct { + // Filter normal volumes + FilterVolume func(vol *v1.Volume) (id string, relevant bool) + FilterPersistentVolume func(pv *v1.PersistentVolume) (id string, relevant bool) + // MatchProvisioner evaluates if the StorageClass provisioner matches the running predicate + MatchProvisioner func(sc *storage.StorageClass) (relevant bool) + // IsMigrated returns a boolean specifying whether the plugin is migrated to a CSI driver + IsMigrated func(csiNode *storage.CSINode) bool +} + +// ebsVolumeFilter is a VolumeFilter for filtering AWS ElasticBlockStore Volumes. 
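+// It matches in-line AWSElasticBlockStore volumes, PVs with an AWSElasticBlockStore source,
+// and (via MatchProvisioner) unbound PVCs whose StorageClass uses the in-tree AWS EBS provisioner.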
+var ebsVolumeFilter = VolumeFilter{ + FilterVolume: func(vol *v1.Volume) (string, bool) { + if vol.AWSElasticBlockStore != nil { + return vol.AWSElasticBlockStore.VolumeID, true + } + return "", false + }, + + FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) { + if pv.Spec.AWSElasticBlockStore != nil { + return pv.Spec.AWSElasticBlockStore.VolumeID, true + } + return "", false + }, + + MatchProvisioner: func(sc *storage.StorageClass) (relevant bool) { + if sc.Provisioner == csilibplugins.AWSEBSInTreePluginName { + return true + } + return false + }, + + IsMigrated: func(csiNode *storage.CSINode) bool { + return isCSIMigrationOn(csiNode, csilibplugins.AWSEBSInTreePluginName) + }, +} + +// gcePDVolumeFilter is a VolumeFilter for filtering gce PersistentDisk Volumes. +var gcePDVolumeFilter = VolumeFilter{ + FilterVolume: func(vol *v1.Volume) (string, bool) { + if vol.GCEPersistentDisk != nil { + return vol.GCEPersistentDisk.PDName, true + } + return "", false + }, + + FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) { + if pv.Spec.GCEPersistentDisk != nil { + return pv.Spec.GCEPersistentDisk.PDName, true + } + return "", false + }, + + MatchProvisioner: func(sc *storage.StorageClass) (relevant bool) { + if sc.Provisioner == csilibplugins.GCEPDInTreePluginName { + return true + } + return false + }, + + IsMigrated: func(csiNode *storage.CSINode) bool { + return isCSIMigrationOn(csiNode, csilibplugins.GCEPDInTreePluginName) + }, +} + +// azureDiskVolumeFilter is a VolumeFilter for filtering azure Disk Volumes. +var azureDiskVolumeFilter = VolumeFilter{ + FilterVolume: func(vol *v1.Volume) (string, bool) { + if vol.AzureDisk != nil { + return vol.AzureDisk.DiskName, true + } + return "", false + }, + + FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) { + if pv.Spec.AzureDisk != nil { + return pv.Spec.AzureDisk.DiskName, true + } + return "", false + }, + + MatchProvisioner: func(sc *storage.StorageClass) (relevant bool) { + if sc.Provisioner == csilibplugins.AzureDiskInTreePluginName { + return true + } + return false + }, + + IsMigrated: func(csiNode *storage.CSINode) bool { + return isCSIMigrationOn(csiNode, csilibplugins.AzureDiskInTreePluginName) + }, +} + +// cinderVolumeFilter is a VolumeFilter for filtering cinder Volumes. +// It will be deprecated once Openstack cloudprovider has been removed from in-tree. 
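+// Like the filters above, it matches in-line Cinder volumes, Cinder-backed PVs, and
+// StorageClasses using the in-tree Cinder provisioner.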
+var cinderVolumeFilter = VolumeFilter{ + FilterVolume: func(vol *v1.Volume) (string, bool) { + if vol.Cinder != nil { + return vol.Cinder.VolumeID, true + } + return "", false + }, + + FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) { + if pv.Spec.Cinder != nil { + return pv.Spec.Cinder.VolumeID, true + } + return "", false + }, + + MatchProvisioner: func(sc *storage.StorageClass) (relevant bool) { + if sc.Provisioner == csilibplugins.CinderInTreePluginName { + return true + } + return false + }, + + IsMigrated: func(csiNode *storage.CSINode) bool { + return isCSIMigrationOn(csiNode, csilibplugins.CinderInTreePluginName) + }, +} + +func getMaxVolumeFunc(filterName string) func(node *v1.Node) int { + return func(node *v1.Node) int { + maxVolumesFromEnv := getMaxVolLimitFromEnv() + if maxVolumesFromEnv > 0 { + return maxVolumesFromEnv + } + + var nodeInstanceType string + for k, v := range node.ObjectMeta.Labels { + if k == v1.LabelInstanceType || k == v1.LabelInstanceTypeStable { + nodeInstanceType = v + break + } + } + switch filterName { + case ebsVolumeFilterType: + return getMaxEBSVolume(nodeInstanceType) + case gcePDVolumeFilterType: + return defaultMaxGCEPDVolumes + case azureDiskVolumeFilterType: + return defaultMaxAzureDiskVolumes + case cinderVolumeFilterType: + return volumeutil.DefaultMaxCinderVolumes + default: + return -1 + } + } +} + +func getMaxEBSVolume(nodeInstanceType string) int { + if ok, _ := regexp.MatchString(volumeutil.EBSNitroLimitRegex, nodeInstanceType); ok { + return volumeutil.DefaultMaxEBSNitroVolumeLimit + } + return volumeutil.DefaultMaxEBSVolumes +} + +// getCSINodeListerIfEnabled returns the CSINode lister or nil if the feature is disabled +func getCSINodeListerIfEnabled(factory informers.SharedInformerFactory) storagelisters.CSINodeLister { + if !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.CSINodeInfo) { + return nil + } + return factory.Storage().V1().CSINodes().Lister() +} diff --git a/pkg/scheduler/algorithm/predicates/max_attachable_volume_predicate_test.go b/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi_test.go similarity index 52% rename from pkg/scheduler/algorithm/predicates/max_attachable_volume_predicate_test.go rename to pkg/scheduler/framework/plugins/nodevolumelimits/non_csi_test.go index 7e4fec96440..b8d48a06671 100644 --- a/pkg/scheduler/algorithm/predicates/max_attachable_volume_predicate_test.go +++ b/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,80 +14,429 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package predicates +package nodevolumelimits import ( + "context" "os" "reflect" "strings" "testing" v1 "k8s.io/api/core/v1" - storagev1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" csilibplugins "k8s.io/csi-translation-lib/plugins" + "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake" - schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" - volumeutil "k8s.io/kubernetes/pkg/volume/util" utilpointer "k8s.io/utils/pointer" ) -func getVolumeLimitKey(filterType string) v1.ResourceName { - switch filterType { - case EBSVolumeFilterType: - return v1.ResourceName(volumeutil.EBSVolumeLimitKey) - case GCEPDVolumeFilterType: - return v1.ResourceName(volumeutil.GCEVolumeLimitKey) - case AzureDiskVolumeFilterType: - return v1.ResourceName(volumeutil.AzureVolumeLimitKey) - case CinderVolumeFilterType: - return v1.ResourceName(volumeutil.CinderVolumeLimitKey) - default: - return v1.ResourceName(volumeutil.GetCSIAttachLimitKey(filterType)) +func TestAzureDiskLimits(t *testing.T) { + oneVolPod := &v1.Pod{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ + { + VolumeSource: v1.VolumeSource{ + AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "ovp"}, + }, + }, + }, + }, } -} - -func onePVCPod(filterName string) *v1.Pod { - return &v1.Pod{ + twoVolPod := &v1.Pod{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ + { + VolumeSource: v1.VolumeSource{ + AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp1"}, + }, + }, + { + VolumeSource: v1.VolumeSource{ + AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp2"}, + }, + }, + }, + }, + } + splitVolsPod := &v1.Pod{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ + { + VolumeSource: v1.VolumeSource{ + HostPath: &v1.HostPathVolumeSource{}, + }, + }, + { + VolumeSource: v1.VolumeSource{ + AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "svp"}, + }, + }, + }, + }, + } + nonApplicablePod := &v1.Pod{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ + { + VolumeSource: v1.VolumeSource{ + HostPath: &v1.HostPathVolumeSource{}, + }, + }, + }, + }, + } + deletedPVCPod := &v1.Pod{ Spec: v1.PodSpec{ Volumes: []v1.Volume{ { VolumeSource: v1.VolumeSource{ PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "some" + filterName + "Vol", + ClaimName: "deletedPVC", }, }, }, }, }, } -} - -func splitPVCPod(filterName string) *v1.Pod { - return &v1.Pod{ + twoDeletedPVCPod := &v1.Pod{ Spec: v1.PodSpec{ Volumes: []v1.Volume{ { VolumeSource: v1.VolumeSource{ PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "someNon" + filterName + "Vol", + ClaimName: "deletedPVC", }, }, }, { VolumeSource: v1.VolumeSource{ PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "some" + filterName + "Vol", + ClaimName: "anotherDeletedPVC", }, }, }, }, }, } + deletedPVPod := &v1.Pod{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ + { + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: "pvcWithDeletedPV", + }, + }, + }, + }, + }, + } + // deletedPVPod2 is a different pod than deletedPVPod but using the same PVC + deletedPVPod2 := &v1.Pod{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ + { + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: "pvcWithDeletedPV", + }, + }, 
+ }, + }, + }, + } + // anotherDeletedPVPod is a different pod than deletedPVPod and uses another PVC + anotherDeletedPVPod := &v1.Pod{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ + { + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: "anotherPVCWithDeletedPV", + }, + }, + }, + }, + }, + } + emptyPod := &v1.Pod{ + Spec: v1.PodSpec{}, + } + unboundPVCPod := &v1.Pod{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ + { + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: "unboundPVC", + }, + }, + }, + }, + }, + } + // Different pod than unboundPVCPod, but using the same unbound PVC + unboundPVCPod2 := &v1.Pod{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ + { + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: "unboundPVC", + }, + }, + }, + }, + }, + } + + // pod with unbound PVC that's different to unboundPVC + anotherUnboundPVCPod := &v1.Pod{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ + { + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: "anotherUnboundPVC", + }, + }, + }, + }, + }, + } + + tests := []struct { + newPod *v1.Pod + existingPods []*v1.Pod + filterName string + driverName string + maxVols int + test string + wantStatus *framework.Status + }{ + { + newPod: oneVolPod, + existingPods: []*v1.Pod{twoVolPod, oneVolPod}, + filterName: azureDiskVolumeFilterType, + maxVols: 4, + test: "fits when node capacity >= new pod's AzureDisk volumes", + }, + { + newPod: twoVolPod, + existingPods: []*v1.Pod{oneVolPod}, + filterName: azureDiskVolumeFilterType, + maxVols: 2, + test: "fit when node capacity < new pod's AzureDisk volumes", + }, + { + newPod: splitVolsPod, + existingPods: []*v1.Pod{twoVolPod}, + filterName: azureDiskVolumeFilterType, + maxVols: 3, + test: "new pod's count ignores non-AzureDisk volumes", + }, + { + newPod: twoVolPod, + existingPods: []*v1.Pod{splitVolsPod, nonApplicablePod, emptyPod}, + filterName: azureDiskVolumeFilterType, + maxVols: 3, + test: "existing pods' counts ignore non-AzureDisk volumes", + }, + { + newPod: onePVCPod(azureDiskVolumeFilterType), + existingPods: []*v1.Pod{splitVolsPod, nonApplicablePod, emptyPod}, + filterName: azureDiskVolumeFilterType, + maxVols: 3, + test: "new pod's count considers PVCs backed by AzureDisk volumes", + }, + { + newPod: splitPVCPod(azureDiskVolumeFilterType), + existingPods: []*v1.Pod{splitVolsPod, oneVolPod}, + filterName: azureDiskVolumeFilterType, + maxVols: 3, + test: "new pod's count ignores PVCs not backed by AzureDisk volumes", + }, + { + newPod: twoVolPod, + existingPods: []*v1.Pod{oneVolPod, onePVCPod(azureDiskVolumeFilterType)}, + filterName: azureDiskVolumeFilterType, + maxVols: 3, + test: "existing pods' counts considers PVCs backed by AzureDisk volumes", + }, + { + newPod: twoVolPod, + existingPods: []*v1.Pod{oneVolPod, twoVolPod, onePVCPod(azureDiskVolumeFilterType)}, + filterName: azureDiskVolumeFilterType, + maxVols: 4, + test: "already-mounted AzureDisk volumes are always ok to allow", + }, + { + newPod: splitVolsPod, + existingPods: []*v1.Pod{oneVolPod, oneVolPod, onePVCPod(azureDiskVolumeFilterType)}, + filterName: azureDiskVolumeFilterType, + maxVols: 3, + test: "the same AzureDisk volumes are not counted multiple times", + }, + { + newPod: onePVCPod(azureDiskVolumeFilterType), + existingPods: []*v1.Pod{oneVolPod, deletedPVCPod}, + filterName: azureDiskVolumeFilterType, + maxVols: 
2, + test: "pod with missing PVC is counted towards the PV limit", + }, + { + newPod: onePVCPod(azureDiskVolumeFilterType), + existingPods: []*v1.Pod{oneVolPod, deletedPVCPod}, + filterName: azureDiskVolumeFilterType, + maxVols: 3, + test: "pod with missing PVC is counted towards the PV limit", + }, + { + newPod: onePVCPod(azureDiskVolumeFilterType), + existingPods: []*v1.Pod{oneVolPod, twoDeletedPVCPod}, + filterName: azureDiskVolumeFilterType, + maxVols: 3, + test: "pod with missing two PVCs is counted towards the PV limit twice", + }, + { + newPod: onePVCPod(azureDiskVolumeFilterType), + existingPods: []*v1.Pod{oneVolPod, deletedPVPod}, + filterName: azureDiskVolumeFilterType, + maxVols: 2, + test: "pod with missing PV is counted towards the PV limit", + }, + { + newPod: onePVCPod(azureDiskVolumeFilterType), + existingPods: []*v1.Pod{oneVolPod, deletedPVPod}, + filterName: azureDiskVolumeFilterType, + maxVols: 3, + test: "pod with missing PV is counted towards the PV limit", + }, + { + newPod: deletedPVPod2, + existingPods: []*v1.Pod{oneVolPod, deletedPVPod}, + filterName: azureDiskVolumeFilterType, + maxVols: 2, + test: "two pods missing the same PV are counted towards the PV limit only once", + }, + { + newPod: anotherDeletedPVPod, + existingPods: []*v1.Pod{oneVolPod, deletedPVPod}, + filterName: azureDiskVolumeFilterType, + maxVols: 2, + test: "two pods missing different PVs are counted towards the PV limit twice", + }, + { + newPod: onePVCPod(azureDiskVolumeFilterType), + existingPods: []*v1.Pod{oneVolPod, unboundPVCPod}, + filterName: azureDiskVolumeFilterType, + maxVols: 2, + test: "pod with unbound PVC is counted towards the PV limit", + }, + { + newPod: onePVCPod(azureDiskVolumeFilterType), + existingPods: []*v1.Pod{oneVolPod, unboundPVCPod}, + filterName: azureDiskVolumeFilterType, + maxVols: 3, + test: "pod with unbound PVC is counted towards the PV limit", + }, + { + newPod: unboundPVCPod2, + existingPods: []*v1.Pod{oneVolPod, unboundPVCPod}, + filterName: azureDiskVolumeFilterType, + maxVols: 2, + test: "the same unbound PVC in multiple pods is counted towards the PV limit only once", + }, + { + newPod: anotherUnboundPVCPod, + existingPods: []*v1.Pod{oneVolPod, unboundPVCPod}, + filterName: azureDiskVolumeFilterType, + maxVols: 2, + test: "two different unbound PVCs are counted towards the PV limit as two volumes", + }, + } + + for _, test := range tests { + t.Run(test.test, func(t *testing.T) { + node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName) + p := newNonCSILimits(test.filterName, getFakeCSINodeLister(csiNode), getFakeCSIStorageClassLister(test.filterName, test.driverName), getFakePVLister(test.filterName), getFakePVCLister(test.filterName)).(framework.FilterPlugin) + gotStatus := p.Filter(context.Background(), nil, test.newPod, node) + if !reflect.DeepEqual(gotStatus, test.wantStatus) { + t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus) + } + }) + } } -func TestVolumeCountConflicts(t *testing.T) { +func TestCinderLimits(t *testing.T) { + twoVolCinderPod := &v1.Pod{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ + { + VolumeSource: v1.VolumeSource{ + Cinder: &v1.CinderVolumeSource{VolumeID: "tvp1"}, + }, + }, + { + VolumeSource: v1.VolumeSource{ + Cinder: &v1.CinderVolumeSource{VolumeID: "tvp2"}, + }, + }, + }, + }, + } + oneVolCinderPod := &v1.Pod{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ + { + VolumeSource: v1.VolumeSource{ + Cinder: &v1.CinderVolumeSource{VolumeID: "ovp"}, + }, + 
}, + }, + }, + } + + tests := []struct { + newPod *v1.Pod + existingPods []*v1.Pod + filterName string + driverName string + maxVols int + test string + wantStatus *framework.Status + }{ + { + newPod: oneVolCinderPod, + existingPods: []*v1.Pod{twoVolCinderPod}, + filterName: cinderVolumeFilterType, + maxVols: 4, + test: "fits when node capacity >= new pod's Cinder volumes", + }, + { + newPod: oneVolCinderPod, + existingPods: []*v1.Pod{twoVolCinderPod}, + filterName: cinderVolumeFilterType, + maxVols: 2, + test: "not fit when node capacity < new pod's Cinder volumes", + wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()), + }, + } + + for _, test := range tests { + t.Run(test.test, func(t *testing.T) { + node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName) + p := newNonCSILimits(test.filterName, getFakeCSINodeLister(csiNode), getFakeCSIStorageClassLister(test.filterName, test.driverName), getFakePVLister(test.filterName), getFakePVCLister(test.filterName)).(framework.FilterPlugin) + gotStatus := p.Filter(context.Background(), nil, test.newPod, node) + if !reflect.DeepEqual(gotStatus, test.wantStatus) { + t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus) + } + }) + } +} +func TestEBSLimits(t *testing.T) { oneVolPod := &v1.Pod{ Spec: v1.PodSpec{ Volumes: []v1.Volume{ @@ -287,28 +636,383 @@ func TestVolumeCountConflicts(t *testing.T) { }, }, } - twoVolCinderPod := &v1.Pod{ + + tests := []struct { + newPod *v1.Pod + existingPods []*v1.Pod + filterName string + driverName string + maxVols int + test string + wantStatus *framework.Status + }{ + { + newPod: oneVolPod, + existingPods: []*v1.Pod{twoVolPod, oneVolPod}, + filterName: ebsVolumeFilterType, + driverName: csilibplugins.AWSEBSInTreePluginName, + maxVols: 4, + test: "fits when node capacity >= new pod's EBS volumes", + }, + { + newPod: twoVolPod, + existingPods: []*v1.Pod{oneVolPod}, + filterName: ebsVolumeFilterType, + driverName: csilibplugins.AWSEBSInTreePluginName, + maxVols: 2, + test: "doesn't fit when node capacity < new pod's EBS volumes", + wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()), + }, + { + newPod: splitVolsPod, + existingPods: []*v1.Pod{twoVolPod}, + filterName: ebsVolumeFilterType, + driverName: csilibplugins.AWSEBSInTreePluginName, + maxVols: 3, + test: "new pod's count ignores non-EBS volumes", + }, + { + newPod: twoVolPod, + existingPods: []*v1.Pod{splitVolsPod, nonApplicablePod, emptyPod}, + filterName: ebsVolumeFilterType, + driverName: csilibplugins.AWSEBSInTreePluginName, + maxVols: 3, + test: "existing pods' counts ignore non-EBS volumes", + }, + { + newPod: onePVCPod(ebsVolumeFilterType), + existingPods: []*v1.Pod{splitVolsPod, nonApplicablePod, emptyPod}, + filterName: ebsVolumeFilterType, + driverName: csilibplugins.AWSEBSInTreePluginName, + maxVols: 3, + test: "new pod's count considers PVCs backed by EBS volumes", + }, + { + newPod: splitPVCPod(ebsVolumeFilterType), + existingPods: []*v1.Pod{splitVolsPod, oneVolPod}, + filterName: ebsVolumeFilterType, + driverName: csilibplugins.AWSEBSInTreePluginName, + maxVols: 3, + test: "new pod's count ignores PVCs not backed by EBS volumes", + }, + { + newPod: twoVolPod, + existingPods: []*v1.Pod{oneVolPod, onePVCPod(ebsVolumeFilterType)}, + filterName: ebsVolumeFilterType, + driverName: csilibplugins.AWSEBSInTreePluginName, + maxVols: 3, + test: "existing pods' counts 
considers PVCs backed by EBS volumes", + wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()), + }, + { + newPod: twoVolPod, + existingPods: []*v1.Pod{oneVolPod, twoVolPod, onePVCPod(ebsVolumeFilterType)}, + filterName: ebsVolumeFilterType, + driverName: csilibplugins.AWSEBSInTreePluginName, + maxVols: 4, + test: "already-mounted EBS volumes are always ok to allow", + }, + { + newPod: splitVolsPod, + existingPods: []*v1.Pod{oneVolPod, oneVolPod, onePVCPod(ebsVolumeFilterType)}, + filterName: ebsVolumeFilterType, + driverName: csilibplugins.AWSEBSInTreePluginName, + maxVols: 3, + test: "the same EBS volumes are not counted multiple times", + }, + { + newPod: onePVCPod(ebsVolumeFilterType), + existingPods: []*v1.Pod{oneVolPod, deletedPVCPod}, + filterName: ebsVolumeFilterType, + driverName: csilibplugins.AWSEBSInTreePluginName, + maxVols: 1, + test: "missing PVC is not counted towards the PV limit", + wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()), + }, + { + newPod: onePVCPod(ebsVolumeFilterType), + existingPods: []*v1.Pod{oneVolPod, deletedPVCPod}, + filterName: ebsVolumeFilterType, + driverName: csilibplugins.AWSEBSInTreePluginName, + maxVols: 2, + test: "missing PVC is not counted towards the PV limit", + }, + { + newPod: onePVCPod(ebsVolumeFilterType), + existingPods: []*v1.Pod{oneVolPod, twoDeletedPVCPod}, + filterName: ebsVolumeFilterType, + driverName: csilibplugins.AWSEBSInTreePluginName, + maxVols: 2, + test: "two missing PVCs are not counted towards the PV limit twice", + }, + { + newPod: unboundPVCwithInvalidSCPod, + existingPods: []*v1.Pod{oneVolPod}, + filterName: ebsVolumeFilterType, + driverName: csilibplugins.AWSEBSInTreePluginName, + maxVols: 1, + test: "unbound PVC with invalid SC is not counted towards the PV limit", + }, + { + newPod: unboundPVCwithDefaultSCPod, + existingPods: []*v1.Pod{oneVolPod}, + filterName: ebsVolumeFilterType, + driverName: csilibplugins.AWSEBSInTreePluginName, + maxVols: 1, + test: "unbound PVC from different provisioner is not counted towards the PV limit", + }, + + { + newPod: onePVCPod(ebsVolumeFilterType), + existingPods: []*v1.Pod{oneVolPod, deletedPVPod}, + filterName: ebsVolumeFilterType, + driverName: csilibplugins.AWSEBSInTreePluginName, + maxVols: 2, + test: "pod with missing PV is counted towards the PV limit", + wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()), + }, + { + newPod: onePVCPod(ebsVolumeFilterType), + existingPods: []*v1.Pod{oneVolPod, deletedPVPod}, + filterName: ebsVolumeFilterType, + driverName: csilibplugins.AWSEBSInTreePluginName, + maxVols: 3, + test: "pod with missing PV is counted towards the PV limit", + }, + { + newPod: deletedPVPod2, + existingPods: []*v1.Pod{oneVolPod, deletedPVPod}, + filterName: ebsVolumeFilterType, + driverName: csilibplugins.AWSEBSInTreePluginName, + maxVols: 2, + test: "two pods missing the same PV are counted towards the PV limit only once", + }, + { + newPod: anotherDeletedPVPod, + existingPods: []*v1.Pod{oneVolPod, deletedPVPod}, + filterName: ebsVolumeFilterType, + driverName: csilibplugins.AWSEBSInTreePluginName, + maxVols: 2, + test: "two pods missing different PVs are counted towards the PV limit twice", + wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()), + }, + { + newPod: onePVCPod(ebsVolumeFilterType), + existingPods: []*v1.Pod{oneVolPod, unboundPVCPod}, 
+			filterName:   ebsVolumeFilterType,
+			driverName:   csilibplugins.AWSEBSInTreePluginName,
+			maxVols:      2,
+			test:         "pod with unbound PVC is counted towards the PV limit",
+			wantStatus:   framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+		},
+		{
+			newPod:       onePVCPod(ebsVolumeFilterType),
+			existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
+			filterName:   ebsVolumeFilterType,
+			driverName:   csilibplugins.AWSEBSInTreePluginName,
+			maxVols:      3,
+			test:         "pod with unbound PVC is counted towards the PV limit",
+		},
+		{
+			newPod:       unboundPVCPod2,
+			existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
+			filterName:   ebsVolumeFilterType,
+			driverName:   csilibplugins.AWSEBSInTreePluginName,
+			maxVols:      2,
+			test:         "the same unbound PVC in multiple pods is counted towards the PV limit only once",
+		},
+		{
+			newPod:       anotherUnboundPVCPod,
+			existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
+			filterName:   ebsVolumeFilterType,
+			driverName:   csilibplugins.AWSEBSInTreePluginName,
+			maxVols:      2,
+			test:         "two different unbound PVCs are counted towards the PV limit as two volumes",
+			wantStatus:   framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.test, func(t *testing.T) {
+			node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName)
+			p := newNonCSILimits(test.filterName, getFakeCSINodeLister(csiNode), getFakeCSIStorageClassLister(test.filterName, test.driverName), getFakePVLister(test.filterName), getFakePVCLister(test.filterName)).(framework.FilterPlugin)
+			gotStatus := p.Filter(context.Background(), nil, test.newPod, node)
+			if !reflect.DeepEqual(gotStatus, test.wantStatus) {
+				t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
+			}
+		})
+	}
+}
+
+func TestGCEPDLimits(t *testing.T) {
+	oneVolPod := &v1.Pod{
 		Spec: v1.PodSpec{
 			Volumes: []v1.Volume{
 				{
 					VolumeSource: v1.VolumeSource{
-						Cinder: &v1.CinderVolumeSource{VolumeID: "tvp1"},
-					},
-				},
-				{
-					VolumeSource: v1.VolumeSource{
-						Cinder: &v1.CinderVolumeSource{VolumeID: "tvp2"},
+						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "ovp"},
 					},
 				},
 			},
 		},
 	}
-	oneVolCinderPod := &v1.Pod{
+	twoVolPod := &v1.Pod{
 		Spec: v1.PodSpec{
 			Volumes: []v1.Volume{
 				{
 					VolumeSource: v1.VolumeSource{
-						Cinder: &v1.CinderVolumeSource{VolumeID: "ovp"},
+						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp1"},
+					},
+				},
+				{
+					VolumeSource: v1.VolumeSource{
+						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp2"},
+					},
+				},
+			},
+		},
+	}
+	splitVolsPod := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						HostPath: &v1.HostPathVolumeSource{},
+					},
+				},
+				{
+					VolumeSource: v1.VolumeSource{
+						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "svp"},
+					},
+				},
+			},
+		},
+	}
+	nonApplicablePod := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						HostPath: &v1.HostPathVolumeSource{},
+					},
+				},
+			},
+		},
+	}
+	deletedPVCPod := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "deletedPVC",
+						},
+					},
+				},
+			},
+		},
+	}
+	twoDeletedPVCPod := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "deletedPVC",
+						},
+					},
+				},
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "anotherDeletedPVC",
+						},
+					},
+				},
+			},
+		},
+	}
+	deletedPVPod := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "pvcWithDeletedPV",
+						},
+					},
+				},
+			},
+		},
+	}
+	// deletedPVPod2 is a different pod than deletedPVPod but using the same PVC
+	deletedPVPod2 := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "pvcWithDeletedPV",
+						},
+					},
+				},
+			},
+		},
+	}
+	// anotherDeletedPVPod is a different pod than deletedPVPod and uses another PVC
+	anotherDeletedPVPod := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "anotherPVCWithDeletedPV",
+						},
+					},
+				},
+			},
+		},
+	}
+	emptyPod := &v1.Pod{
+		Spec: v1.PodSpec{},
+	}
+	unboundPVCPod := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "unboundPVC",
+						},
+					},
+				},
+			},
+		},
+	}
+	// Different pod than unboundPVCPod, but using the same unbound PVC
+	unboundPVCPod2 := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "unboundPVC",
+						},
+					},
+				},
+			},
+		},
+	}
+
+	// pod with unbound PVC that's different to unboundPVC
+	anotherUnboundPVCPod := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "anotherUnboundPVC",
+						},
+					},
 				},
 			},
 		},
@@ -319,594 +1023,203 @@ func TestVolumeCountConflicts(t *testing.T) {
 		newPod       *v1.Pod
 		existingPods []*v1.Pod
 		filterName   string
+		driverName   string
 		maxVols      int
-		fits         bool
 		test         string
+		wantStatus   *framework.Status
 	}{
-		// filterName:EBSVolumeFilterType
 		{
 			newPod:       oneVolPod,
 			existingPods: []*v1.Pod{twoVolPod, oneVolPod},
-			filterName:   EBSVolumeFilterType,
+			filterName:   gcePDVolumeFilterType,
 			maxVols:      4,
-			fits:         true,
-			test:         "fits when node capacity >= new pod's EBS volumes",
-		},
-		{
-			newPod:       twoVolPod,
-			existingPods: []*v1.Pod{oneVolPod},
-			filterName:   EBSVolumeFilterType,
-			maxVols:      2,
-			fits:         false,
-			test:         "doesn't fit when node capacity < new pod's EBS volumes",
-		},
-		{
-			newPod:       splitVolsPod,
-			existingPods: []*v1.Pod{twoVolPod},
-			filterName:   EBSVolumeFilterType,
-			maxVols:      3,
-			fits:         true,
-			test:         "new pod's count ignores non-EBS volumes",
-		},
-		{
-			newPod:       twoVolPod,
-			existingPods: []*v1.Pod{splitVolsPod, nonApplicablePod, emptyPod},
-			filterName:   EBSVolumeFilterType,
-			maxVols:      3,
-			fits:         true,
-			test:         "existing pods' counts ignore non-EBS volumes",
-		},
-		{
-			newPod:       onePVCPod(EBSVolumeFilterType),
-			existingPods: []*v1.Pod{splitVolsPod, nonApplicablePod, emptyPod},
-			filterName:   EBSVolumeFilterType,
-			maxVols:      3,
-			fits:         true,
-			test:         "new pod's count considers PVCs backed by EBS volumes",
-		},
-		{
-			newPod:       splitPVCPod(EBSVolumeFilterType),
-			existingPods: []*v1.Pod{splitVolsPod, oneVolPod},
-			filterName:   EBSVolumeFilterType,
-			maxVols:      3,
-			fits:         true,
-			test:         "new pod's count ignores PVCs not backed by EBS volumes",
-		},
-		{
-			newPod:       twoVolPod,
-			existingPods: []*v1.Pod{oneVolPod, onePVCPod(EBSVolumeFilterType)},
-			filterName:   EBSVolumeFilterType,
-			maxVols:      3,
-			fits:         false,
-			test:         "existing pods' counts considers PVCs backed by EBS volumes",
-		},
-		{
-			newPod:       twoVolPod,
-			existingPods: []*v1.Pod{oneVolPod, twoVolPod, onePVCPod(EBSVolumeFilterType)},
-			filterName:   EBSVolumeFilterType,
-			maxVols:      4,
-			fits:         true,
-			test:         "already-mounted EBS volumes are always ok to allow",
-		},
-		{
-			newPod:       splitVolsPod,
-			existingPods: []*v1.Pod{oneVolPod, oneVolPod, onePVCPod(EBSVolumeFilterType)},
-			filterName:   EBSVolumeFilterType,
-			maxVols:      3,
-			fits:         true,
-			test:         "the same EBS volumes are not counted multiple times",
-		},
-		{
-			newPod:       onePVCPod(EBSVolumeFilterType),
-			existingPods: []*v1.Pod{oneVolPod, deletedPVCPod},
-			filterName:   EBSVolumeFilterType,
-			maxVols:      1,
-			fits:         false,
-			test:         "missing PVC is not counted towards the PV limit",
-		},
-		{
-			newPod:       onePVCPod(EBSVolumeFilterType),
-			existingPods: []*v1.Pod{oneVolPod, deletedPVCPod},
-			filterName:   EBSVolumeFilterType,
-			maxVols:      2,
-			fits:         true,
-			test:         "missing PVC is not counted towards the PV limit",
-		},
-		{
-			newPod:       onePVCPod(EBSVolumeFilterType),
-			existingPods: []*v1.Pod{oneVolPod, twoDeletedPVCPod},
-			filterName:   EBSVolumeFilterType,
-			maxVols:      2,
-			fits:         true,
-			test:         "two missing PVCs are not counted towards the PV limit twice",
-		},
-		{
-			newPod:       unboundPVCwithInvalidSCPod,
-			existingPods: []*v1.Pod{oneVolPod},
-			filterName:   EBSVolumeFilterType,
-			maxVols:      1,
-			fits:         true,
-			test:         "unbound PVC with invalid SC is not counted towards the PV limit",
-		},
-		{
-			newPod:       unboundPVCwithDefaultSCPod,
-			existingPods: []*v1.Pod{oneVolPod},
-			filterName:   EBSVolumeFilterType,
-			maxVols:      1,
-			fits:         true,
-			test:         "unbound PVC from different provisioner is not counted towards the PV limit",
-		},
-
-		{
-			newPod:       onePVCPod(EBSVolumeFilterType),
-			existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
-			filterName:   EBSVolumeFilterType,
-			maxVols:      2,
-			fits:         false,
-			test:         "pod with missing PV is counted towards the PV limit",
-		},
-		{
-			newPod:       onePVCPod(EBSVolumeFilterType),
-			existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
-			filterName:   EBSVolumeFilterType,
-			maxVols:      3,
-			fits:         true,
-			test:         "pod with missing PV is counted towards the PV limit",
-		},
-		{
-			newPod:       deletedPVPod2,
-			existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
-			filterName:   EBSVolumeFilterType,
-			maxVols:      2,
-			fits:         true,
-			test:         "two pods missing the same PV are counted towards the PV limit only once",
-		},
-		{
-			newPod:       anotherDeletedPVPod,
-			existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
-			filterName:   EBSVolumeFilterType,
-			maxVols:      2,
-			fits:         false,
-			test:         "two pods missing different PVs are counted towards the PV limit twice",
-		},
-		{
-			newPod:       onePVCPod(EBSVolumeFilterType),
-			existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
-			filterName:   EBSVolumeFilterType,
-			maxVols:      2,
-			fits:         false,
-			test:         "pod with unbound PVC is counted towards the PV limit",
-		},
-		{
-			newPod:       onePVCPod(EBSVolumeFilterType),
-			existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
-			filterName:   EBSVolumeFilterType,
-			maxVols:      3,
-			fits:         true,
-			test:         "pod with unbound PVC is counted towards the PV limit",
-		},
-		{
-			newPod:       unboundPVCPod2,
-			existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
-			filterName:   EBSVolumeFilterType,
-			maxVols:      2,
-			fits:         true,
-			test:         "the same unbound PVC in multiple pods is counted towards the PV limit only once",
-		},
-		{
-			newPod:       anotherUnboundPVCPod,
-			existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
-			filterName:   EBSVolumeFilterType,
-			maxVols:      2,
-			fits:         false,
-			test:         "two different unbound PVCs are counted towards the PV limit as two volumes",
-		},
-		// filterName:GCEPDVolumeFilterType
-		{
-			newPod:       oneVolPod,
-			existingPods: []*v1.Pod{twoVolPod, oneVolPod},
-			filterName:   GCEPDVolumeFilterType,
-			maxVols:      4,
-			fits:         true,
 			test:         "fits when node capacity >= new pod's GCE volumes",
 		},
 		{
 			newPod:       twoVolPod,
 			existingPods: []*v1.Pod{oneVolPod},
-			filterName:   GCEPDVolumeFilterType,
+			filterName:   gcePDVolumeFilterType,
 			maxVols:      2,
-			fits:         true,
 			test:         "fit when node capacity < new pod's GCE volumes",
 		},
 		{
 			newPod:       splitVolsPod,
 			existingPods: []*v1.Pod{twoVolPod},
-			filterName:   GCEPDVolumeFilterType,
+			filterName:   gcePDVolumeFilterType,
 			maxVols:      3,
-			fits:         true,
 			test:         "new pod's count ignores non-GCE volumes",
 		},
 		{
 			newPod:       twoVolPod,
 			existingPods: []*v1.Pod{splitVolsPod, nonApplicablePod, emptyPod},
-			filterName:   GCEPDVolumeFilterType,
+			filterName:   gcePDVolumeFilterType,
 			maxVols:      3,
-			fits:         true,
 			test:         "existing pods' counts ignore non-GCE volumes",
 		},
 		{
-			newPod:       onePVCPod(GCEPDVolumeFilterType),
+			newPod:       onePVCPod(gcePDVolumeFilterType),
 			existingPods: []*v1.Pod{splitVolsPod, nonApplicablePod, emptyPod},
-			filterName:   GCEPDVolumeFilterType,
+			filterName:   gcePDVolumeFilterType,
 			maxVols:      3,
-			fits:         true,
 			test:         "new pod's count considers PVCs backed by GCE volumes",
 		},
 		{
-			newPod:       splitPVCPod(GCEPDVolumeFilterType),
+			newPod:       splitPVCPod(gcePDVolumeFilterType),
 			existingPods: []*v1.Pod{splitVolsPod, oneVolPod},
-			filterName:   GCEPDVolumeFilterType,
+			filterName:   gcePDVolumeFilterType,
 			maxVols:      3,
-			fits:         true,
 			test:         "new pod's count ignores PVCs not backed by GCE volumes",
 		},
 		{
 			newPod:       twoVolPod,
-			existingPods: []*v1.Pod{oneVolPod, onePVCPod(GCEPDVolumeFilterType)},
-			filterName:   GCEPDVolumeFilterType,
+			existingPods: []*v1.Pod{oneVolPod, onePVCPod(gcePDVolumeFilterType)},
+			filterName:   gcePDVolumeFilterType,
 			maxVols:      3,
-			fits:         true,
 			test:         "existing pods' counts considers PVCs backed by GCE volumes",
 		},
 		{
 			newPod:       twoVolPod,
-			existingPods: []*v1.Pod{oneVolPod, twoVolPod, onePVCPod(GCEPDVolumeFilterType)},
-			filterName:   GCEPDVolumeFilterType,
+			existingPods: []*v1.Pod{oneVolPod, twoVolPod, onePVCPod(gcePDVolumeFilterType)},
+			filterName:   gcePDVolumeFilterType,
 			maxVols:      4,
-			fits:         true,
 			test:         "already-mounted EBS volumes are always ok to allow",
 		},
 		{
 			newPod:       splitVolsPod,
-			existingPods: []*v1.Pod{oneVolPod, oneVolPod, onePVCPod(GCEPDVolumeFilterType)},
-			filterName:   GCEPDVolumeFilterType,
+			existingPods: []*v1.Pod{oneVolPod, oneVolPod, onePVCPod(gcePDVolumeFilterType)},
+			filterName:   gcePDVolumeFilterType,
 			maxVols:      3,
-			fits:         true,
 			test:         "the same GCE volumes are not counted multiple times",
 		},
 		{
-			newPod:       onePVCPod(GCEPDVolumeFilterType),
+			newPod:       onePVCPod(gcePDVolumeFilterType),
 			existingPods: []*v1.Pod{oneVolPod, deletedPVCPod},
-			filterName:   GCEPDVolumeFilterType,
+			filterName:   gcePDVolumeFilterType,
 			maxVols:      2,
-			fits:         true,
 			test:         "pod with missing PVC is counted towards the PV limit",
 		},
 		{
-			newPod:       onePVCPod(GCEPDVolumeFilterType),
+			newPod:       onePVCPod(gcePDVolumeFilterType),
 			existingPods: []*v1.Pod{oneVolPod, deletedPVCPod},
-			filterName:   GCEPDVolumeFilterType,
+			filterName:   gcePDVolumeFilterType,
 			maxVols:      3,
-			fits:         true,
 			test:         "pod with missing PVC is counted towards the PV limit",
 		},
 		{
-			newPod:       onePVCPod(GCEPDVolumeFilterType),
+			newPod:       onePVCPod(gcePDVolumeFilterType),
 			existingPods: []*v1.Pod{oneVolPod, twoDeletedPVCPod},
-			filterName:   GCEPDVolumeFilterType,
+			filterName:   gcePDVolumeFilterType,
 			maxVols:      3,
-			fits:         true,
 			test:         "pod with missing two PVCs is counted towards the PV limit twice",
 		},
 		{
-			newPod:       onePVCPod(GCEPDVolumeFilterType),
+			newPod:       onePVCPod(gcePDVolumeFilterType),
 			existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
-			filterName:   GCEPDVolumeFilterType,
+			filterName:   gcePDVolumeFilterType,
 			maxVols:      2,
-			fits:         true,
 			test:         "pod with missing PV is counted towards the PV limit",
 		},
 		{
-			newPod:       onePVCPod(GCEPDVolumeFilterType),
+			newPod:       onePVCPod(gcePDVolumeFilterType),
 			existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
-			filterName:   GCEPDVolumeFilterType,
+			filterName:   gcePDVolumeFilterType,
 			maxVols:      3,
-			fits:         true,
 			test:         "pod with missing PV is counted towards the PV limit",
 		},
 		{
 			newPod:       deletedPVPod2,
 			existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
-			filterName:   GCEPDVolumeFilterType,
+			filterName:   gcePDVolumeFilterType,
 			maxVols:      2,
-			fits:         true,
 			test:         "two pods missing the same PV are counted towards the PV limit only once",
 		},
 		{
 			newPod:       anotherDeletedPVPod,
 			existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
-			filterName:   GCEPDVolumeFilterType,
+			filterName:   gcePDVolumeFilterType,
 			maxVols:      2,
-			fits:         true,
 			test:         "two pods missing different PVs are counted towards the PV limit twice",
 		},
 		{
-			newPod:       onePVCPod(GCEPDVolumeFilterType),
+			newPod:       onePVCPod(gcePDVolumeFilterType),
 			existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
-			filterName:   GCEPDVolumeFilterType,
+			filterName:   gcePDVolumeFilterType,
 			maxVols:      2,
-			fits:         true,
 			test:         "pod with unbound PVC is counted towards the PV limit",
 		},
 		{
-			newPod:       onePVCPod(GCEPDVolumeFilterType),
+			newPod:       onePVCPod(gcePDVolumeFilterType),
 			existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
-			filterName:   GCEPDVolumeFilterType,
+			filterName:   gcePDVolumeFilterType,
 			maxVols:      3,
-			fits:         true,
 			test:         "pod with unbound PVC is counted towards the PV limit",
 		},
 		{
 			newPod:       unboundPVCPod2,
 			existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
-			filterName:   GCEPDVolumeFilterType,
+			filterName:   gcePDVolumeFilterType,
 			maxVols:      2,
-			fits:         true,
 			test:         "the same unbound PVC in multiple pods is counted towards the PV limit only once",
 		},
 		{
 			newPod:       anotherUnboundPVCPod,
 			existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
-			filterName:   GCEPDVolumeFilterType,
+			filterName:   gcePDVolumeFilterType,
 			maxVols:      2,
-			fits:         true,
 			test:         "two different unbound PVCs are counted towards the PV limit as two volumes",
 		},
-		// filterName:AzureDiskVolumeFilterType
-		{
-			newPod:       oneVolPod,
-			existingPods: []*v1.Pod{twoVolPod, oneVolPod},
-			filterName:   AzureDiskVolumeFilterType,
-			maxVols:      4,
-			fits:         true,
-			test:         "fits when node capacity >= new pod's AzureDisk volumes",
-		},
-		{
-			newPod:       twoVolPod,
-			existingPods: []*v1.Pod{oneVolPod},
-			filterName:   AzureDiskVolumeFilterType,
-			maxVols:      2,
-			fits:         true,
-			test:         "fit when node capacity < new pod's AzureDisk volumes",
-		},
-		{
-			newPod:       splitVolsPod,
-			existingPods: []*v1.Pod{twoVolPod},
-			filterName:   AzureDiskVolumeFilterType,
-			maxVols:      3,
-			fits:         true,
-			test:         "new pod's count ignores non-AzureDisk volumes",
-		},
-		{
-			newPod:       twoVolPod,
-			existingPods: []*v1.Pod{splitVolsPod, nonApplicablePod, emptyPod},
-			filterName:   AzureDiskVolumeFilterType,
-			maxVols:      3,
-			fits:         true,
-			test:         "existing pods' counts ignore non-AzureDisk volumes",
-		},
-		{
-			newPod:       onePVCPod(AzureDiskVolumeFilterType),
-			existingPods: []*v1.Pod{splitVolsPod, nonApplicablePod, emptyPod},
-			filterName:   AzureDiskVolumeFilterType,
-			maxVols:      3,
-			fits:         true,
-			test:         "new pod's count considers PVCs backed by AzureDisk volumes",
-		},
-		{
-			newPod:       splitPVCPod(AzureDiskVolumeFilterType),
-			existingPods: []*v1.Pod{splitVolsPod, oneVolPod},
-			filterName:   AzureDiskVolumeFilterType,
-			maxVols:      3,
-			fits:         true,
-			test:         "new pod's count ignores PVCs not backed by AzureDisk volumes",
-		},
-		{
-			newPod:       twoVolPod,
-			existingPods: []*v1.Pod{oneVolPod, onePVCPod(AzureDiskVolumeFilterType)},
-			filterName:   AzureDiskVolumeFilterType,
-			maxVols:      3,
-			fits:         true,
-			test:         "existing pods' counts considers PVCs backed by AzureDisk volumes",
-		},
-		{
-			newPod:       twoVolPod,
-			existingPods: []*v1.Pod{oneVolPod, twoVolPod, onePVCPod(AzureDiskVolumeFilterType)},
-			filterName:   AzureDiskVolumeFilterType,
-			maxVols:      4,
-			fits:         true,
-			test:         "already-mounted AzureDisk volumes are always ok to allow",
-		},
-		{
-			newPod:       splitVolsPod,
-			existingPods: []*v1.Pod{oneVolPod, oneVolPod, onePVCPod(AzureDiskVolumeFilterType)},
-			filterName:   AzureDiskVolumeFilterType,
-			maxVols:      3,
-			fits:         true,
-			test:         "the same AzureDisk volumes are not counted multiple times",
-		},
-		{
-			newPod:       onePVCPod(AzureDiskVolumeFilterType),
-			existingPods: []*v1.Pod{oneVolPod, deletedPVCPod},
-			filterName:   AzureDiskVolumeFilterType,
-			maxVols:      2,
-			fits:         true,
-			test:         "pod with missing PVC is counted towards the PV limit",
-		},
-		{
-			newPod:       onePVCPod(AzureDiskVolumeFilterType),
-			existingPods: []*v1.Pod{oneVolPod, deletedPVCPod},
-			filterName:   AzureDiskVolumeFilterType,
-			maxVols:      3,
-			fits:         true,
-			test:         "pod with missing PVC is counted towards the PV limit",
-		},
-		{
-			newPod:       onePVCPod(AzureDiskVolumeFilterType),
-			existingPods: []*v1.Pod{oneVolPod, twoDeletedPVCPod},
-			filterName:   AzureDiskVolumeFilterType,
-			maxVols:      3,
-			fits:         true,
-			test:         "pod with missing two PVCs is counted towards the PV limit twice",
-		},
-		{
-			newPod:       onePVCPod(AzureDiskVolumeFilterType),
-			existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
-			filterName:   AzureDiskVolumeFilterType,
-			maxVols:      2,
-			fits:         true,
-			test:         "pod with missing PV is counted towards the PV limit",
-		},
-		{
-			newPod:       onePVCPod(AzureDiskVolumeFilterType),
-			existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
-			filterName:   AzureDiskVolumeFilterType,
-			maxVols:      3,
-			fits:         true,
-			test:         "pod with missing PV is counted towards the PV limit",
-		},
-		{
-			newPod:       deletedPVPod2,
-			existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
-			filterName:   AzureDiskVolumeFilterType,
-			maxVols:      2,
-			fits:         true,
-			test:         "two pods missing the same PV are counted towards the PV limit only once",
-		},
-		{
-			newPod:       anotherDeletedPVPod,
-			existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
-			filterName:   AzureDiskVolumeFilterType,
-			maxVols:      2,
-			fits:         true,
-			test:         "two pods missing different PVs are counted towards the PV limit twice",
-		},
-		{
-			newPod:       onePVCPod(AzureDiskVolumeFilterType),
-			existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
-			filterName:   AzureDiskVolumeFilterType,
-			maxVols:      2,
-			fits:         true,
-			test:         "pod with unbound PVC is counted towards the PV limit",
-		},
-		{
-			newPod:       onePVCPod(AzureDiskVolumeFilterType),
-			existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
-			filterName:   AzureDiskVolumeFilterType,
-			maxVols:      3,
-			fits:         true,
-			test:         "pod with unbound PVC is counted towards the PV limit",
-		},
-		{
-			newPod:       unboundPVCPod2,
-			existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
-			filterName:   AzureDiskVolumeFilterType,
-			maxVols:      2,
-			fits:         true,
-			test:         "the same unbound PVC in multiple pods is counted towards the PV limit only once",
-		},
-		{
-			newPod:       anotherUnboundPVCPod,
-			existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
-			filterName:   AzureDiskVolumeFilterType,
-			maxVols:      2,
-			fits:         true,
-			test:         "two different unbound PVCs are counted towards the PV limit as two volumes",
-		},
-		// filterName:CinderVolumeFilterType
-		{
-			newPod:       oneVolCinderPod,
-			existingPods: []*v1.Pod{twoVolCinderPod},
-			filterName:   CinderVolumeFilterType,
-			maxVols:      4,
-			fits:         true,
-			test:         "fits when node capacity >= new pod's Cinder volumes",
-		},
-		{
-			newPod:       oneVolCinderPod,
-			existingPods: []*v1.Pod{twoVolCinderPod},
-			filterName:   CinderVolumeFilterType,
-			maxVols:      2,
-			fits:         false,
-			test:         "not fit when node capacity < new pod's Cinder volumes",
-		},
 	}
 
-	expectedFailureReasons := []PredicateFailureReason{ErrMaxVolumeCountExceeded}
-
-	// running attachable predicate tests with feature gate and limit present on nodes
 	for _, test := range tests {
-		node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName)
-		pred := NewMaxPDVolumeCountPredicate(test.filterName,
-			getFakeCSINodeLister(csiNode),
-			getFakeStorageClassLister(test.filterName),
-			getFakePVLister(test.filterName),
-			getFakePVCLister(test.filterName))
-		fits, reasons, err := pred(test.newPod, nil, node)
-		if err != nil {
-			t.Errorf("Using allocatable [%s]%s: unexpected error: %v", test.filterName, test.test, err)
-		}
-		if !fits && !reflect.DeepEqual(reasons, expectedFailureReasons) {
-			t.Errorf("Using allocatable [%s]%s: unexpected failure reasons: %v, want: %v", test.filterName, test.test, reasons, expectedFailureReasons)
-		}
-		if fits != test.fits {
-			t.Errorf("Using allocatable [%s]%s: expected %v, got %v", test.filterName, test.test, test.fits, fits)
-		}
+		t.Run(test.test, func(t *testing.T) {
+			node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName)
+			p := newNonCSILimits(test.filterName, getFakeCSINodeLister(csiNode), getFakeCSIStorageClassLister(test.filterName, test.driverName), getFakePVLister(test.filterName), getFakePVCLister(test.filterName)).(framework.FilterPlugin)
+			gotStatus := p.Filter(context.Background(), nil, test.newPod, node)
+			if !reflect.DeepEqual(gotStatus, test.wantStatus) {
+				t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
+			}
+		})
 	}
 }
 
-func getFakeStorageClassLister(sc string) fakelisters.StorageClassLister {
-	var provisioner string
-	switch sc {
-	case EBSVolumeFilterType:
-		provisioner = csilibplugins.AWSEBSInTreePluginName
-	case GCEPDVolumeFilterType:
-		provisioner = csilibplugins.GCEPDInTreePluginName
-	case AzureDiskVolumeFilterType:
-		provisioner = csilibplugins.AzureDiskInTreePluginName
-	case CinderVolumeFilterType:
-		provisioner = csilibplugins.CinderInTreePluginName
-	default:
-		return fakelisters.StorageClassLister{}
-	}
-	return fakelisters.StorageClassLister{
-		{
-			ObjectMeta:  metav1.ObjectMeta{Name: sc},
-			Provisioner: provisioner,
-		},
-		{
-			ObjectMeta:  metav1.ObjectMeta{Name: "standard-sc"},
-			Provisioner: "standard-sc",
-		},
-	}
-}
+func TestGetMaxVols(t *testing.T) {
+	previousValue := os.Getenv(predicates.KubeMaxPDVols)
 
-func getFakePVLister(filterName string) fakelisters.PersistentVolumeLister {
-	return fakelisters.PersistentVolumeLister{
+	tests := []struct {
+		rawMaxVols string
+		expected   int
+		name       string
+	}{
 		{
-			ObjectMeta: metav1.ObjectMeta{Name: "some" + filterName + "Vol"},
-			Spec: v1.PersistentVolumeSpec{
-				PersistentVolumeSource: v1.PersistentVolumeSource{
-					AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: strings.ToLower(filterName) + "Vol"},
-				},
-			},
+			rawMaxVols: "invalid",
+			expected:   -1,
+			name:       "Unable to parse maximum PD volumes value, using default value",
 		},
 		{
-			ObjectMeta: metav1.ObjectMeta{Name: "someNon" + filterName + "Vol"},
-			Spec: v1.PersistentVolumeSpec{
-				PersistentVolumeSource: v1.PersistentVolumeSource{},
-			},
+			rawMaxVols: "-2",
+			expected:   -1,
+			name:       "Maximum PD volumes must be a positive value, using default value",
 		},
+		{
+			rawMaxVols: "40",
+			expected:   40,
+			name:       "Parse maximum PD volumes value from env",
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			os.Setenv(predicates.KubeMaxPDVols, test.rawMaxVols)
+			result := getMaxVolLimitFromEnv()
+			if result != test.expected {
+				t.Errorf("expected %v got %v", test.expected, result)
+			}
+		})
+	}
+
+	os.Unsetenv(predicates.KubeMaxPDVols)
+	if previousValue != "" {
+		os.Setenv(predicates.KubeMaxPDVols, previousValue)
 	}
 }
@@ -971,173 +1284,60 @@ func getFakePVCLister(filterName string) fakelisters.PersistentVolumeClaimLister
 }
 
-func TestMaxVolumeFuncM5(t *testing.T) {
-	node := &v1.Node{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: "node-for-m5-instance",
-			Labels: map[string]string{
-				v1.LabelInstanceType: "m5.large",
+func getFakePVLister(filterName string) fakelisters.PersistentVolumeLister {
+	return fakelisters.PersistentVolumeLister{
+		{
+			ObjectMeta: metav1.ObjectMeta{Name: "some" + filterName + "Vol"},
+			Spec: v1.PersistentVolumeSpec{
+				PersistentVolumeSource: v1.PersistentVolumeSource{
+					AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: strings.ToLower(filterName) + "Vol"},
+				},
+			},
+		},
+		{
+			ObjectMeta: metav1.ObjectMeta{Name: "someNon" + filterName + "Vol"},
+			Spec: v1.PersistentVolumeSpec{
+				PersistentVolumeSource: v1.PersistentVolumeSource{},
 			},
 		},
 	}
-	os.Unsetenv(KubeMaxPDVols)
-	maxVolumeFunc := getMaxVolumeFunc(EBSVolumeFilterType)
-	maxVolume := maxVolumeFunc(node)
-	if maxVolume != volumeutil.DefaultMaxEBSNitroVolumeLimit {
-		t.Errorf("Expected max volume to be %d got %d", volumeutil.DefaultMaxEBSNitroVolumeLimit, maxVolume)
-	}
 }
 
-func TestMaxVolumeFuncT3(t *testing.T) {
-	node := &v1.Node{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: "node-for-t3-instance",
-			Labels: map[string]string{
-				v1.LabelInstanceType: "t3.medium",
+func onePVCPod(filterName string) *v1.Pod {
+	return &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "some" + filterName + "Vol",
+						},
+					},
+				},
 			},
 		},
 	}
-	os.Unsetenv(KubeMaxPDVols)
-	maxVolumeFunc := getMaxVolumeFunc(EBSVolumeFilterType)
-	maxVolume := maxVolumeFunc(node)
-	if maxVolume != volumeutil.DefaultMaxEBSNitroVolumeLimit {
-		t.Errorf("Expected max volume to be %d got %d", volumeutil.DefaultMaxEBSNitroVolumeLimit, maxVolume)
-	}
 }
 
-func TestMaxVolumeFuncR5(t *testing.T) {
-	node := &v1.Node{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: "node-for-r5-instance",
-			Labels: map[string]string{
-				v1.LabelInstanceType: "r5d.xlarge",
+func splitPVCPod(filterName string) *v1.Pod {
+	return &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "someNon" + filterName + "Vol",
						},
+					},
+				},
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "some" + filterName + "Vol",
+						},
+					},
+				},
 			},
 		},
 	}
-	os.Unsetenv(KubeMaxPDVols)
-	maxVolumeFunc := getMaxVolumeFunc(EBSVolumeFilterType)
-	maxVolume := maxVolumeFunc(node)
-	if maxVolume != volumeutil.DefaultMaxEBSNitroVolumeLimit {
-		t.Errorf("Expected max volume to be %d got %d", volumeutil.DefaultMaxEBSNitroVolumeLimit, maxVolume)
-	}
-}
-
-func TestMaxVolumeFuncM4(t *testing.T) {
-	node := &v1.Node{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: "node-for-m4-instance",
-			Labels: map[string]string{
-				v1.LabelInstanceType: "m4.2xlarge",
-			},
-		},
-	}
-	os.Unsetenv(KubeMaxPDVols)
-	maxVolumeFunc := getMaxVolumeFunc(EBSVolumeFilterType)
-	maxVolume := maxVolumeFunc(node)
-	if maxVolume != volumeutil.DefaultMaxEBSVolumes {
-		t.Errorf("Expected max volume to be %d got %d", volumeutil.DefaultMaxEBSVolumes, maxVolume)
-	}
-}
-
-func TestMaxVolumeFuncM4WithOnlyStableLabels(t *testing.T) {
-	node := &v1.Node{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: "node-for-m4-instance",
-			Labels: map[string]string{
-				v1.LabelInstanceTypeStable: "m4.2xlarge",
-			},
-		},
-	}
-	os.Unsetenv(KubeMaxPDVols)
-	maxVolumeFunc := getMaxVolumeFunc(EBSVolumeFilterType)
-	maxVolume := maxVolumeFunc(node)
-	if maxVolume != volumeutil.DefaultMaxEBSVolumes {
-		t.Errorf("Expected max volume to be %d got %d", volumeutil.DefaultMaxEBSVolumes, maxVolume)
-	}
-}
-
-func TestMaxVolumeFuncM4WithBothBetaAndStableLabels(t *testing.T) {
-	node := &v1.Node{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: "node-for-m4-instance",
-			Labels: map[string]string{
-				v1.LabelInstanceType:       "m4.2xlarge",
-				v1.LabelInstanceTypeStable: "m4.2xlarge",
-			},
-		},
-	}
-	os.Unsetenv(KubeMaxPDVols)
-	maxVolumeFunc := getMaxVolumeFunc(EBSVolumeFilterType)
-	maxVolume := maxVolumeFunc(node)
-	if maxVolume != volumeutil.DefaultMaxEBSVolumes {
-		t.Errorf("Expected max volume to be %d got %d", volumeutil.DefaultMaxEBSVolumes, maxVolume)
-	}
-}
-
-func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int64, driverNames ...string) (*schedulernodeinfo.NodeInfo, *storagev1.CSINode) {
-	nodeInfo := schedulernodeinfo.NewNodeInfo(pods...)
-	node := &v1.Node{
-		ObjectMeta: metav1.ObjectMeta{Name: "node-for-max-pd-test-1"},
-		Status: v1.NodeStatus{
-			Allocatable: v1.ResourceList{},
-		},
-	}
-	var csiNode *storagev1.CSINode
-
-	addLimitToNode := func() {
-		for _, driver := range driverNames {
-			node.Status.Allocatable[getVolumeLimitKey(driver)] = *resource.NewQuantity(limit, resource.DecimalSI)
-		}
-	}
-
-	initCSINode := func() {
-		csiNode = &storagev1.CSINode{
-			ObjectMeta: metav1.ObjectMeta{Name: "csi-node-for-max-pd-test-1"},
-			Spec: storagev1.CSINodeSpec{
-				Drivers: []storagev1.CSINodeDriver{},
-			},
-		}
-	}
-
-	addDriversCSINode := func(addLimits bool) {
-		initCSINode()
-		for _, driver := range driverNames {
-			driver := storagev1.CSINodeDriver{
-				Name:   driver,
-				NodeID: "node-for-max-pd-test-1",
-			}
-			if addLimits {
-				driver.Allocatable = &storagev1.VolumeNodeResources{
-					Count: utilpointer.Int32Ptr(int32(limit)),
-				}
-			}
-			csiNode.Spec.Drivers = append(csiNode.Spec.Drivers, driver)
-		}
-	}
-
-	switch limitSource {
-	case "node":
-		addLimitToNode()
-	case "csinode":
-		addDriversCSINode(true)
-	case "both":
-		addLimitToNode()
-		addDriversCSINode(true)
-	case "csinode-with-no-limit":
-		addDriversCSINode(false)
-	case "no-csi-driver":
-		initCSINode()
-	default:
-		// Do nothing.
-	}
-
-	nodeInfo.SetNode(node)
-	return nodeInfo, csiNode
-}
-
-func getFakeCSINodeLister(csiNode *storagev1.CSINode) fakelisters.CSINodeLister {
-	if csiNode != nil {
-		return fakelisters.CSINodeLister(*csiNode)
-	}
-	return fakelisters.CSINodeLister{}
-}
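A note for readers following the test conversion above: the old FitPredicate returned a (fits bool, reasons []PredicateFailureReason, err error) triple, while a framework.FilterPlugin reports everything through a single *framework.Status whose nil value means "fits". That is why converted table entries simply drop `fits: true` and only the unschedulable cases gain a `wantStatus`. The following standalone sketch uses toy types (none of these names come from this patch) purely to illustrate the shape of that contract:

package main

import "fmt"

// toyStatus stands in for framework.Status; a nil *toyStatus means "fits",
// mirroring how a nil wantStatus in the tables above encodes the old fits: true.
type toyStatus struct {
	code   string
	reason string
}

func (s *toyStatus) String() string {
	if s == nil {
		return "<nil> (schedulable)"
	}
	return s.code + ": " + s.reason
}

// filterSketch mimics a volume-limit check: if the attached plus requested
// volumes exceed the node's limit it returns an Unschedulable status,
// otherwise it returns nil. The reason string here is illustrative, not the
// real predicates.ErrMaxVolumeCountExceeded reason.
func filterSketch(attached, requested, limit int) *toyStatus {
	if attached+requested > limit {
		return &toyStatus{code: "Unschedulable", reason: "max volume count exceeded"}
	}
	return nil
}

func main() {
	fmt.Println(filterSketch(2, 1, 2)) // non-nil, like the wantStatus entries above
	fmt.Println(filterSketch(2, 1, 4)) // <nil>, like the entries with no wantStatus
}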
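TestGetMaxVols above pins down the contract of getMaxVolLimitFromEnv without showing its body (which lives in non_csi.go). A minimal, self-contained sketch consistent with the three table cases — reading the KUBE_MAX_PD_VOLS override and treating -1 as "no override, use the per-cloud default" — might look like this; it is an assumption-based reimplementation, not the code added by this patch:

package main

import (
	"fmt"
	"os"
	"strconv"
)

// kubeMaxPDVols mirrors the KubeMaxPDVols constant the tests reference.
const kubeMaxPDVols = "KUBE_MAX_PD_VOLS"

// maxVolLimitFromEnvSketch returns the override from the environment, or -1
// when the variable is unset, unparsable, or not a positive integer — the
// three behaviours TestGetMaxVols asserts.
func maxVolLimitFromEnvSketch() int {
	rawMaxVols := os.Getenv(kubeMaxPDVols)
	if rawMaxVols == "" {
		return -1
	}
	parsed, err := strconv.Atoi(rawMaxVols)
	if err != nil {
		return -1 // "Unable to parse maximum PD volumes value, using default value"
	}
	if parsed <= 0 {
		return -1 // "Maximum PD volumes must be a positive value, using default value"
	}
	return parsed // "Parse maximum PD volumes value from env"
}

func main() {
	os.Setenv(kubeMaxPDVols, "40")
	fmt.Println(maxVolLimitFromEnvSketch()) // 40
	os.Setenv(kubeMaxPDVols, "-2")
	fmt.Println(maxVolLimitFromEnvSketch()) // -1
}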