Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-31 15:25:57 +00:00

feat(scheduling): implement azure, cinder, ebs and gce as filter plugin

This commit is contained in:
parent f1cbbda291
commit 320ac4e277
pkg/scheduler/algorithm/predicates/BUILD
@@ -21,17 +21,11 @@ go_library(
        "//pkg/scheduler/algorithm:go_default_library",
        "//pkg/scheduler/nodeinfo:go_default_library",
        "//pkg/scheduler/util:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/storage/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library",
        "//staging/src/k8s.io/csi-translation-lib/plugins:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
    ],
)
@@ -39,7 +33,6 @@ go_library(
go_test(
    name = "go_default_test",
    srcs = [
        "max_attachable_volume_predicate_test.go",
        "predicates_test.go",
        "utils_test.go",
    ],
@@ -48,19 +41,14 @@ go_test(
        "//pkg/apis/core:go_default_library",
        "//pkg/apis/core/v1/helper:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/scheduler/listers/fake:go_default_library",
        "//pkg/scheduler/nodeinfo:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/storage/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
        "//staging/src/k8s.io/csi-translation-lib/plugins:go_default_library",
        "//vendor/k8s.io/utils/pointer:go_default_library",
    ],
)
pkg/scheduler/algorithm/predicates/predicates.go
@@ -18,28 +18,19 @@ package predicates

import (
    "fmt"
    "os"
    "regexp"
    "strconv"

    "k8s.io/klog"

    v1 "k8s.io/api/core/v1"
    storage "k8s.io/api/storage/v1"
    "k8s.io/apimachinery/pkg/fields"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/util/rand"
    "k8s.io/apimachinery/pkg/util/sets"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    corelisters "k8s.io/client-go/listers/core/v1"
    storagelisters "k8s.io/client-go/listers/storage/v1"
    csilibplugins "k8s.io/csi-translation-lib/plugins"
    v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
    "k8s.io/kubernetes/pkg/features"
    "k8s.io/kubernetes/pkg/scheduler/algorithm"
    schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
    schedutil "k8s.io/kubernetes/pkg/scheduler/util"
    volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

const (
@@ -92,25 +83,8 @@ const (
    // EvenPodsSpreadPred defines the name of predicate EvenPodsSpread.
    EvenPodsSpreadPred = "EvenPodsSpread"

    // DefaultMaxGCEPDVolumes defines the maximum number of PD Volumes for GCE.
    // GCE instances can have up to 16 PD volumes attached.
    DefaultMaxGCEPDVolumes = 16
    // DefaultMaxAzureDiskVolumes defines the maximum number of PD Volumes for Azure.
    // Larger Azure VMs can actually have many more disks attached.
    // TODO We should determine the max based on VM size
    DefaultMaxAzureDiskVolumes = 16

    // KubeMaxPDVols is the name of the environment variable that overrides the
    // maximum number of PD Volumes per node.
    KubeMaxPDVols = "KUBE_MAX_PD_VOLS"

    // EBSVolumeFilterType defines the filter name for EBSVolumeFilter.
    EBSVolumeFilterType = "EBS"
    // GCEPDVolumeFilterType defines the filter name for GCEPDVolumeFilter.
    GCEPDVolumeFilterType = "GCE"
    // AzureDiskVolumeFilterType defines the filter name for AzureDiskVolumeFilter.
    AzureDiskVolumeFilterType = "AzureDisk"
    // CinderVolumeFilterType defines the filter name for CinderVolumeFilter.
    CinderVolumeFilterType = "Cinder"
)

// IMPORTANT NOTE for predicate developers:
@@ -145,394 +119,6 @@ func Ordering() []string {
// The failure information is given by the error.
type FitPredicate func(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error)

// MaxPDVolumeCountChecker contains information to check the max number of volumes for a predicate.
type MaxPDVolumeCountChecker struct {
    filter         VolumeFilter
    volumeLimitKey v1.ResourceName
    maxVolumeFunc  func(node *v1.Node) int
    csiNodeLister  storagelisters.CSINodeLister
    pvLister       corelisters.PersistentVolumeLister
    pvcLister      corelisters.PersistentVolumeClaimLister
    scLister       storagelisters.StorageClassLister

    // The string below is generated randomly during the struct's initialization.
    // It is used to prefix volumeID generated inside the predicate() method to
    // avoid conflicts with any real volume.
    randomVolumeIDPrefix string
}

// VolumeFilter contains information on how to filter PD Volumes when checking PD Volume caps.
type VolumeFilter struct {
    // FilterVolume and FilterPersistentVolume return the volume's unique ID
    // and whether the volume is relevant to this filter.
    FilterVolume           func(vol *v1.Volume) (id string, relevant bool)
    FilterPersistentVolume func(pv *v1.PersistentVolume) (id string, relevant bool)
    // MatchProvisioner evaluates if the StorageClass provisioner matches the running predicate
    MatchProvisioner func(sc *storage.StorageClass) (relevant bool)
    // IsMigrated returns a boolean specifying whether the plugin is migrated to a CSI driver
    IsMigrated func(csiNode *storage.CSINode) bool
}
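The four concrete filters later in this file all share this shape. Purely as an illustration of the contract — every name below is invented and nothing here is part of the commit — a filter for a fictional "foo" volume type could look like:

    // Hypothetical example only, not scheduler code.
    var fooVolumeFilter = VolumeFilter{
        // No pod-level volume source exists for the fictional type, so nothing matches.
        FilterVolume: func(vol *v1.Volume) (string, bool) { return "", false },
        // A real filter would return a unique ID carried in the PV spec.
        FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) { return "", false },
        // Unbound PVCs are attributed to a filter via their StorageClass provisioner.
        MatchProvisioner: func(sc *storage.StorageClass) bool { return sc.Provisioner == "example.com/foo" },
        // The fictional plugin has no CSI migration path.
        IsMigrated: func(csiNode *storage.CSINode) bool { return false },
    }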
// NewMaxPDVolumeCountPredicate creates a predicate which evaluates whether a pod can fit based on the
// number of volumes which match a filter that it requests, and those that are already present.
//
// DEPRECATED
// All cloudprovider specific predicates defined here are deprecated in favour of CSI volume limit
// predicate - MaxCSIVolumeCountPred.
//
// The predicate looks for both volumes used directly, as well as PVC volumes that are backed by relevant volume
// types, counts the number of unique volumes, and rejects the new pod if it would place the total count over
// the maximum.
func NewMaxPDVolumeCountPredicate(filterName string, csiNodeLister storagelisters.CSINodeLister, scLister storagelisters.StorageClassLister,
    pvLister corelisters.PersistentVolumeLister, pvcLister corelisters.PersistentVolumeClaimLister) FitPredicate {
    var filter VolumeFilter
    var volumeLimitKey v1.ResourceName

    switch filterName {

    case EBSVolumeFilterType:
        filter = EBSVolumeFilter
        volumeLimitKey = v1.ResourceName(volumeutil.EBSVolumeLimitKey)
    case GCEPDVolumeFilterType:
        filter = GCEPDVolumeFilter
        volumeLimitKey = v1.ResourceName(volumeutil.GCEVolumeLimitKey)
    case AzureDiskVolumeFilterType:
        filter = AzureDiskVolumeFilter
        volumeLimitKey = v1.ResourceName(volumeutil.AzureVolumeLimitKey)
    case CinderVolumeFilterType:
        filter = CinderVolumeFilter
        volumeLimitKey = v1.ResourceName(volumeutil.CinderVolumeLimitKey)
    default:
        klog.Fatalf("Wrong filterName, Only Support %v %v %v %v", EBSVolumeFilterType,
            GCEPDVolumeFilterType, AzureDiskVolumeFilterType, CinderVolumeFilterType)
        return nil

    }
    c := &MaxPDVolumeCountChecker{
        filter:               filter,
        volumeLimitKey:       volumeLimitKey,
        maxVolumeFunc:        getMaxVolumeFunc(filterName),
        csiNodeLister:        csiNodeLister,
        pvLister:             pvLister,
        pvcLister:            pvcLister,
        scLister:             scLister,
        randomVolumeIDPrefix: rand.String(32),
    }

    return c.predicate
}
func getMaxVolumeFunc(filterName string) func(node *v1.Node) int {
    return func(node *v1.Node) int {
        maxVolumesFromEnv := getMaxVolLimitFromEnv()
        if maxVolumesFromEnv > 0 {
            return maxVolumesFromEnv
        }

        var nodeInstanceType string
        for k, v := range node.ObjectMeta.Labels {
            if k == v1.LabelInstanceType || k == v1.LabelInstanceTypeStable {
                nodeInstanceType = v
                break
            }
        }
        switch filterName {
        case EBSVolumeFilterType:
            return getMaxEBSVolume(nodeInstanceType)
        case GCEPDVolumeFilterType:
            return DefaultMaxGCEPDVolumes
        case AzureDiskVolumeFilterType:
            return DefaultMaxAzureDiskVolumes
        case CinderVolumeFilterType:
            return volumeutil.DefaultMaxCinderVolumes
        default:
            return -1
        }
    }
}

func getMaxEBSVolume(nodeInstanceType string) int {
    if ok, _ := regexp.MatchString(volumeutil.EBSNitroLimitRegex, nodeInstanceType); ok {
        return volumeutil.DefaultMaxEBSNitroVolumeLimit
    }
    return volumeutil.DefaultMaxEBSVolumes
}

// getMaxVolLimitFromEnv checks the max PD volumes environment variable, otherwise returning a default value.
func getMaxVolLimitFromEnv() int {
    if rawMaxVols := os.Getenv(KubeMaxPDVols); rawMaxVols != "" {
        if parsedMaxVols, err := strconv.Atoi(rawMaxVols); err != nil {
            klog.Errorf("Unable to parse maximum PD volumes value, using default: %v", err)
        } else if parsedMaxVols <= 0 {
            klog.Errorf("Maximum PD volumes must be a positive value, using default")
        } else {
            return parsedMaxVols
        }
    }

    return -1
}
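Taken together, the lookup order for the attach limit is: the KUBE_MAX_PD_VOLS environment variable first, then the per-cloud default (instance-type aware only for EBS), with a node-allocatable value able to override both inside predicate() below. A minimal sketch of the override path, mirroring what TestGetMaxVols later in this diff exercises; the value 40 is illustrative:

    os.Setenv(KubeMaxPDVols, "40")
    limit := getMaxVolLimitFromEnv() // 40; "invalid" or "-2" would log an error and return -1
    _ = limit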
func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace string, filteredVolumes map[string]bool) error {
    for i := range volumes {
        vol := &volumes[i]
        if id, ok := c.filter.FilterVolume(vol); ok {
            filteredVolumes[id] = true
        } else if vol.PersistentVolumeClaim != nil {
            pvcName := vol.PersistentVolumeClaim.ClaimName
            if pvcName == "" {
                return fmt.Errorf("PersistentVolumeClaim had no name")
            }

            // Until we know real ID of the volume use namespace/pvcName as substitute
            // with a random prefix (calculated and stored inside 'c' during initialization)
            // to avoid conflicts with existing volume IDs.
            pvID := fmt.Sprintf("%s-%s/%s", c.randomVolumeIDPrefix, namespace, pvcName)

            pvc, err := c.pvcLister.PersistentVolumeClaims(namespace).Get(pvcName)
            if err != nil || pvc == nil {
                // If the PVC is invalid, we don't count the volume because
                // there's no guarantee that it belongs to the running predicate.
                klog.V(4).Infof("Unable to look up PVC info for %s/%s, assuming PVC doesn't match predicate when counting limits: %v", namespace, pvcName, err)
                continue
            }

            pvName := pvc.Spec.VolumeName
            if pvName == "" {
                // PVC is not bound. It was either deleted and created again or
                // it was forcefully unbound by admin. The pod can still use the
                // original PV where it was bound to, so we count the volume if
                // it belongs to the running predicate.
                if c.matchProvisioner(pvc) {
                    klog.V(4).Infof("PVC %s/%s is not bound, assuming PVC matches predicate when counting limits", namespace, pvcName)
                    filteredVolumes[pvID] = true
                }
                continue
            }

            pv, err := c.pvLister.Get(pvName)
            if err != nil || pv == nil {
                // If the PV is invalid and PVC belongs to the running predicate,
                // log the error and count the PV towards the PV limit.
                if c.matchProvisioner(pvc) {
                    klog.V(4).Infof("Unable to look up PV info for %s/%s/%s, assuming PV matches predicate when counting limits: %v", namespace, pvcName, pvName, err)
                    filteredVolumes[pvID] = true
                }
                continue
            }

            if id, ok := c.filter.FilterPersistentVolume(pv); ok {
                filteredVolumes[id] = true
            }
        }
    }

    return nil
}

// matchProvisioner helps identify if the given PVC belongs to the running predicate.
func (c *MaxPDVolumeCountChecker) matchProvisioner(pvc *v1.PersistentVolumeClaim) bool {
    if pvc.Spec.StorageClassName == nil {
        return false
    }

    storageClass, err := c.scLister.Get(*pvc.Spec.StorageClassName)
    if err != nil || storageClass == nil {
        return false
    }

    return c.filter.MatchProvisioner(storageClass)
}

func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
    // If a pod doesn't have any volume attached to it, the predicate will always be true.
    // Thus we make a fast path for it, to avoid unnecessary computations in this case.
    if len(pod.Spec.Volumes) == 0 {
        return true, nil, nil
    }

    newVolumes := make(map[string]bool)
    if err := c.filterVolumes(pod.Spec.Volumes, pod.Namespace, newVolumes); err != nil {
        return false, nil, err
    }

    // quick return
    if len(newVolumes) == 0 {
        return true, nil, nil
    }

    node := nodeInfo.Node()
    if node == nil {
        return false, nil, fmt.Errorf("node not found")
    }

    var (
        csiNode *storage.CSINode
        err     error
    )
    if c.csiNodeLister != nil {
        csiNode, err = c.csiNodeLister.Get(node.Name)
        if err != nil {
            // we don't fail here because the CSINode object is only necessary
            // for determining whether the migration is enabled or not
            klog.V(5).Infof("Could not get a CSINode object for the node: %v", err)
        }
    }

    // if a plugin has been migrated to a CSI driver, defer to the CSI predicate
    if c.filter.IsMigrated(csiNode) {
        return true, nil, nil
    }

    // count unique volumes
    existingVolumes := make(map[string]bool)
    for _, existingPod := range nodeInfo.Pods() {
        if err := c.filterVolumes(existingPod.Spec.Volumes, existingPod.Namespace, existingVolumes); err != nil {
            return false, nil, err
        }
    }
    numExistingVolumes := len(existingVolumes)

    // filter out already-mounted volumes
    for k := range existingVolumes {
        if _, ok := newVolumes[k]; ok {
            delete(newVolumes, k)
        }
    }

    numNewVolumes := len(newVolumes)
    maxAttachLimit := c.maxVolumeFunc(node)

    volumeLimits := nodeInfo.VolumeLimits()
    if maxAttachLimitFromAllocatable, ok := volumeLimits[c.volumeLimitKey]; ok {
        maxAttachLimit = int(maxAttachLimitFromAllocatable)
    }

    if numExistingVolumes+numNewVolumes > maxAttachLimit {
        // violates MaxEBSVolumeCount, MaxGCEPDVolumeCount, or the Azure/Cinder equivalent
        return false, []PredicateFailureReason{ErrMaxVolumeCountExceeded}, nil
    }
    if nodeInfo != nil && nodeInfo.TransientInfo != nil && utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) {
        nodeInfo.TransientInfo.TransientLock.Lock()
        defer nodeInfo.TransientInfo.TransientLock.Unlock()
        nodeInfo.TransientInfo.TransNodeInfo.AllocatableVolumesCount = maxAttachLimit - numExistingVolumes
        nodeInfo.TransientInfo.TransNodeInfo.RequestedVolumes = numNewVolumes
    }
    return true, nil, nil
}
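The counting logic above reduces to set arithmetic: volumes already attached to the node are subtracted from the new pod's set before the sum is compared against the limit. A self-contained toy illustration (not scheduler code; all names invented):

    package main

    import "fmt"

    func main() {
        existing := map[string]bool{"vol-a": true, "vol-b": true} // on the node already
        newVols := map[string]bool{"vol-b": true, "vol-c": true}  // requested by the new pod
        for id := range existing {
            delete(newVols, id) // already-attached volumes cost nothing extra
        }
        maxAttachLimit := 3
        fmt.Println(len(existing)+len(newVols) <= maxAttachLimit) // true: 2 + 1 <= 3
    }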
// EBSVolumeFilter is a VolumeFilter for filtering AWS ElasticBlockStore Volumes.
var EBSVolumeFilter = VolumeFilter{
    FilterVolume: func(vol *v1.Volume) (string, bool) {
        if vol.AWSElasticBlockStore != nil {
            return vol.AWSElasticBlockStore.VolumeID, true
        }
        return "", false
    },

    FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) {
        if pv.Spec.AWSElasticBlockStore != nil {
            return pv.Spec.AWSElasticBlockStore.VolumeID, true
        }
        return "", false
    },

    MatchProvisioner: func(sc *storage.StorageClass) (relevant bool) {
        if sc.Provisioner == csilibplugins.AWSEBSInTreePluginName {
            return true
        }
        return false
    },

    IsMigrated: func(csiNode *storage.CSINode) bool {
        return isCSIMigrationOn(csiNode, csilibplugins.AWSEBSInTreePluginName)
    },
}

// GCEPDVolumeFilter is a VolumeFilter for filtering GCE PersistentDisk Volumes.
var GCEPDVolumeFilter = VolumeFilter{
    FilterVolume: func(vol *v1.Volume) (string, bool) {
        if vol.GCEPersistentDisk != nil {
            return vol.GCEPersistentDisk.PDName, true
        }
        return "", false
    },

    FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) {
        if pv.Spec.GCEPersistentDisk != nil {
            return pv.Spec.GCEPersistentDisk.PDName, true
        }
        return "", false
    },

    MatchProvisioner: func(sc *storage.StorageClass) (relevant bool) {
        if sc.Provisioner == csilibplugins.GCEPDInTreePluginName {
            return true
        }
        return false
    },

    IsMigrated: func(csiNode *storage.CSINode) bool {
        return isCSIMigrationOn(csiNode, csilibplugins.GCEPDInTreePluginName)
    },
}

// AzureDiskVolumeFilter is a VolumeFilter for filtering Azure Disk Volumes.
var AzureDiskVolumeFilter = VolumeFilter{
    FilterVolume: func(vol *v1.Volume) (string, bool) {
        if vol.AzureDisk != nil {
            return vol.AzureDisk.DiskName, true
        }
        return "", false
    },

    FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) {
        if pv.Spec.AzureDisk != nil {
            return pv.Spec.AzureDisk.DiskName, true
        }
        return "", false
    },

    MatchProvisioner: func(sc *storage.StorageClass) (relevant bool) {
        if sc.Provisioner == csilibplugins.AzureDiskInTreePluginName {
            return true
        }
        return false
    },

    IsMigrated: func(csiNode *storage.CSINode) bool {
        return isCSIMigrationOn(csiNode, csilibplugins.AzureDiskInTreePluginName)
    },
}

// CinderVolumeFilter is a VolumeFilter for filtering Cinder Volumes.
// It will be deprecated once Openstack cloudprovider has been removed from in-tree.
var CinderVolumeFilter = VolumeFilter{
    FilterVolume: func(vol *v1.Volume) (string, bool) {
        if vol.Cinder != nil {
            return vol.Cinder.VolumeID, true
        }
        return "", false
    },

    FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) {
        if pv.Spec.Cinder != nil {
            return pv.Spec.Cinder.VolumeID, true
        }
        return "", false
    },

    MatchProvisioner: func(sc *storage.StorageClass) (relevant bool) {
        if sc.Provisioner == csilibplugins.CinderInTreePluginName {
            return true
        }
        return false
    },

    IsMigrated: func(csiNode *storage.CSINode) bool {
        return isCSIMigrationOn(csiNode, csilibplugins.CinderInTreePluginName)
    },
}

// GetResourceRequest returns a *schedulernodeinfo.Resource that covers the largest
// width in each resource dimension. Because init-containers run sequentially, we collect
// the max in each dimension iteratively. In contrast, we sum the resource vectors for
pkg/scheduler/algorithm/predicates/predicates_test.go
@@ -17,7 +17,6 @@ limitations under the License.
package predicates

import (
    "os"
    "reflect"
    "strconv"
    "strings"
@@ -1719,44 +1718,3 @@ func TestPodToleratesTaints(t *testing.T) {
        })
    }
}

func TestGetMaxVols(t *testing.T) {
    previousValue := os.Getenv(KubeMaxPDVols)

    tests := []struct {
        rawMaxVols string
        expected   int
        name       string
    }{
        {
            rawMaxVols: "invalid",
            expected:   -1,
            name:       "Unable to parse maximum PD volumes value, using default value",
        },
        {
            rawMaxVols: "-2",
            expected:   -1,
            name:       "Maximum PD volumes must be a positive value, using default value",
        },
        {
            rawMaxVols: "40",
            expected:   40,
            name:       "Parse maximum PD volumes value from env",
        },
    }

    for _, test := range tests {
        t.Run(test.name, func(t *testing.T) {
            os.Setenv(KubeMaxPDVols, test.rawMaxVols)
            result := getMaxVolLimitFromEnv()
            if result != test.expected {
                t.Errorf("expected %v got %v", test.expected, result)
            }
        })
    }

    os.Unsetenv(KubeMaxPDVols)
    if previousValue != "" {
        os.Setenv(KubeMaxPDVols, previousValue)
    }
}
pkg/scheduler/algorithm/predicates/utils.go
@@ -17,15 +17,8 @@ limitations under the License.
package predicates

import (
    "strings"

    v1 "k8s.io/api/core/v1"
    storagev1 "k8s.io/api/storage/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/util/sets"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    csilibplugins "k8s.io/csi-translation-lib/plugins"
    "k8s.io/kubernetes/pkg/features"
    schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
@@ -94,56 +87,3 @@ func SetPredicatesOrderingDuringTest(value []string) func() {
        predicatesOrdering = origVal
    }
}

// isCSIMigrationOn returns a boolean value indicating whether
// the CSI migration has been enabled for a particular storage plugin.
func isCSIMigrationOn(csiNode *storagev1.CSINode, pluginName string) bool {
    if csiNode == nil || len(pluginName) == 0 {
        return false
    }

    // In-tree storage to CSI driver migration feature should be enabled,
    // along with the plugin-specific one
    if !utilfeature.DefaultFeatureGate.Enabled(features.CSIMigration) {
        return false
    }

    switch pluginName {
    case csilibplugins.AWSEBSInTreePluginName:
        if !utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationAWS) {
            return false
        }
    case csilibplugins.GCEPDInTreePluginName:
        if !utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationGCE) {
            return false
        }
    case csilibplugins.AzureDiskInTreePluginName:
        if !utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationAzureDisk) {
            return false
        }
    case csilibplugins.CinderInTreePluginName:
        if !utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationOpenStack) {
            return false
        }
    default:
        return false
    }

    // The plugin name should be listed in the CSINode object annotation.
    // This indicates that the plugin has been migrated to a CSI driver in the node.
    csiNodeAnn := csiNode.GetAnnotations()
    if csiNodeAnn == nil {
        return false
    }

    var mpaSet sets.String
    mpa := csiNodeAnn[v1.MigratedPluginsAnnotationKey]
    if len(mpa) == 0 {
        mpaSet = sets.NewString()
    } else {
        tok := strings.Split(mpa, ",")
        mpaSet = sets.NewString(tok...)
    }

    return mpaSet.Has(pluginName)
}
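A plugin therefore counts as migrated only when the global CSIMigration gate, the plugin-specific gate, and the node's annotation all agree. As a sketch of the annotation side (metav1 here is k8s.io/apimachinery/pkg/apis/meta/v1, and the feature gates still have to be on for the call to return true):

    csiNode := &storagev1.CSINode{
        ObjectMeta: metav1.ObjectMeta{
            Name: "node-1",
            Annotations: map[string]string{
                // Comma-separated list of in-tree plugins this node already serves via CSI drivers.
                v1.MigratedPluginsAnnotationKey: csilibplugins.AWSEBSInTreePluginName,
            },
        },
    }
    _ = isCSIMigrationOn(csiNode, csilibplugins.AWSEBSInTreePluginName)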
pkg/scheduler/framework/plugins/nodevolumelimits/BUILD
@@ -3,12 +3,8 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
    name = "go_default_library",
    srcs = [
        "azure.go",
        "cinder.go",
        "csi.go",
        "csinode_helper.go",
        "ebs.go",
        "gce.go",
        "non_csi.go",
        "utils.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits",
@@ -17,7 +13,6 @@ go_library(
        "//pkg/apis/core/v1/helper:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/scheduler/algorithm/predicates:go_default_library",
        "//pkg/scheduler/framework/plugins/migration:go_default_library",
        "//pkg/scheduler/framework/v1alpha1:go_default_library",
        "//pkg/scheduler/nodeinfo:go_default_library",
        "//pkg/volume/util:go_default_library",
@@ -39,11 +34,8 @@ go_library(
go_test(
    name = "go_default_test",
    srcs = [
        "azure_test.go",
        "cinder_test.go",
        "csi_test.go",
        "ebs_test.go",
        "gce_test.go",
        "non_csi_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
pkg/scheduler/framework/plugins/nodevolumelimits/azure.go
@@ -1,62 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nodevolumelimits

import (
    "context"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
    framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
    "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// AzureDiskLimits is a plugin that checks node volume limits.
type AzureDiskLimits struct {
    predicate predicates.FitPredicate
}

var _ framework.FilterPlugin = &AzureDiskLimits{}

// AzureDiskName is the name of the plugin used in the plugin registry and configurations.
const AzureDiskName = "AzureDiskLimits"

// Name returns name of the plugin. It is used in logs, etc.
func (pl *AzureDiskLimits) Name() string {
    return AzureDiskName
}

// Filter invoked at the filter extension point.
func (pl *AzureDiskLimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
    // metadata is not needed
    _, reasons, err := pl.predicate(pod, nil, nodeInfo)
    return migration.PredicateResultToFrameworkStatus(reasons, err)
}

// NewAzureDisk returns function that initializes a new plugin and returns it.
func NewAzureDisk(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin, error) {
    informerFactory := handle.SharedInformerFactory()
    pvLister := informerFactory.Core().V1().PersistentVolumes().Lister()
    pvcLister := informerFactory.Core().V1().PersistentVolumeClaims().Lister()
    scLister := informerFactory.Storage().V1().StorageClasses().Lister()

    return &AzureDiskLimits{
        predicate: predicates.NewMaxPDVolumeCountPredicate(predicates.AzureDiskVolumeFilterType, getCSINodeListerIfEnabled(informerFactory), scLister, pvLister, pvcLister),
    }, nil
}
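Each of these per-cloud files is the same thin adapter: a FilterPlugin whose Filter method delegates to the deprecated FitPredicate and converts the result with migration.PredicateResultToFrameworkStatus. How the factories are wired into the scheduler is not part of this diff; a sketch of the likely registration, assuming the framework.Registry name-to-factory map of this scheduler version:

    // Sketch only; registry wiring is not part of this commit.
    registry := framework.Registry{
        AzureDiskName: NewAzureDisk,
        CinderName:    NewCinder,
        EBSName:       NewEBS,
    }
    _ = registry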
pkg/scheduler/framework/plugins/nodevolumelimits/azure_test.go
@@ -1,367 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nodevolumelimits

import (
    "context"
    "reflect"
    "testing"

    v1 "k8s.io/api/core/v1"
    "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
    framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

func TestAzureDiskLimits(t *testing.T) {
    oneVolPod := &v1.Pod{
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    VolumeSource: v1.VolumeSource{
                        AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "ovp"},
                    },
                },
            },
        },
    }
    twoVolPod := &v1.Pod{
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    VolumeSource: v1.VolumeSource{
                        AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp1"},
                    },
                },
                {
                    VolumeSource: v1.VolumeSource{
                        AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp2"},
                    },
                },
            },
        },
    }
    splitVolsPod := &v1.Pod{
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    VolumeSource: v1.VolumeSource{
                        HostPath: &v1.HostPathVolumeSource{},
                    },
                },
                {
                    VolumeSource: v1.VolumeSource{
                        AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "svp"},
                    },
                },
            },
        },
    }
    nonApplicablePod := &v1.Pod{
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    VolumeSource: v1.VolumeSource{
                        HostPath: &v1.HostPathVolumeSource{},
                    },
                },
            },
        },
    }
    deletedPVCPod := &v1.Pod{
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    VolumeSource: v1.VolumeSource{
                        PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
                            ClaimName: "deletedPVC",
                        },
                    },
                },
            },
        },
    }
    twoDeletedPVCPod := &v1.Pod{
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    VolumeSource: v1.VolumeSource{
                        PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
                            ClaimName: "deletedPVC",
                        },
                    },
                },
                {
                    VolumeSource: v1.VolumeSource{
                        PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
                            ClaimName: "anotherDeletedPVC",
                        },
                    },
                },
            },
        },
    }
    deletedPVPod := &v1.Pod{
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    VolumeSource: v1.VolumeSource{
                        PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
                            ClaimName: "pvcWithDeletedPV",
                        },
                    },
                },
            },
        },
    }
    // deletedPVPod2 is a different pod than deletedPVPod but using the same PVC
    deletedPVPod2 := &v1.Pod{
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    VolumeSource: v1.VolumeSource{
                        PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
                            ClaimName: "pvcWithDeletedPV",
                        },
                    },
                },
            },
        },
    }
    // anotherDeletedPVPod is a different pod than deletedPVPod and uses another PVC
    anotherDeletedPVPod := &v1.Pod{
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    VolumeSource: v1.VolumeSource{
                        PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
                            ClaimName: "anotherPVCWithDeletedPV",
                        },
                    },
                },
            },
        },
    }
    emptyPod := &v1.Pod{
        Spec: v1.PodSpec{},
    }
    unboundPVCPod := &v1.Pod{
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    VolumeSource: v1.VolumeSource{
                        PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
                            ClaimName: "unboundPVC",
                        },
                    },
                },
            },
        },
    }
    // Different pod than unboundPVCPod, but using the same unbound PVC
    unboundPVCPod2 := &v1.Pod{
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    VolumeSource: v1.VolumeSource{
                        PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
                            ClaimName: "unboundPVC",
                        },
                    },
                },
            },
        },
    }

    // pod with unbound PVC that's different to unboundPVC
    anotherUnboundPVCPod := &v1.Pod{
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    VolumeSource: v1.VolumeSource{
                        PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
                            ClaimName: "anotherUnboundPVC",
                        },
                    },
                },
            },
        },
    }

    tests := []struct {
        newPod       *v1.Pod
        existingPods []*v1.Pod
        filterName   string
        driverName   string
        maxVols      int
        test         string
        wantStatus   *framework.Status
    }{
        {
            newPod:       oneVolPod,
            existingPods: []*v1.Pod{twoVolPod, oneVolPod},
            filterName:   predicates.AzureDiskVolumeFilterType,
            maxVols:      4,
            test:         "fits when node capacity >= new pod's AzureDisk volumes",
        },
        {
            newPod:       twoVolPod,
            existingPods: []*v1.Pod{oneVolPod},
            filterName:   predicates.AzureDiskVolumeFilterType,
            maxVols:      2,
            test:         "fit when node capacity < new pod's AzureDisk volumes",
        },
        {
            newPod:       splitVolsPod,
            existingPods: []*v1.Pod{twoVolPod},
            filterName:   predicates.AzureDiskVolumeFilterType,
            maxVols:      3,
            test:         "new pod's count ignores non-AzureDisk volumes",
        },
        {
            newPod:       twoVolPod,
            existingPods: []*v1.Pod{splitVolsPod, nonApplicablePod, emptyPod},
            filterName:   predicates.AzureDiskVolumeFilterType,
            maxVols:      3,
            test:         "existing pods' counts ignore non-AzureDisk volumes",
        },
        {
            newPod:       onePVCPod(predicates.AzureDiskVolumeFilterType),
            existingPods: []*v1.Pod{splitVolsPod, nonApplicablePod, emptyPod},
            filterName:   predicates.AzureDiskVolumeFilterType,
            maxVols:      3,
            test:         "new pod's count considers PVCs backed by AzureDisk volumes",
        },
        {
            newPod:       splitPVCPod(predicates.AzureDiskVolumeFilterType),
            existingPods: []*v1.Pod{splitVolsPod, oneVolPod},
            filterName:   predicates.AzureDiskVolumeFilterType,
            maxVols:      3,
            test:         "new pod's count ignores PVCs not backed by AzureDisk volumes",
        },
        {
            newPod:       twoVolPod,
            existingPods: []*v1.Pod{oneVolPod, onePVCPod(predicates.AzureDiskVolumeFilterType)},
            filterName:   predicates.AzureDiskVolumeFilterType,
            maxVols:      3,
            test:         "existing pods' counts considers PVCs backed by AzureDisk volumes",
        },
        {
            newPod:       twoVolPod,
            existingPods: []*v1.Pod{oneVolPod, twoVolPod, onePVCPod(predicates.AzureDiskVolumeFilterType)},
            filterName:   predicates.AzureDiskVolumeFilterType,
            maxVols:      4,
            test:         "already-mounted AzureDisk volumes are always ok to allow",
        },
        {
            newPod:       splitVolsPod,
            existingPods: []*v1.Pod{oneVolPod, oneVolPod, onePVCPod(predicates.AzureDiskVolumeFilterType)},
            filterName:   predicates.AzureDiskVolumeFilterType,
            maxVols:      3,
            test:         "the same AzureDisk volumes are not counted multiple times",
        },
        {
            newPod:       onePVCPod(predicates.AzureDiskVolumeFilterType),
            existingPods: []*v1.Pod{oneVolPod, deletedPVCPod},
            filterName:   predicates.AzureDiskVolumeFilterType,
            maxVols:      2,
            test:         "pod with missing PVC is counted towards the PV limit",
        },
        {
            newPod:       onePVCPod(predicates.AzureDiskVolumeFilterType),
            existingPods: []*v1.Pod{oneVolPod, deletedPVCPod},
            filterName:   predicates.AzureDiskVolumeFilterType,
            maxVols:      3,
            test:         "pod with missing PVC is counted towards the PV limit",
        },
        {
            newPod:       onePVCPod(predicates.AzureDiskVolumeFilterType),
            existingPods: []*v1.Pod{oneVolPod, twoDeletedPVCPod},
            filterName:   predicates.AzureDiskVolumeFilterType,
            maxVols:      3,
            test:         "pod with missing two PVCs is counted towards the PV limit twice",
        },
        {
            newPod:       onePVCPod(predicates.AzureDiskVolumeFilterType),
            existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
            filterName:   predicates.AzureDiskVolumeFilterType,
            maxVols:      2,
            test:         "pod with missing PV is counted towards the PV limit",
        },
        {
            newPod:       onePVCPod(predicates.AzureDiskVolumeFilterType),
            existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
            filterName:   predicates.AzureDiskVolumeFilterType,
            maxVols:      3,
            test:         "pod with missing PV is counted towards the PV limit",
        },
        {
            newPod:       deletedPVPod2,
            existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
            filterName:   predicates.AzureDiskVolumeFilterType,
            maxVols:      2,
            test:         "two pods missing the same PV are counted towards the PV limit only once",
        },
        {
            newPod:       anotherDeletedPVPod,
            existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
            filterName:   predicates.AzureDiskVolumeFilterType,
            maxVols:      2,
            test:         "two pods missing different PVs are counted towards the PV limit twice",
        },
        {
            newPod:       onePVCPod(predicates.AzureDiskVolumeFilterType),
            existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
            filterName:   predicates.AzureDiskVolumeFilterType,
            maxVols:      2,
            test:         "pod with unbound PVC is counted towards the PV limit",
        },
        {
            newPod:       onePVCPod(predicates.AzureDiskVolumeFilterType),
            existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
            filterName:   predicates.AzureDiskVolumeFilterType,
            maxVols:      3,
            test:         "pod with unbound PVC is counted towards the PV limit",
        },
        {
            newPod:       unboundPVCPod2,
            existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
            filterName:   predicates.AzureDiskVolumeFilterType,
            maxVols:      2,
            test:         "the same unbound PVC in multiple pods is counted towards the PV limit only once",
        },
        {
            newPod:       anotherUnboundPVCPod,
            existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
            filterName:   predicates.AzureDiskVolumeFilterType,
            maxVols:      2,
            test:         "two different unbound PVCs are counted towards the PV limit as two volumes",
        },
    }

    for _, test := range tests {
        t.Run(test.test, func(t *testing.T) {
            node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName)
            p := &AzureDiskLimits{
                predicate: predicates.NewMaxPDVolumeCountPredicate(test.filterName, getFakeCSINodeLister(csiNode), getFakeCSIStorageClassLister(test.filterName, test.driverName), getFakePVLister(test.filterName), getFakePVCLister(test.filterName)),
            }
            gotStatus := p.Filter(context.Background(), nil, test.newPod, node)
            if !reflect.DeepEqual(gotStatus, test.wantStatus) {
                t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
            }
        })
    }
}
pkg/scheduler/framework/plugins/nodevolumelimits/cinder.go
@@ -1,61 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nodevolumelimits

import (
    "context"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
    framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
    "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// CinderLimits is a plugin that checks node volume limits.
type CinderLimits struct {
    predicate predicates.FitPredicate
}

var _ framework.FilterPlugin = &CinderLimits{}

// CinderName is the name of the plugin used in the plugin registry and configurations.
const CinderName = "CinderLimits"

// Name returns name of the plugin. It is used in logs, etc.
func (pl *CinderLimits) Name() string {
    return CinderName
}

// Filter invoked at the filter extension point.
func (pl *CinderLimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
    // metadata is not needed
    _, reasons, err := pl.predicate(pod, nil, nodeInfo)
    return migration.PredicateResultToFrameworkStatus(reasons, err)
}

// NewCinder returns function that initializes a new plugin and returns it.
func NewCinder(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin, error) {
    informerFactory := handle.SharedInformerFactory()
    pvLister := informerFactory.Core().V1().PersistentVolumes().Lister()
    pvcLister := informerFactory.Core().V1().PersistentVolumeClaims().Lister()
    scLister := informerFactory.Storage().V1().StorageClasses().Lister()
    return &CinderLimits{
        predicate: predicates.NewMaxPDVolumeCountPredicate(predicates.CinderVolumeFilterType, getCSINodeListerIfEnabled(informerFactory), scLister, pvLister, pvcLister),
    }, nil
}
pkg/scheduler/framework/plugins/nodevolumelimits/cinder_test.go
@@ -1,96 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nodevolumelimits

import (
    "context"
    "reflect"
    "testing"

    v1 "k8s.io/api/core/v1"
    "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
    framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

func TestCinderLimits(t *testing.T) {
    twoVolCinderPod := &v1.Pod{
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    VolumeSource: v1.VolumeSource{
                        Cinder: &v1.CinderVolumeSource{VolumeID: "tvp1"},
                    },
                },
                {
                    VolumeSource: v1.VolumeSource{
                        Cinder: &v1.CinderVolumeSource{VolumeID: "tvp2"},
                    },
                },
            },
        },
    }
    oneVolCinderPod := &v1.Pod{
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    VolumeSource: v1.VolumeSource{
                        Cinder: &v1.CinderVolumeSource{VolumeID: "ovp"},
                    },
                },
            },
        },
    }

    tests := []struct {
        newPod       *v1.Pod
        existingPods []*v1.Pod
        filterName   string
        driverName   string
        maxVols      int
        test         string
        wantStatus   *framework.Status
    }{
        {
            newPod:       oneVolCinderPod,
            existingPods: []*v1.Pod{twoVolCinderPod},
            filterName:   predicates.CinderVolumeFilterType,
            maxVols:      4,
            test:         "fits when node capacity >= new pod's Cinder volumes",
        },
        {
            newPod:       oneVolCinderPod,
            existingPods: []*v1.Pod{twoVolCinderPod},
            filterName:   predicates.CinderVolumeFilterType,
            maxVols:      2,
            test:         "not fit when node capacity < new pod's Cinder volumes",
            wantStatus:   framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
        },
    }

    for _, test := range tests {
        t.Run(test.test, func(t *testing.T) {
            node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName)
            p := &CinderLimits{
                predicate: predicates.NewMaxPDVolumeCountPredicate(test.filterName, getFakeCSINodeLister(csiNode), getFakeCSIStorageClassLister(test.filterName, test.driverName), getFakePVLister(test.filterName), getFakePVCLister(test.filterName)),
            }
            gotStatus := p.Filter(context.Background(), nil, test.newPod, node)
            if !reflect.DeepEqual(gotStatus, test.wantStatus) {
                t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
            }
        })
    }
}
pkg/scheduler/framework/plugins/nodevolumelimits/csi.go
@@ -52,13 +52,13 @@ const (
 // getVolumeLimitKey returns a ResourceName by filter type
 func getVolumeLimitKey(filterType string) v1.ResourceName {
     switch filterType {
-    case predicates.EBSVolumeFilterType:
+    case ebsVolumeFilterType:
         return v1.ResourceName(volumeutil.EBSVolumeLimitKey)
-    case predicates.GCEPDVolumeFilterType:
+    case gcePDVolumeFilterType:
         return v1.ResourceName(volumeutil.GCEVolumeLimitKey)
-    case predicates.AzureDiskVolumeFilterType:
+    case azureDiskVolumeFilterType:
         return v1.ResourceName(volumeutil.AzureVolumeLimitKey)
-    case predicates.CinderVolumeFilterType:
+    case cinderVolumeFilterType:
         return v1.ResourceName(volumeutil.CinderVolumeLimitKey)
     default:
         return v1.ResourceName(volumeutil.GetCSIAttachLimitKey(filterType))
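The in-tree branches map a filter type to the node's attachable-volume allocatable key (for example attachable-volumes-aws-ebs for EBS), while the default branch derives a per-driver key via GetCSIAttachLimitKey. A usage sketch; the CSI driver name here is an assumed example:

    _ = getVolumeLimitKey(ebsVolumeFilterType) // v1.ResourceName(volumeutil.EBSVolumeLimitKey)
    _ = getVolumeLimitKey("ebs.csi.aws.com")   // falls through to GetCSIAttachLimitKey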
pkg/scheduler/framework/plugins/nodevolumelimits/csinode_helper.go
@@ -1,32 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nodevolumelimits

import (
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    "k8s.io/client-go/informers"
    storagelisters "k8s.io/client-go/listers/storage/v1"
    kubefeatures "k8s.io/kubernetes/pkg/features"
)

// getCSINodeListerIfEnabled returns the CSINode lister or nil if the feature is disabled
func getCSINodeListerIfEnabled(factory informers.SharedInformerFactory) storagelisters.CSINodeLister {
    if !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.CSINodeInfo) {
        return nil
    }
    return factory.Storage().V1().CSINodes().Lister()
}
pkg/scheduler/framework/plugins/nodevolumelimits/ebs.go
@@ -1,62 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nodevolumelimits

import (
    "context"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
    framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
    "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// EBSLimits is a plugin that checks node volume limits.
type EBSLimits struct {
    predicate predicates.FitPredicate
}

var _ framework.FilterPlugin = &EBSLimits{}

// EBSName is the name of the plugin used in the plugin registry and configurations.
const EBSName = "EBSLimits"

// Name returns name of the plugin. It is used in logs, etc.
func (pl *EBSLimits) Name() string {
    return EBSName
}

// Filter invoked at the filter extension point.
func (pl *EBSLimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
    // metadata is not needed
    _, reasons, err := pl.predicate(pod, nil, nodeInfo)
    return migration.PredicateResultToFrameworkStatus(reasons, err)
}

// NewEBS returns function that initializes a new plugin and returns it.
func NewEBS(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin, error) {
    informerFactory := handle.SharedInformerFactory()
    pvLister := informerFactory.Core().V1().PersistentVolumes().Lister()
    pvcLister := informerFactory.Core().V1().PersistentVolumeClaims().Lister()
    scLister := informerFactory.Storage().V1().StorageClasses().Lister()

    return &EBSLimits{
        predicate: predicates.NewMaxPDVolumeCountPredicate(predicates.EBSVolumeFilterType, getCSINodeListerIfEnabled(informerFactory), scLister, pvLister, pvcLister),
    }, nil
}
@ -1,561 +0,0 @@
|
||||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package nodevolumelimits
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
csilibplugins "k8s.io/csi-translation-lib/plugins"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
|
||||
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
|
||||
fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
|
||||
utilpointer "k8s.io/utils/pointer"
|
||||
)
|
||||
|
||||
func onePVCPod(filterName string) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "some" + filterName + "Vol",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func splitPVCPod(filterName string) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "someNon" + filterName + "Vol",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "some" + filterName + "Vol",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestEBSLimits(t *testing.T) {
|
||||
oneVolPod := &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "ovp"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
twoVolPod := &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
					VolumeSource: v1.VolumeSource{
						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp1"},
					},
				},
				{
					VolumeSource: v1.VolumeSource{
						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp2"},
					},
				},
			},
		},
	}
	unboundPVCwithInvalidSCPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "unboundPVCwithInvalidSCPod",
						},
					},
				},
			},
		},
	}
	unboundPVCwithDefaultSCPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "unboundPVCwithDefaultSCPod",
						},
					},
				},
			},
		},
	}
	splitVolsPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						HostPath: &v1.HostPathVolumeSource{},
					},
				},
				{
					VolumeSource: v1.VolumeSource{
						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "svp"},
					},
				},
			},
		},
	}
	nonApplicablePod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						HostPath: &v1.HostPathVolumeSource{},
					},
				},
			},
		},
	}
	deletedPVCPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "deletedPVC",
						},
					},
				},
			},
		},
	}
	twoDeletedPVCPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "deletedPVC",
						},
					},
				},
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "anotherDeletedPVC",
						},
					},
				},
			},
		},
	}
	deletedPVPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "pvcWithDeletedPV",
						},
					},
				},
			},
		},
	}
	// deletedPVPod2 is a different pod than deletedPVPod but using the same PVC
	deletedPVPod2 := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "pvcWithDeletedPV",
						},
					},
				},
			},
		},
	}
	// anotherDeletedPVPod is a different pod than deletedPVPod and uses another PVC
	anotherDeletedPVPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "anotherPVCWithDeletedPV",
						},
					},
				},
			},
		},
	}
	emptyPod := &v1.Pod{
		Spec: v1.PodSpec{},
	}
	unboundPVCPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "unboundPVC",
						},
					},
				},
			},
		},
	}
	// Different pod than unboundPVCPod, but using the same unbound PVC
	unboundPVCPod2 := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "unboundPVC",
						},
					},
				},
			},
		},
	}

	// pod with unbound PVC that's different to unboundPVC
	anotherUnboundPVCPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "anotherUnboundPVC",
						},
					},
				},
			},
		},
	}

	tests := []struct {
		newPod       *v1.Pod
		existingPods []*v1.Pod
		filterName   string
		driverName   string
		maxVols      int
		test         string
		wantStatus   *framework.Status
	}{
		{
			newPod:       oneVolPod,
			existingPods: []*v1.Pod{twoVolPod, oneVolPod},
			filterName:   predicates.EBSVolumeFilterType,
			driverName:   csilibplugins.AWSEBSInTreePluginName,
			maxVols:      4,
			test:         "fits when node capacity >= new pod's EBS volumes",
		},
		{
			newPod:       twoVolPod,
			existingPods: []*v1.Pod{oneVolPod},
			filterName:   predicates.EBSVolumeFilterType,
			driverName:   csilibplugins.AWSEBSInTreePluginName,
			maxVols:      2,
			test:         "doesn't fit when node capacity < new pod's EBS volumes",
			wantStatus:   framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
		},
		{
			newPod:       splitVolsPod,
			existingPods: []*v1.Pod{twoVolPod},
			filterName:   predicates.EBSVolumeFilterType,
			driverName:   csilibplugins.AWSEBSInTreePluginName,
			maxVols:      3,
			test:         "new pod's count ignores non-EBS volumes",
		},
		{
			newPod:       twoVolPod,
			existingPods: []*v1.Pod{splitVolsPod, nonApplicablePod, emptyPod},
			filterName:   predicates.EBSVolumeFilterType,
			driverName:   csilibplugins.AWSEBSInTreePluginName,
			maxVols:      3,
			test:         "existing pods' counts ignore non-EBS volumes",
		},
		{
			newPod:       onePVCPod(predicates.EBSVolumeFilterType),
			existingPods: []*v1.Pod{splitVolsPod, nonApplicablePod, emptyPod},
			filterName:   predicates.EBSVolumeFilterType,
			driverName:   csilibplugins.AWSEBSInTreePluginName,
			maxVols:      3,
			test:         "new pod's count considers PVCs backed by EBS volumes",
		},
		{
			newPod:       splitPVCPod(predicates.EBSVolumeFilterType),
			existingPods: []*v1.Pod{splitVolsPod, oneVolPod},
			filterName:   predicates.EBSVolumeFilterType,
			driverName:   csilibplugins.AWSEBSInTreePluginName,
			maxVols:      3,
			test:         "new pod's count ignores PVCs not backed by EBS volumes",
		},
		{
			newPod:       twoVolPod,
			existingPods: []*v1.Pod{oneVolPod, onePVCPod(predicates.EBSVolumeFilterType)},
			filterName:   predicates.EBSVolumeFilterType,
			driverName:   csilibplugins.AWSEBSInTreePluginName,
			maxVols:      3,
			test:         "existing pods' counts consider PVCs backed by EBS volumes",
			wantStatus:   framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
		},
		{
			newPod:       twoVolPod,
			existingPods: []*v1.Pod{oneVolPod, twoVolPod, onePVCPod(predicates.EBSVolumeFilterType)},
			filterName:   predicates.EBSVolumeFilterType,
			driverName:   csilibplugins.AWSEBSInTreePluginName,
			maxVols:      4,
			test:         "already-mounted EBS volumes are always ok to allow",
		},
		{
			newPod:       splitVolsPod,
			existingPods: []*v1.Pod{oneVolPod, oneVolPod, onePVCPod(predicates.EBSVolumeFilterType)},
			filterName:   predicates.EBSVolumeFilterType,
			driverName:   csilibplugins.AWSEBSInTreePluginName,
			maxVols:      3,
			test:         "the same EBS volumes are not counted multiple times",
		},
		{
			newPod:       onePVCPod(predicates.EBSVolumeFilterType),
			existingPods: []*v1.Pod{oneVolPod, deletedPVCPod},
			filterName:   predicates.EBSVolumeFilterType,
			driverName:   csilibplugins.AWSEBSInTreePluginName,
			maxVols:      1,
			test:         "missing PVC is not counted towards the PV limit",
			wantStatus:   framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
		},
		{
			newPod:       onePVCPod(predicates.EBSVolumeFilterType),
			existingPods: []*v1.Pod{oneVolPod, deletedPVCPod},
			filterName:   predicates.EBSVolumeFilterType,
			driverName:   csilibplugins.AWSEBSInTreePluginName,
			maxVols:      2,
			test:         "missing PVC is not counted towards the PV limit",
		},
		{
			newPod:       onePVCPod(predicates.EBSVolumeFilterType),
			existingPods: []*v1.Pod{oneVolPod, twoDeletedPVCPod},
			filterName:   predicates.EBSVolumeFilterType,
			driverName:   csilibplugins.AWSEBSInTreePluginName,
			maxVols:      2,
			test:         "two missing PVCs are not counted towards the PV limit twice",
		},
		{
			newPod:       unboundPVCwithInvalidSCPod,
			existingPods: []*v1.Pod{oneVolPod},
			filterName:   predicates.EBSVolumeFilterType,
			driverName:   csilibplugins.AWSEBSInTreePluginName,
			maxVols:      1,
			test:         "unbound PVC with invalid SC is not counted towards the PV limit",
		},
		{
			newPod:       unboundPVCwithDefaultSCPod,
			existingPods: []*v1.Pod{oneVolPod},
			filterName:   predicates.EBSVolumeFilterType,
			driverName:   csilibplugins.AWSEBSInTreePluginName,
			maxVols:      1,
			test:         "unbound PVC from different provisioner is not counted towards the PV limit",
		},

		{
			newPod:       onePVCPod(predicates.EBSVolumeFilterType),
			existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
			filterName:   predicates.EBSVolumeFilterType,
			driverName:   csilibplugins.AWSEBSInTreePluginName,
			maxVols:      2,
			test:         "pod with missing PV is counted towards the PV limit",
			wantStatus:   framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
		},
		{
			newPod:       onePVCPod(predicates.EBSVolumeFilterType),
			existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
			filterName:   predicates.EBSVolumeFilterType,
			driverName:   csilibplugins.AWSEBSInTreePluginName,
			maxVols:      3,
			test:         "pod with missing PV is counted towards the PV limit",
		},
		{
			newPod:       deletedPVPod2,
			existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
			filterName:   predicates.EBSVolumeFilterType,
			driverName:   csilibplugins.AWSEBSInTreePluginName,
			maxVols:      2,
			test:         "two pods missing the same PV are counted towards the PV limit only once",
		},
		{
			newPod:       anotherDeletedPVPod,
			existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
			filterName:   predicates.EBSVolumeFilterType,
			driverName:   csilibplugins.AWSEBSInTreePluginName,
			maxVols:      2,
			test:         "two pods missing different PVs are counted towards the PV limit twice",
			wantStatus:   framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
		},
		{
			newPod:       onePVCPod(predicates.EBSVolumeFilterType),
			existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
			filterName:   predicates.EBSVolumeFilterType,
			driverName:   csilibplugins.AWSEBSInTreePluginName,
			maxVols:      2,
			test:         "pod with unbound PVC is counted towards the PV limit",
			wantStatus:   framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
		},
		{
			newPod:       onePVCPod(predicates.EBSVolumeFilterType),
			existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
			filterName:   predicates.EBSVolumeFilterType,
			driverName:   csilibplugins.AWSEBSInTreePluginName,
			maxVols:      3,
			test:         "pod with unbound PVC is counted towards the PV limit",
		},
		{
			newPod:       unboundPVCPod2,
			existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
			filterName:   predicates.EBSVolumeFilterType,
			driverName:   csilibplugins.AWSEBSInTreePluginName,
			maxVols:      2,
			test:         "the same unbound PVC in multiple pods is counted towards the PV limit only once",
		},
		{
			newPod:       anotherUnboundPVCPod,
			existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
			filterName:   predicates.EBSVolumeFilterType,
			driverName:   csilibplugins.AWSEBSInTreePluginName,
			maxVols:      2,
			test:         "two different unbound PVCs are counted towards the PV limit as two volumes",
			wantStatus:   framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
		},
	}

	for _, test := range tests {
		t.Run(test.test, func(t *testing.T) {
			node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName)
			p := &EBSLimits{
				predicate: predicates.NewMaxPDVolumeCountPredicate(test.filterName, getFakeCSINodeLister(csiNode), getFakeCSIStorageClassLister(test.filterName, test.driverName), getFakePVLister(test.filterName), getFakePVCLister(test.filterName)),
			}
			gotStatus := p.Filter(context.Background(), nil, test.newPod, node)
			if !reflect.DeepEqual(gotStatus, test.wantStatus) {
				t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
			}
		})
	}
}

func getFakePVCLister(filterName string) fakelisters.PersistentVolumeClaimLister {
	return fakelisters.PersistentVolumeClaimLister{
		{
			ObjectMeta: metav1.ObjectMeta{Name: "some" + filterName + "Vol"},
			Spec: v1.PersistentVolumeClaimSpec{
				VolumeName:       "some" + filterName + "Vol",
				StorageClassName: &filterName,
			},
		},
		{
			ObjectMeta: metav1.ObjectMeta{Name: "someNon" + filterName + "Vol"},
			Spec: v1.PersistentVolumeClaimSpec{
				VolumeName:       "someNon" + filterName + "Vol",
				StorageClassName: &filterName,
			},
		},
		{
			ObjectMeta: metav1.ObjectMeta{Name: "pvcWithDeletedPV"},
			Spec: v1.PersistentVolumeClaimSpec{
				VolumeName:       "pvcWithDeletedPV",
				StorageClassName: &filterName,
			},
		},
		{
			ObjectMeta: metav1.ObjectMeta{Name: "anotherPVCWithDeletedPV"},
			Spec: v1.PersistentVolumeClaimSpec{
				VolumeName:       "anotherPVCWithDeletedPV",
				StorageClassName: &filterName,
			},
		},
		{
			ObjectMeta: metav1.ObjectMeta{Name: "unboundPVC"},
			Spec: v1.PersistentVolumeClaimSpec{
				VolumeName:       "",
				StorageClassName: &filterName,
			},
		},
		{
			ObjectMeta: metav1.ObjectMeta{Name: "anotherUnboundPVC"},
			Spec: v1.PersistentVolumeClaimSpec{
				VolumeName:       "",
				StorageClassName: &filterName,
			},
		},
		{
			ObjectMeta: metav1.ObjectMeta{Name: "unboundPVCwithDefaultSCPod"},
			Spec: v1.PersistentVolumeClaimSpec{
				VolumeName:       "",
				StorageClassName: utilpointer.StringPtr("standard-sc"),
			},
		},
		{
			ObjectMeta: metav1.ObjectMeta{Name: "unboundPVCwithInvalidSCPod"},
			Spec: v1.PersistentVolumeClaimSpec{
				VolumeName:       "",
				StorageClassName: utilpointer.StringPtr("invalid-sc"),
			},
		},
	}
}

func getFakePVLister(filterName string) fakelisters.PersistentVolumeLister {
	return fakelisters.PersistentVolumeLister{
		{
			ObjectMeta: metav1.ObjectMeta{Name: "some" + filterName + "Vol"},
			Spec: v1.PersistentVolumeSpec{
				PersistentVolumeSource: v1.PersistentVolumeSource{
					AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: strings.ToLower(filterName) + "Vol"},
				},
			},
		},
		{
			ObjectMeta: metav1.ObjectMeta{Name: "someNon" + filterName + "Vol"},
			Spec: v1.PersistentVolumeSpec{
				PersistentVolumeSource: v1.PersistentVolumeSource{},
			},
		},
	}
}
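The test harness above also calls getFakeCSINodeLister and getFakeCSIStorageClassLister, which live elsewhere in this test package and are not part of this hunk. For orientation only, a storage-class fake in the same slice-backed fakelisters style as getFakePVCLister/getFakePVLister could plausibly look like the sketch below; the helper body is an assumption, not copied from the source.

// Hypothetical sketch: one StorageClass named after the filter type, wired to
// the in-tree provisioner, so provisioner-matching checks succeed for it.
func getFakeCSIStorageClassLister(scName, provisionerName string) fakelisters.StorageClassLister {
	return fakelisters.StorageClassLister{
		{
			ObjectMeta:  metav1.ObjectMeta{Name: scName},
			Provisioner: provisionerName,
		},
	}
}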
@ -1,62 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nodevolumelimits

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// GCEPDLimits is a plugin that checks node volume limits.
type GCEPDLimits struct {
	predicate predicates.FitPredicate
}

var _ framework.FilterPlugin = &GCEPDLimits{}

// GCEPDName is the name of the plugin used in the plugin registry and configurations.
const GCEPDName = "GCEPDLimits"

// Name returns name of the plugin. It is used in logs, etc.
func (pl *GCEPDLimits) Name() string {
	return GCEPDName
}

// Filter invoked at the filter extension point.
func (pl *GCEPDLimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
	// metadata is not needed
	_, reasons, err := pl.predicate(pod, nil, nodeInfo)
	return migration.PredicateResultToFrameworkStatus(reasons, err)
}

// NewGCEPD initializes a new plugin and returns it.
func NewGCEPD(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin, error) {
	informerFactory := handle.SharedInformerFactory()
	pvLister := informerFactory.Core().V1().PersistentVolumes().Lister()
	pvcLister := informerFactory.Core().V1().PersistentVolumeClaims().Lister()
	scLister := informerFactory.Storage().V1().StorageClasses().Lister()

	return &GCEPDLimits{
		predicate: predicates.NewMaxPDVolumeCountPredicate(predicates.GCEPDVolumeFilterType, getCSINodeListerIfEnabled(informerFactory), scLister, pvLister, pvcLister),
	}, nil
}
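For context on the file deleted above: a filter plugin like GCEPDLimits (or its generic replacement introduced later in this diff) is enabled by name through the scheduler framework's plugin registry, which maps plugin names to factory functions. The registry wiring itself is not part of this commit; the snippet below is a minimal illustrative sketch, assuming the framework's Registry type of this era.

// Illustrative only: map plugin names to their factories so a scheduler
// profile can enable "GCEPDLimits"/"EBSLimits" at the Filter extension point.
registry := framework.Registry{
	nodevolumelimits.GCEPDName: nodevolumelimits.NewGCEPD,
	nodevolumelimits.EBSName:   nodevolumelimits.NewEBS,
}
_ = registry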
@ -1,367 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nodevolumelimits

import (
	"context"
	"reflect"
	"testing"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

func TestGCEPDLimits(t *testing.T) {
	oneVolPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "ovp"},
					},
				},
			},
		},
	}
	twoVolPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp1"},
					},
				},
				{
					VolumeSource: v1.VolumeSource{
						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp2"},
					},
				},
			},
		},
	}
	splitVolsPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						HostPath: &v1.HostPathVolumeSource{},
					},
				},
				{
					VolumeSource: v1.VolumeSource{
						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "svp"},
					},
				},
			},
		},
	}
	nonApplicablePod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						HostPath: &v1.HostPathVolumeSource{},
					},
				},
			},
		},
	}
	deletedPVCPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "deletedPVC",
						},
					},
				},
			},
		},
	}
	twoDeletedPVCPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "deletedPVC",
						},
					},
				},
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "anotherDeletedPVC",
						},
					},
				},
			},
		},
	}
	deletedPVPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "pvcWithDeletedPV",
						},
					},
				},
			},
		},
	}
	// deletedPVPod2 is a different pod than deletedPVPod but using the same PVC
	deletedPVPod2 := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "pvcWithDeletedPV",
						},
					},
				},
			},
		},
	}
	// anotherDeletedPVPod is a different pod than deletedPVPod and uses another PVC
	anotherDeletedPVPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "anotherPVCWithDeletedPV",
						},
					},
				},
			},
		},
	}
	emptyPod := &v1.Pod{
		Spec: v1.PodSpec{},
	}
	unboundPVCPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "unboundPVC",
						},
					},
				},
			},
		},
	}
	// Different pod than unboundPVCPod, but using the same unbound PVC
	unboundPVCPod2 := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "unboundPVC",
						},
					},
				},
			},
		},
	}

	// pod with unbound PVC that's different to unboundPVC
	anotherUnboundPVCPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "anotherUnboundPVC",
						},
					},
				},
			},
		},
	}

	tests := []struct {
		newPod       *v1.Pod
		existingPods []*v1.Pod
		filterName   string
		driverName   string
		maxVols      int
		test         string
		wantStatus   *framework.Status
	}{
		{
			newPod:       oneVolPod,
			existingPods: []*v1.Pod{twoVolPod, oneVolPod},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      4,
			test:         "fits when node capacity >= new pod's GCE volumes",
		},
		{
			newPod:       twoVolPod,
			existingPods: []*v1.Pod{oneVolPod},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      2,
			test:         "fit when node capacity < new pod's GCE volumes",
		},
		{
			newPod:       splitVolsPod,
			existingPods: []*v1.Pod{twoVolPod},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      3,
			test:         "new pod's count ignores non-GCE volumes",
		},
		{
			newPod:       twoVolPod,
			existingPods: []*v1.Pod{splitVolsPod, nonApplicablePod, emptyPod},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      3,
			test:         "existing pods' counts ignore non-GCE volumes",
		},
		{
			newPod:       onePVCPod(predicates.GCEPDVolumeFilterType),
			existingPods: []*v1.Pod{splitVolsPod, nonApplicablePod, emptyPod},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      3,
			test:         "new pod's count considers PVCs backed by GCE volumes",
		},
		{
			newPod:       splitPVCPod(predicates.GCEPDVolumeFilterType),
			existingPods: []*v1.Pod{splitVolsPod, oneVolPod},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      3,
			test:         "new pod's count ignores PVCs not backed by GCE volumes",
		},
		{
			newPod:       twoVolPod,
			existingPods: []*v1.Pod{oneVolPod, onePVCPod(predicates.GCEPDVolumeFilterType)},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      3,
			test:         "existing pods' counts consider PVCs backed by GCE volumes",
		},
		{
			newPod:       twoVolPod,
			existingPods: []*v1.Pod{oneVolPod, twoVolPod, onePVCPod(predicates.GCEPDVolumeFilterType)},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      4,
			test:         "already-mounted EBS volumes are always ok to allow",
		},
		{
			newPod:       splitVolsPod,
			existingPods: []*v1.Pod{oneVolPod, oneVolPod, onePVCPod(predicates.GCEPDVolumeFilterType)},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      3,
			test:         "the same GCE volumes are not counted multiple times",
		},
		{
			newPod:       onePVCPod(predicates.GCEPDVolumeFilterType),
			existingPods: []*v1.Pod{oneVolPod, deletedPVCPod},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      2,
			test:         "pod with missing PVC is counted towards the PV limit",
		},
		{
			newPod:       onePVCPod(predicates.GCEPDVolumeFilterType),
			existingPods: []*v1.Pod{oneVolPod, deletedPVCPod},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      3,
			test:         "pod with missing PVC is counted towards the PV limit",
		},
		{
			newPod:       onePVCPod(predicates.GCEPDVolumeFilterType),
			existingPods: []*v1.Pod{oneVolPod, twoDeletedPVCPod},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      3,
			test:         "pod with missing two PVCs is counted towards the PV limit twice",
		},
		{
			newPod:       onePVCPod(predicates.GCEPDVolumeFilterType),
			existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      2,
			test:         "pod with missing PV is counted towards the PV limit",
		},
		{
			newPod:       onePVCPod(predicates.GCEPDVolumeFilterType),
			existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      3,
			test:         "pod with missing PV is counted towards the PV limit",
		},
		{
			newPod:       deletedPVPod2,
			existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      2,
			test:         "two pods missing the same PV are counted towards the PV limit only once",
		},
		{
			newPod:       anotherDeletedPVPod,
			existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      2,
			test:         "two pods missing different PVs are counted towards the PV limit twice",
		},
		{
			newPod:       onePVCPod(predicates.GCEPDVolumeFilterType),
			existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      2,
			test:         "pod with unbound PVC is counted towards the PV limit",
		},
		{
			newPod:       onePVCPod(predicates.GCEPDVolumeFilterType),
			existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      3,
			test:         "pod with unbound PVC is counted towards the PV limit",
		},
		{
			newPod:       unboundPVCPod2,
			existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      2,
			test:         "the same unbound PVC in multiple pods is counted towards the PV limit only once",
		},
		{
			newPod:       anotherUnboundPVCPod,
			existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      2,
			test:         "two different unbound PVCs are counted towards the PV limit as two volumes",
		},
	}

	for _, test := range tests {
		t.Run(test.test, func(t *testing.T) {
			node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName)
			p := &GCEPDLimits{
				predicate: predicates.NewMaxPDVolumeCountPredicate(test.filterName, getFakeCSINodeLister(csiNode), getFakeCSIStorageClassLister(test.filterName, test.driverName), getFakePVLister(test.filterName), getFakePVCLister(test.filterName)),
			}
			gotStatus := p.Filter(context.Background(), nil, test.newPod, node)
			if !reflect.DeepEqual(gotStatus, test.wantStatus) {
				t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
			}
		})
	}
}
520 pkg/scheduler/framework/plugins/nodevolumelimits/non_csi.go Normal file
@ -0,0 +1,520 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nodevolumelimits

import (
	"context"
	"fmt"
	"os"
	"regexp"
	"strconv"

	v1 "k8s.io/api/core/v1"
	storage "k8s.io/api/storage/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/rand"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/client-go/informers"
	corelisters "k8s.io/client-go/listers/core/v1"
	storagelisters "k8s.io/client-go/listers/storage/v1"
	csilibplugins "k8s.io/csi-translation-lib/plugins"
	"k8s.io/klog"
	"k8s.io/kubernetes/pkg/features"
	kubefeatures "k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

const (
	// defaultMaxGCEPDVolumes defines the maximum number of PD Volumes for GCE.
	// GCE instances can have up to 16 PD volumes attached.
	defaultMaxGCEPDVolumes = 16
	// defaultMaxAzureDiskVolumes defines the maximum number of PD Volumes for Azure.
	// Larger Azure VMs can actually have many more disks attached.
	// TODO We should determine the max based on VM size
	defaultMaxAzureDiskVolumes = 16

	// ebsVolumeFilterType defines the filter name for ebsVolumeFilter.
	ebsVolumeFilterType = "EBS"
	// gcePDVolumeFilterType defines the filter name for gcePDVolumeFilter.
	gcePDVolumeFilterType = "GCE"
	// azureDiskVolumeFilterType defines the filter name for azureDiskVolumeFilter.
	azureDiskVolumeFilterType = "AzureDisk"
	// cinderVolumeFilterType defines the filter name for cinderVolumeFilter.
	cinderVolumeFilterType = "Cinder"
)

// AzureDiskName is the name of the plugin used in the plugin registry and configurations.
const AzureDiskName = "AzureDiskLimits"

// NewAzureDisk initializes a new plugin and returns it.
func NewAzureDisk(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin, error) {
	informerFactory := handle.SharedInformerFactory()
	return newNonCSILimitsWithInformerFactory(azureDiskVolumeFilterType, informerFactory), nil
}

// CinderName is the name of the plugin used in the plugin registry and configurations.
const CinderName = "CinderLimits"

// NewCinder initializes a new plugin and returns it.
func NewCinder(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin, error) {
	informerFactory := handle.SharedInformerFactory()
	return newNonCSILimitsWithInformerFactory(cinderVolumeFilterType, informerFactory), nil
}

// EBSName is the name of the plugin used in the plugin registry and configurations.
const EBSName = "EBSLimits"

// NewEBS initializes a new plugin and returns it.
func NewEBS(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin, error) {
	informerFactory := handle.SharedInformerFactory()
	return newNonCSILimitsWithInformerFactory(ebsVolumeFilterType, informerFactory), nil
}

// GCEPDName is the name of the plugin used in the plugin registry and configurations.
const GCEPDName = "GCEPDLimits"

// NewGCEPD initializes a new plugin and returns it.
func NewGCEPD(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin, error) {
	informerFactory := handle.SharedInformerFactory()
	return newNonCSILimitsWithInformerFactory(gcePDVolumeFilterType, informerFactory), nil
}

// nonCSILimits contains information to check the max number of volumes for a plugin.
type nonCSILimits struct {
	name           string
	filter         VolumeFilter
	volumeLimitKey v1.ResourceName
	maxVolumeFunc  func(node *v1.Node) int
	csiNodeLister  storagelisters.CSINodeLister
	pvLister       corelisters.PersistentVolumeLister
	pvcLister      corelisters.PersistentVolumeClaimLister
	scLister       storagelisters.StorageClassLister

	// The string below is generated randomly during the struct's initialization.
	// It is used to prefix the volumeID generated inside the Filter() method to
	// avoid conflicts with any real volume.
	randomVolumeIDPrefix string
}

var _ framework.FilterPlugin = &nonCSILimits{}

// newNonCSILimitsWithInformerFactory returns a plugin with filter name and informer factory.
func newNonCSILimitsWithInformerFactory(
	filterName string,
	informerFactory informers.SharedInformerFactory,
) framework.Plugin {
	pvLister := informerFactory.Core().V1().PersistentVolumes().Lister()
	pvcLister := informerFactory.Core().V1().PersistentVolumeClaims().Lister()
	scLister := informerFactory.Storage().V1().StorageClasses().Lister()

	return newNonCSILimits(filterName, getCSINodeListerIfEnabled(informerFactory), scLister, pvLister, pvcLister)
}

// newNonCSILimits creates a plugin which evaluates whether a pod can fit based on the
// number of volumes which match a filter that it requests, and those that are already present.
//
// DEPRECATED
// All cloudprovider specific predicates defined here are deprecated in favour of CSI volume limit
// predicate - MaxCSIVolumeCountPred.
//
// The predicate looks for both volumes used directly, as well as PVC volumes that are backed by relevant volume
// types, counts the number of unique volumes, and rejects the new pod if it would place the total count over
// the maximum.
func newNonCSILimits(
	filterName string,
	csiNodeLister storagelisters.CSINodeLister,
	scLister storagelisters.StorageClassLister,
	pvLister corelisters.PersistentVolumeLister,
	pvcLister corelisters.PersistentVolumeClaimLister,
) framework.Plugin {
	var filter VolumeFilter
	var volumeLimitKey v1.ResourceName
	var name string

	switch filterName {
	case ebsVolumeFilterType:
		name = EBSName
		filter = ebsVolumeFilter
		volumeLimitKey = v1.ResourceName(volumeutil.EBSVolumeLimitKey)
	case gcePDVolumeFilterType:
		name = GCEPDName
		filter = gcePDVolumeFilter
		volumeLimitKey = v1.ResourceName(volumeutil.GCEVolumeLimitKey)
	case azureDiskVolumeFilterType:
		name = AzureDiskName
		filter = azureDiskVolumeFilter
		volumeLimitKey = v1.ResourceName(volumeutil.AzureVolumeLimitKey)
	case cinderVolumeFilterType:
		name = CinderName
		filter = cinderVolumeFilter
		volumeLimitKey = v1.ResourceName(volumeutil.CinderVolumeLimitKey)
	default:
		klog.Fatalf("Wrong filterName; only %v, %v, %v and %v are supported", ebsVolumeFilterType,
			gcePDVolumeFilterType, azureDiskVolumeFilterType, cinderVolumeFilterType)
		return nil
	}
	pl := &nonCSILimits{
		name:                 name,
		filter:               filter,
		volumeLimitKey:       volumeLimitKey,
		maxVolumeFunc:        getMaxVolumeFunc(filterName),
		csiNodeLister:        csiNodeLister,
		pvLister:             pvLister,
		pvcLister:            pvcLister,
		scLister:             scLister,
		randomVolumeIDPrefix: rand.String(32),
	}

	return pl
}

// Name returns name of the plugin. It is used in logs, etc.
func (pl *nonCSILimits) Name() string {
	return pl.name
}

// Filter invoked at the filter extension point.
func (pl *nonCSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
	// If a pod doesn't have any volume attached to it, the predicate will always be true.
	// Thus we make a fast path for it, to avoid unnecessary computations in this case.
	if len(pod.Spec.Volumes) == 0 {
		return nil
	}

	newVolumes := make(map[string]bool)
	if err := pl.filterVolumes(pod.Spec.Volumes, pod.Namespace, newVolumes); err != nil {
		return framework.NewStatus(framework.Error, err.Error())
	}

	// quick return
	if len(newVolumes) == 0 {
		return nil
	}

	node := nodeInfo.Node()
	if node == nil {
		return framework.NewStatus(framework.Error, "node not found")
	}

	var csiNode *storage.CSINode
	var err error
	if pl.csiNodeLister != nil {
		csiNode, err = pl.csiNodeLister.Get(node.Name)
		if err != nil {
			// we don't fail here because the CSINode object is only necessary
			// for determining whether the migration is enabled or not
			klog.V(5).Infof("Could not get a CSINode object for the node: %v", err)
		}
	}

	// if a plugin has been migrated to a CSI driver, defer to the CSI predicate
	if pl.filter.IsMigrated(csiNode) {
		return nil
	}

	// count unique volumes
	existingVolumes := make(map[string]bool)
	for _, existingPod := range nodeInfo.Pods() {
		if err := pl.filterVolumes(existingPod.Spec.Volumes, existingPod.Namespace, existingVolumes); err != nil {
			return framework.NewStatus(framework.Error, err.Error())
		}
	}
	numExistingVolumes := len(existingVolumes)

	// filter out already-mounted volumes
	for k := range existingVolumes {
		if _, ok := newVolumes[k]; ok {
			delete(newVolumes, k)
		}
	}

	numNewVolumes := len(newVolumes)
	maxAttachLimit := pl.maxVolumeFunc(node)
	volumeLimits := nodeInfo.VolumeLimits()
	if maxAttachLimitFromAllocatable, ok := volumeLimits[pl.volumeLimitKey]; ok {
		maxAttachLimit = int(maxAttachLimitFromAllocatable)
	}

	if numExistingVolumes+numNewVolumes > maxAttachLimit {
		// violates MaxEBSVolumeCount or MaxGCEPDVolumeCount
		return framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason())
	}
	if nodeInfo != nil && nodeInfo.TransientInfo != nil && utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) {
		nodeInfo.TransientInfo.TransientLock.Lock()
		defer nodeInfo.TransientInfo.TransientLock.Unlock()
		nodeInfo.TransientInfo.TransNodeInfo.AllocatableVolumesCount = maxAttachLimit - numExistingVolumes
		nodeInfo.TransientInfo.TransNodeInfo.RequestedVolumes = numNewVolumes
	}
	return nil
}

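To make the arithmetic in Filter concrete: volumes already attached to the node are subtracted from the pod's requested set before the sum is compared against the limit, so reusing an already-attached volume costs nothing. A toy illustration of the counting scheme (standalone Go, volume IDs and the limit are invented for the example):

// existing: unique matching volume IDs already on the node (from all pods).
// requested: unique matching volume IDs the new pod would add.
existing := map[string]bool{"vol-a": true, "vol-b": true}
requested := map[string]bool{"vol-b": true, "vol-c": true}
for id := range existing {
	delete(requested, id) // "vol-b" is already attached, so it is free
}
// 2 existing + 1 genuinely new volume = 3 > a maxAttachLimit of 2: unschedulable.
fits := len(existing)+len(requested) <= 2
_ = fits // false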
func (pl *nonCSILimits) filterVolumes(volumes []v1.Volume, namespace string, filteredVolumes map[string]bool) error {
	for i := range volumes {
		vol := &volumes[i]
		if id, ok := pl.filter.FilterVolume(vol); ok {
			filteredVolumes[id] = true
		} else if vol.PersistentVolumeClaim != nil {
			pvcName := vol.PersistentVolumeClaim.ClaimName
			if pvcName == "" {
				return fmt.Errorf("PersistentVolumeClaim had no name")
			}

			// Until we know the real ID of the volume, use namespace/pvcName as a substitute
			// with a random prefix (calculated and stored inside 'pl' during initialization)
			// to avoid conflicts with existing volume IDs.
			pvID := fmt.Sprintf("%s-%s/%s", pl.randomVolumeIDPrefix, namespace, pvcName)

			pvc, err := pl.pvcLister.PersistentVolumeClaims(namespace).Get(pvcName)
			if err != nil || pvc == nil {
				// If the PVC is invalid, we don't count the volume because
				// there's no guarantee that it belongs to the running predicate.
				klog.V(4).Infof("Unable to look up PVC info for %s/%s, assuming PVC doesn't match predicate when counting limits: %v", namespace, pvcName, err)
				continue
			}

			pvName := pvc.Spec.VolumeName
			if pvName == "" {
				// PVC is not bound. It was either deleted and created again or
				// it was forcefully unbound by admin. The pod can still use the
				// original PV where it was bound to, so we count the volume if
				// it belongs to the running predicate.
				if pl.matchProvisioner(pvc) {
					klog.V(4).Infof("PVC %s/%s is not bound, assuming PVC matches predicate when counting limits", namespace, pvcName)
					filteredVolumes[pvID] = true
				}
				continue
			}

			pv, err := pl.pvLister.Get(pvName)
			if err != nil || pv == nil {
				// If the PV is invalid and PVC belongs to the running predicate,
				// log the error and count the PV towards the PV limit.
				if pl.matchProvisioner(pvc) {
					klog.V(4).Infof("Unable to look up PV info for %s/%s/%s, assuming PV matches predicate when counting limits: %v", namespace, pvcName, pvName, err)
					filteredVolumes[pvID] = true
				}
				continue
			}

			if id, ok := pl.filter.FilterPersistentVolume(pv); ok {
				filteredVolumes[id] = true
			}
		}
	}

	return nil
}

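The synthetic pvID built above is worth spelling out: because the prefix is random per plugin instance, a placeholder ID can never collide with a real cloud volume ID, while two pods referencing the same unbound claim still collapse onto one map key and get counted once. A small illustration (the namespace and claim name are invented):

prefix := rand.String(32)
idFromPodA := fmt.Sprintf("%s-%s/%s", prefix, "default", "unboundPVC")
idFromPodB := fmt.Sprintf("%s-%s/%s", prefix, "default", "unboundPVC")
// idFromPodA == idFromPodB, so filteredVolumes records this claim only once,
// and no real VolumeID can match a 32-character random prefix.
_ = idFromPodA == idFromPodB // true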
// matchProvisioner helps identify if the given PVC belongs to the running predicate.
func (pl *nonCSILimits) matchProvisioner(pvc *v1.PersistentVolumeClaim) bool {
	if pvc.Spec.StorageClassName == nil {
		return false
	}

	storageClass, err := pl.scLister.Get(*pvc.Spec.StorageClassName)
	if err != nil || storageClass == nil {
		return false
	}

	return pl.filter.MatchProvisioner(storageClass)
}

// getMaxVolLimitFromEnv checks the max PD volumes environment variable; it returns -1
// when the variable is unset or invalid, so that callers fall back to the per-cloud default.
func getMaxVolLimitFromEnv() int {
	if rawMaxVols := os.Getenv(predicates.KubeMaxPDVols); rawMaxVols != "" {
		if parsedMaxVols, err := strconv.Atoi(rawMaxVols); err != nil {
			klog.Errorf("Unable to parse maximum PD volumes value, using default: %v", err)
		} else if parsedMaxVols <= 0 {
			klog.Errorf("Maximum PD volumes must be a positive value, using default")
		} else {
			return parsedMaxVols
		}
	}

	return -1
}

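getMaxVolLimitFromEnv implements the legacy KUBE_MAX_PD_VOLS escape hatch (predicates.KubeMaxPDVols). Its contract, shown as a short usage sketch:

os.Setenv(predicates.KubeMaxPDVols, "25")
fmt.Println(getMaxVolLimitFromEnv()) // 25: a valid positive value overrides the default

os.Setenv(predicates.KubeMaxPDVols, "not-a-number")
fmt.Println(getMaxVolLimitFromEnv()) // -1: logged and ignored, so getMaxVolumeFunc
                                     // below falls back to the per-cloud default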
// VolumeFilter contains information on how to filter PD Volumes when checking PD Volume caps.
type VolumeFilter struct {
	// Filter normal volumes
	FilterVolume           func(vol *v1.Volume) (id string, relevant bool)
	FilterPersistentVolume func(pv *v1.PersistentVolume) (id string, relevant bool)
	// MatchProvisioner evaluates if the StorageClass provisioner matches the running predicate
	MatchProvisioner func(sc *storage.StorageClass) (relevant bool)
	// IsMigrated returns a boolean specifying whether the plugin is migrated to a CSI driver
	IsMigrated func(csiNode *storage.CSINode) bool
}

// ebsVolumeFilter is a VolumeFilter for filtering AWS ElasticBlockStore Volumes.
var ebsVolumeFilter = VolumeFilter{
	FilterVolume: func(vol *v1.Volume) (string, bool) {
		if vol.AWSElasticBlockStore != nil {
			return vol.AWSElasticBlockStore.VolumeID, true
		}
		return "", false
	},

	FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) {
		if pv.Spec.AWSElasticBlockStore != nil {
			return pv.Spec.AWSElasticBlockStore.VolumeID, true
		}
		return "", false
	},

	MatchProvisioner: func(sc *storage.StorageClass) (relevant bool) {
		if sc.Provisioner == csilibplugins.AWSEBSInTreePluginName {
			return true
		}
		return false
	},

	IsMigrated: func(csiNode *storage.CSINode) bool {
		return isCSIMigrationOn(csiNode, csilibplugins.AWSEBSInTreePluginName)
	},
}

// gcePDVolumeFilter is a VolumeFilter for filtering gce PersistentDisk Volumes.
var gcePDVolumeFilter = VolumeFilter{
	FilterVolume: func(vol *v1.Volume) (string, bool) {
		if vol.GCEPersistentDisk != nil {
			return vol.GCEPersistentDisk.PDName, true
		}
		return "", false
	},

	FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) {
		if pv.Spec.GCEPersistentDisk != nil {
			return pv.Spec.GCEPersistentDisk.PDName, true
		}
		return "", false
	},

	MatchProvisioner: func(sc *storage.StorageClass) (relevant bool) {
		if sc.Provisioner == csilibplugins.GCEPDInTreePluginName {
			return true
		}
		return false
	},

	IsMigrated: func(csiNode *storage.CSINode) bool {
		return isCSIMigrationOn(csiNode, csilibplugins.GCEPDInTreePluginName)
	},
}

// azureDiskVolumeFilter is a VolumeFilter for filtering azure Disk Volumes.
var azureDiskVolumeFilter = VolumeFilter{
	FilterVolume: func(vol *v1.Volume) (string, bool) {
		if vol.AzureDisk != nil {
			return vol.AzureDisk.DiskName, true
		}
		return "", false
	},

	FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) {
		if pv.Spec.AzureDisk != nil {
			return pv.Spec.AzureDisk.DiskName, true
		}
		return "", false
	},

	MatchProvisioner: func(sc *storage.StorageClass) (relevant bool) {
		if sc.Provisioner == csilibplugins.AzureDiskInTreePluginName {
			return true
		}
		return false
	},

	IsMigrated: func(csiNode *storage.CSINode) bool {
		return isCSIMigrationOn(csiNode, csilibplugins.AzureDiskInTreePluginName)
	},
}

// cinderVolumeFilter is a VolumeFilter for filtering cinder Volumes.
// It will be deprecated once Openstack cloudprovider has been removed from in-tree.
var cinderVolumeFilter = VolumeFilter{
	FilterVolume: func(vol *v1.Volume) (string, bool) {
		if vol.Cinder != nil {
			return vol.Cinder.VolumeID, true
		}
		return "", false
	},

	FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) {
		if pv.Spec.Cinder != nil {
			return pv.Spec.Cinder.VolumeID, true
		}
		return "", false
	},

	MatchProvisioner: func(sc *storage.StorageClass) (relevant bool) {
		if sc.Provisioner == csilibplugins.CinderInTreePluginName {
			return true
		}
		return false
	},

	IsMigrated: func(csiNode *storage.CSINode) bool {
		return isCSIMigrationOn(csiNode, csilibplugins.CinderInTreePluginName)
	},
}

func getMaxVolumeFunc(filterName string) func(node *v1.Node) int {
	return func(node *v1.Node) int {
		maxVolumesFromEnv := getMaxVolLimitFromEnv()
		if maxVolumesFromEnv > 0 {
			return maxVolumesFromEnv
		}

		var nodeInstanceType string
		for k, v := range node.ObjectMeta.Labels {
			if k == v1.LabelInstanceType || k == v1.LabelInstanceTypeStable {
				nodeInstanceType = v
				break
			}
		}
		switch filterName {
		case ebsVolumeFilterType:
			return getMaxEBSVolume(nodeInstanceType)
		case gcePDVolumeFilterType:
			return defaultMaxGCEPDVolumes
		case azureDiskVolumeFilterType:
			return defaultMaxAzureDiskVolumes
		case cinderVolumeFilterType:
			return volumeutil.DefaultMaxCinderVolumes
		default:
			return -1
		}
	}
}

func getMaxEBSVolume(nodeInstanceType string) int {
	if ok, _ := regexp.MatchString(volumeutil.EBSNitroLimitRegex, nodeInstanceType); ok {
		return volumeutil.DefaultMaxEBSNitroVolumeLimit
	}
	return volumeutil.DefaultMaxEBSVolumes
}

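Of the per-cloud defaults above, only the EBS branch inspects the node's instance type: Nitro-based families (matched by volumeutil.EBSNitroLimitRegex) get AWS's higher attach limit. For example, assuming the pkg/volume/util constants at the time of this commit (25 for Nitro, 39 otherwise; instance names are illustrative):

fmt.Println(getMaxEBSVolume("m5.large")) // volumeutil.DefaultMaxEBSNitroVolumeLimit (25)
fmt.Println(getMaxEBSVolume("m4.large")) // volumeutil.DefaultMaxEBSVolumes (39)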
// getCSINodeListerIfEnabled returns the CSINode lister or nil if the feature is disabled
func getCSINodeListerIfEnabled(factory informers.SharedInformerFactory) storagelisters.CSINodeLister {
	if !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.CSINodeInfo) {
		return nil
	}
	return factory.Storage().V1().CSINodes().Lister()
}