From cba2b3f773f6a232c43c96da13847a26f08aad61 Mon Sep 17 00:00:00 2001 From: carlory Date: Wed, 20 Mar 2024 15:15:48 +0800 Subject: [PATCH] kube-scheduler remove non-csi volumelimit plugins --- .../framework/plugins/names/names.go | 4 - .../framework/plugins/nodevolumelimits/csi.go | 57 +- .../plugins/nodevolumelimits/csi_test.go | 66 +- .../plugins/nodevolumelimits/non_csi.go | 567 --------- .../plugins/nodevolumelimits/non_csi_test.go | 1069 ----------------- .../plugins/nodevolumelimits/utils.go | 13 - pkg/scheduler/framework/plugins/registry.go | 4 - .../scheduler_perf/scheduler_perf.go | 4 - .../volumescheduling/volume_binding_test.go | 3 +- 9 files changed, 52 insertions(+), 1735 deletions(-) delete mode 100644 pkg/scheduler/framework/plugins/nodevolumelimits/non_csi.go delete mode 100644 pkg/scheduler/framework/plugins/nodevolumelimits/non_csi_test.go diff --git a/pkg/scheduler/framework/plugins/names/names.go b/pkg/scheduler/framework/plugins/names/names.go index 4d54867509a..a3cf1342338 100644 --- a/pkg/scheduler/framework/plugins/names/names.go +++ b/pkg/scheduler/framework/plugins/names/names.go @@ -30,10 +30,6 @@ const ( NodeResourcesFit = "NodeResourcesFit" NodeUnschedulable = "NodeUnschedulable" NodeVolumeLimits = "NodeVolumeLimits" - AzureDiskLimits = "AzureDiskLimits" - CinderLimits = "CinderLimits" - EBSLimits = "EBSLimits" - GCEPDLimits = "GCEPDLimits" PodTopologySpread = "PodTopologySpread" SchedulingGates = "SchedulingGates" TaintToleration = "TaintToleration" diff --git a/pkg/scheduler/framework/plugins/nodevolumelimits/csi.go b/pkg/scheduler/framework/plugins/nodevolumelimits/csi.go index 9c2e037fd7f..d35c6c2248c 100644 --- a/pkg/scheduler/framework/plugins/nodevolumelimits/csi.go +++ b/pkg/scheduler/framework/plugins/nodevolumelimits/csi.go @@ -35,7 +35,11 @@ import ( "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/names" "k8s.io/kubernetes/pkg/scheduler/util" - volumeutil "k8s.io/kubernetes/pkg/volume/util" +) + +const ( + // ErrReasonMaxVolumeCountExceeded is used for MaxVolumeCount predicate error. 
+ ErrReasonMaxVolumeCountExceeded = "node(s) exceed max volume count" ) // InTreeToCSITranslator contains methods required to check migratable status @@ -141,7 +145,6 @@ func (pl *CSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v logger := klog.FromContext(ctx) - // If CSINode doesn't exist, the predicate may read the limits from Node object csiNode, err := pl.csiNodeLister.Get(node.Name) if err != nil { // TODO: return the error once CSINode is created by default (2 releases) @@ -163,7 +166,7 @@ func (pl *CSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v } // If the node doesn't have volume limits, the predicate will always be true - nodeVolumeLimits := getVolumeLimits(nodeInfo, csiNode) + nodeVolumeLimits := getVolumeLimits(csiNode) if len(nodeVolumeLimits) == 0 { return nil } @@ -176,22 +179,23 @@ func (pl *CSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v } attachedVolumeCount := map[string]int{} - for volumeUniqueName, volumeLimitKey := range attachedVolumes { + for volumeUniqueName, driverName := range attachedVolumes { // Don't count single volume used in multiple pods more than once delete(newVolumes, volumeUniqueName) - attachedVolumeCount[volumeLimitKey]++ + attachedVolumeCount[driverName]++ } + // Count the new volumes count per driver newVolumeCount := map[string]int{} - for _, volumeLimitKey := range newVolumes { - newVolumeCount[volumeLimitKey]++ + for _, driverName := range newVolumes { + newVolumeCount[driverName]++ } - for volumeLimitKey, count := range newVolumeCount { - maxVolumeLimit, ok := nodeVolumeLimits[v1.ResourceName(volumeLimitKey)] + for driverName, count := range newVolumeCount { + maxVolumeLimit, ok := nodeVolumeLimits[driverName] if ok { - currentVolumeCount := attachedVolumeCount[volumeLimitKey] - logger.V(5).Info("Found plugin volume limits", "node", node.Name, "volumeLimitKey", volumeLimitKey, + currentVolumeCount := attachedVolumeCount[driverName] + logger.V(5).Info("Found plugin volume limits", "node", node.Name, "driverName", driverName, "maxLimits", maxVolumeLimit, "currentVolumeCount", currentVolumeCount, "newVolumeCount", count, "pod", klog.KObj(pod)) if currentVolumeCount+count > int(maxVolumeLimit) { @@ -203,6 +207,9 @@ func (pl *CSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v return nil } +// filterAttachableVolumes filters the attachable volumes from the pod and adds them to the result map. +// The result map is a map of volumeUniqueName to driver name. The volumeUniqueName is a unique name for +// the volume in the format of "driverName/volumeHandle". And driver name is the CSI driver name. 
func (pl *CSILimits) filterAttachableVolumes( logger klog.Logger, pod *v1.Pod, csiNode *storagev1.CSINode, newPod bool, result map[string]string) error { for _, vol := range pod.Spec.Volumes { @@ -265,8 +272,7 @@ func (pl *CSILimits) filterAttachableVolumes( } volumeUniqueName := fmt.Sprintf("%s/%s", driverName, volumeHandle) - volumeLimitKey := volumeutil.GetCSIAttachLimitKey(driverName) - result[volumeUniqueName] = volumeLimitKey + result[volumeUniqueName] = driverName } return nil } @@ -307,8 +313,7 @@ func (pl *CSILimits) checkAttachableInlineVolume(logger klog.Logger, vol *v1.Vol return nil } volumeUniqueName := fmt.Sprintf("%s/%s", driverName, translatedPV.Spec.PersistentVolumeSource.CSI.VolumeHandle) - volumeLimitKey := volumeutil.GetCSIAttachLimitKey(driverName) - result[volumeUniqueName] = volumeLimitKey + result[volumeUniqueName] = driverName return nil } @@ -428,17 +433,17 @@ func NewCSI(_ context.Context, _ runtime.Object, handle framework.Handle, fts fe }, nil } -func getVolumeLimits(nodeInfo *framework.NodeInfo, csiNode *storagev1.CSINode) map[v1.ResourceName]int64 { - // TODO: stop getting values from Node object in v1.18 - nodeVolumeLimits := volumeLimits(nodeInfo) - if csiNode != nil { - for i := range csiNode.Spec.Drivers { - d := csiNode.Spec.Drivers[i] - if d.Allocatable != nil && d.Allocatable.Count != nil { - // TODO: drop GetCSIAttachLimitKey once we don't get values from Node object (v1.18) - k := v1.ResourceName(volumeutil.GetCSIAttachLimitKey(d.Name)) - nodeVolumeLimits[k] = int64(*d.Allocatable.Count) - } +// getVolumeLimits reads the volume limits from CSINode object and returns a map of volume limits. +// The key is the driver name and the value is the maximum number of volumes that can be attached to the node. +// If a key is not found in the map, it means there is no limit for the driver on the node. 
+func getVolumeLimits(csiNode *storagev1.CSINode) map[string]int64 { + nodeVolumeLimits := make(map[string]int64) + if csiNode == nil { + return nodeVolumeLimits + } + for _, d := range csiNode.Spec.Drivers { + if d.Allocatable != nil && d.Allocatable.Count != nil { + nodeVolumeLimits[d.Name] = int64(*d.Allocatable.Count) } } return nodeVolumeLimits diff --git a/pkg/scheduler/framework/plugins/nodevolumelimits/csi_test.go b/pkg/scheduler/framework/plugins/nodevolumelimits/csi_test.go index d74d52b2597..9f06a3a443b 100644 --- a/pkg/scheduler/framework/plugins/nodevolumelimits/csi_test.go +++ b/pkg/scheduler/framework/plugins/nodevolumelimits/csi_test.go @@ -26,7 +26,6 @@ import ( "github.com/google/go-cmp/cmp" v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/rand" "k8s.io/apimachinery/pkg/util/sets" @@ -35,7 +34,6 @@ import ( "k8s.io/kubernetes/pkg/scheduler/framework" st "k8s.io/kubernetes/pkg/scheduler/testing" tf "k8s.io/kubernetes/pkg/scheduler/testing/framework" - volumeutil "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/test/utils/ktesting" "k8s.io/utils/ptr" ) @@ -51,22 +49,6 @@ var ( scName = "csi-sc" ) -// getVolumeLimitKey returns a ResourceName by filter type -func getVolumeLimitKey(filterType string) v1.ResourceName { - switch filterType { - case ebsVolumeFilterType: - return v1.ResourceName(volumeutil.EBSVolumeLimitKey) - case gcePDVolumeFilterType: - return v1.ResourceName(volumeutil.GCEVolumeLimitKey) - case azureDiskVolumeFilterType: - return v1.ResourceName(volumeutil.AzureVolumeLimitKey) - case cinderVolumeFilterType: - return v1.ResourceName(volumeutil.CinderVolumeLimitKey) - default: - return v1.ResourceName(volumeutil.GetCSIAttachLimitKey(filterType)) - } -} - func TestCSILimits(t *testing.T) { runningPod := st.MakePod().PVC("csi-ebs.csi.aws.com-3").Obj() pendingVolumePod := st.MakePod().PVC("csi-4").Obj() @@ -297,7 +279,7 @@ func TestCSILimits(t *testing.T) { maxVols: 4, driverNames: []string{ebsCSIDriverName}, test: "fits when node volume limit >= new pods CSI volume", - limitSource: "node", + limitSource: "csinode", }, { newPod: csiEBSOneVolPod, @@ -306,7 +288,7 @@ func TestCSILimits(t *testing.T) { maxVols: 2, driverNames: []string{ebsCSIDriverName}, test: "doesn't when node volume limit <= pods CSI volume", - limitSource: "node", + limitSource: "csinode", wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded), }, { @@ -326,7 +308,7 @@ func TestCSILimits(t *testing.T) { maxVols: 2, driverNames: []string{ebsCSIDriverName}, test: "count pending PVCs towards volume limit <= pods CSI volume", - limitSource: "node", + limitSource: "csinode", wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded), }, // two same pending PVCs should be counted as 1 @@ -337,7 +319,7 @@ func TestCSILimits(t *testing.T) { maxVols: 4, driverNames: []string{ebsCSIDriverName}, test: "count multiple pending pvcs towards volume limit >= pods CSI volume", - limitSource: "node", + limitSource: "csinode", }, // should count PVCs with invalid PV name but valid SC { @@ -347,7 +329,7 @@ func TestCSILimits(t *testing.T) { maxVols: 2, driverNames: []string{ebsCSIDriverName}, test: "should count PVCs with invalid PV name but valid SC", - limitSource: "node", + limitSource: "csinode", wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded), }, // don't count a volume which 
has storageclass missing @@ -358,7 +340,7 @@ func TestCSILimits(t *testing.T) { maxVols: 2, driverNames: []string{ebsCSIDriverName}, test: "don't count pvcs with missing SC towards volume limit", - limitSource: "node", + limitSource: "csinode", }, // don't count multiple volume types { @@ -368,7 +350,7 @@ func TestCSILimits(t *testing.T) { maxVols: 2, driverNames: []string{ebsCSIDriverName, gceCSIDriverName}, test: "count pvcs with the same type towards volume limit", - limitSource: "node", + limitSource: "csinode", wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded), }, { @@ -378,7 +360,7 @@ func TestCSILimits(t *testing.T) { maxVols: 2, driverNames: []string{ebsCSIDriverName, gceCSIDriverName}, test: "don't count pvcs with different type towards volume limit", - limitSource: "node", + limitSource: "csinode", }, // Tests for in-tree volume migration { @@ -396,10 +378,8 @@ func TestCSILimits(t *testing.T) { newPod: inTreeInlineVolPod, existingPods: []*v1.Pod{inTreeTwoVolPod}, filterName: "csi", - maxVols: 2, driverNames: []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName}, migrationEnabled: true, - limitSource: "node", test: "nil csi node", }, { @@ -494,6 +474,7 @@ func TestCSILimits(t *testing.T) { filterName: "csi", ephemeralEnabled: true, driverNames: []string{ebsCSIDriverName}, + limitSource: "csinode-with-no-limit", test: "ephemeral volume missing", wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, `looking up PVC test/abc-xyz: persistentvolumeclaims "abc-xyz" not found`), }, @@ -503,6 +484,7 @@ func TestCSILimits(t *testing.T) { ephemeralEnabled: true, extraClaims: []v1.PersistentVolumeClaim{*conflictingClaim}, driverNames: []string{ebsCSIDriverName}, + limitSource: "csinode-with-no-limit", test: "ephemeral volume not owned", wantStatus: framework.AsStatus(errors.New("PVC test/abc-xyz was not created for pod test/abc (pod is not owner)")), }, @@ -512,6 +494,7 @@ func TestCSILimits(t *testing.T) { ephemeralEnabled: true, extraClaims: []v1.PersistentVolumeClaim{*ephemeralClaim}, driverNames: []string{ebsCSIDriverName}, + limitSource: "csinode-with-no-limit", test: "ephemeral volume unbound", }, { @@ -522,7 +505,7 @@ func TestCSILimits(t *testing.T) { driverNames: []string{ebsCSIDriverName}, existingPods: []*v1.Pod{runningPod, csiEBSTwoVolPod}, maxVols: 2, - limitSource: "node", + limitSource: "csinode", test: "ephemeral doesn't when node volume limit <= pods CSI volume", wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded), }, @@ -534,7 +517,7 @@ func TestCSILimits(t *testing.T) { driverNames: []string{ebsCSIDriverName}, existingPods: []*v1.Pod{runningPod, ephemeralTwoVolumePod}, maxVols: 2, - limitSource: "node", + limitSource: "csinode", test: "ephemeral doesn't when node volume limit <= pods ephemeral CSI volume", wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded), }, @@ -546,7 +529,7 @@ func TestCSILimits(t *testing.T) { driverNames: []string{ebsCSIDriverName}, existingPods: []*v1.Pod{runningPod, ephemeralVolumePod, csiEBSTwoVolPod}, maxVols: 3, - limitSource: "node", + limitSource: "csinode", test: "persistent doesn't when node volume limit <= pods ephemeral CSI volume + persistent volume, ephemeral disabled", wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded), }, @@ -558,7 +541,7 @@ func TestCSILimits(t *testing.T) { driverNames: []string{ebsCSIDriverName}, existingPods: []*v1.Pod{runningPod, 
ephemeralVolumePod, csiEBSTwoVolPod}, maxVols: 3, - limitSource: "node", + limitSource: "csinode", test: "persistent doesn't when node volume limit <= pods ephemeral CSI volume + persistent volume", wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded), }, @@ -569,7 +552,8 @@ func TestCSILimits(t *testing.T) { extraClaims: []v1.PersistentVolumeClaim{*ephemeralClaim}, driverNames: []string{ebsCSIDriverName}, existingPods: []*v1.Pod{runningPod, ephemeralVolumePod, csiEBSTwoVolPod}, - maxVols: 4, + maxVols: 5, + limitSource: "csinode", test: "persistent okay when node volume limit > pods ephemeral CSI volume + persistent volume", }, { @@ -578,7 +562,7 @@ func TestCSILimits(t *testing.T) { maxVols: 2, driverNames: []string{ebsCSIDriverName}, test: "skip Filter when the pod only uses secrets and configmaps", - limitSource: "node", + limitSource: "csinode", wantPreFilterStatus: framework.NewStatus(framework.Skip), }, { @@ -587,13 +571,14 @@ func TestCSILimits(t *testing.T) { maxVols: 2, driverNames: []string{ebsCSIDriverName}, test: "don't skip Filter when the pod has pvcs", - limitSource: "node", + limitSource: "csinode", }, { newPod: ephemeralPodWithConfigmapAndSecret, filterName: "csi", ephemeralEnabled: true, driverNames: []string{ebsCSIDriverName}, + limitSource: "csinode-with-no-limit", test: "don't skip Filter when the pod has ephemeral volumes", wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, `looking up PVC test/abc-xyz: persistentvolumeclaims "abc-xyz" not found`), }, @@ -821,12 +806,6 @@ func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int } var csiNode *storagev1.CSINode - addLimitToNode := func() { - for _, driver := range driverNames { - node.Status.Allocatable[getVolumeLimitKey(driver)] = *resource.NewQuantity(int64(limit), resource.DecimalSI) - } - } - initCSINode := func() { csiNode = &storagev1.CSINode{ ObjectMeta: metav1.ObjectMeta{Name: "node-for-max-pd-test-1"}, @@ -853,13 +832,8 @@ func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int } switch limitSource { - case "node": - addLimitToNode() case "csinode": addDriversCSINode(true) - case "both": - addLimitToNode() - addDriversCSINode(true) case "csinode-with-no-limit": addDriversCSINode(false) case "no-csi-driver": diff --git a/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi.go b/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi.go deleted file mode 100644 index 1dcc6afd741..00000000000 --- a/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi.go +++ /dev/null @@ -1,567 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package nodevolumelimits - -import ( - "context" - "errors" - "fmt" - "os" - "regexp" - "strconv" - - v1 "k8s.io/api/core/v1" - storage "k8s.io/api/storage/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/rand" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/client-go/informers" - corelisters "k8s.io/client-go/listers/core/v1" - storagelisters "k8s.io/client-go/listers/storage/v1" - "k8s.io/component-helpers/storage/ephemeral" - csilibplugins "k8s.io/csi-translation-lib/plugins" - "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/scheduler/framework" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/names" - volumeutil "k8s.io/kubernetes/pkg/volume/util" -) - -const ( - // defaultMaxGCEPDVolumes defines the maximum number of PD Volumes for GCE. - // GCE instances can have up to 16 PD volumes attached. - defaultMaxGCEPDVolumes = 16 - // defaultMaxAzureDiskVolumes defines the maximum number of PD Volumes for Azure. - // Larger Azure VMs can actually have much more disks attached. - // TODO We should determine the max based on VM size - defaultMaxAzureDiskVolumes = 16 - - // ebsVolumeFilterType defines the filter name for ebsVolumeFilter. - ebsVolumeFilterType = "EBS" - // gcePDVolumeFilterType defines the filter name for gcePDVolumeFilter. - gcePDVolumeFilterType = "GCE" - // azureDiskVolumeFilterType defines the filter name for azureDiskVolumeFilter. - azureDiskVolumeFilterType = "AzureDisk" - // cinderVolumeFilterType defines the filter name for cinderVolumeFilter. - cinderVolumeFilterType = "Cinder" - - // ErrReasonMaxVolumeCountExceeded is used for MaxVolumeCount predicate error. - ErrReasonMaxVolumeCountExceeded = "node(s) exceed max volume count" - - // KubeMaxPDVols defines the maximum number of PD Volumes per kubelet. - KubeMaxPDVols = "KUBE_MAX_PD_VOLS" -) - -// AzureDiskName is the name of the plugin used in the plugin registry and configurations. -const AzureDiskName = names.AzureDiskLimits - -// NewAzureDisk returns function that initializes a new plugin and returns it. -func NewAzureDisk(ctx context.Context, _ runtime.Object, handle framework.Handle, fts feature.Features) (framework.Plugin, error) { - informerFactory := handle.SharedInformerFactory() - return newNonCSILimitsWithInformerFactory(ctx, azureDiskVolumeFilterType, informerFactory, fts), nil -} - -// CinderName is the name of the plugin used in the plugin registry and configurations. -const CinderName = names.CinderLimits - -// NewCinder returns function that initializes a new plugin and returns it. -func NewCinder(ctx context.Context, _ runtime.Object, handle framework.Handle, fts feature.Features) (framework.Plugin, error) { - informerFactory := handle.SharedInformerFactory() - return newNonCSILimitsWithInformerFactory(ctx, cinderVolumeFilterType, informerFactory, fts), nil -} - -// EBSName is the name of the plugin used in the plugin registry and configurations. -const EBSName = names.EBSLimits - -// NewEBS returns function that initializes a new plugin and returns it. -func NewEBS(ctx context.Context, _ runtime.Object, handle framework.Handle, fts feature.Features) (framework.Plugin, error) { - informerFactory := handle.SharedInformerFactory() - return newNonCSILimitsWithInformerFactory(ctx, ebsVolumeFilterType, informerFactory, fts), nil -} - -// GCEPDName is the name of the plugin used in the plugin registry and configurations. 
-const GCEPDName = names.GCEPDLimits - -// NewGCEPD returns function that initializes a new plugin and returns it. -func NewGCEPD(ctx context.Context, _ runtime.Object, handle framework.Handle, fts feature.Features) (framework.Plugin, error) { - informerFactory := handle.SharedInformerFactory() - return newNonCSILimitsWithInformerFactory(ctx, gcePDVolumeFilterType, informerFactory, fts), nil -} - -// nonCSILimits contains information to check the max number of volumes for a plugin. -type nonCSILimits struct { - name string - filter VolumeFilter - volumeLimitKey v1.ResourceName - maxVolumeFunc func(node *v1.Node) int - csiNodeLister storagelisters.CSINodeLister - pvLister corelisters.PersistentVolumeLister - pvcLister corelisters.PersistentVolumeClaimLister - scLister storagelisters.StorageClassLister - - // The string below is generated randomly during the struct's initialization. - // It is used to prefix volumeID generated inside the predicate() method to - // avoid conflicts with any real volume. - randomVolumeIDPrefix string -} - -var _ framework.PreFilterPlugin = &nonCSILimits{} -var _ framework.FilterPlugin = &nonCSILimits{} -var _ framework.EnqueueExtensions = &nonCSILimits{} - -// newNonCSILimitsWithInformerFactory returns a plugin with filter name and informer factory. -func newNonCSILimitsWithInformerFactory( - ctx context.Context, - filterName string, - informerFactory informers.SharedInformerFactory, - fts feature.Features, -) framework.Plugin { - pvLister := informerFactory.Core().V1().PersistentVolumes().Lister() - pvcLister := informerFactory.Core().V1().PersistentVolumeClaims().Lister() - csiNodesLister := informerFactory.Storage().V1().CSINodes().Lister() - scLister := informerFactory.Storage().V1().StorageClasses().Lister() - - return newNonCSILimits(ctx, filterName, csiNodesLister, scLister, pvLister, pvcLister, fts) -} - -// newNonCSILimits creates a plugin which evaluates whether a pod can fit based on the -// number of volumes which match a filter that it requests, and those that are already present. -// -// DEPRECATED -// All cloudprovider specific predicates defined here are deprecated in favour of CSI volume limit -// predicate - MaxCSIVolumeCountPred. -// -// The predicate looks for both volumes used directly, as well as PVC volumes that are backed by relevant volume -// types, counts the number of unique volumes, and rejects the new pod if it would place the total count over -// the maximum. 
-func newNonCSILimits( - ctx context.Context, - filterName string, - csiNodeLister storagelisters.CSINodeLister, - scLister storagelisters.StorageClassLister, - pvLister corelisters.PersistentVolumeLister, - pvcLister corelisters.PersistentVolumeClaimLister, - fts feature.Features, -) framework.Plugin { - logger := klog.FromContext(ctx) - var filter VolumeFilter - var volumeLimitKey v1.ResourceName - var name string - - switch filterName { - case ebsVolumeFilterType: - name = EBSName - filter = ebsVolumeFilter - volumeLimitKey = v1.ResourceName(volumeutil.EBSVolumeLimitKey) - case gcePDVolumeFilterType: - name = GCEPDName - filter = gcePDVolumeFilter - volumeLimitKey = v1.ResourceName(volumeutil.GCEVolumeLimitKey) - case azureDiskVolumeFilterType: - name = AzureDiskName - filter = azureDiskVolumeFilter - volumeLimitKey = v1.ResourceName(volumeutil.AzureVolumeLimitKey) - case cinderVolumeFilterType: - name = CinderName - filter = cinderVolumeFilter - volumeLimitKey = v1.ResourceName(volumeutil.CinderVolumeLimitKey) - default: - logger.Error(errors.New("wrong filterName"), "Cannot create nonCSILimits plugin") - return nil - } - pl := &nonCSILimits{ - name: name, - filter: filter, - volumeLimitKey: volumeLimitKey, - maxVolumeFunc: getMaxVolumeFunc(logger, filterName), - csiNodeLister: csiNodeLister, - pvLister: pvLister, - pvcLister: pvcLister, - scLister: scLister, - randomVolumeIDPrefix: rand.String(32), - } - - return pl -} - -// Name returns name of the plugin. It is used in logs, etc. -func (pl *nonCSILimits) Name() string { - return pl.name -} - -// EventsToRegister returns the possible events that may make a Pod -// failed by this plugin schedulable. -func (pl *nonCSILimits) EventsToRegister(_ context.Context) ([]framework.ClusterEventWithHint, error) { - return []framework.ClusterEventWithHint{ - {Event: framework.ClusterEvent{Resource: framework.Node, ActionType: framework.Add}}, - {Event: framework.ClusterEvent{Resource: framework.Pod, ActionType: framework.Delete}}, - {Event: framework.ClusterEvent{Resource: framework.PersistentVolumeClaim, ActionType: framework.Add}}, - }, nil -} - -// PreFilter invoked at the prefilter extension point -// -// If the pod haven't those types of volumes, we'll skip the Filter phase -func (pl *nonCSILimits) PreFilter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod) (*framework.PreFilterResult, *framework.Status) { - volumes := pod.Spec.Volumes - for i := range volumes { - vol := &volumes[i] - _, ok := pl.filter.FilterVolume(vol) - if ok || vol.PersistentVolumeClaim != nil || vol.Ephemeral != nil { - return nil, nil - } - } - - return nil, framework.NewStatus(framework.Skip) -} - -// PreFilterExtensions returns prefilter extensions, pod add and remove. -func (pl *nonCSILimits) PreFilterExtensions() framework.PreFilterExtensions { - return nil -} - -// Filter invoked at the filter extension point. -func (pl *nonCSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { - // If a pod doesn't have any volume attached to it, the predicate will always be true. - // Thus we make a fast path for it, to avoid unnecessary computations in this case. - if len(pod.Spec.Volumes) == 0 { - return nil - } - - logger := klog.FromContext(ctx) - newVolumes := sets.New[string]() - if err := pl.filterVolumes(logger, pod, true /* new pod */, newVolumes); err != nil { - if apierrors.IsNotFound(err) { - // PVC is not found. This Pod will never be schedulable until PVC is created. 
- return framework.NewStatus(framework.UnschedulableAndUnresolvable, err.Error()) - } - return framework.AsStatus(err) - } - - // quick return - if len(newVolumes) == 0 { - return nil - } - - node := nodeInfo.Node() - - var csiNode *storage.CSINode - var err error - if pl.csiNodeLister != nil { - csiNode, err = pl.csiNodeLister.Get(node.Name) - if err != nil { - // we don't fail here because the CSINode object is only necessary - // for determining whether the migration is enabled or not - logger.V(5).Info("Could not get a CSINode object for the node", "node", klog.KObj(node), "err", err) - } - } - - // if a plugin has been migrated to a CSI driver, defer to the CSI predicate - if pl.filter.IsMigrated(csiNode) { - return nil - } - - // count unique volumes - existingVolumes := sets.New[string]() - for _, existingPod := range nodeInfo.Pods { - if err := pl.filterVolumes(logger, existingPod.Pod, false /* existing pod */, existingVolumes); err != nil { - return framework.AsStatus(err) - } - } - numExistingVolumes := len(existingVolumes) - - // filter out already-mounted volumes - for k := range existingVolumes { - delete(newVolumes, k) - } - - numNewVolumes := len(newVolumes) - maxAttachLimit := pl.maxVolumeFunc(node) - volumeLimits := volumeLimits(nodeInfo) - if maxAttachLimitFromAllocatable, ok := volumeLimits[pl.volumeLimitKey]; ok { - maxAttachLimit = int(maxAttachLimitFromAllocatable) - } - - if numExistingVolumes+numNewVolumes > maxAttachLimit { - return framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded) - } - return nil -} - -func (pl *nonCSILimits) filterVolumes(logger klog.Logger, pod *v1.Pod, newPod bool, filteredVolumes sets.Set[string]) error { - volumes := pod.Spec.Volumes - for i := range volumes { - vol := &volumes[i] - if id, ok := pl.filter.FilterVolume(vol); ok { - filteredVolumes.Insert(id) - continue - } - - pvcName := "" - isEphemeral := false - switch { - case vol.PersistentVolumeClaim != nil: - pvcName = vol.PersistentVolumeClaim.ClaimName - case vol.Ephemeral != nil: - // Generic ephemeral inline volumes also use a PVC, - // just with a computed name and certain ownership. - // That is checked below once the pvc object is - // retrieved. - pvcName = ephemeral.VolumeClaimName(pod, vol) - isEphemeral = true - default: - continue - } - if pvcName == "" { - return fmt.Errorf("PersistentVolumeClaim had no name") - } - - // Until we know real ID of the volume use namespace/pvcName as substitute - // with a random prefix (calculated and stored inside 'c' during initialization) - // to avoid conflicts with existing volume IDs. - pvID := fmt.Sprintf("%s-%s/%s", pl.randomVolumeIDPrefix, pod.Namespace, pvcName) - - pvc, err := pl.pvcLister.PersistentVolumeClaims(pod.Namespace).Get(pvcName) - if err != nil { - if newPod { - // The PVC is required to proceed with - // scheduling of a new pod because it cannot - // run without it. Bail out immediately. - return fmt.Errorf("looking up PVC %s/%s: %w", pod.Namespace, pvcName, err) - } - // If the PVC is invalid, we don't count the volume because - // there's no guarantee that it belongs to the running predicate. - logger.V(4).Info("Unable to look up PVC info, assuming PVC doesn't match predicate when counting limits", "pod", klog.KObj(pod), "PVC", klog.KRef(pod.Namespace, pvcName), "err", err) - continue - } - - // The PVC for an ephemeral volume must be owned by the pod. 
- if isEphemeral { - if err := ephemeral.VolumeIsForPod(pod, pvc); err != nil { - return err - } - } - - pvName := pvc.Spec.VolumeName - if pvName == "" { - // PVC is not bound. It was either deleted and created again or - // it was forcefully unbound by admin. The pod can still use the - // original PV where it was bound to, so we count the volume if - // it belongs to the running predicate. - if pl.matchProvisioner(pvc) { - logger.V(4).Info("PVC is not bound, assuming PVC matches predicate when counting limits", "pod", klog.KObj(pod), "PVC", klog.KRef(pod.Namespace, pvcName)) - filteredVolumes.Insert(pvID) - } - continue - } - - pv, err := pl.pvLister.Get(pvName) - if err != nil { - // If the PV is invalid and PVC belongs to the running predicate, - // log the error and count the PV towards the PV limit. - if pl.matchProvisioner(pvc) { - logger.V(4).Info("Unable to look up PV, assuming PV matches predicate when counting limits", "pod", klog.KObj(pod), "PVC", klog.KRef(pod.Namespace, pvcName), "PV", klog.KRef("", pvName), "err", err) - filteredVolumes.Insert(pvID) - } - continue - } - - if id, ok := pl.filter.FilterPersistentVolume(pv); ok { - filteredVolumes.Insert(id) - } - } - - return nil -} - -// matchProvisioner helps identify if the given PVC belongs to the running predicate. -func (pl *nonCSILimits) matchProvisioner(pvc *v1.PersistentVolumeClaim) bool { - if pvc.Spec.StorageClassName == nil { - return false - } - - storageClass, err := pl.scLister.Get(*pvc.Spec.StorageClassName) - if err != nil || storageClass == nil { - return false - } - - return pl.filter.MatchProvisioner(storageClass) -} - -// getMaxVolLimitFromEnv checks the max PD volumes environment variable, otherwise returning a default value. -func getMaxVolLimitFromEnv(logger klog.Logger) int { - if rawMaxVols := os.Getenv(KubeMaxPDVols); rawMaxVols != "" { - if parsedMaxVols, err := strconv.Atoi(rawMaxVols); err != nil { - logger.Error(err, "Unable to parse maximum PD volumes value, using default") - } else if parsedMaxVols <= 0 { - logger.Error(errors.New("maximum PD volumes is negative"), "Unable to parse maximum PD volumes value, using default") - } else { - return parsedMaxVols - } - } - - return -1 -} - -// VolumeFilter contains information on how to filter PD Volumes when checking PD Volume caps. -type VolumeFilter struct { - // Filter normal volumes - FilterVolume func(vol *v1.Volume) (id string, relevant bool) - FilterPersistentVolume func(pv *v1.PersistentVolume) (id string, relevant bool) - // MatchProvisioner evaluates if the StorageClass provisioner matches the running predicate - MatchProvisioner func(sc *storage.StorageClass) (relevant bool) - // IsMigrated returns a boolean specifying whether the plugin is migrated to a CSI driver - IsMigrated func(csiNode *storage.CSINode) bool -} - -// ebsVolumeFilter is a VolumeFilter for filtering AWS ElasticBlockStore Volumes. 
-var ebsVolumeFilter = VolumeFilter{ - FilterVolume: func(vol *v1.Volume) (string, bool) { - if vol.AWSElasticBlockStore != nil { - return vol.AWSElasticBlockStore.VolumeID, true - } - return "", false - }, - - FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) { - if pv.Spec.AWSElasticBlockStore != nil { - return pv.Spec.AWSElasticBlockStore.VolumeID, true - } - return "", false - }, - - MatchProvisioner: func(sc *storage.StorageClass) bool { - return sc.Provisioner == csilibplugins.AWSEBSInTreePluginName - }, - - IsMigrated: func(csiNode *storage.CSINode) bool { - return isCSIMigrationOn(csiNode, csilibplugins.AWSEBSInTreePluginName) - }, -} - -// gcePDVolumeFilter is a VolumeFilter for filtering gce PersistentDisk Volumes. -var gcePDVolumeFilter = VolumeFilter{ - FilterVolume: func(vol *v1.Volume) (string, bool) { - if vol.GCEPersistentDisk != nil { - return vol.GCEPersistentDisk.PDName, true - } - return "", false - }, - - FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) { - if pv.Spec.GCEPersistentDisk != nil { - return pv.Spec.GCEPersistentDisk.PDName, true - } - return "", false - }, - - MatchProvisioner: func(sc *storage.StorageClass) bool { - return sc.Provisioner == csilibplugins.GCEPDInTreePluginName - }, - - IsMigrated: func(csiNode *storage.CSINode) bool { - return isCSIMigrationOn(csiNode, csilibplugins.GCEPDInTreePluginName) - }, -} - -// azureDiskVolumeFilter is a VolumeFilter for filtering azure Disk Volumes. -var azureDiskVolumeFilter = VolumeFilter{ - FilterVolume: func(vol *v1.Volume) (string, bool) { - if vol.AzureDisk != nil { - return vol.AzureDisk.DiskName, true - } - return "", false - }, - - FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) { - if pv.Spec.AzureDisk != nil { - return pv.Spec.AzureDisk.DiskName, true - } - return "", false - }, - - MatchProvisioner: func(sc *storage.StorageClass) bool { - return sc.Provisioner == csilibplugins.AzureDiskInTreePluginName - }, - - IsMigrated: func(csiNode *storage.CSINode) bool { - return isCSIMigrationOn(csiNode, csilibplugins.AzureDiskInTreePluginName) - }, -} - -// cinderVolumeFilter is a VolumeFilter for filtering cinder Volumes. -// It will be deprecated once Openstack cloudprovider has been removed from in-tree. 
-var cinderVolumeFilter = VolumeFilter{ - FilterVolume: func(vol *v1.Volume) (string, bool) { - if vol.Cinder != nil { - return vol.Cinder.VolumeID, true - } - return "", false - }, - - FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) { - if pv.Spec.Cinder != nil { - return pv.Spec.Cinder.VolumeID, true - } - return "", false - }, - - MatchProvisioner: func(sc *storage.StorageClass) bool { - return sc.Provisioner == csilibplugins.CinderInTreePluginName - }, - - IsMigrated: func(csiNode *storage.CSINode) bool { - return isCSIMigrationOn(csiNode, csilibplugins.CinderInTreePluginName) - }, -} - -func getMaxVolumeFunc(logger klog.Logger, filterName string) func(node *v1.Node) int { - return func(node *v1.Node) int { - maxVolumesFromEnv := getMaxVolLimitFromEnv(logger) - if maxVolumesFromEnv > 0 { - return maxVolumesFromEnv - } - - var nodeInstanceType string - for k, v := range node.ObjectMeta.Labels { - if k == v1.LabelInstanceType || k == v1.LabelInstanceTypeStable { - nodeInstanceType = v - break - } - } - switch filterName { - case ebsVolumeFilterType: - return getMaxEBSVolume(nodeInstanceType) - case gcePDVolumeFilterType: - return defaultMaxGCEPDVolumes - case azureDiskVolumeFilterType: - return defaultMaxAzureDiskVolumes - case cinderVolumeFilterType: - return volumeutil.DefaultMaxCinderVolumes - default: - return -1 - } - } -} - -func getMaxEBSVolume(nodeInstanceType string) int { - if ok, _ := regexp.MatchString(volumeutil.EBSNitroLimitRegex, nodeInstanceType); ok { - return volumeutil.DefaultMaxEBSNitroVolumeLimit - } - return volumeutil.DefaultMaxEBSVolumes -} diff --git a/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi_test.go b/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi_test.go deleted file mode 100644 index 5135f20eab5..00000000000 --- a/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi_test.go +++ /dev/null @@ -1,1069 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package nodevolumelimits - -import ( - "context" - "errors" - "fmt" - "reflect" - "strings" - "testing" - - "github.com/google/go-cmp/cmp" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - csilibplugins "k8s.io/csi-translation-lib/plugins" - "k8s.io/klog/v2/ktesting" - "k8s.io/kubernetes/pkg/scheduler/framework" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature" - st "k8s.io/kubernetes/pkg/scheduler/testing" - tf "k8s.io/kubernetes/pkg/scheduler/testing/framework" - "k8s.io/utils/ptr" -) - -var ( - nonApplicablePod = st.MakePod().Volume(v1.Volume{ - VolumeSource: v1.VolumeSource{ - HostPath: &v1.HostPathVolumeSource{}, - }, - }).Obj() - onlyConfigmapAndSecretPod = st.MakePod().Volume(v1.Volume{ - VolumeSource: v1.VolumeSource{ - ConfigMap: &v1.ConfigMapVolumeSource{}, - }, - }).Volume(v1.Volume{ - VolumeSource: v1.VolumeSource{ - Secret: &v1.SecretVolumeSource{}, - }, - }).Obj() - pvcPodWithConfigmapAndSecret = st.MakePod().PVC("pvcWithDeletedPV").Volume(v1.Volume{ - VolumeSource: v1.VolumeSource{ - ConfigMap: &v1.ConfigMapVolumeSource{}, - }, - }).Volume(v1.Volume{ - VolumeSource: v1.VolumeSource{ - Secret: &v1.SecretVolumeSource{}, - }, - }).Obj() - - deletedPVCPod = st.MakePod().PVC("deletedPVC").Obj() - twoDeletedPVCPod = st.MakePod().PVC("deletedPVC").PVC("anotherDeletedPVC").Obj() - deletedPVPod = st.MakePod().PVC("pvcWithDeletedPV").Obj() - // deletedPVPod2 is a different pod than deletedPVPod but using the same PVC - deletedPVPod2 = st.MakePod().PVC("pvcWithDeletedPV").Obj() - anotherDeletedPVPod = st.MakePod().PVC("anotherPVCWithDeletedPV").Obj() - emptyPod = st.MakePod().Obj() - unboundPVCPod = st.MakePod().PVC("unboundPVC").Obj() - // Different pod than unboundPVCPod, but using the same unbound PVC - unboundPVCPod2 = st.MakePod().PVC("unboundPVC").Obj() - // pod with unbound PVC that's different to unboundPVC - anotherUnboundPVCPod = st.MakePod().PVC("anotherUnboundPVC").Obj() -) - -func TestEphemeralLimits(t *testing.T) { - // We have to specify a valid filter and arbitrarily pick Cinder here. - // It doesn't matter for the test cases. 
- filterName := gcePDVolumeFilterType - driverName := csilibplugins.GCEPDInTreePluginName - - ephemeralVolumePod := st.MakePod().Name("abc").Namespace("test").UID("12345").Volume(v1.Volume{ - Name: "xyz", - VolumeSource: v1.VolumeSource{ - Ephemeral: &v1.EphemeralVolumeSource{}, - }, - }).Obj() - - controller := true - ephemeralClaim := &v1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: ephemeralVolumePod.Namespace, - Name: ephemeralVolumePod.Name + "-" + ephemeralVolumePod.Spec.Volumes[0].Name, - OwnerReferences: []metav1.OwnerReference{ - { - Kind: "Pod", - Name: ephemeralVolumePod.Name, - UID: ephemeralVolumePod.UID, - Controller: &controller, - }, - }, - }, - Spec: v1.PersistentVolumeClaimSpec{ - VolumeName: "missing", - StorageClassName: &filterName, - }, - } - conflictingClaim := ephemeralClaim.DeepCopy() - conflictingClaim.OwnerReferences = nil - - ephemeralPodWithConfigmapAndSecret := st.MakePod().Name("abc").Namespace("test").UID("12345").Volume(v1.Volume{ - Name: "xyz", - VolumeSource: v1.VolumeSource{ - Ephemeral: &v1.EphemeralVolumeSource{}, - }, - }).Volume(v1.Volume{ - VolumeSource: v1.VolumeSource{ - ConfigMap: &v1.ConfigMapVolumeSource{}, - }, - }).Volume(v1.Volume{ - VolumeSource: v1.VolumeSource{ - Secret: &v1.SecretVolumeSource{}, - }, - }).Obj() - - tests := []struct { - newPod *v1.Pod - existingPods []*v1.Pod - extraClaims []v1.PersistentVolumeClaim - ephemeralEnabled bool - maxVols int32 - test string - wantStatus *framework.Status - wantPreFilterStatus *framework.Status - }{ - { - newPod: ephemeralVolumePod, - ephemeralEnabled: true, - test: "volume missing", - wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, `looking up PVC test/abc-xyz: persistentvolumeclaims "abc-xyz" not found`), - }, - { - newPod: ephemeralVolumePod, - ephemeralEnabled: true, - extraClaims: []v1.PersistentVolumeClaim{*conflictingClaim}, - test: "volume not owned", - wantStatus: framework.AsStatus(errors.New("PVC test/abc-xyz was not created for pod test/abc (pod is not owner)")), - }, - { - newPod: ephemeralVolumePod, - ephemeralEnabled: true, - extraClaims: []v1.PersistentVolumeClaim{*ephemeralClaim}, - maxVols: 1, - test: "volume unbound, allowed", - }, - { - newPod: ephemeralVolumePod, - ephemeralEnabled: true, - extraClaims: []v1.PersistentVolumeClaim{*ephemeralClaim}, - maxVols: 0, - test: "volume unbound, exceeds limit", - wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded), - }, - { - newPod: onlyConfigmapAndSecretPod, - ephemeralEnabled: true, - extraClaims: []v1.PersistentVolumeClaim{*ephemeralClaim}, - maxVols: 1, - test: "skip Filter when the pod only uses secrets and configmaps", - wantPreFilterStatus: framework.NewStatus(framework.Skip), - }, - { - newPod: ephemeralPodWithConfigmapAndSecret, - ephemeralEnabled: true, - extraClaims: []v1.PersistentVolumeClaim{*ephemeralClaim}, - maxVols: 1, - test: "don't skip Filter when the pods has ephemeral volumes", - }, - } - - for _, test := range tests { - t.Run(test.test, func(t *testing.T) { - _, ctx := ktesting.NewTestContext(t) - fts := feature.Features{} - node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, test.maxVols, filterName) - p := newNonCSILimits(ctx, filterName, getFakeCSINodeLister(csiNode), getFakeCSIStorageClassLister(filterName, driverName), getFakePVLister(filterName), append(getFakePVCLister(filterName), test.extraClaims...), fts).(framework.FilterPlugin) - _, gotPreFilterStatus := p.(*nonCSILimits).PreFilter(ctx, nil, 
test.newPod) - if diff := cmp.Diff(test.wantPreFilterStatus, gotPreFilterStatus); diff != "" { - t.Errorf("PreFilter status does not match (-want, +got): %s", diff) - } - - if gotPreFilterStatus.Code() != framework.Skip { - gotStatus := p.Filter(ctx, nil, test.newPod, node) - if !reflect.DeepEqual(gotStatus, test.wantStatus) { - t.Errorf("Filter status does not match: %v, want: %v", gotStatus, test.wantStatus) - } - } - }) - } -} - -func TestAzureDiskLimits(t *testing.T) { - oneAzureDiskPod := st.MakePod().Volume(v1.Volume{ - VolumeSource: v1.VolumeSource{ - AzureDisk: &v1.AzureDiskVolumeSource{}, - }, - }).Obj() - twoAzureDiskPod := st.MakePod().Volume(v1.Volume{ - VolumeSource: v1.VolumeSource{ - AzureDisk: &v1.AzureDiskVolumeSource{}, - }, - }).Volume(v1.Volume{ - VolumeSource: v1.VolumeSource{ - AzureDisk: &v1.AzureDiskVolumeSource{}, - }, - }).Obj() - splitAzureDiskPod := st.MakePod().Volume(v1.Volume{ - VolumeSource: v1.VolumeSource{ - HostPath: &v1.HostPathVolumeSource{}, - }, - }).Volume(v1.Volume{ - VolumeSource: v1.VolumeSource{ - AzureDisk: &v1.AzureDiskVolumeSource{}, - }, - }).Obj() - AzureDiskPodWithConfigmapAndSecret := st.MakePod().Volume(v1.Volume{ - VolumeSource: v1.VolumeSource{ - AzureDisk: &v1.AzureDiskVolumeSource{}, - }, - }).Volume(v1.Volume{ - VolumeSource: v1.VolumeSource{ - ConfigMap: &v1.ConfigMapVolumeSource{}, - }, - }).Volume(v1.Volume{ - VolumeSource: v1.VolumeSource{ - Secret: &v1.SecretVolumeSource{}, - }, - }).Obj() - tests := []struct { - newPod *v1.Pod - existingPods []*v1.Pod - filterName string - driverName string - maxVols int32 - test string - wantStatus *framework.Status - wantPreFilterStatus *framework.Status - }{ - { - newPod: oneAzureDiskPod, - existingPods: []*v1.Pod{twoAzureDiskPod, oneAzureDiskPod}, - filterName: azureDiskVolumeFilterType, - maxVols: 4, - test: "fits when node capacity >= new pod's AzureDisk volumes", - }, - { - newPod: twoAzureDiskPod, - existingPods: []*v1.Pod{oneAzureDiskPod}, - filterName: azureDiskVolumeFilterType, - maxVols: 2, - test: "fit when node capacity < new pod's AzureDisk volumes", - }, - { - newPod: splitAzureDiskPod, - existingPods: []*v1.Pod{twoAzureDiskPod}, - filterName: azureDiskVolumeFilterType, - maxVols: 3, - test: "new pod's count ignores non-AzureDisk volumes", - }, - { - newPod: twoAzureDiskPod, - existingPods: []*v1.Pod{splitAzureDiskPod, nonApplicablePod, emptyPod}, - filterName: azureDiskVolumeFilterType, - maxVols: 3, - test: "existing pods' counts ignore non-AzureDisk volumes", - }, - { - newPod: onePVCPod(azureDiskVolumeFilterType), - existingPods: []*v1.Pod{splitAzureDiskPod, nonApplicablePod, emptyPod}, - filterName: azureDiskVolumeFilterType, - maxVols: 3, - test: "new pod's count considers PVCs backed by AzureDisk volumes", - }, - { - newPod: splitPVCPod(azureDiskVolumeFilterType), - existingPods: []*v1.Pod{splitAzureDiskPod, oneAzureDiskPod}, - filterName: azureDiskVolumeFilterType, - maxVols: 3, - test: "new pod's count ignores PVCs not backed by AzureDisk volumes", - }, - { - newPod: twoAzureDiskPod, - existingPods: []*v1.Pod{oneAzureDiskPod, onePVCPod(azureDiskVolumeFilterType)}, - filterName: azureDiskVolumeFilterType, - maxVols: 3, - test: "existing pods' counts considers PVCs backed by AzureDisk volumes", - }, - { - newPod: twoAzureDiskPod, - existingPods: []*v1.Pod{oneAzureDiskPod, twoAzureDiskPod, onePVCPod(azureDiskVolumeFilterType)}, - filterName: azureDiskVolumeFilterType, - maxVols: 4, - test: "already-mounted AzureDisk volumes are always ok to allow", - }, - { - newPod: 
splitAzureDiskPod, - existingPods: []*v1.Pod{oneAzureDiskPod, oneAzureDiskPod, onePVCPod(azureDiskVolumeFilterType)}, - filterName: azureDiskVolumeFilterType, - maxVols: 3, - test: "the same AzureDisk volumes are not counted multiple times", - }, - { - newPod: onePVCPod(azureDiskVolumeFilterType), - existingPods: []*v1.Pod{oneAzureDiskPod, deletedPVCPod}, - filterName: azureDiskVolumeFilterType, - maxVols: 2, - test: "pod with missing PVC is counted towards the PV limit", - }, - { - newPod: onePVCPod(azureDiskVolumeFilterType), - existingPods: []*v1.Pod{oneAzureDiskPod, deletedPVCPod}, - filterName: azureDiskVolumeFilterType, - maxVols: 3, - test: "pod with missing PVC is counted towards the PV limit", - }, - { - newPod: onePVCPod(azureDiskVolumeFilterType), - existingPods: []*v1.Pod{oneAzureDiskPod, twoDeletedPVCPod}, - filterName: azureDiskVolumeFilterType, - maxVols: 3, - test: "pod with missing two PVCs is counted towards the PV limit twice", - }, - { - newPod: onePVCPod(azureDiskVolumeFilterType), - existingPods: []*v1.Pod{oneAzureDiskPod, deletedPVPod}, - filterName: azureDiskVolumeFilterType, - maxVols: 2, - test: "pod with missing PV is counted towards the PV limit", - }, - { - newPod: onePVCPod(azureDiskVolumeFilterType), - existingPods: []*v1.Pod{oneAzureDiskPod, deletedPVPod}, - filterName: azureDiskVolumeFilterType, - maxVols: 3, - test: "pod with missing PV is counted towards the PV limit", - }, - { - newPod: deletedPVPod2, - existingPods: []*v1.Pod{oneAzureDiskPod, deletedPVPod}, - filterName: azureDiskVolumeFilterType, - maxVols: 2, - test: "two pods missing the same PV are counted towards the PV limit only once", - }, - { - newPod: anotherDeletedPVPod, - existingPods: []*v1.Pod{oneAzureDiskPod, deletedPVPod}, - filterName: azureDiskVolumeFilterType, - maxVols: 2, - test: "two pods missing different PVs are counted towards the PV limit twice", - }, - { - newPod: onePVCPod(azureDiskVolumeFilterType), - existingPods: []*v1.Pod{oneAzureDiskPod, unboundPVCPod}, - filterName: azureDiskVolumeFilterType, - maxVols: 2, - test: "pod with unbound PVC is counted towards the PV limit", - }, - { - newPod: onePVCPod(azureDiskVolumeFilterType), - existingPods: []*v1.Pod{oneAzureDiskPod, unboundPVCPod}, - filterName: azureDiskVolumeFilterType, - maxVols: 3, - test: "pod with unbound PVC is counted towards the PV limit", - }, - { - newPod: unboundPVCPod2, - existingPods: []*v1.Pod{oneAzureDiskPod, unboundPVCPod}, - filterName: azureDiskVolumeFilterType, - maxVols: 2, - test: "the same unbound PVC in multiple pods is counted towards the PV limit only once", - }, - { - newPod: anotherUnboundPVCPod, - existingPods: []*v1.Pod{oneAzureDiskPod, unboundPVCPod}, - filterName: azureDiskVolumeFilterType, - maxVols: 2, - test: "two different unbound PVCs are counted towards the PV limit as two volumes", - }, - { - newPod: onlyConfigmapAndSecretPod, - existingPods: []*v1.Pod{twoAzureDiskPod, oneAzureDiskPod}, - filterName: azureDiskVolumeFilterType, - maxVols: 4, - test: "skip Filter when the pod only uses secrets and configmaps", - wantPreFilterStatus: framework.NewStatus(framework.Skip), - }, - { - newPod: pvcPodWithConfigmapAndSecret, - existingPods: []*v1.Pod{oneAzureDiskPod, deletedPVPod}, - filterName: azureDiskVolumeFilterType, - maxVols: 2, - test: "don't skip Filter when the pod has pvcs", - }, - { - newPod: AzureDiskPodWithConfigmapAndSecret, - existingPods: []*v1.Pod{twoAzureDiskPod, oneAzureDiskPod}, - filterName: azureDiskVolumeFilterType, - maxVols: 4, - test: "don't skip Filter when the 
pod has AzureDisk volumes", - }, - } - - for _, test := range tests { - t.Run(test.test, func(t *testing.T) { - _, ctx := ktesting.NewTestContext(t) - node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, test.maxVols, test.filterName) - p := newNonCSILimits(ctx, test.filterName, getFakeCSINodeLister(csiNode), getFakeCSIStorageClassLister(test.filterName, test.driverName), getFakePVLister(test.filterName), getFakePVCLister(test.filterName), feature.Features{}).(framework.FilterPlugin) - _, gotPreFilterStatus := p.(*nonCSILimits).PreFilter(context.Background(), nil, test.newPod) - if diff := cmp.Diff(test.wantPreFilterStatus, gotPreFilterStatus); diff != "" { - t.Errorf("PreFilter status does not match (-want, +got): %s", diff) - } - - if gotPreFilterStatus.Code() != framework.Skip { - gotStatus := p.Filter(context.Background(), nil, test.newPod, node) - if !reflect.DeepEqual(gotStatus, test.wantStatus) { - t.Errorf("Filter status does not match: %v, want: %v", gotStatus, test.wantStatus) - } - } - }) - } -} - -func TestEBSLimits(t *testing.T) { - oneVolPod := st.MakePod().Volume(v1.Volume{ - VolumeSource: v1.VolumeSource{ - AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "ovp"}, - }, - }).Obj() - twoVolPod := st.MakePod().Volume(v1.Volume{ - VolumeSource: v1.VolumeSource{ - AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp1"}, - }, - }).Volume(v1.Volume{ - VolumeSource: v1.VolumeSource{ - AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp2"}, - }, - }).Obj() - splitVolsPod := st.MakePod().Volume(v1.Volume{ - VolumeSource: v1.VolumeSource{ - HostPath: &v1.HostPathVolumeSource{}, - }, - }).Volume(v1.Volume{ - VolumeSource: v1.VolumeSource{ - AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "svp"}, - }, - }).Obj() - - unboundPVCWithInvalidSCPod := st.MakePod().PVC("unboundPVCWithInvalidSCPod").Obj() - unboundPVCWithDefaultSCPod := st.MakePod().PVC("unboundPVCWithDefaultSCPod").Obj() - - EBSPodWithConfigmapAndSecret := st.MakePod().Volume(v1.Volume{ - VolumeSource: v1.VolumeSource{ - AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "ovp"}, - }, - }).Volume(v1.Volume{ - VolumeSource: v1.VolumeSource{ - ConfigMap: &v1.ConfigMapVolumeSource{}, - }, - }).Volume(v1.Volume{ - VolumeSource: v1.VolumeSource{ - Secret: &v1.SecretVolumeSource{}, - }, - }).Obj() - - tests := []struct { - newPod *v1.Pod - existingPods []*v1.Pod - filterName string - driverName string - maxVols int32 - test string - wantStatus *framework.Status - wantPreFilterStatus *framework.Status - }{ - { - newPod: oneVolPod, - existingPods: []*v1.Pod{twoVolPod, oneVolPod}, - filterName: ebsVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 4, - test: "fits when node capacity >= new pod's EBS volumes", - }, - { - newPod: twoVolPod, - existingPods: []*v1.Pod{oneVolPod}, - filterName: ebsVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 2, - test: "doesn't fit when node capacity < new pod's EBS volumes", - wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded), - }, - { - newPod: splitVolsPod, - existingPods: []*v1.Pod{twoVolPod}, - filterName: ebsVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 3, - test: "new pod's count ignores non-EBS volumes", - }, - { - newPod: twoVolPod, - existingPods: []*v1.Pod{splitVolsPod, nonApplicablePod, emptyPod}, - filterName: ebsVolumeFilterType, - 
driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 3, - test: "existing pods' counts ignore non-EBS volumes", - }, - { - newPod: onePVCPod(ebsVolumeFilterType), - existingPods: []*v1.Pod{splitVolsPod, nonApplicablePod, emptyPod}, - filterName: ebsVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 3, - test: "new pod's count considers PVCs backed by EBS volumes", - }, - { - newPod: splitPVCPod(ebsVolumeFilterType), - existingPods: []*v1.Pod{splitVolsPod, oneVolPod}, - filterName: ebsVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 3, - test: "new pod's count ignores PVCs not backed by EBS volumes", - }, - { - newPod: twoVolPod, - existingPods: []*v1.Pod{oneVolPod, onePVCPod(ebsVolumeFilterType)}, - filterName: ebsVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 3, - test: "existing pods' counts considers PVCs backed by EBS volumes", - wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded), - }, - { - newPod: twoVolPod, - existingPods: []*v1.Pod{oneVolPod, twoVolPod, onePVCPod(ebsVolumeFilterType)}, - filterName: ebsVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 4, - test: "already-mounted EBS volumes are always ok to allow", - }, - { - newPod: splitVolsPod, - existingPods: []*v1.Pod{oneVolPod, oneVolPod, onePVCPod(ebsVolumeFilterType)}, - filterName: ebsVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 3, - test: "the same EBS volumes are not counted multiple times", - }, - { - newPod: onePVCPod(ebsVolumeFilterType), - existingPods: []*v1.Pod{oneVolPod, deletedPVCPod}, - filterName: ebsVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 1, - test: "missing PVC is not counted towards the PV limit", - wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded), - }, - { - newPod: onePVCPod(ebsVolumeFilterType), - existingPods: []*v1.Pod{oneVolPod, deletedPVCPod}, - filterName: ebsVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 2, - test: "missing PVC is not counted towards the PV limit", - }, - { - newPod: onePVCPod(ebsVolumeFilterType), - existingPods: []*v1.Pod{oneVolPod, twoDeletedPVCPod}, - filterName: ebsVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 2, - test: "two missing PVCs are not counted towards the PV limit twice", - }, - { - newPod: unboundPVCWithInvalidSCPod, - existingPods: []*v1.Pod{oneVolPod}, - filterName: ebsVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 1, - test: "unbound PVC with invalid SC is not counted towards the PV limit", - }, - { - newPod: unboundPVCWithDefaultSCPod, - existingPods: []*v1.Pod{oneVolPod}, - filterName: ebsVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 1, - test: "unbound PVC from different provisioner is not counted towards the PV limit", - }, - { - newPod: onePVCPod(ebsVolumeFilterType), - existingPods: []*v1.Pod{oneVolPod, deletedPVPod}, - filterName: ebsVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 2, - test: "pod with missing PV is counted towards the PV limit", - wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded), - }, - { - newPod: onePVCPod(ebsVolumeFilterType), - existingPods: []*v1.Pod{oneVolPod, deletedPVPod}, - filterName: ebsVolumeFilterType, - driverName: 
csilibplugins.AWSEBSInTreePluginName, - maxVols: 3, - test: "pod with missing PV is counted towards the PV limit", - }, - { - newPod: deletedPVPod2, - existingPods: []*v1.Pod{oneVolPod, deletedPVPod}, - filterName: ebsVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 2, - test: "two pods missing the same PV are counted towards the PV limit only once", - }, - { - newPod: anotherDeletedPVPod, - existingPods: []*v1.Pod{oneVolPod, deletedPVPod}, - filterName: ebsVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 2, - test: "two pods missing different PVs are counted towards the PV limit twice", - wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded), - }, - { - newPod: onePVCPod(ebsVolumeFilterType), - existingPods: []*v1.Pod{oneVolPod, unboundPVCPod}, - filterName: ebsVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 2, - test: "pod with unbound PVC is counted towards the PV limit", - wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded), - }, - { - newPod: onePVCPod(ebsVolumeFilterType), - existingPods: []*v1.Pod{oneVolPod, unboundPVCPod}, - filterName: ebsVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 3, - test: "pod with unbound PVC is counted towards the PV limit", - }, - { - newPod: unboundPVCPod2, - existingPods: []*v1.Pod{oneVolPod, unboundPVCPod}, - filterName: ebsVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 2, - test: "the same unbound PVC in multiple pods is counted towards the PV limit only once", - }, - { - newPod: anotherUnboundPVCPod, - existingPods: []*v1.Pod{oneVolPod, unboundPVCPod}, - filterName: ebsVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 2, - test: "two different unbound PVCs are counted towards the PV limit as two volumes", - wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded), - }, - { - newPod: onlyConfigmapAndSecretPod, - existingPods: []*v1.Pod{twoVolPod, oneVolPod}, - filterName: ebsVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 4, - test: "skip Filter when the pod only uses secrets and configmaps", - wantPreFilterStatus: framework.NewStatus(framework.Skip), - }, - { - newPod: pvcPodWithConfigmapAndSecret, - existingPods: []*v1.Pod{oneVolPod, deletedPVPod}, - filterName: ebsVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 2, - test: "don't skip Filter when the pod has pvcs", - }, - { - newPod: EBSPodWithConfigmapAndSecret, - existingPods: []*v1.Pod{twoVolPod, oneVolPod}, - filterName: ebsVolumeFilterType, - driverName: csilibplugins.AWSEBSInTreePluginName, - maxVols: 4, - test: "don't skip Filter when the pod has EBS volumes", - }, - } - - for _, test := range tests { - t.Run(test.test, func(t *testing.T) { - _, ctx := ktesting.NewTestContext(t) - node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, test.maxVols, test.filterName) - p := newNonCSILimits(ctx, test.filterName, getFakeCSINodeLister(csiNode), getFakeCSIStorageClassLister(test.filterName, test.driverName), getFakePVLister(test.filterName), getFakePVCLister(test.filterName), feature.Features{}).(framework.FilterPlugin) - _, gotPreFilterStatus := p.(*nonCSILimits).PreFilter(ctx, nil, test.newPod) - if diff := cmp.Diff(test.wantPreFilterStatus, gotPreFilterStatus); diff != "" { - t.Errorf("PreFilter status does not 
match (-want, +got): %s", diff) - } - - if gotPreFilterStatus.Code() != framework.Skip { - gotStatus := p.Filter(ctx, nil, test.newPod, node) - if !reflect.DeepEqual(gotStatus, test.wantStatus) { - t.Errorf("Filter status does not match: %v, want: %v", gotStatus, test.wantStatus) - } - } - }) - } -} - -func TestGCEPDLimits(t *testing.T) { - oneGCEPDPod := st.MakePod().Volume(v1.Volume{ - VolumeSource: v1.VolumeSource{ - GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{}, - }, - }).Obj() - twoGCEPDPod := st.MakePod().Volume(v1.Volume{ - VolumeSource: v1.VolumeSource{ - GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{}, - }, - }).Volume(v1.Volume{ - VolumeSource: v1.VolumeSource{ - GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{}, - }, - }).Obj() - splitGCEPDPod := st.MakePod().Volume(v1.Volume{ - VolumeSource: v1.VolumeSource{ - HostPath: &v1.HostPathVolumeSource{}, - }, - }).Volume(v1.Volume{ - VolumeSource: v1.VolumeSource{ - GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{}, - }, - }).Obj() - GCEPDPodWithConfigmapAndSecret := st.MakePod().Volume(v1.Volume{ - VolumeSource: v1.VolumeSource{ - GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{}, - }, - }).Volume(v1.Volume{ - VolumeSource: v1.VolumeSource{ - ConfigMap: &v1.ConfigMapVolumeSource{}, - }, - }).Volume(v1.Volume{ - VolumeSource: v1.VolumeSource{ - Secret: &v1.SecretVolumeSource{}, - }, - }).Obj() - tests := []struct { - newPod *v1.Pod - existingPods []*v1.Pod - filterName string - driverName string - maxVols int32 - test string - wantStatus *framework.Status - wantPreFilterStatus *framework.Status - }{ - { - newPod: oneGCEPDPod, - existingPods: []*v1.Pod{twoGCEPDPod, oneGCEPDPod}, - filterName: gcePDVolumeFilterType, - maxVols: 4, - test: "fits when node capacity >= new pod's GCE volumes", - }, - { - newPod: twoGCEPDPod, - existingPods: []*v1.Pod{oneGCEPDPod}, - filterName: gcePDVolumeFilterType, - maxVols: 2, - test: "fit when node capacity < new pod's GCE volumes", - }, - { - newPod: splitGCEPDPod, - existingPods: []*v1.Pod{twoGCEPDPod}, - filterName: gcePDVolumeFilterType, - maxVols: 3, - test: "new pod's count ignores non-GCE volumes", - }, - { - newPod: twoGCEPDPod, - existingPods: []*v1.Pod{splitGCEPDPod, nonApplicablePod, emptyPod}, - filterName: gcePDVolumeFilterType, - maxVols: 3, - test: "existing pods' counts ignore non-GCE volumes", - }, - { - newPod: onePVCPod(gcePDVolumeFilterType), - existingPods: []*v1.Pod{splitGCEPDPod, nonApplicablePod, emptyPod}, - filterName: gcePDVolumeFilterType, - maxVols: 3, - test: "new pod's count considers PVCs backed by GCE volumes", - }, - { - newPod: splitPVCPod(gcePDVolumeFilterType), - existingPods: []*v1.Pod{splitGCEPDPod, oneGCEPDPod}, - filterName: gcePDVolumeFilterType, - maxVols: 3, - test: "new pod's count ignores PVCs not backed by GCE volumes", - }, - { - newPod: twoGCEPDPod, - existingPods: []*v1.Pod{oneGCEPDPod, onePVCPod(gcePDVolumeFilterType)}, - filterName: gcePDVolumeFilterType, - maxVols: 3, - test: "existing pods' counts considers PVCs backed by GCE volumes", - }, - { - newPod: twoGCEPDPod, - existingPods: []*v1.Pod{oneGCEPDPod, twoGCEPDPod, onePVCPod(gcePDVolumeFilterType)}, - filterName: gcePDVolumeFilterType, - maxVols: 4, - test: "already-mounted EBS volumes are always ok to allow", - }, - { - newPod: splitGCEPDPod, - existingPods: []*v1.Pod{oneGCEPDPod, oneGCEPDPod, onePVCPod(gcePDVolumeFilterType)}, - filterName: gcePDVolumeFilterType, - maxVols: 3, - test: "the same GCE volumes are not counted multiple times", - }, - { - newPod: 
onePVCPod(gcePDVolumeFilterType), - existingPods: []*v1.Pod{oneGCEPDPod, deletedPVCPod}, - filterName: gcePDVolumeFilterType, - maxVols: 2, - test: "pod with missing PVC is counted towards the PV limit", - }, - { - newPod: onePVCPod(gcePDVolumeFilterType), - existingPods: []*v1.Pod{oneGCEPDPod, deletedPVCPod}, - filterName: gcePDVolumeFilterType, - maxVols: 3, - test: "pod with missing PVC is counted towards the PV limit", - }, - { - newPod: onePVCPod(gcePDVolumeFilterType), - existingPods: []*v1.Pod{oneGCEPDPod, twoDeletedPVCPod}, - filterName: gcePDVolumeFilterType, - maxVols: 3, - test: "pod with missing two PVCs is counted towards the PV limit twice", - }, - { - newPod: onePVCPod(gcePDVolumeFilterType), - existingPods: []*v1.Pod{oneGCEPDPod, deletedPVPod}, - filterName: gcePDVolumeFilterType, - maxVols: 2, - test: "pod with missing PV is counted towards the PV limit", - }, - { - newPod: onePVCPod(gcePDVolumeFilterType), - existingPods: []*v1.Pod{oneGCEPDPod, deletedPVPod}, - filterName: gcePDVolumeFilterType, - maxVols: 3, - test: "pod with missing PV is counted towards the PV limit", - }, - { - newPod: deletedPVPod2, - existingPods: []*v1.Pod{oneGCEPDPod, deletedPVPod}, - filterName: gcePDVolumeFilterType, - maxVols: 2, - test: "two pods missing the same PV are counted towards the PV limit only once", - }, - { - newPod: anotherDeletedPVPod, - existingPods: []*v1.Pod{oneGCEPDPod, deletedPVPod}, - filterName: gcePDVolumeFilterType, - maxVols: 2, - test: "two pods missing different PVs are counted towards the PV limit twice", - }, - { - newPod: onePVCPod(gcePDVolumeFilterType), - existingPods: []*v1.Pod{oneGCEPDPod, unboundPVCPod}, - filterName: gcePDVolumeFilterType, - maxVols: 2, - test: "pod with unbound PVC is counted towards the PV limit", - }, - { - newPod: onePVCPod(gcePDVolumeFilterType), - existingPods: []*v1.Pod{oneGCEPDPod, unboundPVCPod}, - filterName: gcePDVolumeFilterType, - maxVols: 3, - test: "pod with unbound PVC is counted towards the PV limit", - }, - { - newPod: unboundPVCPod2, - existingPods: []*v1.Pod{oneGCEPDPod, unboundPVCPod}, - filterName: gcePDVolumeFilterType, - maxVols: 2, - test: "the same unbound PVC in multiple pods is counted towards the PV limit only once", - }, - { - newPod: anotherUnboundPVCPod, - existingPods: []*v1.Pod{oneGCEPDPod, unboundPVCPod}, - filterName: gcePDVolumeFilterType, - maxVols: 2, - test: "two different unbound PVCs are counted towards the PV limit as two volumes", - }, - { - newPod: onlyConfigmapAndSecretPod, - existingPods: []*v1.Pod{twoGCEPDPod, oneGCEPDPod}, - filterName: gcePDVolumeFilterType, - maxVols: 4, - test: "skip Filter when the pod only uses secrets and configmaps", - wantPreFilterStatus: framework.NewStatus(framework.Skip), - }, - { - newPod: pvcPodWithConfigmapAndSecret, - existingPods: []*v1.Pod{oneGCEPDPod, deletedPVPod}, - filterName: gcePDVolumeFilterType, - maxVols: 2, - test: "don't skip Filter when the pods has pvcs", - }, - { - newPod: GCEPDPodWithConfigmapAndSecret, - existingPods: []*v1.Pod{twoGCEPDPod, oneGCEPDPod}, - filterName: gcePDVolumeFilterType, - maxVols: 4, - test: "don't skip Filter when the pods has GCE volumes", - }, - } - - for _, test := range tests { - t.Run(test.test, func(t *testing.T) { - _, ctx := ktesting.NewTestContext(t) - node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, test.maxVols, test.filterName) - p := newNonCSILimits(ctx, test.filterName, getFakeCSINodeLister(csiNode), getFakeCSIStorageClassLister(test.filterName, test.driverName), 
getFakePVLister(test.filterName), getFakePVCLister(test.filterName), feature.Features{}).(framework.FilterPlugin) - _, gotPreFilterStatus := p.(*nonCSILimits).PreFilter(context.Background(), nil, test.newPod) - if diff := cmp.Diff(test.wantPreFilterStatus, gotPreFilterStatus); diff != "" { - t.Errorf("PreFilter status does not match (-want, +got): %s", diff) - } - - if gotPreFilterStatus.Code() != framework.Skip { - gotStatus := p.Filter(context.Background(), nil, test.newPod, node) - if !reflect.DeepEqual(gotStatus, test.wantStatus) { - t.Errorf("Filter status does not match: %v, want: %v", gotStatus, test.wantStatus) - } - } - }) - } -} - -func TestGetMaxVols(t *testing.T) { - tests := []struct { - rawMaxVols string - expected int - name string - }{ - { - rawMaxVols: "invalid", - expected: -1, - name: "Unable to parse maximum PD volumes value, using default value", - }, - { - rawMaxVols: "-2", - expected: -1, - name: "Maximum PD volumes must be a positive value, using default value", - }, - { - rawMaxVols: "40", - expected: 40, - name: "Parse maximum PD volumes value from env", - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - logger, _ := ktesting.NewTestContext(t) - t.Setenv(KubeMaxPDVols, test.rawMaxVols) - result := getMaxVolLimitFromEnv(logger) - if result != test.expected { - t.Errorf("expected %v got %v", test.expected, result) - } - }) - } -} - -func getFakePVCLister(filterName string) tf.PersistentVolumeClaimLister { - return tf.PersistentVolumeClaimLister{ - { - ObjectMeta: metav1.ObjectMeta{Name: "some" + filterName + "Vol"}, - Spec: v1.PersistentVolumeClaimSpec{ - VolumeName: "some" + filterName + "Vol", - StorageClassName: &filterName, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{Name: "someNon" + filterName + "Vol"}, - Spec: v1.PersistentVolumeClaimSpec{ - VolumeName: "someNon" + filterName + "Vol", - StorageClassName: &filterName, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{Name: "pvcWithDeletedPV"}, - Spec: v1.PersistentVolumeClaimSpec{ - VolumeName: "pvcWithDeletedPV", - StorageClassName: &filterName, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{Name: "anotherPVCWithDeletedPV"}, - Spec: v1.PersistentVolumeClaimSpec{ - VolumeName: "anotherPVCWithDeletedPV", - StorageClassName: &filterName, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{Name: "unboundPVC"}, - Spec: v1.PersistentVolumeClaimSpec{ - VolumeName: "", - StorageClassName: &filterName, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{Name: "anotherUnboundPVC"}, - Spec: v1.PersistentVolumeClaimSpec{ - VolumeName: "", - StorageClassName: &filterName, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{Name: "unboundPVCWithDefaultSCPod"}, - Spec: v1.PersistentVolumeClaimSpec{ - VolumeName: "", - StorageClassName: ptr.To("standard-sc"), - }, - }, - { - ObjectMeta: metav1.ObjectMeta{Name: "unboundPVCWithInvalidSCPod"}, - Spec: v1.PersistentVolumeClaimSpec{ - VolumeName: "", - StorageClassName: ptr.To("invalid-sc"), - }, - }, - } -} - -func getFakePVLister(filterName string) tf.PersistentVolumeLister { - return tf.PersistentVolumeLister{ - { - ObjectMeta: metav1.ObjectMeta{Name: "some" + filterName + "Vol"}, - Spec: v1.PersistentVolumeSpec{ - PersistentVolumeSource: v1.PersistentVolumeSource{ - AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: strings.ToLower(filterName) + "Vol"}, - }, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{Name: "someNon" + filterName + "Vol"}, - Spec: v1.PersistentVolumeSpec{ - PersistentVolumeSource: v1.PersistentVolumeSource{}, - }, - }, - } -} - 
-func onePVCPod(filterName string) *v1.Pod {
-	return st.MakePod().PVC(fmt.Sprintf("some%sVol", filterName)).Obj()
-}
-
-func splitPVCPod(filterName string) *v1.Pod {
-	return st.MakePod().PVC(fmt.Sprintf("someNon%sVol", filterName)).PVC(fmt.Sprintf("some%sVol", filterName)).Obj()
-}
diff --git a/pkg/scheduler/framework/plugins/nodevolumelimits/utils.go b/pkg/scheduler/framework/plugins/nodevolumelimits/utils.go
index e6cc13e39bf..d70b18f7255 100644
--- a/pkg/scheduler/framework/plugins/nodevolumelimits/utils.go
+++ b/pkg/scheduler/framework/plugins/nodevolumelimits/utils.go
@@ -24,9 +24,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	csilibplugins "k8s.io/csi-translation-lib/plugins"
-	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	"k8s.io/kubernetes/pkg/features"
-	"k8s.io/kubernetes/pkg/scheduler/framework"
 )
 
 // isCSIMigrationOn returns a boolean value indicating whether
@@ -73,14 +71,3 @@ func isCSIMigrationOn(csiNode *storagev1.CSINode, pluginName string) bool {
 
 	return mpaSet.Has(pluginName)
 }
-
-// volumeLimits returns volume limits associated with the node.
-func volumeLimits(n *framework.NodeInfo) map[v1.ResourceName]int64 {
-	volumeLimits := map[v1.ResourceName]int64{}
-	for k, v := range n.Allocatable.ScalarResources {
-		if v1helper.IsAttachableVolumeResourceName(k) {
-			volumeLimits[k] = v
-		}
-	}
-	return volumeLimits
-}
diff --git a/pkg/scheduler/framework/plugins/registry.go b/pkg/scheduler/framework/plugins/registry.go
index 044d8f3f6f0..68f4e3e9a9d 100644
--- a/pkg/scheduler/framework/plugins/registry.go
+++ b/pkg/scheduler/framework/plugins/registry.go
@@ -71,10 +71,6 @@ func NewInTreeRegistry() runtime.Registry {
 		volumerestrictions.Name:        runtime.FactoryAdapter(fts, volumerestrictions.New),
 		volumezone.Name:                volumezone.New,
 		nodevolumelimits.CSIName:       runtime.FactoryAdapter(fts, nodevolumelimits.NewCSI),
-		nodevolumelimits.EBSName:       runtime.FactoryAdapter(fts, nodevolumelimits.NewEBS),
-		nodevolumelimits.GCEPDName:     runtime.FactoryAdapter(fts, nodevolumelimits.NewGCEPD),
-		nodevolumelimits.AzureDiskName: runtime.FactoryAdapter(fts, nodevolumelimits.NewAzureDisk),
-		nodevolumelimits.CinderName:    runtime.FactoryAdapter(fts, nodevolumelimits.NewCinder),
 		interpodaffinity.Name:          interpodaffinity.New,
 		queuesort.Name:                 queuesort.New,
 		defaultbinder.Name:             defaultbinder.New,
diff --git a/test/integration/scheduler_perf/scheduler_perf.go b/test/integration/scheduler_perf/scheduler_perf.go
index 7ecaca00f55..97069abbee2 100644
--- a/test/integration/scheduler_perf/scheduler_perf.go
+++ b/test/integration/scheduler_perf/scheduler_perf.go
@@ -139,10 +139,6 @@ var (
 		names.NodeResourcesFit,
 		names.NodeUnschedulable,
 		names.NodeVolumeLimits,
-		names.AzureDiskLimits,
-		names.CinderLimits,
-		names.EBSLimits,
-		names.GCEPDLimits,
 		names.PodTopologySpread,
 		names.SchedulingGates,
 		names.TaintToleration,
diff --git a/test/integration/volumescheduling/volume_binding_test.go b/test/integration/volumescheduling/volume_binding_test.go
index 2c880ad987a..428944adefb 100644
--- a/test/integration/volumescheduling/volume_binding_test.go
+++ b/test/integration/volumescheduling/volume_binding_test.go
@@ -39,7 +39,6 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/kubernetes/pkg/controller/volume/persistentvolume"
-	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits"
 	"k8s.io/kubernetes/pkg/volume"
 	volumetest "k8s.io/kubernetes/pkg/volume/testing"
 	testutil "k8s.io/kubernetes/test/integration/util"
@@ -425,7 +424,7 @@ func testVolumeBindingStress(t *testing.T, schedulerResyncPeriod time.Duration,
 	// Set max volume limit to the number of PVCs the test will create
 	// TODO: remove when max volume limit allows setting through storageclass
-	t.Setenv(nodevolumelimits.KubeMaxPDVols, fmt.Sprintf("%v", podLimit*volsPerPod))
+	t.Setenv("KUBE_MAX_PD_VOLS", fmt.Sprintf("%v", podLimit*volsPerPod))
 	scName := &classWait
 	if dynamic {
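
With the non-CSI plugins deleted, the exported nodevolumelimits.KubeMaxPDVols constant is gone as well, so the integration test hunk above names the KUBE_MAX_PD_VOLS environment variable directly. A minimal sketch of that pattern under the same assumption; the setMaxPDVols helper is hypothetical and not part of this patch:

package volumescheduling

import (
	"fmt"
	"testing"
)

// setMaxPDVols pins the legacy per-node volume cap for the duration of a test
// by setting KUBE_MAX_PD_VOLS directly; the exported constant that used to
// carry this name was removed together with the non-CSI limit plugins.
// Hypothetical helper, shown for illustration only.
func setMaxPDVols(t *testing.T, podLimit, volsPerPod int) {
	t.Helper()
	t.Setenv("KUBE_MAX_PD_VOLS", fmt.Sprintf("%v", podLimit*volsPerPod))
}

t.Setenv registers a cleanup that restores the previous value when the test finishes, so no manual os.Setenv/os.Unsetenv bookkeeping is needed; note that it cannot be combined with t.Parallel.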