mirror of https://github.com/k3s-io/kubernetes.git
synced 2026-02-22 07:03:28 +00:00
Remove unused volume limit constants
@@ -25,28 +25,6 @@ import (
 // shared between volume package and scheduler
 
-const (
-	// EBSVolumeLimitKey resource name that will store volume limits for EBS
-	EBSVolumeLimitKey = "attachable-volumes-aws-ebs"
-	// EBSNitroLimitRegex finds nitro instance types with different limit than EBS defaults
-	EBSNitroLimitRegex = "^[cmr]5.*|t3|z1d"
-	// DefaultMaxEBSVolumes is the limit for volumes attached to an instance.
-	// Amazon recommends no more than 40; the system root volume uses at least one.
-	// See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/volume_limits.html#linux-specific-volume-limits
-	DefaultMaxEBSVolumes = 39
-	// DefaultMaxEBSNitroVolumeLimit is default EBS volume limit on m5 and c5 instances
-	DefaultMaxEBSNitroVolumeLimit = 25
-	// AzureVolumeLimitKey stores resource name that will store volume limits for Azure
-	AzureVolumeLimitKey = "attachable-volumes-azure-disk"
-	// GCEVolumeLimitKey stores resource name that will store volume limits for GCE node
-	GCEVolumeLimitKey = "attachable-volumes-gce-pd"
-
-	// CinderVolumeLimitKey contains Volume limit key for Cinder
-	CinderVolumeLimitKey = "attachable-volumes-cinder"
-	// DefaultMaxCinderVolumes defines the maximum number of PD Volumes for Cinder.
-	// For OpenStack we keep this high enough that cluster admins can configure it
-	// depending on the backend.
-	DefaultMaxCinderVolumes = 256
-
-	// CSIAttachLimitPrefix defines prefix used for CSI volumes
-	CSIAttachLimitPrefix = "attachable-volumes-csi-"
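These constants were the resource names under which in-tree volume plugins published their attach limits in a node's status.allocatable, plus the prefix used to build the equivalent per-driver key for CSI drivers. As a minimal illustrative sketch (not part of this commit; readAttachLimit is a hypothetical helper and the key is supplied by the caller), reading such a limit with client-go looks roughly like this:

// Illustrative sketch: read an attach-limit resource from a node's
// status.allocatable. A key such as "attachable-volumes-aws-ebs" matches
// EBSVolumeLimitKey above; a CSI driver would instead publish a key built
// from the "attachable-volumes-csi-" prefix plus its driver name.
package nodelimits

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func readAttachLimit(ctx context.Context, cs kubernetes.Interface, nodeName, key string) (int64, error) {
	node, err := cs.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
	if err != nil {
		return 0, err
	}
	q, ok := node.Status.Allocatable[v1.ResourceName(key)]
	if !ok {
		return 0, fmt.Errorf("node %s does not publish %s", nodeName, key)
	}
	return q.Value(), nil
}

Before this change, a caller could pass one of the in-tree keys above; with the constants removed, the updated test below obtains limits for translated drivers via the CSI side instead.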
@@ -35,8 +35,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/component-helpers/storage/ephemeral"
-	migrationplugins "k8s.io/csi-translation-lib/plugins" // volume plugin names are exported nicely there
-	volumeutil "k8s.io/kubernetes/pkg/volume/util"
+	csitrans "k8s.io/csi-translation-lib"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@@ -351,40 +350,17 @@ func waitForAllPVCsBound(ctx context.Context, cs clientset.Interface, timeout ti
 func getNodeLimits(ctx context.Context, cs clientset.Interface, config *storageframework.PerTestConfig, nodeName string, driver storageframework.DynamicPVTestDriver) (int, error) {
 	driverInfo := driver.GetDriverInfo()
 	if len(driverInfo.InTreePluginName) > 0 {
-		return getInTreeNodeLimits(ctx, cs, nodeName, driverInfo.InTreePluginName)
+		csiTranslator := csitrans.New()
+		driverName, err := csiTranslator.GetCSINameFromInTreeName(driverInfo.InTreePluginName)
+		if err != nil {
+			return 0, err
+		}
+		return getCSINodeLimits(ctx, cs, config, nodeName, driverName)
 	}
 
 	sc := driver.GetDynamicProvisionStorageClass(ctx, config, "")
 	return getCSINodeLimits(ctx, cs, config, nodeName, sc.Provisioner)
 }
 
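With the in-tree branch gone, the test translates the in-tree plugin name to its CSI driver name via csi-translation-lib and reads the CSI-side limit instead. As a small standalone illustration of that mapping (the AWS EBS name pair shown is the standard one, not something introduced by this commit):

// Illustrative sketch of the csi-translation-lib name mapping used above.
package main

import (
	"fmt"

	csitrans "k8s.io/csi-translation-lib"
)

func main() {
	translator := csitrans.New()
	// Maps an in-tree plugin name such as "kubernetes.io/aws-ebs"
	// to its CSI driver name, e.g. "ebs.csi.aws.com".
	csiName, err := translator.GetCSINameFromInTreeName("kubernetes.io/aws-ebs")
	if err != nil {
		fmt.Println("translation failed:", err)
		return
	}
	fmt.Println(csiName)
}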
-func getInTreeNodeLimits(ctx context.Context, cs clientset.Interface, nodeName, driverName string) (int, error) {
-	node, err := cs.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
-	if err != nil {
-		return 0, err
-	}
-
-	var allocatableKey string
-	switch driverName {
-	case migrationplugins.AWSEBSInTreePluginName:
-		allocatableKey = volumeutil.EBSVolumeLimitKey
-	case migrationplugins.GCEPDInTreePluginName:
-		allocatableKey = volumeutil.GCEVolumeLimitKey
-	case migrationplugins.CinderInTreePluginName:
-		allocatableKey = volumeutil.CinderVolumeLimitKey
-	case migrationplugins.AzureDiskInTreePluginName:
-		allocatableKey = volumeutil.AzureVolumeLimitKey
-	default:
-		return 0, fmt.Errorf("unknown in-tree volume plugin name: %s", driverName)
-	}
-
-	limit, ok := node.Status.Allocatable[v1.ResourceName(allocatableKey)]
-	if !ok {
-		return 0, fmt.Errorf("node %s does not contain status.allocatable[%s] for volume plugin %s", nodeName, allocatableKey, driverName)
-	}
-	return int(limit.Value()), nil
-}
-
 func getCSINodeLimits(ctx context.Context, cs clientset.Interface, config *storageframework.PerTestConfig, nodeName, driverName string) (int, error) {
 	// Retry with a timeout, the driver might just have been installed and kubelet takes a while to publish everything.
 	var limit int
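The rest of getCSINodeLimits is cut off in this view. As a rough hypothetical sketch of the lookup it performs (not the actual e2e helper, which per the comment above also retries while the driver is still registering), a driver's attach limit can be read from the node's CSINode object:

// Hypothetical sketch: read a CSI driver's attach limit from the CSINode
// object published by kubelet for the node.
package nodelimits

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func csiAttachLimit(ctx context.Context, cs kubernetes.Interface, nodeName, driverName string) (int, error) {
	csiNode, err := cs.StorageV1().CSINodes().Get(ctx, nodeName, metav1.GetOptions{})
	if err != nil {
		return 0, err
	}
	for _, d := range csiNode.Spec.Drivers {
		if d.Name != driverName {
			continue
		}
		if d.Allocatable == nil || d.Allocatable.Count == nil {
			return 0, fmt.Errorf("driver %s on node %s does not report an attach limit", driverName, nodeName)
		}
		return int(*d.Allocatable.Count), nil
	}
	return 0, fmt.Errorf("driver %s is not registered on node %s", driverName, nodeName)
}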