Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-18 08:09:58 +00:00
e2e/storage: get driver name from storage class
Do not use driverInfo.Name; it is for display only and may differ from the actual name of the CSI driver.
This commit is contained in:
parent f164b47ae7
commit 863880cd03
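The reason for the change: kubelet registers a CSI driver in CSINode under its canonical CSI driver name, which is the same string a dynamically provisioned StorageClass carries in .provisioner, while DriverInfo.Name is only a display label. A rough standalone sketch of the mismatch (the driver names below are made-up examples, not values taken from this commit):

// Made-up names for illustration; not part of this commit.
package main

import "fmt"

func main() {
	displayName := "csi-hostpath"        // what DriverInfo.Name might hold (display only)
	provisioner := "hostpath.csi.k8s.io" // what the StorageClass .provisioner holds

	// kubelet publishes the driver in CSINode under its canonical CSI name:
	csiNodeDrivers := []string{"hostpath.csi.k8s.io"}

	lookup := func(name string) bool {
		for _, d := range csiNodeDrivers {
			if d == name {
				return true
			}
		}
		return false
	}

	fmt.Println("found by DriverInfo.Name:", lookup(displayName)) // false
	fmt.Println("found by provisioner:    ", lookup(provisioner)) // true
}

This is why the diff below threads the test driver itself (and ultimately sc.Provisioner) into getCSINodeLimits instead of a DriverInfo.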
@@ -39,6 +39,7 @@ import (
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
+	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
 	storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
 	admissionapi "k8s.io/pod-security-admission/api"
@@ -86,6 +87,13 @@ func (t *volumeLimitsTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInf
 }
 
 func (t *volumeLimitsTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
+	if pattern.VolType != storageframework.DynamicPV {
+		e2eskipper.Skipf("Suite %q does not support %v", t.tsInfo.Name, pattern.VolType)
+	}
+	dInfo := driver.GetDriverInfo()
+	if !dInfo.Capabilities[storageframework.CapVolumeLimits] {
+		e2eskipper.Skipf("Driver %s does not support volume limits", dInfo.Name)
+	}
 }
 
 func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
@@ -108,6 +116,8 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver,
 	}
 	var (
 		l local
+
+		dDriver storageframework.DynamicPVTestDriver
 	)
 
 	// Beware that it also registers an AfterEach which renders f unusable. Any code using
@@ -115,6 +125,10 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver,
 	f := framework.NewFrameworkWithCustomTimeouts("volumelimits", storageframework.GetDriverTimeouts(driver))
 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
 
+	ginkgo.BeforeEach(func() {
+		dDriver = driver.(storageframework.DynamicPVTestDriver)
+	})
+
 	// This checks that CSIMaxVolumeLimitChecker works as expected.
 	// A randomly chosen node should be able to handle as many CSI volumes as
 	// it claims to handle in CSINode.Spec.Drivers[x].Allocatable.
@@ -125,14 +139,6 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver,
 	// BEWARE: the test may create lot of volumes and it's really slow.
 	f.It("should support volume limits", f.WithSerial(), func(ctx context.Context) {
 		driverInfo := driver.GetDriverInfo()
-		if !driverInfo.Capabilities[storageframework.CapVolumeLimits] {
-			ginkgo.Skip(fmt.Sprintf("driver %s does not support volume limits", driverInfo.Name))
-		}
-		var dDriver storageframework.DynamicPVTestDriver
-		if dDriver = driver.(storageframework.DynamicPVTestDriver); dDriver == nil {
-			framework.Failf("Test driver does not provide dynamically created volumes")
-		}
-
 		l.ns = f.Namespace
 		l.cs = f.ClientSet
 
@@ -150,7 +156,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver,
 		framework.Logf("Selected node %s", nodeName)
 
 		ginkgo.By("Checking node limits")
-		limit, err := getNodeLimits(ctx, l.cs, l.config, nodeName, driverInfo)
+		limit, err := getNodeLimits(ctx, l.cs, l.config, nodeName, dDriver)
 		framework.ExpectNoError(err)
 
 		framework.Logf("Node %s can handle %d volumes of driver %s", nodeName, limit, driverInfo.Name)
@@ -265,7 +271,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver,
 
 		for _, nodeName := range nodeNames {
 			ginkgo.By("Checking csinode limits")
-			_, err := getNodeLimits(ctx, l.cs, l.config, nodeName, driverInfo)
+			_, err := getNodeLimits(ctx, l.cs, l.config, nodeName, dDriver)
 			if err != nil {
 				framework.Failf("Expected volume limits to be set, error: %v", err)
 			}
@@ -347,21 +353,24 @@ func waitForAllPVCsBound(ctx context.Context, cs clientset.Interface, timeout ti
 	return pvNames, nil
 }
 
-func getNodeLimits(ctx context.Context, cs clientset.Interface, config *storageframework.PerTestConfig, nodeName string, driverInfo *storageframework.DriverInfo) (int, error) {
-	if len(driverInfo.InTreePluginName) == 0 {
-		return getCSINodeLimits(ctx, cs, config, nodeName, driverInfo)
-	}
-	return getInTreeNodeLimits(ctx, cs, nodeName, driverInfo)
-}
-
-func getInTreeNodeLimits(ctx context.Context, cs clientset.Interface, nodeName string, driverInfo *storageframework.DriverInfo) (int, error) {
+func getNodeLimits(ctx context.Context, cs clientset.Interface, config *storageframework.PerTestConfig, nodeName string, driver storageframework.DynamicPVTestDriver) (int, error) {
+	driverInfo := driver.GetDriverInfo()
+	if len(driverInfo.InTreePluginName) > 0 {
+		return getInTreeNodeLimits(ctx, cs, nodeName, driverInfo.InTreePluginName)
+	}
+
+	sc := driver.GetDynamicProvisionStorageClass(ctx, config, "")
+	return getCSINodeLimits(ctx, cs, config, nodeName, sc.Provisioner)
+}
+
+func getInTreeNodeLimits(ctx context.Context, cs clientset.Interface, nodeName, driverName string) (int, error) {
 	node, err := cs.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
 	if err != nil {
 		return 0, err
 	}
 
 	var allocatableKey string
-	switch driverInfo.InTreePluginName {
+	switch driverName {
 	case migrationplugins.AWSEBSInTreePluginName:
 		allocatableKey = volumeutil.EBSVolumeLimitKey
 	case migrationplugins.GCEPDInTreePluginName:
@@ -371,17 +380,17 @@ func getInTreeNodeLimits(ctx context.Context, cs clientset.Interface, nodeName s
 	case migrationplugins.AzureDiskInTreePluginName:
 		allocatableKey = volumeutil.AzureVolumeLimitKey
 	default:
-		return 0, fmt.Errorf("Unknown in-tree volume plugin name: %s", driverInfo.InTreePluginName)
+		return 0, fmt.Errorf("Unknown in-tree volume plugin name: %s", driverName)
 	}
 
 	limit, ok := node.Status.Allocatable[v1.ResourceName(allocatableKey)]
 	if !ok {
-		return 0, fmt.Errorf("Node %s does not contain status.allocatable[%s] for volume plugin %s", nodeName, allocatableKey, driverInfo.InTreePluginName)
+		return 0, fmt.Errorf("Node %s does not contain status.allocatable[%s] for volume plugin %s", nodeName, allocatableKey, driverName)
 	}
 	return int(limit.Value()), nil
 }
 
-func getCSINodeLimits(ctx context.Context, cs clientset.Interface, config *storageframework.PerTestConfig, nodeName string, driverInfo *storageframework.DriverInfo) (int, error) {
+func getCSINodeLimits(ctx context.Context, cs clientset.Interface, config *storageframework.PerTestConfig, nodeName, driverName string) (int, error) {
 	// Retry with a timeout, the driver might just have been installed and kubelet takes a while to publish everything.
 	var limit int
 	err := wait.PollImmediate(2*time.Second, csiNodeInfoTimeout, func() (bool, error) {
@@ -392,26 +401,26 @@ func getCSINodeLimits(ctx context.Context, cs clientset.Interface, config *stora
 		}
 		var csiDriver *storagev1.CSINodeDriver
 		for i, c := range csiNode.Spec.Drivers {
-			if c.Name == driverInfo.Name || c.Name == config.GetUniqueDriverName() {
+			if c.Name == driverName || c.Name == config.GetUniqueDriverName() {
 				csiDriver = &csiNode.Spec.Drivers[i]
 				break
 			}
 		}
 		if csiDriver == nil {
-			framework.Logf("CSINodeInfo does not have driver %s yet", driverInfo.Name)
+			framework.Logf("CSINodeInfo does not have driver %s yet", driverName)
 			return false, nil
 		}
 		if csiDriver.Allocatable == nil {
-			return false, fmt.Errorf("CSINodeInfo does not have Allocatable for driver %s", driverInfo.Name)
+			return false, fmt.Errorf("CSINodeInfo does not have Allocatable for driver %s", driverName)
 		}
 		if csiDriver.Allocatable.Count == nil {
-			return false, fmt.Errorf("CSINodeInfo does not have Allocatable.Count for driver %s", driverInfo.Name)
+			return false, fmt.Errorf("CSINodeInfo does not have Allocatable.Count for driver %s", driverName)
 		}
 		limit = int(*csiDriver.Allocatable.Count)
 		return true, nil
 	})
 	if err != nil {
-		return 0, fmt.Errorf("could not get CSINode limit for driver %s: %w", driverInfo.Name, err)
+		return 0, fmt.Errorf("could not get CSINode limit for driver %s: %w", driverName, err)
 	}
 	return limit, nil
 }
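For context, the value the test reads lives in the CSINode object that kubelet publishes per node. A minimal client-go sketch of that lookup, assuming a kubeconfig in the default location and made-up node and driver names; the real getCSINodeLimits additionally retries with wait.PollImmediate and also matches config.GetUniqueDriverName():

// Standalone sketch; node and driver names below are hypothetical examples.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// csiNodeLimit reads a driver's volume limit from CSINode.Spec.Drivers[].Allocatable.Count.
// driverName must be the registered CSI driver name (the StorageClass provisioner),
// not a display name.
func csiNodeLimit(ctx context.Context, cs kubernetes.Interface, nodeName, driverName string) (int, error) {
	csiNode, err := cs.StorageV1().CSINodes().Get(ctx, nodeName, metav1.GetOptions{})
	if err != nil {
		return 0, err
	}
	for _, d := range csiNode.Spec.Drivers {
		if d.Name == driverName && d.Allocatable != nil && d.Allocatable.Count != nil {
			return int(*d.Allocatable.Count), nil
		}
	}
	return 0, fmt.Errorf("driver %s not found in CSINode %s", driverName, nodeName)
}

func main() {
	// Assumes a kubeconfig at the default path; error handling kept minimal.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(config)
	limit, err := csiNodeLimit(context.Background(), cs, "node-1", "hostpath.csi.k8s.io")
	fmt.Println(limit, err)
}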