e2e/storage: get driver name from storage class
Do not use driverInfo.Name: it is for display only and may differ from the actual name of the CSI driver.
commit 863880cd03
parent f164b47ae7
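In short: the suite previously matched CSINode entries against driverInfo.Name, a display name that may not be what the driver registers under; the StorageClass returned by the driver carries the real provisioner name. A minimal sketch of the new lookup, assuming a DynamicPVTestDriver from the e2e storage framework (driverNameForCSINode is a hypothetical helper, not part of this commit):

    import (
        "context"

        storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
    )

    // driverNameForCSINode returns the name the CSI driver is expected to
    // register under in CSINode.Spec.Drivers. The StorageClass Provisioner
    // field carries that name; driverInfo.Name is display-only and may differ.
    func driverNameForCSINode(ctx context.Context, config *storageframework.PerTestConfig, driver storageframework.DynamicPVTestDriver) string {
        sc := driver.GetDynamicProvisionStorageClass(ctx, config, "")
        return sc.Provisioner
    }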
@@ -39,6 +39,7 @@ import (
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
+	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
 	storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
 	admissionapi "k8s.io/pod-security-admission/api"
@@ -86,6 +87,13 @@ func (t *volumeLimitsTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInf
 }
 
 func (t *volumeLimitsTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
+	if pattern.VolType != storageframework.DynamicPV {
+		e2eskipper.Skipf("Suite %q does not support %v", t.tsInfo.Name, pattern.VolType)
+	}
+	dInfo := driver.GetDriverInfo()
+	if !dInfo.Capabilities[storageframework.CapVolumeLimits] {
+		e2eskipper.Skipf("Driver %s does not support volume limits", dInfo.Name)
+	}
 }
 
 func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
@@ -108,6 +116,8 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver,
 	}
 	var (
 		l local
+
+		dDriver storageframework.DynamicPVTestDriver
 	)
 
 	// Beware that it also registers an AfterEach which renders f unusable. Any code using
@@ -115,6 +125,10 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver,
 	f := framework.NewFrameworkWithCustomTimeouts("volumelimits", storageframework.GetDriverTimeouts(driver))
 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
 
+	ginkgo.BeforeEach(func() {
+		dDriver = driver.(storageframework.DynamicPVTestDriver)
+	})
+
 	// This checks that CSIMaxVolumeLimitChecker works as expected.
 	// A randomly chosen node should be able to handle as many CSI volumes as
 	// it claims to handle in CSINode.Spec.Drivers[x].Allocatable.
@@ -125,14 +139,6 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver,
 	// BEWARE: the test may create lot of volumes and it's really slow.
 	f.It("should support volume limits", f.WithSerial(), func(ctx context.Context) {
 		driverInfo := driver.GetDriverInfo()
-		if !driverInfo.Capabilities[storageframework.CapVolumeLimits] {
-			ginkgo.Skip(fmt.Sprintf("driver %s does not support volume limits", driverInfo.Name))
-		}
-		var dDriver storageframework.DynamicPVTestDriver
-		if dDriver = driver.(storageframework.DynamicPVTestDriver); dDriver == nil {
-			framework.Failf("Test driver does not provide dynamically created volumes")
-		}
-
 		l.ns = f.Namespace
 		l.cs = f.ClientSet
 
@@ -150,7 +156,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver,
 		framework.Logf("Selected node %s", nodeName)
 
 		ginkgo.By("Checking node limits")
-		limit, err := getNodeLimits(ctx, l.cs, l.config, nodeName, driverInfo)
+		limit, err := getNodeLimits(ctx, l.cs, l.config, nodeName, dDriver)
 		framework.ExpectNoError(err)
 
 		framework.Logf("Node %s can handle %d volumes of driver %s", nodeName, limit, driverInfo.Name)
@@ -265,7 +271,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver,
 
 		for _, nodeName := range nodeNames {
 			ginkgo.By("Checking csinode limits")
-			_, err := getNodeLimits(ctx, l.cs, l.config, nodeName, driverInfo)
+			_, err := getNodeLimits(ctx, l.cs, l.config, nodeName, dDriver)
 			if err != nil {
 				framework.Failf("Expected volume limits to be set, error: %v", err)
 			}
@@ -347,21 +353,24 @@ func waitForAllPVCsBound(ctx context.Context, cs clientset.Interface, timeout ti
 	return pvNames, nil
 }
 
-func getNodeLimits(ctx context.Context, cs clientset.Interface, config *storageframework.PerTestConfig, nodeName string, driverInfo *storageframework.DriverInfo) (int, error) {
-	if len(driverInfo.InTreePluginName) == 0 {
-		return getCSINodeLimits(ctx, cs, config, nodeName, driverInfo)
+func getNodeLimits(ctx context.Context, cs clientset.Interface, config *storageframework.PerTestConfig, nodeName string, driver storageframework.DynamicPVTestDriver) (int, error) {
+	driverInfo := driver.GetDriverInfo()
+	if len(driverInfo.InTreePluginName) > 0 {
+		return getInTreeNodeLimits(ctx, cs, nodeName, driverInfo.InTreePluginName)
 	}
-	return getInTreeNodeLimits(ctx, cs, nodeName, driverInfo)
+
+	sc := driver.GetDynamicProvisionStorageClass(ctx, config, "")
+	return getCSINodeLimits(ctx, cs, config, nodeName, sc.Provisioner)
 }
 
-func getInTreeNodeLimits(ctx context.Context, cs clientset.Interface, nodeName string, driverInfo *storageframework.DriverInfo) (int, error) {
+func getInTreeNodeLimits(ctx context.Context, cs clientset.Interface, nodeName, driverName string) (int, error) {
 	node, err := cs.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
 	if err != nil {
 		return 0, err
 	}
 
 	var allocatableKey string
-	switch driverInfo.InTreePluginName {
+	switch driverName {
 	case migrationplugins.AWSEBSInTreePluginName:
 		allocatableKey = volumeutil.EBSVolumeLimitKey
 	case migrationplugins.GCEPDInTreePluginName:
@@ -371,17 +380,17 @@ func getInTreeNodeLimits(ctx context.Context, cs clientset.Interface, nodeName s
 	case migrationplugins.AzureDiskInTreePluginName:
 		allocatableKey = volumeutil.AzureVolumeLimitKey
 	default:
-		return 0, fmt.Errorf("Unknown in-tree volume plugin name: %s", driverInfo.InTreePluginName)
+		return 0, fmt.Errorf("Unknown in-tree volume plugin name: %s", driverName)
 	}
 
 	limit, ok := node.Status.Allocatable[v1.ResourceName(allocatableKey)]
 	if !ok {
-		return 0, fmt.Errorf("Node %s does not contain status.allocatable[%s] for volume plugin %s", nodeName, allocatableKey, driverInfo.InTreePluginName)
+		return 0, fmt.Errorf("Node %s does not contain status.allocatable[%s] for volume plugin %s", nodeName, allocatableKey, driverName)
	}
 	return int(limit.Value()), nil
 }
 
-func getCSINodeLimits(ctx context.Context, cs clientset.Interface, config *storageframework.PerTestConfig, nodeName string, driverInfo *storageframework.DriverInfo) (int, error) {
+func getCSINodeLimits(ctx context.Context, cs clientset.Interface, config *storageframework.PerTestConfig, nodeName, driverName string) (int, error) {
 	// Retry with a timeout, the driver might just have been installed and kubelet takes a while to publish everything.
 	var limit int
 	err := wait.PollImmediate(2*time.Second, csiNodeInfoTimeout, func() (bool, error) {
@@ -392,26 +401,26 @@ func getCSINodeLimits(ctx context.Context, cs clientset.Interface, config *stora
 		}
 		var csiDriver *storagev1.CSINodeDriver
 		for i, c := range csiNode.Spec.Drivers {
-			if c.Name == driverInfo.Name || c.Name == config.GetUniqueDriverName() {
+			if c.Name == driverName || c.Name == config.GetUniqueDriverName() {
 				csiDriver = &csiNode.Spec.Drivers[i]
 				break
 			}
 		}
 		if csiDriver == nil {
-			framework.Logf("CSINodeInfo does not have driver %s yet", driverInfo.Name)
+			framework.Logf("CSINodeInfo does not have driver %s yet", driverName)
 			return false, nil
 		}
 		if csiDriver.Allocatable == nil {
-			return false, fmt.Errorf("CSINodeInfo does not have Allocatable for driver %s", driverInfo.Name)
+			return false, fmt.Errorf("CSINodeInfo does not have Allocatable for driver %s", driverName)
 		}
 		if csiDriver.Allocatable.Count == nil {
-			return false, fmt.Errorf("CSINodeInfo does not have Allocatable.Count for driver %s", driverInfo.Name)
+			return false, fmt.Errorf("CSINodeInfo does not have Allocatable.Count for driver %s", driverName)
 		}
 		limit = int(*csiDriver.Allocatable.Count)
 		return true, nil
 	})
 	if err != nil {
-		return 0, fmt.Errorf("could not get CSINode limit for driver %s: %w", driverInfo.Name, err)
+		return 0, fmt.Errorf("could not get CSINode limit for driver %s: %w", driverName, err)
 	}
 	return limit, nil
 }
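As a usage note, the limit the test validates ultimately comes from CSINode.Spec.Drivers[x].Allocatable.Count, as the comment in the test says. A minimal client-go sketch of that lookup, assuming the driver has already registered on the node (the PollImmediate loop above handles the not-yet-registered case); csiVolumeLimit is a hypothetical helper, not part of this change:

    import (
        "context"
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // csiVolumeLimit returns Allocatable.Count for driverName on nodeName,
    // i.e. how many volumes of that CSI driver the node claims to handle.
    func csiVolumeLimit(ctx context.Context, cs kubernetes.Interface, nodeName, driverName string) (int, error) {
        csiNode, err := cs.StorageV1().CSINodes().Get(ctx, nodeName, metav1.GetOptions{})
        if err != nil {
            return 0, err
        }
        for _, d := range csiNode.Spec.Drivers {
            if d.Name != driverName {
                continue
            }
            if d.Allocatable == nil || d.Allocatable.Count == nil {
                return 0, fmt.Errorf("driver %s on node %s has no Allocatable.Count", driverName, nodeName)
            }
            return int(*d.Allocatable.Count), nil
        }
        return 0, fmt.Errorf("driver %s not registered on node %s", driverName, nodeName)
    }

Matching on the StorageClass's Provisioner (rather than the display name) is what makes this lookup reliable across drivers whose registered name differs from driverInfo.Name.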