mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-06 10:43:56 +00:00)

Enable volume limits e2e tests for CSI hostpath driver

This commit is contained in:
parent affcd0128b
commit f6de8ab727
@@ -106,6 +106,7 @@ func InitHostPathCSIDriver() testsuites.TestDriver {
 		testsuites.CapPVCDataSource:       true,
 		testsuites.CapControllerExpansion: true,
 		testsuites.CapSingleNodeVolume:    true,
+		testsuites.CapVolumeLimits:        true,
 	}
 	return initHostPathCSIDriver("csi-hostpath",
 		capabilities,
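The capability map above is what the volume limits suite consults before running. A minimal, self-contained sketch of that gating pattern; the Capability type and map here are illustrative stand-ins for the testsuites framework's own definitions, not its real API:

package main

import "fmt"

// Capability is an illustrative stand-in for the framework's capability keys.
type Capability string

const (
	CapSingleNodeVolume Capability = "singleNodeVolume"
	CapVolumeLimits     Capability = "volumeLimits"
)

func main() {
	// Sketch of the map the hostpath driver now advertises; the hunk
	// above adds the CapVolumeLimits entry.
	capabilities := map[Capability]bool{
		CapSingleNodeVolume: true,
		CapVolumeLimits:     true,
	}

	// A suite skips itself unless the driver opts in, mirroring the
	// ginkgo.Skip check in the next hunk.
	if !capabilities[CapVolumeLimits] {
		fmt.Println("SKIP: driver does not support volume limits")
		return
	}
	fmt.Println("running volume limits test")
}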
@@ -109,7 +109,7 @@ func (t *volumeLimitsTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
 	// And one extra pod with a CSI volume should get Pending with a condition
 	// that says it's unschedulable because of volume limit.
 	// BEWARE: the test may create lot of volumes and it's really slow.
-	ginkgo.It("should support volume limits [Slow][Serial]", func() {
+	ginkgo.It("should support volume limits [Serial]", func() {
 		driverInfo := driver.GetDriverInfo()
 		if !driverInfo.Capabilities[CapVolumeLimits] {
 			ginkgo.Skip(fmt.Sprintf("driver %s does not support volume limits", driverInfo.Name))
@@ -124,15 +124,19 @@ func (t *volumeLimitsTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
 		l.config, l.testCleanup = driver.PrepareTest(f)
 		defer l.testCleanup()
 
-		ginkgo.By("Picking a random node")
-		var nodeName string
-		node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
-		framework.ExpectNoError(err)
-		nodeName = node.Name
+		ginkgo.By("Picking a node")
+		// Some CSI drivers are deployed to a single node (e.g csi-hostpath),
+		// so we use that node instead of picking a random one.
+		nodeName := l.config.ClientNodeName
+		if nodeName == "" {
+			node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+			framework.ExpectNoError(err)
+			nodeName = node.Name
+		}
 		framework.Logf("Selected node %s", nodeName)
 
 		ginkgo.By("Checking node limits")
-		limit, err := getNodeLimits(l.cs, nodeName, driverInfo)
+		limit, err := getNodeLimits(l.cs, l.config, nodeName, driverInfo)
 		framework.ExpectNoError(err)
 
 		framework.Logf("Node %s can handle %d volumes of driver %s", nodeName, limit, driverInfo.Name)
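The node selection above reduces to a simple fallback: use the node the driver was deployed to when the test config names one, otherwise pick a random schedulable node. A hedged sketch under that reading, with the randomNode parameter standing in for e2enode.GetRandomReadySchedulableNode:

package main

import "fmt"

// pickNode prefers the node a single-node CSI driver (e.g. csi-hostpath)
// was deployed to; clientNodeName is empty for multi-node drivers, in
// which case it falls back to the supplied random-node helper.
func pickNode(clientNodeName string, randomNode func() (string, error)) (string, error) {
	if clientNodeName != "" {
		return clientNodeName, nil
	}
	return randomNode()
}

func main() {
	// Assumed stand-in for e2enode.GetRandomReadySchedulableNode.
	random := func() (string, error) { return "node-2", nil }

	name, err := pickNode("node-1", random) // driver pinned to node-1
	fmt.Println(name, err)                  // node-1 <nil>

	name, err = pickNode("", random) // no pinned node, pick randomly
	fmt.Println(name, err)           // node-2 <nil>
}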
@@ -283,9 +287,9 @@ func waitForAllPVCsPhase(cs clientset.Interface, timeout time.Duration, pvcs []*v1.PersistentVolumeClaim) (sets.String, error) {
 	return pvNames, err
 }
 
-func getNodeLimits(cs clientset.Interface, nodeName string, driverInfo *DriverInfo) (int, error) {
+func getNodeLimits(cs clientset.Interface, config *PerTestConfig, nodeName string, driverInfo *DriverInfo) (int, error) {
 	if len(driverInfo.InTreePluginName) == 0 {
-		return getCSINodeLimits(cs, nodeName, driverInfo)
+		return getCSINodeLimits(cs, config, nodeName, driverInfo)
 	}
 	return getInTreeNodeLimits(cs, nodeName, driverInfo)
 }
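getNodeLimits dispatches on whether the driver has an in-tree plugin name; only the CSI path needs the per-test config, which is why the new parameter is threaded through it. A compressed sketch of that dispatch, with Config standing in for *PerTestConfig and the two leaf functions reduced to placeholders:

package main

import "fmt"

// Config is an assumed stand-in for the framework's *PerTestConfig.
type Config struct{ uniqueDriverName string }

// Placeholder leaves: the real helpers read the CSINode object and the
// node's allocatable resources, respectively.
func getCSINodeLimits(config *Config, nodeName string) (int, error) { return 3, nil }
func getInTreeNodeLimits(pluginName, nodeName string) (int, error) { return 39, nil }

// getNodeLimits mirrors the dispatch above: CSI drivers have no in-tree
// plugin name, and only their lookup needs the per-test config.
func getNodeLimits(config *Config, inTreePluginName, nodeName string) (int, error) {
	if inTreePluginName == "" {
		return getCSINodeLimits(config, nodeName)
	}
	return getInTreeNodeLimits(inTreePluginName, nodeName)
}

func main() {
	limit, _ := getNodeLimits(&Config{"csi-hostpath-e2e-1234"}, "", "node-1")
	fmt.Println(limit)
}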
@@ -317,7 +321,7 @@ func getInTreeNodeLimits(cs clientset.Interface, nodeName string, driverInfo *DriverInfo) (int, error) {
 	return int(limit.Value()), nil
 }
 
-func getCSINodeLimits(cs clientset.Interface, nodeName string, driverInfo *DriverInfo) (int, error) {
+func getCSINodeLimits(cs clientset.Interface, config *PerTestConfig, nodeName string, driverInfo *DriverInfo) (int, error) {
 	// Wait in a loop, the driver might just have been installed and kubelet takes a while to publish everything.
 	var limit int
 	err := wait.PollImmediate(2*time.Second, csiNodeInfoTimeout, func() (bool, error) {
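getCSINodeLimits polls because kubelet publishes the CSINode object asynchronously after a driver is installed. A minimal sketch of the same wait.PollImmediate pattern, assuming k8s.io/apimachinery is available; lookupCSINodeLimit is a hypothetical stand-in for reading the driver's limit from the CSINode object:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// lookupCSINodeLimit is a hypothetical stand-in for reading the driver's
// allocatable volume count from the node's CSINode object.
func lookupCSINodeLimit() (int, bool) { return 3, true }

func main() {
	var limit int
	// Poll immediately, then every 2s, until the limit is published or
	// the timeout expires (csiNodeInfoTimeout in the real suite).
	err := wait.PollImmediate(2*time.Second, 1*time.Minute, func() (bool, error) {
		l, found := lookupCSINodeLimit()
		if !found {
			return false, nil // not published yet, keep polling
		}
		limit = l
		return true, nil
	})
	fmt.Println(limit, err)
}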
@@ -328,7 +332,7 @@ func getCSINodeLimits(cs clientset.Interface, nodeName string, driverInfo *DriverInfo) (int, error) {
 	}
 	var csiDriver *storagev1.CSINodeDriver
 	for _, c := range csiNode.Spec.Drivers {
-		if c.Name == driverInfo.Name {
+		if c.Name == driverInfo.Name || c.Name == config.GetUniqueDriverName() {
 			csiDriver = &c
 			break
 		}
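The lookup now matches two names because the e2e framework can redeploy a CSI driver under a per-test unique name (what config.GetUniqueDriverName() returns); matching only driverInfo.Name would miss such deployments. A self-contained sketch of the lookup, with a pared-down stand-in for storagev1.CSINodeDriver and an illustrative unique name; it indexes into the slice rather than taking the address of the loop variable:

package main

import "fmt"

// csiNodeDriver is a pared-down stand-in for storagev1.CSINodeDriver.
type csiNodeDriver struct{ Name string }

// findDriver returns the CSINode entry registered under either the
// driver's static name or its per-test unique name.
func findDriver(drivers []csiNodeDriver, staticName, uniqueName string) *csiNodeDriver {
	for i := range drivers {
		if drivers[i].Name == staticName || drivers[i].Name == uniqueName {
			return &drivers[i]
		}
	}
	return nil
}

func main() {
	// "csi-hostpath-e2e-1234" is an illustrative unique name, not a
	// value the framework is known to produce.
	drivers := []csiNodeDriver{{Name: "csi-hostpath-e2e-1234"}}
	d := findDriver(drivers, "csi-hostpath", "csi-hostpath-e2e-1234")
	fmt.Println(d != nil) // true
}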