mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-07 11:13:48 +00:00)
Enable volume limits e2e tests for CSI hostpath driver
commit f6de8ab727
parent affcd0128b
@@ -106,6 +106,7 @@ func InitHostPathCSIDriver() testsuites.TestDriver {
 		testsuites.CapPVCDataSource:       true,
 		testsuites.CapControllerExpansion: true,
 		testsuites.CapSingleNodeVolume:    true,
+		testsuites.CapVolumeLimits:        true,
 	}
 	return initHostPathCSIDriver("csi-hostpath",
 		capabilities,
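This first hunk is what actually turns the suite on for csi-hostpath: the driver now advertises CapVolumeLimits in its capability map, and the test suite skips any driver whose map does not set it. A minimal standalone sketch of that gating pattern follows; the type and constant names are illustrative stand-ins, not the e2e framework's own definitions.

package main

import "fmt"

// Capability mirrors the idea of testsuites.Capability (illustrative only).
type Capability string

const (
	CapSingleNodeVolume Capability = "singleNodeVolume"
	CapVolumeLimits     Capability = "volumeLimits"
)

func main() {
	// A capability map in the spirit of the one the hunk above extends.
	driverCaps := map[Capability]bool{
		CapSingleNodeVolume: true,
		CapVolumeLimits:     true, // the capability csi-hostpath now advertises
	}

	if !driverCaps[CapVolumeLimits] {
		fmt.Println("skip: driver does not support volume limits")
		return
	}
	fmt.Println("run the volume limits test")
}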
@@ -109,7 +109,7 @@ func (t *volumeLimitsTestSuite) defineTests(driver TestDriver, pattern testpatte
 	// And one extra pod with a CSI volume should get Pending with a condition
 	// that says it's unschedulable because of volume limit.
 	// BEWARE: the test may create lot of volumes and it's really slow.
-	ginkgo.It("should support volume limits [Slow][Serial]", func() {
+	ginkgo.It("should support volume limits [Serial]", func() {
 		driverInfo := driver.GetDriverInfo()
 		if !driverInfo.Capabilities[CapVolumeLimits] {
 			ginkgo.Skip(fmt.Sprintf("driver %s does not support volume limits", driverInfo.Name))
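The only functional change in this hunk is the spec name: dropping [Slow] matters because e2e jobs select Ginkgo specs with regular expressions over the full spec text (via --ginkgo.focus / --ginkgo.skip), so a [Slow]-tagged spec is filtered out of jobs that skip slow tests. A rough, framework-independent illustration of that selection:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Old and new spec names from the hunk above.
	specs := []string{
		"should support volume limits [Slow][Serial]", // before this commit
		"should support volume limits [Serial]",       // after this commit
	}

	// Jobs that avoid slow tests typically pass a skip regex like this one.
	skipSlow := regexp.MustCompile(`\[Slow\]`)

	for _, s := range specs {
		if skipSlow.MatchString(s) {
			fmt.Printf("filtered out: %q\n", s)
			continue
		}
		fmt.Printf("selected:     %q\n", s)
	}
}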
@@ -124,15 +124,19 @@ func (t *volumeLimitsTestSuite) defineTests(driver TestDriver, pattern testpatte
 		l.config, l.testCleanup = driver.PrepareTest(f)
 		defer l.testCleanup()
 
-		ginkgo.By("Picking a random node")
-		var nodeName string
-		node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
-		framework.ExpectNoError(err)
-		nodeName = node.Name
+		ginkgo.By("Picking a node")
+		// Some CSI drivers are deployed to a single node (e.g csi-hostpath),
+		// so we use that node instead of picking a random one.
+		nodeName := l.config.ClientNodeName
+		if nodeName == "" {
+			node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+			framework.ExpectNoError(err)
+			nodeName = node.Name
+		}
 		framework.Logf("Selected node %s", nodeName)
 
 		ginkgo.By("Checking node limits")
-		limit, err := getNodeLimits(l.cs, nodeName, driverInfo)
+		limit, err := getNodeLimits(l.cs, l.config, nodeName, driverInfo)
 		framework.ExpectNoError(err)
 
 		framework.Logf("Node %s can handle %d volumes of driver %s", nodeName, limit, driverInfo.Name)
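The node-selection hunk replaces "pick any ready schedulable node" with "use the node the driver's test config pins, and only fall back to a random node when nothing is pinned", since a single-node deployment such as csi-hostpath can only satisfy volumes on its own node. A self-contained sketch of that fallback; the struct and helper names below are stand-ins for the framework's PerTestConfig and e2enode helpers.

package main

import "fmt"

// perTestConfig stands in for the framework's *PerTestConfig.
type perTestConfig struct {
	// ClientNodeName is non-empty for drivers deployed to a single node,
	// such as csi-hostpath.
	ClientNodeName string
}

// randomReadySchedulableNode stands in for e2enode.GetRandomReadySchedulableNode.
func randomReadySchedulableNode() (string, error) {
	return "node-2", nil // pretend the cluster picked this node
}

// pickNode prefers the pinned node and falls back to a random one.
func pickNode(cfg perTestConfig) (string, error) {
	if cfg.ClientNodeName != "" {
		return cfg.ClientNodeName, nil
	}
	return randomReadySchedulableNode()
}

func main() {
	pinned, _ := pickNode(perTestConfig{ClientNodeName: "node-1"})
	free, _ := pickNode(perTestConfig{})
	fmt.Println("pinned driver uses:", pinned)
	fmt.Println("unpinned driver uses:", free)
}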
@@ -283,9 +287,9 @@ func waitForAllPVCsPhase(cs clientset.Interface, timeout time.Duration, pvcs []*
 	return pvNames, err
 }
 
-func getNodeLimits(cs clientset.Interface, nodeName string, driverInfo *DriverInfo) (int, error) {
+func getNodeLimits(cs clientset.Interface, config *PerTestConfig, nodeName string, driverInfo *DriverInfo) (int, error) {
 	if len(driverInfo.InTreePluginName) == 0 {
-		return getCSINodeLimits(cs, nodeName, driverInfo)
+		return getCSINodeLimits(cs, config, nodeName, driverInfo)
 	}
 	return getInTreeNodeLimits(cs, nodeName, driverInfo)
 }
@@ -317,7 +321,7 @@ func getInTreeNodeLimits(cs clientset.Interface, nodeName string, driverInfo *Dr
 	return int(limit.Value()), nil
 }
 
-func getCSINodeLimits(cs clientset.Interface, nodeName string, driverInfo *DriverInfo) (int, error) {
+func getCSINodeLimits(cs clientset.Interface, config *PerTestConfig, nodeName string, driverInfo *DriverInfo) (int, error) {
 	// Wait in a loop, the driver might just have been installed and kubelet takes a while to publish everything.
 	var limit int
 	err := wait.PollImmediate(2*time.Second, csiNodeInfoTimeout, func() (bool, error) {
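For context on the comment in this hunk: getCSINodeLimits polls because the CSINode object for a freshly installed driver can lag behind registration. The real code uses wait.PollImmediate from k8s.io/apimachinery; the loop below is only a dependency-free approximation of that poll-until-done pattern, not the library implementation.

package main

import (
	"errors"
	"fmt"
	"time"
)

// pollImmediate approximates the poll pattern: run cond right away, then
// keep retrying on the interval until it reports done, errors, or times out.
func pollImmediate(interval, timeout time.Duration, cond func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for {
		done, err := cond()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for the condition")
		}
		time.Sleep(interval)
	}
}

func main() {
	attempts := 0
	err := pollImmediate(10*time.Millisecond, time.Second, func() (bool, error) {
		attempts++
		// Pretend the CSINode entry only appears on the third check, e.g.
		// because kubelet has not published the driver's limits yet.
		return attempts >= 3, nil
	})
	fmt.Println("attempts:", attempts, "err:", err)
}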
@@ -328,7 +332,7 @@ func getCSINodeLimits(cs clientset.Interface, nodeName string, driverInfo *Drive
 		}
 		var csiDriver *storagev1.CSINodeDriver
 		for _, c := range csiNode.Spec.Drivers {
-			if c.Name == driverInfo.Name {
+			if c.Name == driverInfo.Name || c.Name == config.GetUniqueDriverName() {
 				csiDriver = &c
 				break
 			}
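The last hunk widens the CSINode lookup: the e2e framework can deploy a CSI driver under a per-test unique name, so the entry on the node may be registered under that name rather than the canonical "csi-hostpath". A standalone sketch of that lookup with made-up data; the trimmed type and the unique name shown are purely illustrative.

package main

import "fmt"

// csiNodeDriver is a trimmed stand-in for storagev1.CSINodeDriver.
type csiNodeDriver struct {
	Name        string
	Allocatable *int32
}

// findDriver accepts either the driver's canonical name or the per-test
// unique name it may have been deployed under.
func findDriver(drivers []csiNodeDriver, canonical, unique string) *csiNodeDriver {
	for i := range drivers {
		if drivers[i].Name == canonical || drivers[i].Name == unique {
			return &drivers[i]
		}
	}
	return nil
}

func main() {
	limit := int32(10)
	drivers := []csiNodeDriver{
		// Registered under a hypothetical per-test unique name.
		{Name: "csi-hostpath-e2e-1234", Allocatable: &limit},
	}

	if d := findDriver(drivers, "csi-hostpath", "csi-hostpath-e2e-1234"); d != nil {
		fmt.Printf("found %s, node volume limit %d\n", d.Name, *d.Allocatable)
	}
}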