diff --git a/test/e2e/framework/skipper/skipper.go b/test/e2e/framework/skipper/skipper.go
index dbe0214a9fa..11975038b80 100644
--- a/test/e2e/framework/skipper/skipper.go
+++ b/test/e2e/framework/skipper/skipper.go
@@ -49,6 +49,7 @@ var localStorageCapacityIsolation featuregate.Feature = "LocalStorageCapacityIso
 var (
 	downwardAPIHugePages featuregate.Feature = "DownwardAPIHugePages"
 	execProbeTimeout     featuregate.Feature = "ExecProbeTimeout"
+	csiMigration         featuregate.Feature = "CSIMigration"
 )
 
 func skipInternalf(caller int, format string, args ...interface{}) {
@@ -154,6 +155,12 @@ func SkipUnlessExecProbeTimeoutEnabled() {
 	}
 }
 
+func SkipIfCSIMigrationEnabled() {
+	if utilfeature.DefaultFeatureGate.Enabled(csiMigration) {
+		skipInternalf(1, "Only supported when %v feature is disabled", csiMigration)
+	}
+}
+
 // SkipIfMissingResource skips if the gvr resource is missing.
 func SkipIfMissingResource(dynamicClient dynamic.Interface, gvr schema.GroupVersionResource, namespace string) {
 	resourceClient := dynamicClient.Resource(gvr).Namespace(namespace)
diff --git a/test/e2e/storage/testsuites/volumelimits.go b/test/e2e/storage/testsuites/volumelimits.go
index 772a94756fd..846ecd2dc3c 100644
--- a/test/e2e/storage/testsuites/volumelimits.go
+++ b/test/e2e/storage/testsuites/volumelimits.go
@@ -234,6 +234,40 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver,
 		})
 		framework.ExpectNoError(err)
 	})
+
+	ginkgo.It("should verify that all csinodes have volume limits", func() {
+		driverInfo := driver.GetDriverInfo()
+		if !driverInfo.Capabilities[storageframework.CapVolumeLimits] {
+			ginkgo.Skip(fmt.Sprintf("driver %s does not support volume limits", driverInfo.Name))
+		}
+
+		l.ns = f.Namespace
+		l.cs = f.ClientSet
+
+		l.config, l.testCleanup = driver.PrepareTest(f)
+		defer l.testCleanup()
+
+		nodeNames := []string{}
+		if l.config.ClientNodeSelection.Name != "" {
+			// Some CSI drivers are deployed to a single node (e.g csi-hostpath),
+			// so we check that node instead of checking all of them
+			nodeNames = append(nodeNames, l.config.ClientNodeSelection.Name)
+		} else {
+			nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
+			framework.ExpectNoError(err)
+			for _, node := range nodeList.Items {
+				nodeNames = append(nodeNames, node.Name)
+			}
+		}
+
+		for _, nodeName := range nodeNames {
+			ginkgo.By("Checking csinode limits")
+			_, err := getNodeLimits(l.cs, l.config, nodeName, driverInfo)
+			if err != nil {
+				framework.Failf("Expected volume limits to be set, error: %v", err)
+			}
+		}
+	})
 }
 
 func cleanupTest(cs clientset.Interface, ns string, runningPodName, unschedulablePodName string, pvcs []*v1.PersistentVolumeClaim, pvNames sets.String, timeout time.Duration) error {
diff --git a/test/e2e/storage/volume_limits.go b/test/e2e/storage/volume_limits.go
index c290eb49887..64f6fc78202 100644
--- a/test/e2e/storage/volume_limits.go
+++ b/test/e2e/storage/volume_limits.go
@@ -34,6 +34,8 @@ var _ = utils.SIGDescribe("Volume limits", func() {
 	f := framework.NewDefaultFramework("volume-limits-on-node")
 	ginkgo.BeforeEach(func() {
 		e2eskipper.SkipUnlessProviderIs("aws", "gce", "gke")
+		// If CSIMigration is enabled, then the limits should be on CSINodes, not Nodes, and another test checks this
+		e2eskipper.SkipIfCSIMigrationEnabled()
 		c = f.ClientSet
 		framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
 	})