diff --git a/test/e2e/storage/BUILD b/test/e2e/storage/BUILD
index 8385ca37676..7ecc8bf88c3 100644
--- a/test/e2e/storage/BUILD
+++ b/test/e2e/storage/BUILD
@@ -67,11 +67,9 @@ go_library(
         "//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library",
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/metrics:go_default_library",
-        "//test/e2e/framework/podlogs:go_default_library",
         "//test/e2e/framework/providers/gce:go_default_library",
         "//test/e2e/framework/testfiles:go_default_library",
        "//test/e2e/storage/drivers:go_default_library",
-        "//test/e2e/storage/testpatterns:go_default_library",
         "//test/e2e/storage/testsuites:go_default_library",
         "//test/e2e/storage/utils:go_default_library",
         "//test/utils/image:go_default_library",
diff --git a/test/e2e/storage/csi_volumes.go b/test/e2e/storage/csi_volumes.go
index 2ab9ca97026..5ddd5972909 100644
--- a/test/e2e/storage/csi_volumes.go
+++ b/test/e2e/storage/csi_volumes.go
@@ -17,10 +17,8 @@ limitations under the License.
 package storage
 
 import (
-	"context"
 	"encoding/json"
 	"fmt"
-	"regexp"
 	"strings"
 	"time"
 
@@ -32,9 +30,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	csiclient "k8s.io/csi-api/pkg/client/clientset/versioned"
 	"k8s.io/kubernetes/test/e2e/framework"
-	"k8s.io/kubernetes/test/e2e/framework/podlogs"
 	"k8s.io/kubernetes/test/e2e/storage/drivers"
-	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
 	"k8s.io/kubernetes/test/e2e/storage/testsuites"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
@@ -47,9 +43,9 @@ import (
 )
 
 // List of testDrivers to be executed in below loop
-var csiTestDrivers = []func(config testsuites.TestConfig) testsuites.TestDriver{
+var csiTestDrivers = []func() testsuites.TestDriver{
 	drivers.InitHostPathCSIDriver,
-	drivers.InitGcePDCSIDriver,
+	func() testsuites.TestDriver { return drivers.InitGcePDCSIDriver(false /* topology enabled */) },
 	drivers.InitGcePDExternalCSIDriver,
 	drivers.InitHostPathV0CSIDriver,
 	// Don't run tests with mock driver (drivers.InitMockCSIDriver), it does not provide persistent storage.
@@ -65,133 +61,58 @@ var csiTestSuites = []func() testsuites.TestSuite{
 	testsuites.InitSnapshottableTestSuite,
 }
 
-func csiTunePattern(patterns []testpatterns.TestPattern) []testpatterns.TestPattern {
-	tunedPatterns := []testpatterns.TestPattern{}
-
-	for _, pattern := range patterns {
-		// Skip inline volume and pre-provsioned PV tests for csi drivers
-		if pattern.VolType == testpatterns.InlineVolume || pattern.VolType == testpatterns.PreprovisionedPV {
-			continue
-		}
-		tunedPatterns = append(tunedPatterns, pattern)
-	}
-
-	return tunedPatterns
-}
-
 // This executes testSuites for csi volumes.
 var _ = utils.SIGDescribe("CSI Volumes", func() {
-	f := framework.NewDefaultFramework("csi-volumes")
-
-	var (
-		cancel context.CancelFunc
-		cs     clientset.Interface
-		csics  csiclient.Interface
-		ns     *v1.Namespace
-		// Common configuration options for each driver.
-		config = testsuites.TestConfig{
-			Framework: f,
-			Prefix:    "csi",
-		}
-	)
-
-	BeforeEach(func() {
-		ctx, c := context.WithCancel(context.Background())
-		cancel = c
-		cs = f.ClientSet
-		csics = f.CSIClientSet
-		ns = f.Namespace
-
-		// Debugging of the following tests heavily depends on the log output
-		// of the different containers. Therefore include all of that in log
-		// files (when using --report-dir, as in the CI) or the output stream
-		// (otherwise).
-		to := podlogs.LogOutput{
-			StatusWriter: GinkgoWriter,
-		}
-		if framework.TestContext.ReportDir == "" {
-			to.LogWriter = GinkgoWriter
-		} else {
-			test := CurrentGinkgoTestDescription()
-			reg := regexp.MustCompile("[^a-zA-Z0-9_-]+")
-			// We end the prefix with a slash to ensure that all logs
-			// end up in a directory named after the current test.
-			to.LogPathPrefix = framework.TestContext.ReportDir + "/" +
-				reg.ReplaceAllString(test.FullTestText, "_") + "/"
-		}
-		podlogs.CopyAllLogs(ctx, cs, ns.Name, to)
-
-		// pod events are something that the framework already collects itself
-		// after a failed test. Logging them live is only useful for interactive
-		// debugging, not when we collect reports.
-		if framework.TestContext.ReportDir == "" {
-			podlogs.WatchPods(ctx, cs, ns.Name, GinkgoWriter)
-		}
-	})
-
-	AfterEach(func() {
-		cancel()
-	})
-
 	for _, initDriver := range csiTestDrivers {
-		curDriver := initDriver(config)
-		curConfig := curDriver.GetDriverInfo().Config
+		curDriver := initDriver()
+
 		Context(testsuites.GetDriverNameWithFeatureTags(curDriver), func() {
-			BeforeEach(func() {
-				// Reset config. The driver might have modified its copy
-				// in a previous test.
-				curDriver.GetDriverInfo().Config = curConfig
-
-				// setupDriver
-				curDriver.CreateDriver()
-			})
-
-			AfterEach(func() {
-				// Cleanup driver
-				curDriver.CleanupDriver()
-			})
-
-			testsuites.RunTestSuite(f, curDriver, csiTestSuites, csiTunePattern)
+			testsuites.DefineTestSuite(curDriver, csiTestSuites)
 		})
 	}
 
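Note: the log-streaming `BeforeEach` removed above is not lost; each driver's `PrepareTest` now calls `testsuites.StartPodLogs` (see `drivers/csi.go` below) and invokes the returned cancel function during cleanup. A sketch of what that helper presumably wraps, reconstructed from the deleted code here — the real implementation lives in the `testsuites` package and may differ in detail:

```go
// Sketch only, assuming the helper repackages the removed BeforeEach body
// (ginkgo dot-imports and other imports omitted).
func StartPodLogs(f *framework.Framework) context.CancelFunc {
	ctx, cancel := context.WithCancel(context.Background())
	cs := f.ClientSet
	ns := f.Namespace

	// Send container output to log files (with --report-dir, as in CI)
	// or to the test output stream (otherwise).
	to := podlogs.LogOutput{StatusWriter: GinkgoWriter}
	if framework.TestContext.ReportDir == "" {
		to.LogWriter = GinkgoWriter
	} else {
		test := CurrentGinkgoTestDescription()
		reg := regexp.MustCompile("[^a-zA-Z0-9_-]+")
		// End the prefix with a slash so all logs land in a per-test directory.
		to.LogPathPrefix = framework.TestContext.ReportDir + "/" +
			reg.ReplaceAllString(test.FullTestText, "_") + "/"
	}
	podlogs.CopyAllLogs(ctx, cs, ns.Name, to)

	// Pod events are already collected by the framework after a failed test;
	// watching them live is only useful for interactive debugging.
	if framework.TestContext.ReportDir == "" {
		podlogs.WatchPods(ctx, cs, ns.Name, GinkgoWriter)
	}
	return cancel
}
```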
 	Context("CSI Topology test using GCE PD driver [Feature:CSINodeInfo]", func() {
-		newConfig := config
-		newConfig.TopologyEnabled = true
-		driver := drivers.InitGcePDCSIDriver(newConfig).(testsuites.DynamicPVTestDriver) // TODO (#71289) eliminate by moving this test to common test suite.
+		f := framework.NewDefaultFramework("csitopology")
+		driver := drivers.InitGcePDCSIDriver(true /* topology enabled */).(testsuites.DynamicPVTestDriver) // TODO (#71289) eliminate by moving this test to common test suite.
+		var (
+			config      *testsuites.PerTestConfig
+			testCleanup func()
+		)
 		BeforeEach(func() {
-			driver.CreateDriver()
+			config, testCleanup = driver.PrepareTest(f)
 		})
 
 		AfterEach(func() {
-			driver.CleanupDriver()
+			if testCleanup != nil {
+				testCleanup()
+			}
 		})
 
 		It("should provision zonal PD with immediate volume binding and AllowedTopologies set and mount the volume to a pod", func() {
 			suffix := "topology-positive"
-			testTopologyPositive(cs, suffix, ns.GetName(), false /* delayBinding */, true /* allowedTopologies */)
+			testTopologyPositive(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), false /* delayBinding */, true /* allowedTopologies */)
 		})
 
 		It("should provision zonal PD with delayed volume binding and mount the volume to a pod", func() {
 			suffix := "delayed"
-			testTopologyPositive(cs, suffix, ns.GetName(), true /* delayBinding */, false /* allowedTopologies */)
+			testTopologyPositive(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), true /* delayBinding */, false /* allowedTopologies */)
 		})
 
 		It("should provision zonal PD with delayed volume binding and AllowedTopologies set and mount the volume to a pod", func() {
 			suffix := "delayed-topology-positive"
-			testTopologyPositive(cs, suffix, ns.GetName(), true /* delayBinding */, true /* allowedTopologies */)
+			testTopologyPositive(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), true /* delayBinding */, true /* allowedTopologies */)
 		})
 
 		It("should fail to schedule a pod with a zone missing from AllowedTopologies; PD is provisioned with immediate volume binding", func() {
-			framework.SkipUnlessMultizone(cs)
+			framework.SkipUnlessMultizone(config.Framework.ClientSet)
 			suffix := "topology-negative"
-			testTopologyNegative(cs, suffix, ns.GetName(), false /* delayBinding */)
+			testTopologyNegative(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), false /* delayBinding */)
 		})
 
 		It("should fail to schedule a pod with a zone missing from AllowedTopologies; PD is provisioned with delayed volume binding", func() {
-			framework.SkipUnlessMultizone(cs)
+			framework.SkipUnlessMultizone(config.Framework.ClientSet)
 			suffix := "delayed-topology-negative"
-			testTopologyNegative(cs, suffix, ns.GetName(), true /* delayBinding */)
+			testTopologyNegative(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), true /* delayBinding */)
 		})
 	})
 
@@ -227,29 +148,30 @@ var _ = utils.SIGDescribe("CSI Volumes", func() {
 	for _, t := range tests {
 		test := t
-		It(test.name, func() {
-			By("Deploying mock CSI driver")
-			config := testsuites.TestConfig{
-				Framework: f,
-				Prefix:    "csi-attach",
-			}
+		f := framework.NewDefaultFramework("csiattach")
 
-			driver = drivers.InitMockCSIDriver(config, test.deployDriverCRD, test.driverAttachable, nil)
-			driver.CreateDriver()
-			defer driver.CleanupDriver()
+		It(test.name, func() {
+			cs := f.ClientSet
+			csics := f.CSIClientSet
+			ns := f.Namespace
+
+			driver = drivers.InitMockCSIDriver(test.deployDriverCRD, test.driverAttachable, nil)
+			config, testCleanup := driver.PrepareTest(f)
+			driverName := config.GetUniqueDriverName()
+			defer testCleanup()
 
 			if test.deployDriverCRD {
-				err = waitForCSIDriver(csics, driver)
+				err = waitForCSIDriver(csics, driverName)
 				framework.ExpectNoError(err, "Failed to get CSIDriver: %v", err)
-				defer destroyCSIDriver(csics, driver)
+				defer destroyCSIDriver(csics, driverName)
 			}
 
 			By("Creating pod")
 			var sc *storagev1.StorageClass
 			if dDriver, ok := driver.(testsuites.DynamicPVTestDriver); ok {
-				sc = dDriver.GetDynamicProvisionStorageClass("")
+				sc = dDriver.GetDynamicProvisionStorageClass(config, "")
 			}
-			nodeName := driver.GetDriverInfo().Config.ClientNodeName
+			nodeName := config.ClientNodeName
 			scTest := testsuites.StorageClassTest{
 				Name:        driver.GetDriverInfo().Name,
 				Provisioner: sc.Provisioner,
@@ -347,29 +269,30 @@ var _ = utils.SIGDescribe("CSI Volumes", func() {
 	}
 
 	for _, t := range tests {
 		test := t
-		It(test.name, func() {
-			By("Deploying mock CSI driver")
-			config := testsuites.TestConfig{
-				Framework: f,
-				Prefix:    "csi-workload",
-			}
+		f := framework.NewDefaultFramework("csiworkload")
 
-			driver = drivers.InitMockCSIDriver(config, test.deployDriverCRD, true, test.podInfoOnMountVersion)
-			driver.CreateDriver()
-			defer driver.CleanupDriver()
+		It(test.name, func() {
+			cs := f.ClientSet
+			csics := f.CSIClientSet
+			ns := f.Namespace
+
+			driver = drivers.InitMockCSIDriver(test.deployDriverCRD, true, test.podInfoOnMountVersion)
+			config, testCleanup := driver.PrepareTest(f)
+			driverName := config.GetUniqueDriverName()
+			defer testCleanup()
 
 			if test.deployDriverCRD {
-				err = waitForCSIDriver(csics, driver)
+				err = waitForCSIDriver(csics, driverName)
 				framework.ExpectNoError(err, "Failed to get CSIDriver: %v", err)
-				defer destroyCSIDriver(csics, driver)
+				defer destroyCSIDriver(csics, driverName)
 			}
 
 			By("Creating pod")
 			var sc *storagev1.StorageClass
 			if dDriver, ok := driver.(testsuites.DynamicPVTestDriver); ok {
-				sc = dDriver.GetDynamicProvisionStorageClass("")
+				sc = dDriver.GetDynamicProvisionStorageClass(config, "")
 			}
-			nodeName := driver.GetDriverInfo().Config.ClientNodeName
+			nodeName := config.ClientNodeName
 			scTest := testsuites.StorageClassTest{
 				Name:       driver.GetDriverInfo().Name,
 				Parameters: sc.Parameters,
@@ -420,14 +343,16 @@ func testTopologyPositive(cs clientset.Interface, suffix, namespace string, dela
 		topoZone := getRandomClusterZone(cs)
 		addSingleCSIZoneAllowedTopologyToStorageClass(cs, class, topoZone)
 	}
-	claim := newClaim(test, namespace, suffix)
-	claim.Spec.StorageClassName = &class.Name
+	test.Client = cs
+	test.Claim = newClaim(test, namespace, suffix)
+	test.Claim.Spec.StorageClassName = &class.Name
+	test.Class = class
 
 	if delayBinding {
-		_, node := testsuites.TestBindingWaitForFirstConsumer(test, cs, claim, class, nil /* node selector */, false /* expect unschedulable */)
+		_, node := test.TestBindingWaitForFirstConsumer(nil /* node selector */, false /* expect unschedulable */)
 		Expect(node).ToNot(BeNil(), "Unexpected nil node found")
 	} else {
-		testsuites.TestDynamicProvisioning(test, cs, claim, class)
+		test.TestDynamicProvisioning()
 	}
 }
 
@@ -447,12 +372,13 @@ func testTopologyNegative(cs clientset.Interface, suffix, namespace string, dela
 	test.DelayBinding = delayBinding
 
 	nodeSelector := map[string]string{v1.LabelZoneFailureDomain: podZone}
-	class := newStorageClass(test, namespace, suffix)
-	addSingleCSIZoneAllowedTopologyToStorageClass(cs, class, pvZone)
-	claim := newClaim(test, namespace, suffix)
-	claim.Spec.StorageClassName = &class.Name
+	test.Client = cs
+	test.Class = newStorageClass(test, namespace, suffix)
+	addSingleCSIZoneAllowedTopologyToStorageClass(cs, test.Class, pvZone)
+	test.Claim = newClaim(test, namespace, suffix)
+	test.Claim.Spec.StorageClassName = &test.Class.Name
 	if delayBinding {
-		testsuites.TestBindingWaitForFirstConsumer(test, cs, claim, class, nodeSelector, true /* expect unschedulable */)
+		test.TestBindingWaitForFirstConsumer(nodeSelector, true /* expect unschedulable */)
 	} else {
 		test.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
 			// Ensure that a pod cannot be scheduled in an unsuitable zone.
@@ -461,13 +387,12 @@ func testTopologyNegative(cs clientset.Interface, suffix, namespace string, dela
 			defer testsuites.StopPod(cs, pod)
 			framework.ExpectNoError(framework.WaitForPodNameUnschedulableInNamespace(cs, pod.Name, pod.Namespace), "pod should be unschedulable")
 		}
-		testsuites.TestDynamicProvisioning(test, cs, claim, class)
+		test.TestDynamicProvisioning()
 	}
 }
 
-func waitForCSIDriver(csics csiclient.Interface, driver testsuites.TestDriver) error {
+func waitForCSIDriver(csics csiclient.Interface, driverName string) error {
 	timeout := 2 * time.Minute
-	driverName := testsuites.GetUniqueDriverName(driver)
 
 	framework.Logf("waiting up to %v for CSIDriver %q", timeout, driverName)
 	for start := time.Now(); time.Since(start) < timeout; time.Sleep(framework.Poll) {
@@ -479,8 +404,7 @@ func waitForCSIDriver(csics csiclient.Interface, driver testsuites.TestDriver) e
 	return fmt.Errorf("gave up after waiting %v for CSIDriver %q.", timeout, driverName)
 }
 
-func destroyCSIDriver(csics csiclient.Interface, driver testsuites.TestDriver) {
-	driverName := testsuites.GetUniqueDriverName(driver)
+func destroyCSIDriver(csics csiclient.Interface, driverName string) {
 	driverGet, err := csics.CsiV1alpha1().CSIDrivers().Get(driverName, metav1.GetOptions{})
 	if err == nil {
 		framework.Logf("deleting %s.%s: %s", driverGet.TypeMeta.APIVersion, driverGet.TypeMeta.Kind, driverGet.ObjectMeta.Name)
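Note: with the shared `TestConfig` gone, standalone tests follow the same pattern as the topology context above — initialize the driver once, call `PrepareTest` per test, and defer the returned cleanup. A condensed sketch (driver choice and test body are illustrative, not part of this patch; ginkgo dot-imports omitted):

```go
var _ = Describe("example CSI consumer [sketch]", func() {
	f := framework.NewDefaultFramework("csi-example")
	driver := drivers.InitHostPathCSIDriver()

	var (
		config      *testsuites.PerTestConfig
		testCleanup func()
	)

	BeforeEach(func() {
		// Deploys the driver and starts per-test pod log capture.
		config, testCleanup = driver.PrepareTest(f)
	})

	AfterEach(func() {
		if testCleanup != nil {
			testCleanup() // uninstalls the driver, stops log capture
		}
	})

	It("builds a StorageClass from the per-test config", func() {
		dDriver := driver.(testsuites.DynamicPVTestDriver)
		sc := dDriver.GetDynamicProvisionStorageClass(config, "" /* fsType */)
		framework.Logf("would provision using StorageClass %q", sc.Name)
	})
})
```

diff --git a/test/e2e/storage/drivers/csi.go b/test/e2e/storage/drivers/csi.go
index 9d384c2e4fd..5afdd770db4 100644
--- a/test/e2e/storage/drivers/csi.go
+++ b/test/e2e/storage/drivers/csi.go
@@ -56,12 +56,11 @@ const (
 
 // hostpathCSI
 type hostpathCSIDriver struct {
-	cleanup    func()
 	driverInfo testsuites.DriverInfo
 	manifests  []string
 }
 
-func initHostPathCSIDriver(name string, config testsuites.TestConfig, capabilities map[testsuites.Capability]bool, manifests ...string) testsuites.TestDriver {
+func initHostPathCSIDriver(name string, capabilities map[testsuites.Capability]bool, manifests ...string) testsuites.TestDriver {
 	return &hostpathCSIDriver{
 		driverInfo: testsuites.DriverInfo{
 			Name: name,
@@ -71,7 +70,6 @@ func initHostPathCSIDriver(name string, config testsuites.TestConfig, capabiliti
 				"", // Default fsType
 			),
 			Capabilities: capabilities,
-			Config:       config,
 		},
 		manifests: manifests,
 	}
@@ -82,8 +80,8 @@ var _ testsuites.DynamicPVTestDriver = &hostpathCSIDriver{}
 var _ testsuites.SnapshottableTestDriver = &hostpathCSIDriver{}
 
 // InitHostPathCSIDriver returns hostpathCSIDriver that implements TestDriver interface
-func InitHostPathCSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
-	return initHostPathCSIDriver("csi-hostpath", config,
+func InitHostPathCSIDriver() testsuites.TestDriver {
+	return initHostPathCSIDriver("csi-hostpath",
 		map[testsuites.Capability]bool{testsuites.CapPersistence: true, testsuites.CapDataSource: true, testsuites.CapMultiPODs: true},
 		"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
 		"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
@@ -104,19 +102,19 @@ func (h *hostpathCSIDriver) GetDriverInfo() *testsuites.DriverInfo {
 func (h *hostpathCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
 }
 
-func (h *hostpathCSIDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
-	provisioner := testsuites.GetUniqueDriverName(h)
+func (h *hostpathCSIDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
+	provisioner := config.GetUniqueDriverName()
 	parameters := map[string]string{}
-	ns := h.driverInfo.Config.Framework.Namespace.Name
+	ns := config.Framework.Namespace.Name
 	suffix := fmt.Sprintf("%s-sc", provisioner)
 
 	return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
 }
 
-func (h *hostpathCSIDriver) GetSnapshotClass() *unstructured.Unstructured {
-	snapshotter := testsuites.GetUniqueDriverName(h)
+func (h *hostpathCSIDriver) GetSnapshotClass(config *testsuites.PerTestConfig) *unstructured.Unstructured {
+	snapshotter := config.GetUniqueDriverName()
 	parameters := map[string]string{}
-	ns := h.driverInfo.Config.Framework.Namespace.Name
+	ns := config.Framework.Namespace.Name
 	suffix := fmt.Sprintf("%s-vsc", snapshotter)
 
 	return testsuites.GetSnapshotClass(snapshotter, parameters, ns, suffix)
@@ -126,57 +124,60 @@ func (h *hostpathCSIDriver) GetClaimSize() string {
 	return "5Gi"
 }
 
-func (h *hostpathCSIDriver) CreateDriver() {
+func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
 	By(fmt.Sprintf("deploying %s driver", h.driverInfo.Name))
-	f := h.driverInfo.Config.Framework
+	cancelLogging := testsuites.StartPodLogs(f)
 	cs := f.ClientSet
 
 	// The hostpath CSI driver only works when everything runs on the same node.
 	nodes := framework.GetReadySchedulableNodesOrDie(cs)
 	nodeName := nodes.Items[rand.Intn(len(nodes.Items))].Name
-	h.driverInfo.Config.ClientNodeName = nodeName
+	config := &testsuites.PerTestConfig{
+		Driver:         h,
+		Prefix:         "hostpath",
+		Framework:      f,
+		ClientNodeName: nodeName,
+	}
 
 	// TODO (?): the storage.csi.image.version and storage.csi.image.registry
 	// settings are ignored for this test. We could patch the image definitions.
 	o := utils.PatchCSIOptions{
 		OldDriverName:            h.driverInfo.Name,
-		NewDriverName:            testsuites.GetUniqueDriverName(h),
+		NewDriverName:            config.GetUniqueDriverName(),
 		DriverContainerName:      "hostpath",
-		DriverContainerArguments: []string{"--drivername=csi-hostpath-" + f.UniqueName},
+		DriverContainerArguments: []string{"--drivername=" + config.GetUniqueDriverName()},
 		ProvisionerContainerName: "csi-provisioner",
 		SnapshotterContainerName: "csi-snapshotter",
 		NodeName:                 nodeName,
 	}
-	cleanup, err := h.driverInfo.Config.Framework.CreateFromManifests(func(item interface{}) error {
-		return utils.PatchCSIDeployment(h.driverInfo.Config.Framework, o, item)
+	cleanup, err := config.Framework.CreateFromManifests(func(item interface{}) error {
+		return utils.PatchCSIDeployment(config.Framework, o, item)
 	}, h.manifests...)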
-	h.cleanup = cleanup
 	if err != nil {
 		framework.Failf("deploying %s driver: %v", h.driverInfo.Name, err)
 	}
-}
 
-func (h *hostpathCSIDriver) CleanupDriver() {
-	if h.cleanup != nil {
+	return config, func() {
 		By(fmt.Sprintf("uninstalling %s driver", h.driverInfo.Name))
-		h.cleanup()
+		cleanup()
+		cancelLogging()
 	}
 }
 
 // mockCSI
 type mockCSIDriver struct {
-	cleanup        func()
 	driverInfo     testsuites.DriverInfo
 	manifests      []string
 	podInfoVersion *string
+	attachable     bool
 }
 
 var _ testsuites.TestDriver = &mockCSIDriver{}
 var _ testsuites.DynamicPVTestDriver = &mockCSIDriver{}
 
 // InitMockCSIDriver returns a mockCSIDriver that implements TestDriver interface
-func InitMockCSIDriver(config testsuites.TestConfig, registerDriver, driverAttachable bool, podInfoVersion *string) testsuites.TestDriver {
+func InitMockCSIDriver(registerDriver, driverAttachable bool, podInfoVersion *string) testsuites.TestDriver {
 	driverManifests := []string{
 		"test/e2e/testing-manifests/storage-csi/cluster-driver-registrar/rbac.yaml",
 		"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
@@ -187,16 +188,12 @@ func InitMockCSIDriver(config testsuites.TestConfig, registerDriver, driverAttac
 		"test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml",
 	}
 
-	config.ServerConfig = &framework.VolumeTestConfig{}
-
 	if registerDriver {
 		driverManifests = append(driverManifests, "test/e2e/testing-manifests/storage-csi/mock/csi-mock-cluster-driver-registrar.yaml")
 	}
 
 	if driverAttachable {
 		driverManifests = append(driverManifests, "test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-attacher.yaml")
-	} else {
-		config.ServerConfig.ServerArgs = append(config.ServerConfig.ServerArgs, "--disable-attach")
 	}
 
 	return &mockCSIDriver{
@@ -212,10 +209,10 @@ func InitMockCSIDriver(config testsuites.TestConfig, registerDriver, driverAttac
 				testsuites.CapFsGroup: false,
 				testsuites.CapExec:    false,
 			},
-			Config: config,
 		},
 		manifests:      driverManifests,
 		podInfoVersion: podInfoVersion,
+		attachable:     driverAttachable,
 	}
 }
 
@@ -226,10 +223,10 @@ func (m *mockCSIDriver) GetDriverInfo() *testsuites.DriverInfo {
 func (m *mockCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
 }
 
-func (m *mockCSIDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
-	provisioner := testsuites.GetUniqueDriverName(m)
+func (m *mockCSIDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
+	provisioner := config.GetUniqueDriverName()
 	parameters := map[string]string{}
-	ns := m.driverInfo.Config.Framework.Namespace.Name
+	ns := config.Framework.Namespace.Name
 	suffix := fmt.Sprintf("%s-sc", provisioner)
 
 	return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
@@ -239,20 +236,24 @@ func (m *mockCSIDriver) GetClaimSize() string {
 	return "5Gi"
 }
 
-func (m *mockCSIDriver) CreateDriver() {
+func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
 	By("deploying csi mock driver")
-	f := m.driverInfo.Config.Framework
+	cancelLogging := testsuites.StartPodLogs(f)
 	cs := f.ClientSet
 
 	// pods should be scheduled on the node
 	nodes := framework.GetReadySchedulableNodesOrDie(cs)
 	node := nodes.Items[rand.Intn(len(nodes.Items))]
-	m.driverInfo.Config.ClientNodeName = node.Name
+	config := &testsuites.PerTestConfig{
+		Driver:         m,
+		Prefix:         "mock",
+		Framework:      f,
+		ClientNodeName: node.Name,
	}
 
 	containerArgs := []string{"--name=csi-mock-" + f.UniqueName}
-
-	if m.driverInfo.Config.ServerConfig != nil && m.driverInfo.Config.ServerConfig.ServerArgs != nil {
-		containerArgs = append(containerArgs, m.driverInfo.Config.ServerConfig.ServerArgs...)
+	if !m.attachable {
+		containerArgs = append(containerArgs, "--disable-attach")
 	}
 
 	// TODO (?): the storage.csi.image.version and storage.csi.image.registry
@@ -264,29 +265,27 @@ func (m *mockCSIDriver) CreateDriver() {
 		DriverContainerArguments:      containerArgs,
 		ProvisionerContainerName:      "csi-provisioner",
 		ClusterRegistrarContainerName: "csi-cluster-driver-registrar",
-		NodeName:                      m.driverInfo.Config.ClientNodeName,
+		NodeName:                      config.ClientNodeName,
 		PodInfoVersion:                m.podInfoVersion,
 	}
 	cleanup, err := f.CreateFromManifests(func(item interface{}) error {
 		return utils.PatchCSIDeployment(f, o, item)
 	}, m.manifests...)
-	m.cleanup = cleanup
 	if err != nil {
 		framework.Failf("deploying csi mock driver: %v", err)
 	}
-}
 
-func (m *mockCSIDriver) CleanupDriver() {
-	if m.cleanup != nil {
+	return config, func() {
 		By("uninstalling csi mock driver")
-		m.cleanup()
+		cleanup()
+		cancelLogging()
 	}
 }
 
 // InitHostPathV0CSIDriver returns a variant of hostpathCSIDriver with different manifests.
-func InitHostPathV0CSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
-	return initHostPathCSIDriver("csi-hostpath-v0", config,
+func InitHostPathV0CSIDriver() testsuites.TestDriver {
+	return initHostPathCSIDriver("csi-hostpath-v0",
 		map[testsuites.Capability]bool{testsuites.CapPersistence: true, testsuites.CapMultiPODs: true},
 		"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
 		"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
@@ -300,16 +299,17 @@ func InitHostPathV0CSIDriver(config testsuites.TestConfig) testsuites.TestDriver
 
 // gce-pd
 type gcePDCSIDriver struct {
-	cleanup    func()
-	driverInfo testsuites.DriverInfo
+	topologyEnabled bool
+	driverInfo      testsuites.DriverInfo
 }
 
 var _ testsuites.TestDriver = &gcePDCSIDriver{}
 var _ testsuites.DynamicPVTestDriver = &gcePDCSIDriver{}
 
 // InitGcePDCSIDriver returns gcePDCSIDriver that implements TestDriver interface
-func InitGcePDCSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
+func InitGcePDCSIDriver(topologyEnabled bool) testsuites.TestDriver {
 	return &gcePDCSIDriver{
+		topologyEnabled: topologyEnabled,
 		driverInfo: testsuites.DriverInfo{
 			Name:       GCEPDCSIProvisionerName,
 			FeatureTag: "[Serial]",
@@ -327,8 +327,6 @@ func InitGcePDCSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
 				testsuites.CapExec:      true,
 				testsuites.CapMultiPODs: true,
 			},
-
-			Config: config,
 		},
 	}
 }
@@ -338,21 +336,14 @@ func (g *gcePDCSIDriver) GetDriverInfo() *testsuites.DriverInfo {
 }
 
 func (g *gcePDCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
-	f := g.driverInfo.Config.Framework
 	framework.SkipUnlessProviderIs("gce", "gke")
-	if !g.driverInfo.Config.TopologyEnabled {
-		// Topology is disabled in external-provisioner, so in a multizone cluster, a pod could be
-		// scheduled in a different zone from the provisioned volume, causing basic provisioning
-		// tests to fail.
-		framework.SkipIfMultizone(f.ClientSet)
-	}
 	if pattern.FsType == "xfs" {
 		framework.SkipUnlessNodeOSDistroIs("ubuntu", "custom")
 	}
 }
 
-func (g *gcePDCSIDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
-	ns := g.driverInfo.Config.Framework.Namespace.Name
+func (g *gcePDCSIDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
+	ns := config.Framework.Namespace.Name
 	provisioner := g.driverInfo.Name
 	suffix := fmt.Sprintf("%s-sc", g.driverInfo.Name)
 
@@ -368,8 +359,16 @@ func (g *gcePDCSIDriver) GetClaimSize() string {
 	return "5Gi"
 }
 
-func (g *gcePDCSIDriver) CreateDriver() {
+func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
+	if !g.topologyEnabled {
+		// Topology is disabled in external-provisioner, so in a multizone cluster, a pod could be
+		// scheduled in a different zone from the provisioned volume, causing basic provisioning
+		// tests to fail.
+		framework.SkipIfMultizone(f.ClientSet)
+	}
+
 	By("deploying csi gce-pd driver")
+	cancelLogging := testsuites.StartPodLogs(f)
 	// It would be safer to rename the gcePD driver, but that
 	// hasn't been done before either and attempts to do so now led to
 	// errors during driver registration, therefore it is disabled
@@ -382,7 +381,7 @@ func (g *gcePDCSIDriver) CreateDriver() {
 	//	DriverContainerName:      "gce-driver",
 	//	ProvisionerContainerName: "csi-external-provisioner",
 	// }
-	createGCESecrets(g.driverInfo.Config.Framework.ClientSet, g.driverInfo.Config.Framework.Namespace.Name)
+	createGCESecrets(f.ClientSet, f.Namespace.Name)
 
 	manifests := []string{
 		"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
@@ -392,23 +391,25 @@ func (g *gcePDCSIDriver) CreateDriver() {
 		"test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml",
 	}
 
-	if g.driverInfo.Config.TopologyEnabled {
+	if g.topologyEnabled {
 		manifests = append(manifests, "test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss_alpha.yaml")
 	} else {
 		manifests = append(manifests, "test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml")
 	}
-	cleanup, err := g.driverInfo.Config.Framework.CreateFromManifests(nil, manifests...)
-	g.cleanup = cleanup
+	cleanup, err := f.CreateFromManifests(nil, manifests...)
 	if err != nil {
 		framework.Failf("deploying csi gce-pd driver: %v", err)
 	}
-}
 
-func (g *gcePDCSIDriver) CleanupDriver() {
-	By("uninstalling gce-pd driver")
-	if g.cleanup != nil {
-		g.cleanup()
-	}
+	return &testsuites.PerTestConfig{
+		Driver:    g,
+		Prefix:    "gcepd",
+		Framework: f,
+	}, func() {
+		By("uninstalling gce-pd driver")
+		cleanup()
+		cancelLogging()
+	}
 }
 
 // gcePd-external
@@ -420,7 +421,7 @@ var _ testsuites.TestDriver = &gcePDExternalCSIDriver{}
 var _ testsuites.DynamicPVTestDriver = &gcePDExternalCSIDriver{}
 
 // InitGcePDExternalCSIDriver returns gcePDExternalCSIDriver that implements TestDriver interface
-func InitGcePDExternalCSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
+func InitGcePDExternalCSIDriver() testsuites.TestDriver {
 	return &gcePDExternalCSIDriver{
 		driverInfo: testsuites.DriverInfo{
 			Name: GCEPDCSIProvisionerName,
@@ -440,8 +441,6 @@ func InitGcePDExternalCSIDriver(config testsuites.TestConfig) testsuites.TestDri
 				testsuites.CapExec:      true,
 				testsuites.CapMultiPODs: true,
 			},
-
-			Config: config,
 		},
 	}
 }
@@ -452,14 +451,13 @@ func (g *gcePDExternalCSIDriver) GetDriverInfo() *testsuites.DriverInfo {
 
 func (g *gcePDExternalCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
 	framework.SkipUnlessProviderIs("gce", "gke")
-	framework.SkipIfMultizone(g.driverInfo.Config.Framework.ClientSet)
 	if pattern.FsType == "xfs" {
 		framework.SkipUnlessNodeOSDistroIs("ubuntu", "custom")
 	}
 }
 
-func (g *gcePDExternalCSIDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
-	ns := g.driverInfo.Config.Framework.Namespace.Name
+func (g *gcePDExternalCSIDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
+	ns := config.Framework.Namespace.Name
 	provisioner := g.driverInfo.Name
 	suffix := fmt.Sprintf("%s-sc", g.driverInfo.Name)
 
@@ -475,8 +473,12 @@ func (g *gcePDExternalCSIDriver) GetClaimSize() string {
 	return "5Gi"
 }
 
-func (g *gcePDExternalCSIDriver) CreateDriver() {
-}
+func (g *gcePDExternalCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
+	framework.SkipIfMultizone(f.ClientSet)
 
-func (g *gcePDExternalCSIDriver) CleanupDriver() {
+	return &testsuites.PerTestConfig{
+		Driver:    g,
+		Prefix:    "gcepdext",
+		Framework: f,
+	}, func() {}
 }
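Note: `PerTestConfig` replaces the old `DriverInfo.Config` field throughout `drivers/csi.go` above and `drivers/in_tree.go` below. Its shape, as far as this diff shows it — the real struct lives in the `testsuites` package and may carry more fields:

```go
// Inferred from the struct literals in this diff, not copied from the source.
config := &testsuites.PerTestConfig{
	Driver:         driver,   // the testsuites.TestDriver being exercised
	Prefix:         "gcepd",  // short name used for generated objects
	Framework:      f,        // per-test framework (namespace, clients, ...)
	ClientNodeName: nodeName, // optional: pin test pods to one node
}

// Used instead of testsuites.GetUniqueDriverName(driver) for provisioner and
// StorageClass names, so parallel tests in different namespaces do not collide.
provisioner := config.GetUniqueDriverName()
_ = provisioner
```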
diff --git a/test/e2e/storage/drivers/in_tree.go b/test/e2e/storage/drivers/in_tree.go
index 20187d78e08..1be2ef18088 100644
--- a/test/e2e/storage/drivers/in_tree.go
+++ b/test/e2e/storage/drivers/in_tree.go
@@ -69,9 +69,10 @@ type nfsDriver struct {
 	driverInfo testsuites.DriverInfo
 }
 
-type nfsTestResource struct {
+type nfsVolume struct {
 	serverIP  string
 	serverPod *v1.Pod
+	f         *framework.Framework
 }
 
 var _ testsuites.TestDriver = &nfsDriver{}
@@ -81,7 +82,7 @@ var _ testsuites.PreprovisionedPVTestDriver = &nfsDriver{}
 var _ testsuites.DynamicPVTestDriver = &nfsDriver{}
 
 // InitNFSDriver returns nfsDriver that implements TestDriver interface
-func InitNFSDriver(config testsuites.TestConfig) testsuites.TestDriver {
+func InitNFSDriver() testsuites.TestDriver {
 	return &nfsDriver{
 		driverInfo: testsuites.DriverInfo{
 			Name: "nfs",
@@ -95,8 +96,6 @@ func InitNFSDriver(config testsuites.TestConfig) testsuites.TestDriver {
 				testsuites.CapPersistence: true,
 				testsuites.CapExec:        true,
 			},
-
-			Config: config,
 		},
 	}
 }
@@ -108,34 +107,34 @@ func (n *nfsDriver) GetDriverInfo() *testsuites.DriverInfo {
 func (n *nfsDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
 }
 
-func (n *nfsDriver) GetVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.VolumeSource {
-	ntr, ok := testResource.(*nfsTestResource)
-	Expect(ok).To(BeTrue(), "Failed to cast test resource to NFS Test Resource")
+func (n *nfsDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
+	nv, ok := volume.(*nfsVolume)
+	Expect(ok).To(BeTrue(), "Failed to cast test volume to NFS test volume")
 	return &v1.VolumeSource{
 		NFS: &v1.NFSVolumeSource{
-			Server:   ntr.serverIP,
+			Server:   nv.serverIP,
 			Path:     "/",
 			ReadOnly: readOnly,
 		},
 	}
 }
 
-func (n *nfsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
-	ntr, ok := testResource.(*nfsTestResource)
-	Expect(ok).To(BeTrue(), "Failed to cast test resource to NFS Test Resource")
+func (n *nfsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
+	nv, ok := volume.(*nfsVolume)
+	Expect(ok).To(BeTrue(), "Failed to cast test volume to NFS test volume")
 	return &v1.PersistentVolumeSource{
 		NFS: &v1.NFSVolumeSource{
-			Server:   ntr.serverIP,
+			Server:   nv.serverIP,
 			Path:     "/",
 			ReadOnly: readOnly,
 		},
 	}, nil
 }
 
-func (n *nfsDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
+func (n *nfsDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
 	provisioner := n.externalPluginName
 	parameters := map[string]string{"mountOptions": "vers=4.1"}
-	ns := n.driverInfo.Config.Framework.Namespace.Name
+	ns := config.Framework.Namespace.Name
 	suffix := fmt.Sprintf("%s-sc", n.driverInfo.Name)
 
 	return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
@@ -145,8 +144,7 @@ func (n *nfsDriver) GetClaimSize() string {
 	return "5Gi"
 }
 
-func (n *nfsDriver) CreateDriver() {
-	f := n.driverInfo.Config.Framework
+func (n *nfsDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
 	cs := f.ClientSet
 	ns := f.Namespace
 	n.externalPluginName = fmt.Sprintf("example.com/nfs-%s", ns.Name)
@@ -163,35 +161,36 @@ func (n *nfsDriver) CreateDriver() {
 
 	By("creating an external dynamic provisioner pod")
 	n.externalProvisionerPod = utils.StartExternalProvisioner(cs, ns.Name, n.externalPluginName)
+
+	return &testsuites.PerTestConfig{
+		Driver:    n,
+		Prefix:    "nfs",
+		Framework: f,
+	}, func() {
+		framework.ExpectNoError(framework.DeletePodWithWait(f, cs, n.externalProvisionerPod))
+		clusterRoleBindingName := ns.Name + "--" + "cluster-admin"
+		cs.RbacV1beta1().ClusterRoleBindings().Delete(clusterRoleBindingName, metav1.NewDeleteOptions(0))
+	}
 }
 
-func (n *nfsDriver) CleanupDriver() {
-	f := n.driverInfo.Config.Framework
-	cs := f.ClientSet
-	ns := f.Namespace
-
-	framework.ExpectNoError(framework.DeletePodWithWait(f, cs, n.externalProvisionerPod))
-	clusterRoleBindingName := ns.Name + "--" + "cluster-admin"
-	cs.RbacV1beta1().ClusterRoleBindings().Delete(clusterRoleBindingName, metav1.NewDeleteOptions(0))
-}
-
-func (n *nfsDriver) CreateVolume(volType testpatterns.TestVolType) interface{} {
-	f := n.driverInfo.Config.Framework
+func (n *nfsDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
+	f := config.Framework
 	cs := f.ClientSet
 	ns := f.Namespace
 
 	// NewNFSServer creates a pod for InlineVolume and PreprovisionedPV,
 	// and startExternalProvisioner creates a pods for DynamicPV.
-	// Therefore, we need a different CreateDriver logic for volType.
+	// Therefore, we need a different PrepareTest logic for volType.
 	switch volType {
 	case testpatterns.InlineVolume:
 		fallthrough
 	case testpatterns.PreprovisionedPV:
-		config, serverPod, serverIP := framework.NewNFSServer(cs, ns.Name, []string{})
-		n.driverInfo.Config.ServerConfig = &config
-		return &nfsTestResource{
+		c, serverPod, serverIP := framework.NewNFSServer(cs, ns.Name, []string{})
+		config.ServerConfig = &c
+		return &nfsVolume{
 			serverIP:  serverIP,
 			serverPod: serverPod,
+			f:         f,
 		}
 	case testpatterns.DynamicPV:
 		// Do nothing
@@ -201,22 +200,8 @@ func (n *nfsDriver) CreateVolume(volType testpatterns.TestVolType) interface{} {
 	return nil
 }
 
-func (n *nfsDriver) DeleteVolume(volType testpatterns.TestVolType, testResource interface{}) {
-	f := n.driverInfo.Config.Framework
-
-	ntr, ok := testResource.(*nfsTestResource)
-	Expect(ok).To(BeTrue(), "Failed to cast test resource to NFS Test Resource")
-
-	switch volType {
-	case testpatterns.InlineVolume:
-		fallthrough
-	case testpatterns.PreprovisionedPV:
-		framework.CleanUpVolumeServer(f, ntr.serverPod)
-	case testpatterns.DynamicPV:
-		// Do nothing
-	default:
-		framework.Failf("Unsupported volType:%v is specified", volType)
-	}
+func (v *nfsVolume) DeleteVolume() {
+	framework.CleanUpVolumeServer(v.f, v.serverPod)
 }
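Note: the same refactoring repeats for every in-tree driver below — the `*TestResource` structs become `*Volume` types that carry their own `*framework.Framework`, and cleanup moves from `driver.DeleteVolume(volType, testResource)` to a `DeleteVolume` method on the returned volume. A minimal sketch of the resulting call pattern in a suite; interface names are from this diff, the surrounding code is hypothetical:

```go
// CreateVolume now takes the per-test config and returns a self-cleaning
// testsuites.TestVolume instead of an opaque interface{}.
if pDriver, ok := driver.(testsuites.PreprovisionedVolumeTestDriver); ok {
	volume := pDriver.CreateVolume(config, testpatterns.PreprovisionedPV)
	if volume != nil {
		defer volume.DeleteVolume() // no volType or type assertion needed
	}
	source := pDriver.GetVolumeSource(false /* readOnly */, "" /* fsType */, volume)
	_ = source // mount it in a test pod, etc.
}
```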
- // Therefore, we need a different CreateDriver logic for volType. + // Therefore, we need a different PrepareTest logic for volType. switch volType { case testpatterns.InlineVolume: fallthrough case testpatterns.PreprovisionedPV: - config, serverPod, serverIP := framework.NewNFSServer(cs, ns.Name, []string{}) - n.driverInfo.Config.ServerConfig = &config - return &nfsTestResource{ + c, serverPod, serverIP := framework.NewNFSServer(cs, ns.Name, []string{}) + config.ServerConfig = &c + return &nfsVolume{ serverIP: serverIP, serverPod: serverPod, + f: f, } case testpatterns.DynamicPV: // Do nothing @@ -201,22 +200,8 @@ func (n *nfsDriver) CreateVolume(volType testpatterns.TestVolType) interface{} { return nil } -func (n *nfsDriver) DeleteVolume(volType testpatterns.TestVolType, testResource interface{}) { - f := n.driverInfo.Config.Framework - - ntr, ok := testResource.(*nfsTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to NFS Test Resource") - - switch volType { - case testpatterns.InlineVolume: - fallthrough - case testpatterns.PreprovisionedPV: - framework.CleanUpVolumeServer(f, ntr.serverPod) - case testpatterns.DynamicPV: - // Do nothing - default: - framework.Failf("Unsupported volType:%v is specified", volType) - } +func (v *nfsVolume) DeleteVolume() { + framework.CleanUpVolumeServer(v.f, v.serverPod) } // Gluster @@ -224,9 +209,10 @@ type glusterFSDriver struct { driverInfo testsuites.DriverInfo } -type glusterTestResource struct { +type glusterVolume struct { prefix string serverPod *v1.Pod + f *framework.Framework } var _ testsuites.TestDriver = &glusterFSDriver{} @@ -235,7 +221,7 @@ var _ testsuites.InlineVolumeTestDriver = &glusterFSDriver{} var _ testsuites.PreprovisionedPVTestDriver = &glusterFSDriver{} // InitGlusterFSDriver returns glusterFSDriver that implements TestDriver interface -func InitGlusterFSDriver(config testsuites.TestConfig) testsuites.TestDriver { +func InitGlusterFSDriver() testsuites.TestDriver { return &glusterFSDriver{ driverInfo: testsuites.DriverInfo{ Name: "gluster", @@ -247,8 +233,6 @@ func InitGlusterFSDriver(config testsuites.TestConfig) testsuites.TestDriver { testsuites.CapPersistence: true, testsuites.CapExec: true, }, - - Config: config, }, } } @@ -261,11 +245,11 @@ func (g *glusterFSDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) framework.SkipUnlessNodeOSDistroIs("gci", "ubuntu", "custom") } -func (g *glusterFSDriver) GetVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.VolumeSource { - gtr, ok := testResource.(*glusterTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to Gluster Test Resource") +func (g *glusterFSDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { + gv, ok := volume.(*glusterVolume) + Expect(ok).To(BeTrue(), "Failed to cast test volume to Gluster test volume") - name := gtr.prefix + "-server" + name := gv.prefix + "-server" return &v1.VolumeSource{ Glusterfs: &v1.GlusterfsVolumeSource{ EndpointsName: name, @@ -276,11 +260,11 @@ func (g *glusterFSDriver) GetVolumeSource(readOnly bool, fsType string, testReso } } -func (g *glusterFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { - gtr, ok := testResource.(*glusterTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to Gluster Test Resource") +func (g *glusterFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume 
testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { + gv, ok := volume.(*glusterVolume) + Expect(ok).To(BeTrue(), "Failed to cast test volume to Gluster test volume") - name := gtr.prefix + "-server" + name := gv.prefix + "-server" return &v1.PersistentVolumeSource{ Glusterfs: &v1.GlusterfsPersistentVolumeSource{ EndpointsName: name, @@ -291,34 +275,34 @@ func (g *glusterFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string }, nil } -func (g *glusterFSDriver) CreateDriver() { +func (g *glusterFSDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { + return &testsuites.PerTestConfig{ + Driver: g, + Prefix: "gluster", + Framework: f, + }, func() {} } -func (g *glusterFSDriver) CleanupDriver() { -} - -func (g *glusterFSDriver) CreateVolume(volType testpatterns.TestVolType) interface{} { - f := g.driverInfo.Config.Framework +func (g *glusterFSDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume { + f := config.Framework cs := f.ClientSet ns := f.Namespace - config, serverPod, _ := framework.NewGlusterfsServer(cs, ns.Name) - g.driverInfo.Config.ServerConfig = &config - return &glusterTestResource{ + c, serverPod, _ := framework.NewGlusterfsServer(cs, ns.Name) + config.ServerConfig = &c + return &glusterVolume{ prefix: config.Prefix, serverPod: serverPod, + f: f, } } -func (g *glusterFSDriver) DeleteVolume(volType testpatterns.TestVolType, testResource interface{}) { - f := g.driverInfo.Config.Framework +func (v *glusterVolume) DeleteVolume() { + f := v.f cs := f.ClientSet ns := f.Namespace - gtr, ok := testResource.(*glusterTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to Gluster Test Resource") - - name := gtr.prefix + "-server" + name := v.prefix + "-server" framework.Logf("Deleting Gluster endpoints %q...", name) err := cs.CoreV1().Endpoints(ns.Name).Delete(name, nil) @@ -328,8 +312,8 @@ func (g *glusterFSDriver) DeleteVolume(volType testpatterns.TestVolType, testRes } framework.Logf("Gluster endpoints %q not found, assuming deleted", name) } - framework.Logf("Deleting Gluster server pod %q...", gtr.serverPod.Name) - err = framework.DeletePodWithWait(f, cs, gtr.serverPod) + framework.Logf("Deleting Gluster server pod %q...", v.serverPod.Name) + err = framework.DeletePodWithWait(f, cs, v.serverPod) if err != nil { framework.Failf("Gluster server pod delete failed: %v", err) } @@ -340,9 +324,10 @@ func (g *glusterFSDriver) DeleteVolume(volType testpatterns.TestVolType, testRes type iSCSIDriver struct { driverInfo testsuites.DriverInfo } -type iSCSITestResource struct { +type iSCSIVolume struct { serverPod *v1.Pod serverIP string + f *framework.Framework } var _ testsuites.TestDriver = &iSCSIDriver{} @@ -351,7 +336,7 @@ var _ testsuites.InlineVolumeTestDriver = &iSCSIDriver{} var _ testsuites.PreprovisionedPVTestDriver = &iSCSIDriver{} // InitISCSIDriver returns iSCSIDriver that implements TestDriver interface -func InitISCSIDriver(config testsuites.TestConfig) testsuites.TestDriver { +func InitISCSIDriver() testsuites.TestDriver { return &iSCSIDriver{ driverInfo: testsuites.DriverInfo{ Name: "iscsi", @@ -370,8 +355,6 @@ func InitISCSIDriver(config testsuites.TestConfig) testsuites.TestDriver { testsuites.CapBlock: true, testsuites.CapExec: true, }, - - Config: config, }, } } @@ -383,13 +366,13 @@ func (i *iSCSIDriver) GetDriverInfo() *testsuites.DriverInfo { func (i *iSCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { } -func (i 
*iSCSIDriver) GetVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.VolumeSource { - itr, ok := testResource.(*iSCSITestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to iSCSI Test Resource") +func (i *iSCSIDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { + iv, ok := volume.(*iSCSIVolume) + Expect(ok).To(BeTrue(), "Failed to cast test volume to iSCSI test volume") volSource := v1.VolumeSource{ ISCSI: &v1.ISCSIVolumeSource{ - TargetPortal: itr.serverIP + ":3260", + TargetPortal: iv.serverIP + ":3260", // from test/images/volume/iscsi/initiatorname.iscsi IQN: "iqn.2003-01.org.linux-iscsi.f21.x8664:sn.4b0aae584f7c", Lun: 0, @@ -402,13 +385,13 @@ func (i *iSCSIDriver) GetVolumeSource(readOnly bool, fsType string, testResource return &volSource } -func (i *iSCSIDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { - itr, ok := testResource.(*iSCSITestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to iSCSI Test Resource") +func (i *iSCSIDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { + iv, ok := volume.(*iSCSIVolume) + Expect(ok).To(BeTrue(), "Failed to cast test volume to iSCSI test volume") pvSource := v1.PersistentVolumeSource{ ISCSI: &v1.ISCSIPersistentVolumeSource{ - TargetPortal: itr.serverIP + ":3260", + TargetPortal: iv.serverIP + ":3260", IQN: "iqn.2003-01.org.linux-iscsi.f21.x8664:sn.4b0aae584f7c", Lun: 0, ReadOnly: readOnly, @@ -420,32 +403,30 @@ func (i *iSCSIDriver) GetPersistentVolumeSource(readOnly bool, fsType string, te return &pvSource, nil } -func (i *iSCSIDriver) CreateDriver() { +func (i *iSCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { + return &testsuites.PerTestConfig{ + Driver: i, + Prefix: "iscsi", + Framework: f, + }, func() {} } -func (i *iSCSIDriver) CleanupDriver() { -} - -func (i *iSCSIDriver) CreateVolume(volType testpatterns.TestVolType) interface{} { - f := i.driverInfo.Config.Framework +func (i *iSCSIDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume { + f := config.Framework cs := f.ClientSet ns := f.Namespace - config, serverPod, serverIP := framework.NewISCSIServer(cs, ns.Name) - i.driverInfo.Config.ServerConfig = &config - return &iSCSITestResource{ + c, serverPod, serverIP := framework.NewISCSIServer(cs, ns.Name) + config.ServerConfig = &c + return &iSCSIVolume{ serverPod: serverPod, serverIP: serverIP, + f: f, } } -func (i *iSCSIDriver) DeleteVolume(volType testpatterns.TestVolType, testResource interface{}) { - f := i.driverInfo.Config.Framework - - itr, ok := testResource.(*iSCSITestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to iSCSI Test Resource") - - framework.CleanUpVolumeServer(f, itr.serverPod) +func (v *iSCSIVolume) DeleteVolume() { + framework.CleanUpVolumeServer(v.f, v.serverPod) } // Ceph RBD @@ -453,10 +434,11 @@ type rbdDriver struct { driverInfo testsuites.DriverInfo } -type rbdTestResource struct { +type rbdVolume struct { serverPod *v1.Pod serverIP string secret *v1.Secret + f *framework.Framework } var _ testsuites.TestDriver = &rbdDriver{} @@ -465,7 +447,7 @@ var _ testsuites.InlineVolumeTestDriver = &rbdDriver{} var _ testsuites.PreprovisionedPVTestDriver = &rbdDriver{} // InitRbdDriver returns rbdDriver that 
implements TestDriver interface -func InitRbdDriver(config testsuites.TestConfig) testsuites.TestDriver { +func InitRbdDriver() testsuites.TestDriver { return &rbdDriver{ driverInfo: testsuites.DriverInfo{ Name: "rbd", @@ -484,8 +466,6 @@ func InitRbdDriver(config testsuites.TestConfig) testsuites.TestDriver { testsuites.CapBlock: true, testsuites.CapExec: true, }, - - Config: config, }, } } @@ -497,18 +477,18 @@ func (r *rbdDriver) GetDriverInfo() *testsuites.DriverInfo { func (r *rbdDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { } -func (r *rbdDriver) GetVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.VolumeSource { - rtr, ok := testResource.(*rbdTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to RBD Test Resource") +func (r *rbdDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { + rv, ok := volume.(*rbdVolume) + Expect(ok).To(BeTrue(), "Failed to cast test volume to RBD test volume") volSource := v1.VolumeSource{ RBD: &v1.RBDVolumeSource{ - CephMonitors: []string{rtr.serverIP}, + CephMonitors: []string{rv.serverIP}, RBDPool: "rbd", RBDImage: "foo", RadosUser: "admin", SecretRef: &v1.LocalObjectReference{ - Name: rtr.secret.Name, + Name: rv.secret.Name, }, ReadOnly: readOnly, }, @@ -519,21 +499,21 @@ func (r *rbdDriver) GetVolumeSource(readOnly bool, fsType string, testResource i return &volSource } -func (r *rbdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { - f := r.driverInfo.Config.Framework - ns := f.Namespace +func (r *rbdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { + rv, ok := volume.(*rbdVolume) + Expect(ok).To(BeTrue(), "Failed to cast test volume to RBD test volume") - rtr, ok := testResource.(*rbdTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to RBD Test Resource") + f := rv.f + ns := f.Namespace pvSource := v1.PersistentVolumeSource{ RBD: &v1.RBDPersistentVolumeSource{ - CephMonitors: []string{rtr.serverIP}, + CephMonitors: []string{rv.serverIP}, RBDPool: "rbd", RBDImage: "foo", RadosUser: "admin", SecretRef: &v1.SecretReference{ - Name: rtr.secret.Name, + Name: rv.secret.Name, Namespace: ns.Name, }, ReadOnly: readOnly, @@ -545,33 +525,31 @@ func (r *rbdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, test return &pvSource, nil } -func (r *rbdDriver) CreateDriver() { +func (r *rbdDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { + return &testsuites.PerTestConfig{ + Driver: r, + Prefix: "rbd", + Framework: f, + }, func() {} } -func (r *rbdDriver) CleanupDriver() { -} - -func (r *rbdDriver) CreateVolume(volType testpatterns.TestVolType) interface{} { - f := r.driverInfo.Config.Framework +func (r *rbdDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume { + f := config.Framework cs := f.ClientSet ns := f.Namespace - config, serverPod, secret, serverIP := framework.NewRBDServer(cs, ns.Name) - r.driverInfo.Config.ServerConfig = &config - return &rbdTestResource{ + c, serverPod, secret, serverIP := framework.NewRBDServer(cs, ns.Name) + config.ServerConfig = &c + return &rbdVolume{ serverPod: serverPod, serverIP: serverIP, secret: secret, + f: f, } } -func (r *rbdDriver) DeleteVolume(volType testpatterns.TestVolType, testResource interface{}) { - 
f := r.driverInfo.Config.Framework - - rtr, ok := testResource.(*rbdTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to RBD Test Resource") - - framework.CleanUpVolumeServerWithSecret(f, rtr.serverPod, rtr.secret) +func (v *rbdVolume) DeleteVolume() { + framework.CleanUpVolumeServerWithSecret(v.f, v.serverPod, v.secret) } // Ceph @@ -583,10 +561,11 @@ type cephFSDriver struct { driverInfo testsuites.DriverInfo } -type cephTestResource struct { +type cephVolume struct { serverPod *v1.Pod serverIP string secret *v1.Secret + f *framework.Framework } var _ testsuites.TestDriver = &cephFSDriver{} @@ -595,7 +574,7 @@ var _ testsuites.InlineVolumeTestDriver = &cephFSDriver{} var _ testsuites.PreprovisionedPVTestDriver = &cephFSDriver{} // InitCephFSDriver returns cephFSDriver that implements TestDriver interface -func InitCephFSDriver(config testsuites.TestConfig) testsuites.TestDriver { +func InitCephFSDriver() testsuites.TestDriver { return &cephFSDriver{ driverInfo: testsuites.DriverInfo{ Name: "ceph", @@ -608,8 +587,6 @@ func InitCephFSDriver(config testsuites.TestConfig) testsuites.TestDriver { testsuites.CapPersistence: true, testsuites.CapExec: true, }, - - Config: config, }, } } @@ -621,35 +598,34 @@ func (c *cephFSDriver) GetDriverInfo() *testsuites.DriverInfo { func (c *cephFSDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { } -func (c *cephFSDriver) GetVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.VolumeSource { - ctr, ok := testResource.(*cephTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to Ceph Test Resource") +func (c *cephFSDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { + cv, ok := volume.(*cephVolume) + Expect(ok).To(BeTrue(), "Failed to cast test volume to Ceph test volume") return &v1.VolumeSource{ CephFS: &v1.CephFSVolumeSource{ - Monitors: []string{ctr.serverIP + ":6789"}, + Monitors: []string{cv.serverIP + ":6789"}, User: "kube", SecretRef: &v1.LocalObjectReference{ - Name: ctr.secret.Name, + Name: cv.secret.Name, }, ReadOnly: readOnly, }, } } -func (c *cephFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { - f := c.driverInfo.Config.Framework - ns := f.Namespace +func (c *cephFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { + cv, ok := volume.(*cephVolume) + Expect(ok).To(BeTrue(), "Failed to cast test volume to Ceph test volume") - ctr, ok := testResource.(*cephTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to Ceph Test Resource") + ns := cv.f.Namespace return &v1.PersistentVolumeSource{ CephFS: &v1.CephFSPersistentVolumeSource{ - Monitors: []string{ctr.serverIP + ":6789"}, + Monitors: []string{cv.serverIP + ":6789"}, User: "kube", SecretRef: &v1.SecretReference{ - Name: ctr.secret.Name, + Name: cv.secret.Name, Namespace: ns.Name, }, ReadOnly: readOnly, @@ -657,33 +633,31 @@ func (c *cephFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, t }, nil } -func (c *cephFSDriver) CreateDriver() { +func (c *cephFSDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { + return &testsuites.PerTestConfig{ + Driver: c, + Prefix: "cephfs", + Framework: f, + }, func() {} } -func (c *cephFSDriver) CleanupDriver() { -} - -func (c *cephFSDriver) CreateVolume(volType testpatterns.TestVolType) 
interface{} { - f := c.driverInfo.Config.Framework +func (c *cephFSDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume { + f := config.Framework cs := f.ClientSet ns := f.Namespace - config, serverPod, secret, serverIP := framework.NewRBDServer(cs, ns.Name) - c.driverInfo.Config.ServerConfig = &config - return &cephTestResource{ + cfg, serverPod, secret, serverIP := framework.NewRBDServer(cs, ns.Name) + config.ServerConfig = &cfg + return &cephVolume{ serverPod: serverPod, serverIP: serverIP, secret: secret, + f: f, } } -func (c *cephFSDriver) DeleteVolume(volType testpatterns.TestVolType, testResource interface{}) { - f := c.driverInfo.Config.Framework - - ctr, ok := testResource.(*cephTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to Ceph Test Resource") - - framework.CleanUpVolumeServerWithSecret(f, ctr.serverPod, ctr.secret) +func (v *cephVolume) DeleteVolume() { + framework.CleanUpVolumeServerWithSecret(v.f, v.serverPod, v.secret) } // Hostpath @@ -698,7 +672,7 @@ var _ testsuites.PreprovisionedVolumeTestDriver = &hostPathDriver{} var _ testsuites.InlineVolumeTestDriver = &hostPathDriver{} // InitHostPathDriver returns hostPathDriver that implements TestDriver interface -func InitHostPathDriver(config testsuites.TestConfig) testsuites.TestDriver { +func InitHostPathDriver() testsuites.TestDriver { return &hostPathDriver{ driverInfo: testsuites.DriverInfo{ Name: "hostPath", @@ -709,8 +683,6 @@ func InitHostPathDriver(config testsuites.TestConfig) testsuites.TestDriver { Capabilities: map[testsuites.Capability]bool{ testsuites.CapPersistence: true, }, - - Config: config, }, } } @@ -722,7 +694,7 @@ func (h *hostPathDriver) GetDriverInfo() *testsuites.DriverInfo { func (h *hostPathDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { } -func (h *hostPathDriver) GetVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.VolumeSource { +func (h *hostPathDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { // hostPath doesn't support readOnly volume if readOnly { return nil @@ -734,26 +706,25 @@ func (h *hostPathDriver) GetVolumeSource(readOnly bool, fsType string, testResou } } -func (h *hostPathDriver) CreateDriver() { +func (h *hostPathDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { + return &testsuites.PerTestConfig{ + Driver: h, + Prefix: "hostpath", + Framework: f, + }, func() {} } -func (h *hostPathDriver) CleanupDriver() { -} - -func (h *hostPathDriver) CreateVolume(volType testpatterns.TestVolType) interface{} { - f := h.driverInfo.Config.Framework +func (h *hostPathDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume { + f := config.Framework cs := f.ClientSet // pods should be scheduled on the node nodes := framework.GetReadySchedulableNodesOrDie(cs) node := nodes.Items[rand.Intn(len(nodes.Items))] - h.driverInfo.Config.ClientNodeName = node.Name + config.ClientNodeName = node.Name return nil } -func (h *hostPathDriver) DeleteVolume(volType testpatterns.TestVolType, testResource interface{}) { -} - // HostPathSymlink type hostPathSymlinkDriver struct { node v1.Node @@ -761,10 +732,11 @@ type hostPathSymlinkDriver struct { driverInfo testsuites.DriverInfo } -type hostPathSymlinkTestResource struct { +type hostPathSymlinkVolume struct { targetPath string sourcePath string prepPod *v1.Pod + f *framework.Framework } var _ 
testsuites.TestDriver = &hostPathSymlinkDriver{} @@ -772,7 +744,7 @@ var _ testsuites.PreprovisionedVolumeTestDriver = &hostPathSymlinkDriver{} var _ testsuites.InlineVolumeTestDriver = &hostPathSymlinkDriver{} // InitHostPathSymlinkDriver returns hostPathSymlinkDriver that implements TestDriver interface -func InitHostPathSymlinkDriver(config testsuites.TestConfig) testsuites.TestDriver { +func InitHostPathSymlinkDriver() testsuites.TestDriver { return &hostPathSymlinkDriver{ driverInfo: testsuites.DriverInfo{ Name: "hostPathSymlink", @@ -783,8 +755,6 @@ func InitHostPathSymlinkDriver(config testsuites.TestConfig) testsuites.TestDriv Capabilities: map[testsuites.Capability]bool{ testsuites.CapPersistence: true, }, - - Config: config, }, } } @@ -796,9 +766,9 @@ func (h *hostPathSymlinkDriver) GetDriverInfo() *testsuites.DriverInfo { func (h *hostPathSymlinkDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { } -func (h *hostPathSymlinkDriver) GetVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.VolumeSource { - htr, ok := testResource.(*hostPathSymlinkTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to Hostpath Symlink Test Resource") +func (h *hostPathSymlinkDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { + hv, ok := volume.(*hostPathSymlinkVolume) + Expect(ok).To(BeTrue(), "Failed to cast test volume to Hostpath Symlink test volume") // hostPathSymlink doesn't support readOnly volume if readOnly { @@ -806,19 +776,21 @@ func (h *hostPathSymlinkDriver) GetVolumeSource(readOnly bool, fsType string, te } return &v1.VolumeSource{ HostPath: &v1.HostPathVolumeSource{ - Path: htr.targetPath, + Path: hv.targetPath, }, } } -func (h *hostPathSymlinkDriver) CreateDriver() { +func (h *hostPathSymlinkDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { + return &testsuites.PerTestConfig{ + Driver: h, + Prefix: "hostpathsymlink", + Framework: f, + }, func() {} } -func (h *hostPathSymlinkDriver) CleanupDriver() { -} - -func (h *hostPathSymlinkDriver) CreateVolume(volType testpatterns.TestVolType) interface{} { - f := h.driverInfo.Config.Framework +func (h *hostPathSymlinkDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume { + f := config.Framework cs := f.ClientSet sourcePath := fmt.Sprintf("/tmp/%v", f.Namespace.Name) @@ -828,7 +800,7 @@ func (h *hostPathSymlinkDriver) CreateVolume(volType testpatterns.TestVolType) i // pods should be scheduled on the node nodes := framework.GetReadySchedulableNodesOrDie(cs) node := nodes.Items[rand.Intn(len(nodes.Items))] - h.driverInfo.Config.ClientNodeName = node.Name + config.ClientNodeName = node.Name cmd := fmt.Sprintf("mkdir %v -m 777 && ln -s %v %v", sourcePath, sourcePath, targetPath) privileged := true @@ -878,23 +850,21 @@ func (h *hostPathSymlinkDriver) CreateVolume(volType testpatterns.TestVolType) i err = framework.DeletePodWithWait(f, f.ClientSet, pod) Expect(err).ToNot(HaveOccurred(), "while deleting hostPath init pod") - return &hostPathSymlinkTestResource{ + return &hostPathSymlinkVolume{ sourcePath: sourcePath, targetPath: targetPath, prepPod: prepPod, + f: f, } } -func (h *hostPathSymlinkDriver) DeleteVolume(volType testpatterns.TestVolType, testResource interface{}) { - f := h.driverInfo.Config.Framework +func (v *hostPathSymlinkVolume) DeleteVolume() { + f := v.f - htr, ok := testResource.(*hostPathSymlinkTestResource) - Expect(ok).To(BeTrue(), "Failed 
-	Expect(ok).To(BeTrue(), "Failed to cast test resource to Hostpath Symlink Test Resource")
+	cmd := fmt.Sprintf("rm -rf %v&& rm -rf %v", v.targetPath, v.sourcePath)
+	v.prepPod.Spec.Containers[0].Command = []string{"/bin/sh", "-ec", cmd}

-	cmd := fmt.Sprintf("rm -rf %v&& rm -rf %v", htr.targetPath, htr.sourcePath)
-	htr.prepPod.Spec.Containers[0].Command = []string{"/bin/sh", "-ec", cmd}
-
-	pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(htr.prepPod)
+	pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(v.prepPod)
 	Expect(err).ToNot(HaveOccurred(), "while creating hostPath teardown pod")

 	err = framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace)
@@ -914,7 +884,7 @@ var _ testsuites.PreprovisionedVolumeTestDriver = &emptydirDriver{}
 var _ testsuites.InlineVolumeTestDriver = &emptydirDriver{}

 // InitEmptydirDriver returns emptydirDriver that implements TestDriver interface
-func InitEmptydirDriver(config testsuites.TestConfig) testsuites.TestDriver {
+func InitEmptydirDriver() testsuites.TestDriver {
 	return &emptydirDriver{
 		driverInfo: testsuites.DriverInfo{
 			Name: "emptydir",
@@ -925,8 +895,6 @@ func InitEmptydirDriver(config testsuites.TestConfig) testsuites.TestDriver {
 			Capabilities: map[testsuites.Capability]bool{
 				testsuites.CapExec: true,
 			},
-
-			Config: config,
 		},
 	}
 }
@@ -938,7 +906,7 @@ func (e *emptydirDriver) GetDriverInfo() *testsuites.DriverInfo {
 func (e *emptydirDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
 }

-func (e *emptydirDriver) GetVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.VolumeSource {
+func (e *emptydirDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
 	// emptydir doesn't support readOnly volume
 	if readOnly {
 		return nil
@@ -948,17 +916,16 @@ func (e *emptydirDriver) GetVolumeSource(readOnly bool, fsType string, testResou
 	}
 }

-func (e *emptydirDriver) CreateVolume(volType testpatterns.TestVolType) interface{} {
+func (e *emptydirDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
 	return nil
 }

-func (e *emptydirDriver) DeleteVolume(volType testpatterns.TestVolType, testResource interface{}) {
-}
-
-func (e *emptydirDriver) CreateDriver() {
-}
-
-func (e *emptydirDriver) CleanupDriver() {
+func (e *emptydirDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
+	return &testsuites.PerTestConfig{
+		Driver:    e,
+		Prefix:    "emptydir",
+		Framework: f,
+	}, func() {}
 }
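The two drivers above establish the template that every driver in this file follows: the CreateDriver/CleanupDriver pair collapses into a single PrepareTest method that hands back a per-test configuration together with a cleanup callback. A minimal sketch of the contract, using a hypothetical fooDriver that is not part of this patch:

    // PrepareTest is called once per test. It returns the configuration the
    // test should use plus a function that releases whatever PrepareTest
    // allocated; a driver without per-test state returns a no-op cleanup.
    func (d *fooDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
    	return &testsuites.PerTestConfig{
    		Driver:    d,
    		Prefix:    "foo",
    		Framework: f,
    	}, func() {}
    }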
 // Cinder
@@ -970,7 +937,7 @@ type cinderDriver struct {
 	driverInfo testsuites.DriverInfo
 }

-type cinderTestResource struct {
+type cinderVolume struct {
 	volumeName string
 	volumeID   string
 }
@@ -982,7 +949,7 @@ var _ testsuites.PreprovisionedPVTestDriver = &cinderDriver{}
 var _ testsuites.DynamicPVTestDriver = &cinderDriver{}

 // InitCinderDriver returns cinderDriver that implements TestDriver interface
-func InitCinderDriver(config testsuites.TestConfig) testsuites.TestDriver {
+func InitCinderDriver() testsuites.TestDriver {
 	return &cinderDriver{
 		driverInfo: testsuites.DriverInfo{
 			Name: "cinder",
@@ -996,8 +963,6 @@ func InitCinderDriver(config testsuites.TestConfig) testsuites.TestDriver {
 				testsuites.CapFsGroup: true,
 				testsuites.CapExec:    true,
 			},
-
-			Config: config,
 		},
 	}
 }
@@ -1010,13 +975,13 @@ func (c *cinderDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
 	framework.SkipUnlessProviderIs("openstack")
 }

-func (c *cinderDriver) GetVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.VolumeSource {
-	ctr, ok := testResource.(*cinderTestResource)
-	Expect(ok).To(BeTrue(), "Failed to cast test resource to Cinder Test Resource")
+func (c *cinderDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
+	cv, ok := volume.(*cinderVolume)
+	Expect(ok).To(BeTrue(), "Failed to cast test volume to Cinder test volume")

 	volSource := v1.VolumeSource{
 		Cinder: &v1.CinderVolumeSource{
-			VolumeID: ctr.volumeID,
+			VolumeID: cv.volumeID,
 			ReadOnly: readOnly,
 		},
 	}
@@ -1026,13 +991,13 @@ func (c *cinderDriver) GetVolumeSource(readOnly bool, fsType string, testResourc
 	return &volSource
 }

-func (c *cinderDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
-	ctr, ok := testResource.(*cinderTestResource)
-	Expect(ok).To(BeTrue(), "Failed to cast test resource to Cinder Test Resource")
+func (c *cinderDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
+	cv, ok := volume.(*cinderVolume)
+	Expect(ok).To(BeTrue(), "Failed to cast test volume to Cinder test volume")

 	pvSource := v1.PersistentVolumeSource{
 		Cinder: &v1.CinderPersistentVolumeSource{
-			VolumeID: ctr.volumeID,
+			VolumeID: cv.volumeID,
 			ReadOnly: readOnly,
 		},
 	}
@@ -1042,13 +1007,13 @@ func (c *cinderDriver) GetPersistentVolumeSource(readOnly bool, fsType string, t
 	return &pvSource, nil
 }

-func (c *cinderDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
+func (c *cinderDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
 	provisioner := "kubernetes.io/cinder"
 	parameters := map[string]string{}
 	if fsType != "" {
 		parameters["fsType"] = fsType
 	}
-	ns := c.driverInfo.Config.Framework.Namespace.Name
+	ns := config.Framework.Namespace.Name
 	suffix := fmt.Sprintf("%s-sc", c.driverInfo.Name)

 	return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
@@ -1058,14 +1023,16 @@ func (c *cinderDriver) GetClaimSize() string {
 	return "5Gi"
 }

-func (c *cinderDriver) CreateDriver() {
+func (c *cinderDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
+	return &testsuites.PerTestConfig{
+		Driver:    c,
+		Prefix:    "cinder",
+		Framework: f,
+	}, func() {}
 }

-func (c *cinderDriver) CleanupDriver() {
-}
-
-func (c *cinderDriver) CreateVolume(volType testpatterns.TestVolType) interface{} {
-	f := c.driverInfo.Config.Framework
+func (c *cinderDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
+	f := config.Framework
 	ns := f.Namespace

 	// We assume that namespace.Name is a random string
@@ -1095,20 +1062,15 @@ func (c *cinderDriver) CreateVolume(volType testpatterns.TestVolType) interface{
 	}
 	framework.Logf("Volume ID: %s", volumeID)
 	Expect(volumeID).NotTo(Equal(""))
-	return &cinderTestResource{
+	return &cinderVolume{
 		volumeName: volumeName,
 		volumeID:   volumeID,
 	}
 }

-func (c *cinderDriver) DeleteVolume(volType testpatterns.TestVolType, testResource interface{}) {
-	ctr, ok := testResource.(*cinderTestResource)
-	Expect(ok).To(BeTrue(), "Failed to cast test resource to Cinder Test Resource")
+func (v *cinderVolume) DeleteVolume() {
+	name := v.volumeName

-	deleteCinderVolume(ctr.volumeName)
-}
-
-func deleteCinderVolume(name string) error {
 	// Try to delete the volume for several seconds - it takes
 	// a while for the plugin to detach it.
 	var output []byte
@@ -1120,12 +1082,11 @@ func deleteCinderVolume(name string) error {
 		output, err = exec.Command("cinder", "delete", name).CombinedOutput()
 		if err == nil {
 			framework.Logf("Cinder volume %s deleted", name)
-			return nil
+			return
 		}
 		framework.Logf("Failed to delete volume %s: %v", name, err)
 	}
 	framework.Logf("Giving up deleting volume %s: %v\n%s", name, err, string(output[:]))
-	return err
 }

 // GCE
@@ -1133,7 +1094,7 @@ type gcePdDriver struct {
 	driverInfo testsuites.DriverInfo
 }

-type gcePdTestResource struct {
+type gcePdVolume struct {
 	volumeName string
 }
@@ -1144,7 +1105,7 @@ var _ testsuites.PreprovisionedPVTestDriver = &gcePdDriver{}
 var _ testsuites.DynamicPVTestDriver = &gcePdDriver{}

 // InitGceDriver returns gcePdDriver that implements TestDriver interface
-func InitGcePdDriver(config testsuites.TestConfig) testsuites.TestDriver {
+func InitGcePdDriver() testsuites.TestDriver {
 	return &gcePdDriver{
 		driverInfo: testsuites.DriverInfo{
 			Name: "gcepd",
@@ -1163,8 +1124,6 @@ func InitGcePdDriver(config testsuites.TestConfig) testsuites.TestDriver {
 				testsuites.CapBlock: true,
 				testsuites.CapExec:  true,
 			},
-
-			Config: config,
 		},
 	}
 }
@@ -1177,12 +1136,12 @@ func (g *gcePdDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
 	framework.SkipUnlessProviderIs("gce", "gke")
 }

-func (g *gcePdDriver) GetVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.VolumeSource {
-	gtr, ok := testResource.(*gcePdTestResource)
-	Expect(ok).To(BeTrue(), "Failed to cast test resource to GCE PD Test Resource")
+func (g *gcePdDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
+	gv, ok := volume.(*gcePdVolume)
+	Expect(ok).To(BeTrue(), "Failed to cast test volume to GCE PD test volume")
 	volSource := v1.VolumeSource{
 		GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
-			PDName:   gtr.volumeName,
+			PDName:   gv.volumeName,
 			ReadOnly: readOnly,
 		},
 	}
@@ -1192,12 +1151,12 @@ func (g *gcePdDriver) GetVolumeSource(readOnly bool, fsType string, testResource
 	return &volSource
 }

-func (g *gcePdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
-	gtr, ok := testResource.(*gcePdTestResource)
-	Expect(ok).To(BeTrue(), "Failed to cast test resource to GCE PD Test Resource")
+func (g *gcePdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
+	gv, ok := volume.(*gcePdVolume)
+	Expect(ok).To(BeTrue(), "Failed to cast test volume to GCE PD test volume")
 	pvSource := v1.PersistentVolumeSource{
 		GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
-			PDName:   gtr.volumeName,
+			PDName:   gv.volumeName,
 			ReadOnly: readOnly,
 		},
 	}
@@ -1207,13 +1166,13 @@ func (g *gcePdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, te
 	return &pvSource, nil
 }

-func (g *gcePdDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
+func (g *gcePdDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
 	provisioner := "kubernetes.io/gce-pd"
 	parameters := map[string]string{}
 	if fsType != "" {
 		parameters["fsType"] = fsType
 	}
-	ns := g.driverInfo.Config.Framework.Namespace.Name
+	ns := config.Framework.Namespace.Name
 	suffix := fmt.Sprintf("%s-sc", g.driverInfo.Name)

 	return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
@@ -1223,32 +1182,32 @@ func (h *gcePdDriver) GetClaimSize() string {
 	return "5Gi"
 }

-func (g *gcePdDriver) CreateDriver() {
+func (g *gcePdDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
+	return &testsuites.PerTestConfig{
+		Driver:    g,
+		Prefix:    "gcepd",
+		Framework: f,
+	}, func() {}
 }

-func (g *gcePdDriver) CleanupDriver() {
-}
-
-func (g *gcePdDriver) CreateVolume(volType testpatterns.TestVolType) interface{} {
+func (g *gcePdDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
 	if volType == testpatterns.InlineVolume {
 		// PD will be created in framework.TestContext.CloudConfig.Zone zone,
 		// so pods should be also scheduled there.
-		g.driverInfo.Config.ClientNodeSelector = map[string]string{
+		config.ClientNodeSelector = map[string]string{
 			v1.LabelZoneFailureDomain: framework.TestContext.CloudConfig.Zone,
 		}
 	}
 	By("creating a test gce pd volume")
 	vname, err := framework.CreatePDWithRetry()
 	Expect(err).NotTo(HaveOccurred())
-	return &gcePdTestResource{
+	return &gcePdVolume{
 		volumeName: vname,
 	}
 }

-func (g *gcePdDriver) DeleteVolume(volType testpatterns.TestVolType, testResource interface{}) {
-	gtr, ok := testResource.(*gcePdTestResource)
-	Expect(ok).To(BeTrue(), "Failed to cast test resource to GCE PD Test Resource")
-	framework.DeletePDWithRetry(gtr.volumeName)
+func (v *gcePdVolume) DeleteVolume() {
+	framework.DeletePDWithRetry(v.volumeName)
 }

 // vSphere
@@ -1256,7 +1215,7 @@ type vSphereDriver struct {
 	driverInfo testsuites.DriverInfo
 }

-type vSphereTestResource struct {
+type vSphereVolume struct {
 	volumePath string
 	nodeInfo   *vspheretest.NodeInfo
 }
@@ -1268,7 +1227,7 @@ var _ testsuites.PreprovisionedPVTestDriver = &vSphereDriver{}
 var _ testsuites.DynamicPVTestDriver = &vSphereDriver{}

 // InitVSphereDriver returns vSphereDriver that implements TestDriver interface
-func InitVSphereDriver(config testsuites.TestConfig) testsuites.TestDriver {
+func InitVSphereDriver() testsuites.TestDriver {
 	return &vSphereDriver{
 		driverInfo: testsuites.DriverInfo{
 			Name: "vSphere",
@@ -1282,8 +1241,6 @@ func InitVSphereDriver(config testsuites.TestConfig) testsuites.TestDriver {
 				testsuites.CapFsGroup: true,
 				testsuites.CapExec:    true,
 			},
-
-			Config: config,
 		},
 	}
 }
@@ -1295,9 +1252,9 @@ func (v *vSphereDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
 	framework.SkipUnlessProviderIs("vsphere")
 }

-func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.VolumeSource {
-	vtr, ok := testResource.(*vSphereTestResource)
-	Expect(ok).To(BeTrue(), "Failed to cast test resource to vSphere Test Resource")
+func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
+	vsv, ok := volume.(*vSphereVolume)
+	Expect(ok).To(BeTrue(), "Failed to cast test volume to vSphere test volume")

 	// vSphere driver doesn't seem to support readOnly volume
 	// TODO: check if it is correct
@@ -1306,7 +1263,7 @@ func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, testResour
 	}
 	volSource := v1.VolumeSource{
 		VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
-			VolumePath: vtr.volumePath,
+			VolumePath: vsv.volumePath,
 		},
 	}
 	if fsType != "" {
@@ -1315,9 +1272,9 @@ func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, testResour
 	return &volSource
 }

-func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
-	vtr, ok := testResource.(*vSphereTestResource)
-	Expect(ok).To(BeTrue(), "Failed to cast test resource to vSphere Test Resource")
+func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
+	vsv, ok := volume.(*vSphereVolume)
+	Expect(ok).To(BeTrue(), "Failed to cast test volume to vSphere test volume")

 	// vSphere driver doesn't seem to support readOnly volume
 	// TODO: check if it is correct
@@ -1326,7 +1283,7 @@ func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string,
 	}
 	pvSource := v1.PersistentVolumeSource{
 		VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
-			VolumePath: vtr.volumePath,
+			VolumePath: vsv.volumePath,
 		},
 	}
 	if fsType != "" {
@@ -1335,13 +1292,13 @@ func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string,
 	return &pvSource, nil
 }

-func (v *vSphereDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
+func (v *vSphereDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
 	provisioner := "kubernetes.io/vsphere-volume"
 	parameters := map[string]string{}
 	if fsType != "" {
 		parameters["fsType"] = fsType
 	}
-	ns := v.driverInfo.Config.Framework.Namespace.Name
+	ns := config.Framework.Namespace.Name
 	suffix := fmt.Sprintf("%s-sc", v.driverInfo.Name)

 	return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
@@ -1351,29 +1308,28 @@ func (v *vSphereDriver) GetClaimSize() string {
 	return "5Gi"
 }

-func (v *vSphereDriver) CreateDriver() {
+func (v *vSphereDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
+	return &testsuites.PerTestConfig{
+		Driver:    v,
+		Prefix:    "vsphere",
+		Framework: f,
+	}, func() {}
 }

-func (v *vSphereDriver) CleanupDriver() {
-}
-
-func (v *vSphereDriver) CreateVolume(volType testpatterns.TestVolType) interface{} {
-	f := v.driverInfo.Config.Framework
+func (v *vSphereDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
+	f := config.Framework
 	vspheretest.Bootstrap(f)
 	nodeInfo := vspheretest.GetReadySchedulableRandomNodeInfo()
 	volumePath, err := nodeInfo.VSphere.CreateVolume(&vspheretest.VolumeOptions{}, nodeInfo.DataCenterRef)
 	Expect(err).NotTo(HaveOccurred())
-	return &vSphereTestResource{
+	return &vSphereVolume{
 		volumePath: volumePath,
 		nodeInfo:   nodeInfo,
 	}
 }

-func (v *vSphereDriver) DeleteVolume(volType testpatterns.TestVolType, testResource interface{}) {
-	vtr, ok := testResource.(*vSphereTestResource)
-	Expect(ok).To(BeTrue(), "Failed to cast test resource to vSphere Test Resource")
-
-	vtr.nodeInfo.VSphere.DeleteVolume(vtr.volumePath, vtr.nodeInfo.DataCenterRef)
+func (v *vSphereVolume) DeleteVolume() {
+	v.nodeInfo.VSphere.DeleteVolume(v.volumePath, v.nodeInfo.DataCenterRef)
 }

 // Azure
@@ -1381,7 +1337,7 @@ type azureDriver struct {
 	driverInfo testsuites.DriverInfo
 }

-type azureTestResource struct {
+type azureVolume struct {
 	volumeName string
 }
@@ -1392,7 +1348,7 @@ var _ testsuites.PreprovisionedPVTestDriver = &azureDriver{}
 var _ testsuites.DynamicPVTestDriver = &azureDriver{}

 // InitAzureDriver returns azureDriver that implements TestDriver interface
-func InitAzureDriver(config testsuites.TestConfig) testsuites.TestDriver {
+func InitAzureDriver() testsuites.TestDriver {
 	return &azureDriver{
 		driverInfo: testsuites.DriverInfo{
 			Name: "azure",
@@ -1407,8 +1363,6 @@ func InitAzureDriver(config testsuites.TestConfig) testsuites.TestDriver {
 				testsuites.CapBlock: true,
 				testsuites.CapExec:  true,
 			},
-
-			Config: config,
 		},
 	}
 }
@@ -1421,16 +1375,16 @@ func (a *azureDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
 	framework.SkipUnlessProviderIs("azure")
 }

-func (a *azureDriver) GetVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.VolumeSource {
-	atr, ok := testResource.(*azureTestResource)
-	Expect(ok).To(BeTrue(), "Failed to cast test resource to Azure Test Resource")
+func (a *azureDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
+	av, ok := volume.(*azureVolume)
+	Expect(ok).To(BeTrue(), "Failed to cast test volume to Azure test volume")

-	diskName := atr.volumeName[(strings.LastIndex(atr.volumeName, "/") + 1):]
+	diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):]

 	volSource := v1.VolumeSource{
 		AzureDisk: &v1.AzureDiskVolumeSource{
 			DiskName:    diskName,
-			DataDiskURI: atr.volumeName,
+			DataDiskURI: av.volumeName,
 			ReadOnly:    &readOnly,
 		},
 	}
@@ -1440,16 +1394,16 @@ func (a *azureDriver) GetVolumeSource(readOnly bool, fsType string, testResource
 	return &volSource
 }

-func (a *azureDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
-	atr, ok := testResource.(*azureTestResource)
-	Expect(ok).To(BeTrue(), "Failed to cast test resource to Azure Test Resource")
+func (a *azureDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
+	av, ok := volume.(*azureVolume)
+	Expect(ok).To(BeTrue(), "Failed to cast test volume to Azure test volume")

-	diskName := atr.volumeName[(strings.LastIndex(atr.volumeName, "/") + 1):]
+	diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):]

 	pvSource := v1.PersistentVolumeSource{
 		AzureDisk: &v1.AzureDiskVolumeSource{
 			DiskName:    diskName,
-			DataDiskURI: atr.volumeName,
+			DataDiskURI: av.volumeName,
 			ReadOnly:    &readOnly,
 		},
 	}
@@ -1459,13 +1413,13 @@ func (a *azureDriver) GetPersistentVolumeSource(readOnly bool, fsType string, te
 	return &pvSource, nil
 }

-func (a *azureDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
+func (a *azureDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
 	provisioner := "kubernetes.io/azure-disk"
 	parameters := map[string]string{}
 	if fsType != "" {
 		parameters["fsType"] = fsType
 	}
-	ns := a.driverInfo.Config.Framework.Namespace.Name
+	ns := config.Framework.Namespace.Name
 	suffix := fmt.Sprintf("%s-sc", a.driverInfo.Name)

 	return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
@@ -1475,26 +1429,25 @@ func (a *azureDriver) GetClaimSize() string {
 	return "5Gi"
 }

-func (a *azureDriver) CreateDriver() {
+func (a *azureDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
+	return &testsuites.PerTestConfig{
+		Driver:    a,
+		Prefix:    "azure",
+		Framework: f,
+	}, func() {}
 }

-func (a *azureDriver) CleanupDriver() {
-}
-
-func (a *azureDriver) CreateVolume(volType testpatterns.TestVolType) interface{} {
+func (a *azureDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
 	By("creating a test azure disk volume")
 	volumeName, err := framework.CreatePDWithRetry()
 	Expect(err).NotTo(HaveOccurred())
-	return &azureTestResource{
+	return &azureVolume{
 		volumeName: volumeName,
 	}
 }

-func (a *azureDriver) DeleteVolume(volType testpatterns.TestVolType, testResource interface{}) {
-	atr, ok := testResource.(*azureTestResource)
-	Expect(ok).To(BeTrue(), "Failed to cast test resource to Azure Test Resource")
-
-	framework.DeletePDWithRetry(atr.volumeName)
+func (v *azureVolume) DeleteVolume() {
+	framework.DeletePDWithRetry(v.volumeName)
 }
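The other half of the pattern shows up in the same hunks: DeleteVolume is no longer a driver method that has to down-cast an interface{} argument; each volume type implements testsuites.TestVolume and deletes itself. Sketched with a hypothetical fooVolume that is not part of this patch:

    // fooVolume satisfies testsuites.TestVolume: the volume carries its own
    // handle, so no type assertion against the driver is needed for cleanup.
    type fooVolume struct {
    	volumeName string
    }

    func (v *fooVolume) DeleteVolume() {
    	framework.DeletePDWithRetry(v.volumeName)
    }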

 // AWS
@@ -1513,7 +1466,7 @@ var _ testsuites.TestDriver = &awsDriver{}
 var _ testsuites.DynamicPVTestDriver = &awsDriver{}

 // InitAwsDriver returns awsDriver that implements TestDriver interface
-func InitAwsDriver(config testsuites.TestConfig) testsuites.TestDriver {
+func InitAwsDriver() testsuites.TestDriver {
 	return &awsDriver{
 		driverInfo: testsuites.DriverInfo{
 			Name: "aws",
@@ -1529,8 +1482,6 @@ func InitAwsDriver(config testsuites.TestConfig) testsuites.TestDriver {
 				testsuites.CapBlock: true,
 				testsuites.CapExec:  true,
 			},
-
-			Config: config,
 		},
 	}
 }
@@ -1545,7 +1496,7 @@ func (a *awsDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {

 // TODO: Fix authorization error in attach operation and uncomment below
 /*
-func (a *awsDriver) GetVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.VolumeSource {
+func (a *awsDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource {
 	volSource := v1.VolumeSource{
 		AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
 			VolumeID: a.volumeName,
@@ -1558,7 +1509,7 @@ func (a *awsDriver) GetVolumeSource(readOnly bool, fsType string, testResource i
 	return &volSource
 }

-func (a *awsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
+func (a *awsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 	pvSource := v1.PersistentVolumeSource{
 		AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
 			VolumeID: a.volumeName,
@@ -1572,13 +1523,13 @@ func (a *awsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, test
 }
 */

-func (a *awsDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
+func (a *awsDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
 	provisioner := "kubernetes.io/aws-ebs"
 	parameters := map[string]string{}
 	if fsType != "" {
 		parameters["fsType"] = fsType
 	}
-	ns := a.driverInfo.Config.Framework.Namespace.Name
+	ns := config.Framework.Namespace.Name
 	suffix := fmt.Sprintf("%s-sc", a.driverInfo.Name)

 	return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
@@ -1588,22 +1539,24 @@ func (a *awsDriver) GetClaimSize() string {
 	return "5Gi"
 }

-func (a *awsDriver) CreateDriver() {
-}
-
-func (a *awsDriver) CleanupDriver() {
+func (a *awsDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
+	return &testsuites.PerTestConfig{
+		Driver:    a,
+		Prefix:    "aws",
+		Framework: f,
+	}, func() {}
 }

 // TODO: Fix authorization error in attach operation and uncomment below
 /*
-func (a *awsDriver) CreateVolume(volType testpatterns.TestVolType) interface{} {
+func (a *awsDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
 	By("creating a test aws volume")
 	var err error
 	a.volumeName, err = framework.CreatePDWithRetry()
 	Expect(err).NotTo(HaveOccurred())
 }

-func (a *awsDriver) DeleteVolume(volType testpatterns.TestVolType, testResource interface{}) {
+func (v *awsVolume) DeleteVolume() {
 	framework.DeletePDWithRetry(a.volumeName)
 }
 */
@@ -1619,6 +1572,11 @@ type localDriver struct {
 	ltrMgr utils.LocalTestResourceManager
 }

+type localVolume struct {
+	ltrMgr utils.LocalTestResourceManager
+	ltr    *utils.LocalTestResource
+}
+
 var (
 	// capabilities
 	defaultLocalVolumeCapabilities = map[testsuites.Capability]bool{
@@ -1655,7 +1613,7 @@ var _ testsuites.TestDriver = &localDriver{}
 var _ testsuites.PreprovisionedVolumeTestDriver = &localDriver{}
 var _ testsuites.PreprovisionedPVTestDriver = &localDriver{}

-func InitLocalDriverWithVolumeType(volumeType utils.LocalVolumeType) func(config testsuites.TestConfig) testsuites.TestDriver {
+func InitLocalDriverWithVolumeType(volumeType utils.LocalVolumeType) func() testsuites.TestDriver {
 	maxFileSize := defaultLocalVolumeMaxFileSize
 	if maxFileSizeByVolType, ok := localVolumeMaxFileSizes[volumeType]; ok {
 		maxFileSize = maxFileSizeByVolType
@@ -1668,8 +1626,7 @@ func InitLocalDriverWithVolumeType(volumeType utils.LocalVolumeType) func(config
 	if capabilitiesByType, ok := localVolumeCapabitilies[volumeType]; ok {
 		capabilities = capabilitiesByType
 	}
-	return func(config testsuites.TestConfig) testsuites.TestDriver {
-		hostExec := utils.NewHostExec(config.Framework)
+	return func() testsuites.TestDriver {
 		// custom tag to distinguish from tests of other volume types
 		featureTag := fmt.Sprintf("[LocalVolumeType: %s]", volumeType)
 		// For GCE Local SSD volumes, we must run serially
@@ -1683,11 +1640,8 @@ func InitLocalDriverWithVolumeType(volumeType utils.LocalVolumeType) func(config
 				MaxFileSize:     maxFileSize,
 				SupportedFsType: supportedFsTypes,
 				Capabilities:    capabilities,
-				Config:          config,
 			},
-			hostExec:   hostExec,
 			volumeType: volumeType,
-			ltrMgr:     utils.NewLocalResourceManager("local-driver", hostExec, "/tmp"),
 		}
 	}
 }
@@ -1711,45 +1665,42 @@ func (l *localDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
 	}
 }

-func (l *localDriver) CreateDriver() {
+func (l *localDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
 	// choose a random node to test against
-	l.node = l.randomNode()
-}
-
-func (l *localDriver) CleanupDriver() {
-	l.hostExec.Cleanup()
-}
-
-func (l *localDriver) randomNode() *v1.Node {
-	f := l.driverInfo.Config.Framework
 	nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-	node := nodes.Items[rand.Intn(len(nodes.Items))]
-	return &node
+	l.node = &nodes.Items[rand.Intn(len(nodes.Items))]
+
+	l.hostExec = utils.NewHostExec(f)
+	l.ltrMgr = utils.NewLocalResourceManager("local-driver", l.hostExec, "/tmp")
+
+	return &testsuites.PerTestConfig{
+		Driver:         l,
+		Prefix:         "local",
+		Framework:      f,
+		ClientNodeName: l.node.Name,
+	}, func() {
+		l.hostExec.Cleanup()
+	}
 }

-func (l *localDriver) CreateVolume(volType testpatterns.TestVolType) interface{} {
+func (l *localDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
 	switch volType {
 	case testpatterns.PreprovisionedPV:
 		node := l.node
 		// assign this to schedule pod on this node
-		l.driverInfo.Config.ClientNodeName = node.Name
-		return l.ltrMgr.Create(node, l.volumeType, nil)
+		config.ClientNodeName = node.Name
+		return &localVolume{
+			ltrMgr: l.ltrMgr,
+			ltr:    l.ltrMgr.Create(node, l.volumeType, nil),
+		}
 	default:
 		framework.Failf("Unsupported volType: %v is specified", volType)
 	}
 	return nil
 }

-func (l *localDriver) DeleteVolume(volType testpatterns.TestVolType, testResource interface{}) {
-	ltr, ok := testResource.(*utils.LocalTestResource)
"Failed to cast test resource to local Test Resource") - switch volType { - case testpatterns.PreprovisionedPV: - l.ltrMgr.Remove(ltr) - default: - framework.Failf("Unsupported volType: %v is specified", volType) - } - return +func (v *localVolume) DeleteVolume() { + v.ltrMgr.Remove(v.ltr) } func (l *localDriver) nodeAffinityForNode(node *v1.Node) *v1.VolumeNodeAffinity { @@ -1778,13 +1729,13 @@ func (l *localDriver) nodeAffinityForNode(node *v1.Node) *v1.VolumeNodeAffinity } } -func (l *localDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { - ltr, ok := testResource.(*utils.LocalTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to local Test Resource") +func (l *localDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { + lv, ok := volume.(*localVolume) + Expect(ok).To(BeTrue(), "Failed to cast test volume to local test volume") return &v1.PersistentVolumeSource{ Local: &v1.LocalVolumeSource{ - Path: ltr.Path, + Path: lv.ltr.Path, FSType: &fsType, }, - }, l.nodeAffinityForNode(ltr.Node) + }, l.nodeAffinityForNode(lv.ltr.Node) } diff --git a/test/e2e/storage/in_tree_volumes.go b/test/e2e/storage/in_tree_volumes.go index f1b33791430..23ceaa3c860 100644 --- a/test/e2e/storage/in_tree_volumes.go +++ b/test/e2e/storage/in_tree_volumes.go @@ -18,15 +18,13 @@ package storage import ( . "github.com/onsi/ginkgo" - "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/storage/drivers" - "k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/testsuites" "k8s.io/kubernetes/test/e2e/storage/utils" ) // List of testDrivers to be executed in below loop -var testDrivers = []func(config testsuites.TestConfig) testsuites.TestDriver{ +var testDrivers = []func() testsuites.TestDriver{ drivers.InitNFSDriver, drivers.InitGlusterFSDriver, drivers.InitISCSIDriver, @@ -59,41 +57,13 @@ var testSuites = []func() testsuites.TestSuite{ testsuites.InitProvisioningTestSuite, } -func intreeTunePattern(patterns []testpatterns.TestPattern) []testpatterns.TestPattern { - return patterns -} - // This executes testSuites for in-tree volumes. var _ = utils.SIGDescribe("In-tree Volumes", func() { - f := framework.NewDefaultFramework("volumes") - - var ( - // Common configuration options for all drivers. - config = testsuites.TestConfig{ - Framework: f, - Prefix: "in-tree", - } - ) - for _, initDriver := range testDrivers { - curDriver := initDriver(config) - curConfig := curDriver.GetDriverInfo().Config + curDriver := initDriver() + Context(testsuites.GetDriverNameWithFeatureTags(curDriver), func() { - BeforeEach(func() { - // Reset config. The driver might have modified its copy - // in a previous test. 
diff --git a/test/e2e/storage/in_tree_volumes.go b/test/e2e/storage/in_tree_volumes.go
index f1b33791430..23ceaa3c860 100644
--- a/test/e2e/storage/in_tree_volumes.go
+++ b/test/e2e/storage/in_tree_volumes.go
@@ -18,15 +18,13 @@ package storage

 import (
 	. "github.com/onsi/ginkgo"
-	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/storage/drivers"
-	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
 	"k8s.io/kubernetes/test/e2e/storage/testsuites"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
 )

 // List of testDrivers to be executed in below loop
-var testDrivers = []func(config testsuites.TestConfig) testsuites.TestDriver{
+var testDrivers = []func() testsuites.TestDriver{
 	drivers.InitNFSDriver,
 	drivers.InitGlusterFSDriver,
 	drivers.InitISCSIDriver,
@@ -59,41 +57,13 @@ var testSuites = []func() testsuites.TestSuite{
 	testsuites.InitProvisioningTestSuite,
 }

-func intreeTunePattern(patterns []testpatterns.TestPattern) []testpatterns.TestPattern {
-	return patterns
-}
-
 // This executes testSuites for in-tree volumes.
 var _ = utils.SIGDescribe("In-tree Volumes", func() {
-	f := framework.NewDefaultFramework("volumes")
-
-	var (
-		// Common configuration options for all drivers.
-		config = testsuites.TestConfig{
-			Framework: f,
-			Prefix:    "in-tree",
-		}
-	)
-
 	for _, initDriver := range testDrivers {
-		curDriver := initDriver(config)
-		curConfig := curDriver.GetDriverInfo().Config
+		curDriver := initDriver()
+
 		Context(testsuites.GetDriverNameWithFeatureTags(curDriver), func() {
-			BeforeEach(func() {
-				// Reset config. The driver might have modified its copy
-				// in a previous test.
-				curDriver.GetDriverInfo().Config = curConfig
-
-				// setupDriver
-				curDriver.CreateDriver()
-			})
-
-			AfterEach(func() {
-				// Cleanup driver
-				curDriver.CleanupDriver()
-			})
-
-			testsuites.RunTestSuite(f, curDriver, testSuites, intreeTunePattern)
+			testsuites.DefineTestSuite(curDriver, testSuites)
 		})
 	}
 })
diff --git a/test/e2e/storage/regional_pd.go b/test/e2e/storage/regional_pd.go
index b58a04568ce..5cf05f9816a 100644
--- a/test/e2e/storage/regional_pd.go
+++ b/test/e2e/storage/regional_pd.go
@@ -143,10 +143,11 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
 	}

 	for _, test := range tests {
-		class := newStorageClass(test, ns, "" /* suffix */)
-		claim := newClaim(test, ns, "" /* suffix */)
-		claim.Spec.StorageClassName = &class.Name
-		testsuites.TestDynamicProvisioning(test, c, claim, class)
+		test.Client = c
+		test.Class = newStorageClass(test, ns, "" /* suffix */)
+		test.Claim = newClaim(test, ns, "" /* suffix */)
+		test.Claim.Spec.StorageClassName = &test.Class.Name
+		test.TestDynamicProvisioning()
 	}
 }
@@ -301,6 +302,7 @@ func addTaint(c clientset.Interface, ns string, nodes []v1.Node, podZone string)

 func testRegionalDelayedBinding(c clientset.Interface, ns string, pvcCount int) {
 	test := testsuites.StorageClassTest{
+		Client:      c,
 		Name:        "Regional PD storage class with waitForFirstConsumer test on GCE",
 		Provisioner: "kubernetes.io/gce-pd",
 		Parameters: map[string]string{
@@ -312,14 +314,14 @@ func testRegionalDelayedBinding(c clientset.Interface, ns string, pvcCount int)
 	}

 	suffix := "delayed-regional"
-	class := newStorageClass(test, ns, suffix)
+	test.Class = newStorageClass(test, ns, suffix)
 	var claims []*v1.PersistentVolumeClaim
 	for i := 0; i < pvcCount; i++ {
 		claim := newClaim(test, ns, suffix)
-		claim.Spec.StorageClassName = &class.Name
+		claim.Spec.StorageClassName = &test.Class.Name
 		claims = append(claims, claim)
 	}
-	pvs, node := testsuites.TestBindingWaitForFirstConsumerMultiPVC(test, c, claims, class, nil /* node selector */, false /* expect unschedulable */)
+	pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(claims, nil /* node selector */, false /* expect unschedulable */)
 	if node == nil {
 		framework.Failf("unexpected nil node found")
 	}
@@ -345,17 +347,20 @@ func testRegionalAllowedTopologies(c clientset.Interface, ns string) {
 	}

 	suffix := "topo-regional"
-	class := newStorageClass(test, ns, suffix)
+	test.Client = c
+	test.Class = newStorageClass(test, ns, suffix)
 	zones := getTwoRandomZones(c)
-	addAllowedTopologiesToStorageClass(c, class, zones)
-	claim := newClaim(test, ns, suffix)
-	claim.Spec.StorageClassName = &class.Name
-	pv := testsuites.TestDynamicProvisioning(test, c, claim, class)
+	addAllowedTopologiesToStorageClass(c, test.Class, zones)
+	test.Claim = newClaim(test, ns, suffix)
+	test.Claim.Spec.StorageClassName = &test.Class.Name
+
+	pv := test.TestDynamicProvisioning()
 	checkZonesFromLabelAndAffinity(pv, sets.NewString(zones...), true)
 }

 func testRegionalAllowedTopologiesWithDelayedBinding(c clientset.Interface, ns string, pvcCount int) {
 	test := testsuites.StorageClassTest{
+		Client:      c,
 		Name:        "Regional PD storage class with allowedTopologies and waitForFirstConsumer test on GCE",
 		Provisioner: "kubernetes.io/gce-pd",
 		Parameters: map[string]string{
@@ -367,16 +372,16 @@ func testRegionalAllowedTopologiesWithDelayedBinding(c clientset.Interface, ns s
 	}

 	suffix := "topo-delayed-regional"
-	class := newStorageClass(test, ns, suffix)
+	test.Class = newStorageClass(test, ns, suffix)
 	topoZones := getTwoRandomZones(c)
-	addAllowedTopologiesToStorageClass(c, class, topoZones)
+	addAllowedTopologiesToStorageClass(c, test.Class, topoZones)
 	var claims []*v1.PersistentVolumeClaim
 	for i := 0; i < pvcCount; i++ {
 		claim := newClaim(test, ns, suffix)
-		claim.Spec.StorageClassName = &class.Name
+		claim.Spec.StorageClassName = &test.Class.Name
 		claims = append(claims, claim)
 	}
-	pvs, node := testsuites.TestBindingWaitForFirstConsumerMultiPVC(test, c, claims, class, nil /* node selector */, false /* expect unschedulable */)
+	pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(claims, nil /* node selector */, false /* expect unschedulable */)
 	if node == nil {
 		framework.Failf("unexpected nil node found")
 	}
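All of the regional PD call sites now follow the same shape: populate Client, Class, and Claim on the StorageClassTest value, then invoke the test as a method. A condensed sketch with placeholder values:

    test := testsuites.StorageClassTest{
    	Client:       c,
    	Provisioner:  "kubernetes.io/gce-pd",
    	ClaimSize:    "2Gi",
    	ExpectedSize: "2Gi",
    }
    test.Class = newStorageClass(test, ns, "suffix")
    test.Claim = newClaim(test, ns, "suffix")
    test.Claim.Spec.StorageClassName = &test.Class.Name
    pv := test.TestDynamicProvisioning() // returns the provisioned PV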
diff --git a/test/e2e/storage/testsuites/BUILD b/test/e2e/storage/testsuites/BUILD
index 9a692f0a435..e6a0221a404 100644
--- a/test/e2e/storage/testsuites/BUILD
+++ b/test/e2e/storage/testsuites/BUILD
@@ -31,6 +31,7 @@ go_library(
         "//staging/src/k8s.io/client-go/dynamic:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//test/e2e/framework:go_default_library",
+        "//test/e2e/framework/podlogs:go_default_library",
         "//test/e2e/storage/testpatterns:go_default_library",
         "//test/e2e/storage/utils:go_default_library",
         "//test/utils/image:go_default_library",
diff --git a/test/e2e/storage/testsuites/base.go b/test/e2e/storage/testsuites/base.go
index 75425b98d33..85dd8347c6f 100644
--- a/test/e2e/storage/testsuites/base.go
+++ b/test/e2e/storage/testsuites/base.go
@@ -17,7 +17,9 @@ limitations under the License.
 package testsuites

 import (
+	"context"
 	"fmt"
+	"regexp"
 	"time"

 	. "github.com/onsi/ginkgo"
@@ -32,6 +34,7 @@ import (
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	"k8s.io/kubernetes/test/e2e/framework/podlogs"
 	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
 )
@@ -39,10 +42,10 @@ import (
 type TestSuite interface {
 	// getTestSuiteInfo returns the TestSuiteInfo for this TestSuite
 	getTestSuiteInfo() TestSuiteInfo
-	// skipUnsupportedTest skips the test if this TestSuite is not suitable to be tested with the combination of TestPattern and TestDriver
-	skipUnsupportedTest(testpatterns.TestPattern, TestDriver)
-	// execTest executes test of the testpattern for the driver
-	execTest(TestDriver, testpatterns.TestPattern)
+	// defineTests defines tests of the testpattern for the driver.
+	// Called inside a Ginkgo context that reflects the current driver and test pattern,
+	// so the test suite can define tests directly with ginkgo.It.
+	defineTests(TestDriver, testpatterns.TestPattern)
 }

 // TestSuiteInfo represents a set of parameters for TestSuite
@@ -54,11 +57,8 @@ type TestSuiteInfo struct {

 // TestResource represents an interface for resources that are used by TestSuite
 type TestResource interface {
-	// setupResource sets up test resources to be used for the tests with the
-	// combination of TestDriver and TestPattern
-	setupResource(TestDriver, testpatterns.TestPattern)
-	// cleanupResource clean up the test resources created in SetupResource
-	cleanupResource(TestDriver, testpatterns.TestPattern)
+	// cleanupResource cleans up the test resources created when setting up the resource
+	cleanupResource()
 }

 func getTestNameStr(suite TestSuite, pattern testpatterns.TestPattern) string {
@@ -66,27 +66,34 @@ func getTestNameStr(suite TestSuite, pattern testpatterns.TestPattern) string {
 	tsInfo := suite.getTestSuiteInfo()
 	return fmt.Sprintf("[Testpattern: %s]%s %s%s", pattern.Name, pattern.FeatureTag, tsInfo.name, tsInfo.featureTag)
 }

-// RunTestSuite runs all testpatterns of all testSuites for a driver
-func RunTestSuite(f *framework.Framework, driver TestDriver, tsInits []func() TestSuite, tunePatternFunc func([]testpatterns.TestPattern) []testpatterns.TestPattern) {
+// DefineTestSuite defines tests for all testpatterns and all testSuites for a driver
+func DefineTestSuite(driver TestDriver, tsInits []func() TestSuite) {
 	for _, testSuiteInit := range tsInits {
 		suite := testSuiteInit()
-		patterns := tunePatternFunc(suite.getTestSuiteInfo().testPatterns)
-
-		for _, pattern := range patterns {
-			suite.execTest(driver, pattern)
+		for _, pattern := range suite.getTestSuiteInfo().testPatterns {
+			p := pattern
+			Context(getTestNameStr(suite, p), func() {
+				BeforeEach(func() {
+					// Skip unsupported tests to avoid unnecessary resource initialization
+					skipUnsupportedTest(driver, p)
+				})
+				suite.defineTests(driver, p)
+			})
 		}
 	}
 }

-// skipUnsupportedTest will skip tests if the combination of driver, testsuite, and testpattern
+// skipUnsupportedTest will skip tests if the combination of driver and testpattern
 // is not suitable to be tested.
 // Whether it needs to be skipped is checked by following steps:
 // 1. Check whether SnapshotType is supported by the driver from its interface
 // 2. Check whether volType is supported by the driver from its interface
 // 3. Check if fsType is supported
 // 4. Check with driver specific logic
-// 5. Check with testSuite specific logic
-func skipUnsupportedTest(suite TestSuite, driver TestDriver, pattern testpatterns.TestPattern) {
+//
+// Test suites can also skip tests inside their own defineTests function or in
+// individual tests.
+func skipUnsupportedTest(driver TestDriver, pattern testpatterns.TestPattern) {
 	dInfo := driver.GetDriverInfo()
 	var isSupported bool
@@ -130,9 +137,6 @@ func skipUnsupportedTest(suite TestSuite, driver TestDriver, pattern testpattern
 	// 4. Check with driver specific logic
 	driver.SkipUnsupportedTest(pattern)
-
-	// 5. Check with testSuite specific logic
-	suite.skipUnsupportedTest(pattern, driver)
 }
@@ -141,40 +145,45 @@ func skipUnsupportedTest(suite TestSuite, driver TestDriver, pattern testpattern
 // genericVolumeTestResource is a generic implementation of TestResource that will be able to
 // be used in most of TestSuites.
 // Also, see subpath.go in the same directory for how to extend and use it.
 type genericVolumeTestResource struct {
 	driver  TestDriver
+	config  *PerTestConfig
+	pattern testpatterns.TestPattern
 	volType string

 	volSource *v1.VolumeSource
 	pvc       *v1.PersistentVolumeClaim
 	pv        *v1.PersistentVolume
 	sc        *storagev1.StorageClass

-	driverTestResource interface{}
+	volume TestVolume
 }

 var _ TestResource = &genericVolumeTestResource{}

-// setupResource sets up genericVolumeTestResource
-func (r *genericVolumeTestResource) setupResource(driver TestDriver, pattern testpatterns.TestPattern) {
-	r.driver = driver
+func createGenericVolumeTestResource(driver TestDriver, config *PerTestConfig, pattern testpatterns.TestPattern) *genericVolumeTestResource {
+	r := genericVolumeTestResource{
+		driver:  driver,
+		config:  config,
+		pattern: pattern,
+	}
 	dInfo := driver.GetDriverInfo()
-	f := dInfo.Config.Framework
+	f := config.Framework
 	cs := f.ClientSet
 	fsType := pattern.FsType
 	volType := pattern.VolType

 	// Create volume for pre-provisioned volume tests
-	r.driverTestResource = CreateVolume(driver, volType)
+	r.volume = CreateVolume(driver, config, volType)

 	switch volType {
 	case testpatterns.InlineVolume:
 		framework.Logf("Creating resource for inline volume")
 		if iDriver, ok := driver.(InlineVolumeTestDriver); ok {
-			r.volSource = iDriver.GetVolumeSource(false, fsType, r.driverTestResource)
+			r.volSource = iDriver.GetVolumeSource(false, fsType, r.volume)
 			r.volType = dInfo.Name
 		}
 	case testpatterns.PreprovisionedPV:
 		framework.Logf("Creating resource for pre-provisioned PV")
 		if pDriver, ok := driver.(PreprovisionedPVTestDriver); ok {
-			pvSource, volumeNodeAffinity := pDriver.GetPersistentVolumeSource(false, fsType, r.driverTestResource)
+			pvSource, volumeNodeAffinity := pDriver.GetPersistentVolumeSource(false, fsType, r.volume)
 			if pvSource != nil {
 				r.volSource, r.pv, r.pvc = createVolumeSourceWithPVCPV(f, dInfo.Name, pvSource, volumeNodeAffinity, false)
 			}
@@ -184,7 +193,7 @@ func (r *genericVolumeTestResource) setupResource(driver TestDriver, pattern tes
 		framework.Logf("Creating resource for dynamic PV")
 		if dDriver, ok := driver.(DynamicPVTestDriver); ok {
 			claimSize := dDriver.GetClaimSize()
-			r.sc = dDriver.GetDynamicProvisionStorageClass(fsType)
+			r.sc = dDriver.GetDynamicProvisionStorageClass(r.config, fsType)

 			By("creating a StorageClass " + r.sc.Name)
 			var err error
@@ -204,13 +213,14 @@ func (r *genericVolumeTestResource) setupResource(driver TestDriver, pattern tes
 	if r.volSource == nil {
 		framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, volType)
 	}
+
+	return &r
 }

 // cleanupResource cleans up genericVolumeTestResource
-func (r *genericVolumeTestResource) cleanupResource(driver TestDriver, pattern testpatterns.TestPattern) {
-	dInfo := driver.GetDriverInfo()
-	f := dInfo.Config.Framework
-	volType := pattern.VolType
+func (r *genericVolumeTestResource) cleanupResource() {
+	f := r.config.Framework
+	volType := r.pattern.VolType

 	if r.pvc != nil || r.pv != nil {
 		switch volType {
@@ -241,7 +251,9 @@ func (r *genericVolumeTestResource) cleanupResource(driver TestDriver, pattern t
 	}

 	// Cleanup volume for pre-provisioned volume tests
-	DeleteVolume(driver, volType, r.driverTestResource)
+	if r.volume != nil {
+		r.volume.DeleteVolume()
+	}
 }

 func createVolumeSourceWithPVCPV(
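With setupResource folded into a constructor, a suite now creates the resource explicitly and only the parameterless cleanup remains. The expected usage is roughly:

    // Inside a test body, after driver.PrepareTest(f) has produced config:
    resource := createGenericVolumeTestResource(driver, config, pattern)
    defer resource.cleanupResource()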
@@ -354,7 +366,7 @@ func deleteStorageClass(cs clientset.Interface, className string) {
 // the testsuites package whereas framework.VolumeTestConfig is merely
 // an implementation detail. It contains fields that have no effect,
 // which makes it unsuitable for use in the testsuites public API.
-func convertTestConfig(in *TestConfig) framework.VolumeTestConfig {
+func convertTestConfig(in *PerTestConfig) framework.VolumeTestConfig {
 	if in.ServerConfig != nil {
 		return *in.ServerConfig
 	}
@@ -388,3 +400,42 @@ func getSnapshot(claimName string, ns, snapshotClassName string) *unstructured.U

 	return snapshot
 }
+
+// StartPodLogs begins capturing log output and events from current
+// and future pods running in the namespace of the framework. That
+// ends when the returned cleanup function is called.
+//
+// The output goes to log files (when using --report-dir, as in the
+// CI) or the output stream (otherwise).
+func StartPodLogs(f *framework.Framework) func() {
+	ctx, cancel := context.WithCancel(context.Background())
+	cs := f.ClientSet
+	ns := f.Namespace
+
+	to := podlogs.LogOutput{
+		StatusWriter: GinkgoWriter,
+	}
+	if framework.TestContext.ReportDir == "" {
+		to.LogWriter = GinkgoWriter
+	} else {
+		test := CurrentGinkgoTestDescription()
+		reg := regexp.MustCompile("[^a-zA-Z0-9_-]+")
+		// We end the prefix with a slash to ensure that all logs
+		// end up in a directory named after the current test.
+		//
+		// TODO: use a deeper directory hierarchy once gubernator
+		// supports that (https://github.com/kubernetes/test-infra/issues/10289).
+		to.LogPathPrefix = framework.TestContext.ReportDir + "/" +
+			reg.ReplaceAllString(test.FullTestText, "_") + "/"
+	}
+	podlogs.CopyAllLogs(ctx, cs, ns.Name, to)
+
+	// pod events are something that the framework already collects itself
+	// after a failed test. Logging them live is only useful for interactive
+	// debugging, not when we collect reports.
+	if framework.TestContext.ReportDir == "" {
+		podlogs.WatchPods(ctx, cs, ns.Name, GinkgoWriter)
+	}
+
+	return cancel
+}
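StartPodLogs is meant to be bracketed around each test. One plausible wiring inside a suite, assuming a framework instance f already exists (a sketch, not part of this patch):

    var stopPodLogs func()

    BeforeEach(func() {
    	stopPodLogs = testsuites.StartPodLogs(f)
    })

    AfterEach(func() {
    	stopPodLogs()
    })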
diff --git a/test/e2e/storage/testsuites/driveroperations.go b/test/e2e/storage/testsuites/driveroperations.go
index e0adbc06007..d17b3619bfc 100644
--- a/test/e2e/storage/testsuites/driveroperations.go
+++ b/test/e2e/storage/testsuites/driveroperations.go
@@ -37,13 +37,13 @@ func GetDriverNameWithFeatureTags(driver TestDriver) string {
 }

 // CreateVolume creates volume for test unless dynamicPV test
-func CreateVolume(driver TestDriver, volType testpatterns.TestVolType) interface{} {
+func CreateVolume(driver TestDriver, config *PerTestConfig, volType testpatterns.TestVolType) TestVolume {
 	switch volType {
 	case testpatterns.InlineVolume:
 		fallthrough
 	case testpatterns.PreprovisionedPV:
 		if pDriver, ok := driver.(PreprovisionedVolumeTestDriver); ok {
-			return pDriver.CreateVolume(volType)
+			return pDriver.CreateVolume(config, volType)
 		}
 	case testpatterns.DynamicPV:
 		// No need to create volume
@@ -53,22 +53,6 @@ func CreateVolume(driver TestDriver, volType testpatterns.TestVolType) interface
 	return nil
 }

-// DeleteVolume deletes volume for test unless dynamicPV test
-func DeleteVolume(driver TestDriver, volType testpatterns.TestVolType, testResource interface{}) {
-	switch volType {
-	case testpatterns.InlineVolume:
-		fallthrough
-	case testpatterns.PreprovisionedPV:
-		if pDriver, ok := driver.(PreprovisionedVolumeTestDriver); ok {
-			pDriver.DeleteVolume(volType, testResource)
-		}
-	case testpatterns.DynamicPV:
-		// No need to delete volume
-	default:
-		framework.Failf("Invalid volType specified: %v", volType)
-	}
-}
-
 // GetStorageClass constructs a new StorageClass instance
 // with a unique name that is based on namespace + suffix.
 func GetStorageClass(
@@ -119,8 +103,3 @@ func GetSnapshotClass(

 	return snapshotClass
 }
-
-// GetUniqueDriverName returns unique driver name that can be used parallelly in tests
-func GetUniqueDriverName(driver TestDriver) string {
-	return fmt.Sprintf("%s-%s", driver.GetDriverInfo().Name, driver.GetDriverInfo().Config.Framework.UniqueName)
-}
diff --git a/test/e2e/storage/testsuites/provisioning.go b/test/e2e/storage/testsuites/provisioning.go
index 5478042385e..9bca03aa78d 100644
--- a/test/e2e/storage/testsuites/provisioning.go
+++ b/test/e2e/storage/testsuites/provisioning.go
@@ -41,6 +41,9 @@ import (
 // StorageClassTest represents parameters to be used by provisioning tests.
 // Not all parameters are used by all tests.
 type StorageClassTest struct {
+	Client         clientset.Interface
+	Claim          *v1.PersistentVolumeClaim
+	Class          *storage.StorageClass
 	Name           string
 	CloudProviders []string
 	Provisioner    string
@@ -76,183 +79,162 @@ func (p *provisioningTestSuite) getTestSuiteInfo() TestSuiteInfo {
 	return p.tsInfo
 }

-func (p *provisioningTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver TestDriver) {
-}
+func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
+	type local struct {
+		config      *PerTestConfig
+		testCleanup func()

-func createProvisioningTestInput(driver TestDriver, pattern testpatterns.TestPattern) (provisioningTestResource, provisioningTestInput) {
-	// Setup test resource for driver and testpattern
-	resource := provisioningTestResource{}
-	resource.setupResource(driver, pattern)
-
-	input := provisioningTestInput{
-		testCase: StorageClassTest{
-			ClaimSize:    resource.claimSize,
-			ExpectedSize: resource.claimSize,
-		},
-		cs:       driver.GetDriverInfo().Config.Framework.ClientSet,
-		dc:       driver.GetDriverInfo().Config.Framework.DynamicClient,
-		pvc:      resource.pvc,
-		sc:       resource.sc,
-		vsc:      resource.vsc,
-		dInfo:    driver.GetDriverInfo(),
-		nodeName: driver.GetDriverInfo().Config.ClientNodeName,
+		testCase *StorageClassTest
+		cs       clientset.Interface
+		pvc      *v1.PersistentVolumeClaim
+		sc       *storage.StorageClass
 	}
+	var (
+		dInfo   = driver.GetDriverInfo()
+		dDriver DynamicPVTestDriver
+		l       local
+	)

-	return resource, input
-}
-
-func (p *provisioningTestSuite) execTest(driver TestDriver, pattern testpatterns.TestPattern) {
-	Context(getTestNameStr(p, pattern), func() {
-		var (
-			resource     provisioningTestResource
-			input        provisioningTestInput
-			needsCleanup bool
-		)
-
-		BeforeEach(func() {
-			needsCleanup = false
-			// Skip unsupported tests to avoid unnecessary resource initialization
-			skipUnsupportedTest(p, driver, pattern)
-			needsCleanup = true
-
-			// Create test input
-			resource, input = createProvisioningTestInput(driver, pattern)
-		})
-
-		AfterEach(func() {
-			if needsCleanup {
-				resource.cleanupResource(driver, pattern)
-			}
-		})
-
-		// Ginkgo's "Global Shared Behaviors" require arguments for a shared function
-		// to be a single struct and to be passed as a pointer.
-		// Please see https://onsi.github.io/ginkgo/#global-shared-behaviors for details.
-		testProvisioning(&input)
-	})
-}
-
-type provisioningTestResource struct {
-	driver TestDriver
-
-	claimSize string
-	sc        *storage.StorageClass
-	pvc       *v1.PersistentVolumeClaim
-	// follow parameter is used to test provision volume from snapshot
-	vsc *unstructured.Unstructured
-}
-
-var _ TestResource = &provisioningTestResource{}
-
-func (p *provisioningTestResource) setupResource(driver TestDriver, pattern testpatterns.TestPattern) {
-	// Setup provisioningTest resource
-	switch pattern.VolType {
-	case testpatterns.DynamicPV:
-		if dDriver, ok := driver.(DynamicPVTestDriver); ok {
-			p.sc = dDriver.GetDynamicProvisionStorageClass("")
-			if p.sc == nil {
-				framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", driver.GetDriverInfo().Name)
-			}
-			p.driver = driver
-			p.claimSize = dDriver.GetClaimSize()
-			p.pvc = getClaim(p.claimSize, driver.GetDriverInfo().Config.Framework.Namespace.Name)
-			p.pvc.Spec.StorageClassName = &p.sc.Name
-			framework.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", p.sc, p.pvc)
-			if sDriver, ok := driver.(SnapshottableTestDriver); ok {
-				p.vsc = sDriver.GetSnapshotClass()
-			}
+	BeforeEach(func() {
+		// Check preconditions.
+		if pattern.VolType != testpatterns.DynamicPV {
+			framework.Skipf("Suite %q does not support %v", p.tsInfo.name, pattern.VolType)
+		}
+		ok := false
+		dDriver, ok = driver.(DynamicPVTestDriver)
+		if !ok {
+			framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType)
+		}
+	})
+
+	// This intentionally comes after checking the preconditions because it
+	// registers its own BeforeEach which creates the namespace. Beware that it
+	// also registers an AfterEach which renders f unusable. Any code using
+	// f must run inside an It or Context callback.
+	f := framework.NewDefaultFramework("provisioning")
+
+	init := func() {
+		l = local{}
+
+		// Now do the more expensive test initialization.
+		l.config, l.testCleanup = driver.PrepareTest(f)
+		l.cs = l.config.Framework.ClientSet
+		claimSize := dDriver.GetClaimSize()
+		l.sc = dDriver.GetDynamicProvisionStorageClass(l.config, "")
+		if l.sc == nil {
+			framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name)
+		}
+		l.pvc = getClaim(claimSize, l.config.Framework.Namespace.Name)
+		l.pvc.Spec.StorageClassName = &l.sc.Name
+		framework.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", l.sc, l.pvc)
+		l.testCase = &StorageClassTest{
+			Client:       l.config.Framework.ClientSet,
+			Claim:        l.pvc,
+			Class:        l.sc,
+			ClaimSize:    claimSize,
+			ExpectedSize: claimSize,
 		}
-	default:
-		framework.Failf("Dynamic Provision test doesn't support: %s", pattern.VolType)
 	}
-}

-func (p *provisioningTestResource) cleanupResource(driver TestDriver, pattern testpatterns.TestPattern) {
-}
-
-type provisioningTestInput struct {
-	testCase StorageClassTest
-	cs       clientset.Interface
-	dc       dynamic.Interface
-	pvc      *v1.PersistentVolumeClaim
-	sc       *storage.StorageClass
-	vsc      *unstructured.Unstructured
-	dInfo    *DriverInfo
-	nodeName string
-}
-
-func testProvisioning(input *provisioningTestInput) {
-	// common checker for most of the test cases below
-	pvcheck := func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
-		PVWriteReadSingleNodeCheck(input.cs, claim, volume, NodeSelection{Name: input.nodeName})
+	cleanup := func() {
+		if l.testCleanup != nil {
+			l.testCleanup()
+			l.testCleanup = nil
+		}
 	}

 	It("should provision storage with defaults", func() {
-		input.testCase.PvCheck = pvcheck
-		TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
+		init()
+		defer cleanup()
+
+		l.testCase.TestDynamicProvisioning()
 	})

 	It("should provision storage with mount options", func() {
-		if input.dInfo.SupportedMountOption == nil {
-			framework.Skipf("Driver %q does not define supported mount option - skipping", input.dInfo.Name)
+		if dInfo.SupportedMountOption == nil {
+			framework.Skipf("Driver %q does not define supported mount option - skipping", dInfo.Name)
 		}

-		input.sc.MountOptions = input.dInfo.SupportedMountOption.Union(input.dInfo.RequiredMountOption).List()
-		input.testCase.PvCheck = pvcheck
-		TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
+		init()
+		defer cleanup()
+
+		l.testCase.Class.MountOptions = dInfo.SupportedMountOption.Union(dInfo.RequiredMountOption).List()
+		l.testCase.TestDynamicProvisioning()
 	})

 	It("should access volume from different nodes", func() {
+		init()
+		defer cleanup()
+
 		// The assumption is that if the test hasn't been
 		// locked onto a single node, then the driver is
 		// usable on all of them *and* supports accessing a volume
 		// from any node.
-		if input.nodeName != "" {
-			framework.Skipf("Driver %q only supports testing on one node - skipping", input.dInfo.Name)
+		if l.config.ClientNodeName != "" {
+			framework.Skipf("Driver %q only supports testing on one node - skipping", dInfo.Name)
 		}
+
 		// Ensure that we actually have more than one node.
-		nodes := framework.GetReadySchedulableNodesOrDie(input.cs)
+		nodes := framework.GetReadySchedulableNodesOrDie(l.cs)
 		if len(nodes.Items) <= 1 {
 			framework.Skipf("need more than one node - skipping")
 		}
-		input.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
-			PVMultiNodeCheck(input.cs, claim, volume, NodeSelection{Name: input.nodeName})
+		l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
+			PVMultiNodeCheck(l.cs, claim, volume, NodeSelection{Name: l.config.ClientNodeName})
 		}
-		TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
+		l.testCase.TestDynamicProvisioning()
 	})

 	It("should create and delete block persistent volumes", func() {
-		if !input.dInfo.Capabilities[CapBlock] {
-			framework.Skipf("Driver %q does not support BlockVolume - skipping", input.dInfo.Name)
+		if !dInfo.Capabilities[CapBlock] {
+			framework.Skipf("Driver %q does not support BlockVolume - skipping", dInfo.Name)
 		}
+
+		init()
+		defer cleanup()
+
 		block := v1.PersistentVolumeBlock
-		input.testCase.VolumeMode = &block
-		input.pvc.Spec.VolumeMode = &block
-		TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
+		l.testCase.VolumeMode = &block
+		l.pvc.Spec.VolumeMode = &block
+		l.testCase.TestDynamicProvisioning()
 	})

 	It("should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]", func() {
-		if !input.dInfo.Capabilities[CapDataSource] {
-			framework.Skipf("Driver %q does not support populate data from snapshot - skipping", input.dInfo.Name)
+		if !dInfo.Capabilities[CapDataSource] {
+			framework.Skipf("Driver %q does not support populate data from snapshot - skipping", dInfo.Name)
 		}
+		sDriver, ok := driver.(SnapshottableTestDriver)
+		if !ok {
+			framework.Failf("Driver %q has CapDataSource but does not implement SnapshottableTestDriver", dInfo.Name)
+		}
+
+		init()
+		defer cleanup()

-		dataSource, cleanupFunc := prepareDataSourceForProvisioning(NodeSelection{Name: input.nodeName}, input.cs, input.dc, input.pvc, input.sc, input.vsc)
+		dc := l.config.Framework.DynamicClient
+		vsc := sDriver.GetSnapshotClass(l.config)
+		dataSource, cleanupFunc := prepareDataSourceForProvisioning(NodeSelection{Name: l.config.ClientNodeName}, l.cs, dc, l.pvc, l.sc, vsc)
 		defer cleanupFunc()

-		input.pvc.Spec.DataSource = dataSource
-		input.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
+		l.pvc.Spec.DataSource = dataSource
+		l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
 			By("checking whether the created volume has the pre-populated data")
 			command := fmt.Sprintf("grep '%s' /mnt/test/initialData", claim.Namespace)
-			RunInPodWithVolume(input.cs, claim.Namespace, claim.Name, "pvc-snapshot-tester", command, NodeSelection{Name: input.nodeName})
+			RunInPodWithVolume(l.cs, claim.Namespace, claim.Name, "pvc-snapshot-tester", command, NodeSelection{Name: l.config.ClientNodeName})
 		}
-		TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
+		l.testCase.TestDynamicProvisioning()
 	})

 	It("should allow concurrent writes on the same node", func() {
-		if !input.dInfo.Capabilities[CapMultiPODs] {
-			framework.Skipf("Driver %q does not support multiple concurrent pods - skipping", input.dInfo.Name)
+		if !dInfo.Capabilities[CapMultiPODs] {
+			framework.Skipf("Driver %q does not support multiple concurrent pods - skipping", dInfo.Name)
 		}
-		input.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
+
+		init()
defer cleanup() + + l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { // We start two pods concurrently on the same node, // using the same PVC. Both wait for other to create a // file before returning. The pods are forced onto the @@ -265,7 +247,7 @@ func testProvisioning(input *provisioningTestInput) { defer GinkgoRecover() defer wg.Done() node := NodeSelection{ - Name: input.nodeName, + Name: l.config.ClientNodeName, } if podName == secondPodName { node.Affinity = &v1.Affinity{ @@ -283,18 +265,24 @@ func testProvisioning(input *provisioningTestInput) { }, } } - RunInPodWithVolume(input.cs, claim.Namespace, claim.Name, podName, command, node) + RunInPodWithVolume(l.cs, claim.Namespace, claim.Name, podName, command, node) } go run(firstPodName, "touch /mnt/test/first && while ! [ -f /mnt/test/second ]; do sleep 1; done") go run(secondPodName, "touch /mnt/test/second && while ! [ -f /mnt/test/first ]; do sleep 1; done") wg.Wait() } - TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc) + l.testCase.TestDynamicProvisioning() }) } -// TestDynamicProvisioning tests dynamic provisioning with specified StorageClassTest and storageClass -func TestDynamicProvisioning(t StorageClassTest, client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storage.StorageClass) *v1.PersistentVolume { +// TestDynamicProvisioning tests dynamic provisioning with specified StorageClassTest +func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume { + client := t.Client + Expect(client).NotTo(BeNil(), "StorageClassTest.Client is required") + claim := t.Claim + Expect(claim).NotTo(BeNil(), "StorageClassTest.Claim is required") + class := t.Class + var err error if class != nil { Expect(*claim.Spec.StorageClassName).To(Equal(class.Name)) @@ -493,29 +481,29 @@ func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClai pod = nil } -func TestBindingWaitForFirstConsumer(t StorageClassTest, client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storage.StorageClass, nodeSelector map[string]string, expectUnschedulable bool) (*v1.PersistentVolume, *v1.Node) { - pvs, node := TestBindingWaitForFirstConsumerMultiPVC(t, client, []*v1.PersistentVolumeClaim{claim}, class, nodeSelector, expectUnschedulable) +func (t StorageClassTest) TestBindingWaitForFirstConsumer(nodeSelector map[string]string, expectUnschedulable bool) (*v1.PersistentVolume, *v1.Node) { + pvs, node := t.TestBindingWaitForFirstConsumerMultiPVC([]*v1.PersistentVolumeClaim{t.Claim}, nodeSelector, expectUnschedulable) if pvs == nil { return nil, node } return pvs[0], node } -func TestBindingWaitForFirstConsumerMultiPVC(t StorageClassTest, client clientset.Interface, claims []*v1.PersistentVolumeClaim, class *storage.StorageClass, nodeSelector map[string]string, expectUnschedulable bool) ([]*v1.PersistentVolume, *v1.Node) { +func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.PersistentVolumeClaim, nodeSelector map[string]string, expectUnschedulable bool) ([]*v1.PersistentVolume, *v1.Node) { var err error Expect(len(claims)).ToNot(Equal(0)) namespace := claims[0].Namespace - By("creating a storage class " + class.Name) - class, err = client.StorageV1().StorageClasses().Create(class) + By("creating a storage class " + t.Class.Name) + class, err := t.Client.StorageV1().StorageClasses().Create(t.Class) Expect(err).NotTo(HaveOccurred()) - defer deleteStorageClass(client, class.Name) + defer deleteStorageClass(t.Client, 
class.Name) By("creating claims") var claimNames []string var createdClaims []*v1.PersistentVolumeClaim for _, claim := range claims { - c, err := client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim) + c, err := t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim) claimNames = append(claimNames, c.Name) createdClaims = append(createdClaims, c) Expect(err).NotTo(HaveOccurred()) @@ -523,7 +511,7 @@ func TestBindingWaitForFirstConsumerMultiPVC(t StorageClassTest, client clientse defer func() { var errors map[string]error for _, claim := range createdClaims { - err := framework.DeletePersistentVolumeClaim(client, claim.Name, claim.Namespace) + err := framework.DeletePersistentVolumeClaim(t.Client, claim.Name, claim.Namespace) if err != nil { errors[claim.Name] = err } @@ -537,44 +525,44 @@ func TestBindingWaitForFirstConsumerMultiPVC(t StorageClassTest, client clientse // Wait for ClaimProvisionTimeout (across all PVCs in parallel) and make sure the phase did not become Bound i.e. the Wait errors out By("checking the claims are in pending state") - err = framework.WaitForPersistentVolumeClaimsPhase(v1.ClaimBound, client, namespace, claimNames, 2*time.Second /* Poll */, framework.ClaimProvisionShortTimeout, true) + err = framework.WaitForPersistentVolumeClaimsPhase(v1.ClaimBound, t.Client, namespace, claimNames, 2*time.Second /* Poll */, framework.ClaimProvisionShortTimeout, true) Expect(err).To(HaveOccurred()) - verifyPVCsPending(client, createdClaims) + verifyPVCsPending(t.Client, createdClaims) By("creating a pod referring to the claims") // Create a pod referring to the claim and wait for it to get to running var pod *v1.Pod if expectUnschedulable { - pod, err = framework.CreateUnschedulablePod(client, namespace, nodeSelector, createdClaims, true /* isPrivileged */, "" /* command */) + pod, err = framework.CreateUnschedulablePod(t.Client, namespace, nodeSelector, createdClaims, true /* isPrivileged */, "" /* command */) } else { - pod, err = framework.CreatePod(client, namespace, nil /* nodeSelector */, createdClaims, true /* isPrivileged */, "" /* command */) + pod, err = framework.CreatePod(t.Client, namespace, nil /* nodeSelector */, createdClaims, true /* isPrivileged */, "" /* command */) } Expect(err).NotTo(HaveOccurred()) defer func() { - framework.DeletePodOrFail(client, pod.Namespace, pod.Name) - framework.WaitForPodToDisappear(client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout) + framework.DeletePodOrFail(t.Client, pod.Namespace, pod.Name) + framework.WaitForPodToDisappear(t.Client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout) }() if expectUnschedulable { // Verify that no claims are provisioned. 
- verifyPVCsPending(client, createdClaims) + verifyPVCsPending(t.Client, createdClaims) return nil, nil } // collect node details - node, err := client.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{}) + node, err := t.Client.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) By("re-checking the claims to see they binded") var pvs []*v1.PersistentVolume for _, claim := range createdClaims { // Get new copy of the claim - claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{}) + claim, err = t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) // make sure claim did bind - err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout) + err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, t.Client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout) Expect(err).NotTo(HaveOccurred()) - pv, err := client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{}) + pv, err := t.Client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) pvs = append(pvs, pv) } diff --git a/test/e2e/storage/testsuites/snapshottable.go b/test/e2e/storage/testsuites/snapshottable.go index 98f4ca0c8e6..64507514500 100644 --- a/test/e2e/storage/testsuites/snapshottable.go +++ b/test/e2e/storage/testsuites/snapshottable.go @@ -24,13 +24,10 @@ import ( . "github.com/onsi/gomega" "k8s.io/api/core/v1" - storage "k8s.io/api/storage/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" - clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/storage/testpatterns" ) @@ -48,13 +45,12 @@ var ( ) type SnapshotClassTest struct { - Name string - CloudProviders []string - Snapshotter string - Parameters map[string]string - NodeName string - NodeSelector map[string]string // NodeSelector for the pod - SnapshotContentCheck func(snapshotContent *unstructured.Unstructured) error + Name string + CloudProviders []string + Snapshotter string + Parameters map[string]string + NodeName string + NodeSelector map[string]string // NodeSelector for the pod } type snapshottableTestSuite struct { @@ -79,218 +75,129 @@ func (s *snapshottableTestSuite) getTestSuiteInfo() TestSuiteInfo { return s.tsInfo } -func (s *snapshottableTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver TestDriver) { - dInfo := driver.GetDriverInfo() - if !dInfo.Capabilities[CapDataSource] { - framework.Skipf("Driver %q does not support snapshots - skipping", dInfo.Name) - } -} +func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) { + var ( + sDriver SnapshottableTestDriver + dDriver DynamicPVTestDriver + ) -func createSnapshottableTestInput(driver TestDriver, pattern testpatterns.TestPattern) (snapshottableTestResource, snapshottableTestInput) { - // Setup test resource for driver and testpattern - resource := snapshottableTestResource{} - resource.setupResource(driver, pattern) - - dInfo := driver.GetDriverInfo() - input := snapshottableTestInput{ - testCase: SnapshotClassTest{ - NodeName: dInfo.Config.ClientNodeName, 
- }, - cs: dInfo.Config.Framework.ClientSet, - dc: dInfo.Config.Framework.DynamicClient, - pvc: resource.pvc, - sc: resource.sc, - vsc: resource.vsc, - dInfo: dInfo, - } - - return resource, input -} - -func (s *snapshottableTestSuite) execTest(driver TestDriver, pattern testpatterns.TestPattern) { - Context(getTestNameStr(s, pattern), func() { - var ( - resource snapshottableTestResource - input snapshottableTestInput - needsCleanup bool - ) - - BeforeEach(func() { - needsCleanup = false - // Skip unsupported tests to avoid unnecessary resource initialization - skipUnsupportedTest(s, driver, pattern) - needsCleanup = true - - // Create test input - resource, input = createSnapshottableTestInput(driver, pattern) - }) - - AfterEach(func() { - if needsCleanup { - resource.cleanupResource(driver, pattern) - } - }) - - // Ginkgo's "Global Shared Behaviors" require arguments for a shared function - // to be a single struct and to be passed as a pointer. - // Please see https://onsi.github.io/ginkgo/#global-shared-behaviors for details. - testSnapshot(&input) + BeforeEach(func() { + // Check preconditions. + Expect(pattern.SnapshotType).To(Equal(testpatterns.DynamicCreatedSnapshot)) + dInfo := driver.GetDriverInfo() + ok := false + sDriver, ok = driver.(SnapshottableTestDriver) + if !dInfo.Capabilities[CapDataSource] || !ok { + framework.Skipf("Driver %q does not support snapshots - skipping", dInfo.Name) + } + dDriver, ok = driver.(DynamicPVTestDriver) + if !ok { + framework.Skipf("Driver %q does not support dynamic provisioning - skipping", driver.GetDriverInfo().Name) + } }) -} -type snapshottableTestResource struct { - driver TestDriver - claimSize string + // This intentionally comes after checking the preconditions because it + // registers its own BeforeEach which creates the namespace. Beware that it + // also registers an AfterEach which renders f unusable. Any code using + // f must run inside an It or Context callback. + f := framework.NewDefaultFramework("snapshotting") - sc *storage.StorageClass - pvc *v1.PersistentVolumeClaim - // volume snapshot class - vsc *unstructured.Unstructured -} + It("should create snapshot with defaults [Feature:VolumeSnapshotDataSource]", func() { + cs := f.ClientSet + dc := f.DynamicClient -var _ TestResource = &snapshottableTestResource{} + // Now do the more expensive test initialization. 
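+ // PrepareTest needs the framework's per-test namespace, so it may only be called from inside an It block.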
+ config, testCleanup := driver.PrepareTest(f) + defer testCleanup() -func (s *snapshottableTestResource) setupResource(driver TestDriver, pattern testpatterns.TestPattern) { - // Setup snapshottableTest resource - switch pattern.SnapshotType { - case testpatterns.DynamicCreatedSnapshot: - if dDriver, ok := driver.(DynamicPVTestDriver); ok { - s.sc = dDriver.GetDynamicProvisionStorageClass("") - if s.sc == nil { - framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", driver.GetDriverInfo().Name) - } - s.driver = driver - s.claimSize = dDriver.GetClaimSize() - s.pvc = getClaim(s.claimSize, driver.GetDriverInfo().Config.Framework.Namespace.Name) - s.pvc.Spec.StorageClassName = &s.sc.Name - framework.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", s.sc, s.pvc) - - if sDriver, ok := driver.(SnapshottableTestDriver); ok { - s.vsc = sDriver.GetSnapshotClass() - } + vsc := sDriver.GetSnapshotClass(config) + class := dDriver.GetDynamicProvisionStorageClass(config, "") + if class == nil { + framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", driver.GetDriverInfo().Name) } - default: - framework.Failf("Dynamic Snapshot test doesn't support: %s", pattern.SnapshotType) - } -} + claimSize := dDriver.GetClaimSize() + pvc := getClaim(claimSize, config.Framework.Namespace.Name) + pvc.Spec.StorageClassName = &class.Name + framework.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", class, pvc) -func (s *snapshottableTestResource) cleanupResource(driver TestDriver, pattern testpatterns.TestPattern) { -} - -type snapshottableTestInput struct { - testCase SnapshotClassTest - cs clientset.Interface - dc dynamic.Interface - pvc *v1.PersistentVolumeClaim - sc *storage.StorageClass - // volume snapshot class - vsc *unstructured.Unstructured - dInfo *DriverInfo -} - -func testSnapshot(input *snapshottableTestInput) { - It("should create snapshot with defaults [Feature:VolumeSnapshotDataSource]", func() { - TestCreateSnapshot(input.testCase, input.cs, input.dc, input.pvc, input.sc, input.vsc) - }) -} - -// TestCreateSnapshot tests dynamic creating snapshot with specified SnapshotClassTest and snapshotClass -func TestCreateSnapshot( - t SnapshotClassTest, - client clientset.Interface, - dynamicClient dynamic.Interface, - claim *v1.PersistentVolumeClaim, - class *storage.StorageClass, - snapshotClass *unstructured.Unstructured, -) *unstructured.Unstructured { - var err error - if class != nil { By("creating a StorageClass " + class.Name) - class, err = client.StorageV1().StorageClasses().Create(class) + class, err := cs.StorageV1().StorageClasses().Create(class) Expect(err).NotTo(HaveOccurred()) defer func() { framework.Logf("deleting storage class %s", class.Name) - framework.ExpectNoError(client.StorageV1().StorageClasses().Delete(class.Name, nil)) + framework.ExpectNoError(cs.StorageV1().StorageClasses().Delete(class.Name, nil)) }() - } - By("creating a claim") - claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim) - Expect(err).NotTo(HaveOccurred()) - defer func() { - framework.Logf("deleting claim %q/%q", claim.Namespace, claim.Name) - // typically this claim has already been deleted - err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil) - if err != nil && !apierrs.IsNotFound(err) { - framework.Failf("Error deleting claim %q. 
Error: %v", claim.Name, err) - } - }() - err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout) - Expect(err).NotTo(HaveOccurred()) - - By("checking the claim") - // Get new copy of the claim - claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - - // Get the bound PV - pv, err := client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - - By("creating a SnapshotClass") - snapshotClass, err = dynamicClient.Resource(snapshotClassGVR).Create(snapshotClass, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - defer func() { - framework.Logf("deleting SnapshotClass %s", snapshotClass.GetName()) - framework.ExpectNoError(dynamicClient.Resource(snapshotClassGVR).Delete(snapshotClass.GetName(), nil)) - }() - - By("creating a snapshot") - snapshot := getSnapshot(claim.Name, claim.Namespace, snapshotClass.GetName()) - - snapshot, err = dynamicClient.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Create(snapshot, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - defer func() { - framework.Logf("deleting snapshot %q/%q", snapshot.GetNamespace(), snapshot.GetName()) - // typically this snapshot has already been deleted - err = dynamicClient.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Delete(snapshot.GetName(), nil) - if err != nil && !apierrs.IsNotFound(err) { - framework.Failf("Error deleting snapshot %q. Error: %v", claim.Name, err) - } - }() - err = WaitForSnapshotReady(dynamicClient, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, framework.SnapshotCreateTimeout) - Expect(err).NotTo(HaveOccurred()) - - By("checking the snapshot") - // Get new copy of the snapshot - snapshot, err = dynamicClient.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Get(snapshot.GetName(), metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - - // Get the bound snapshotContent - snapshotSpec := snapshot.Object["spec"].(map[string]interface{}) - snapshotContentName := snapshotSpec["snapshotContentName"].(string) - snapshotContent, err := dynamicClient.Resource(snapshotContentGVR).Get(snapshotContentName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - - snapshotContentSpec := snapshotContent.Object["spec"].(map[string]interface{}) - volumeSnapshotRef := snapshotContentSpec["volumeSnapshotRef"].(map[string]interface{}) - persistentVolumeRef := snapshotContentSpec["persistentVolumeRef"].(map[string]interface{}) - - // Check SnapshotContent properties - By("checking the SnapshotContent") - Expect(snapshotContentSpec["snapshotClassName"]).To(Equal(snapshotClass.GetName())) - Expect(volumeSnapshotRef["name"]).To(Equal(snapshot.GetName())) - Expect(volumeSnapshotRef["namespace"]).To(Equal(snapshot.GetNamespace())) - Expect(persistentVolumeRef["name"]).To(Equal(pv.Name)) - - // Run the checker - if t.SnapshotContentCheck != nil { - err = t.SnapshotContentCheck(snapshotContent) + By("creating a claim") + pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc) + Expect(err).NotTo(HaveOccurred()) + defer func() { + framework.Logf("deleting claim %q/%q", pvc.Namespace, pvc.Name) + // typically this claim has already been deleted + err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, nil) + if err != nil && !apierrs.IsNotFound(err) { + framework.Failf("Error deleting 
claim %q. Error: %v", pvc.Name, err)
+ }
+ }()
+ err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
- }
- return snapshotContent
+ By("checking the claim")
+ // Get new copy of the claim
+ pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
+ Expect(err).NotTo(HaveOccurred())
+
+ // Get the bound PV
+ pv, err := cs.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{})
+ Expect(err).NotTo(HaveOccurred())
+
+ By("creating a SnapshotClass")
+ vsc, err = dc.Resource(snapshotClassGVR).Create(vsc, metav1.CreateOptions{})
+ Expect(err).NotTo(HaveOccurred())
+ defer func() {
+ framework.Logf("deleting SnapshotClass %s", vsc.GetName())
+ framework.ExpectNoError(dc.Resource(snapshotClassGVR).Delete(vsc.GetName(), nil))
+ }()
+
+ By("creating a snapshot")
+ snapshot := getSnapshot(pvc.Name, pvc.Namespace, vsc.GetName())
+
+ snapshot, err = dc.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Create(snapshot, metav1.CreateOptions{})
+ Expect(err).NotTo(HaveOccurred())
+ defer func() {
+ framework.Logf("deleting snapshot %q/%q", snapshot.GetNamespace(), snapshot.GetName())
+ // typically this snapshot has already been deleted
+ err = dc.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Delete(snapshot.GetName(), nil)
+ if err != nil && !apierrs.IsNotFound(err) {
+ framework.Failf("Error deleting snapshot %q. Error: %v", snapshot.GetName(), err)
+ }
+ }()
+ err = WaitForSnapshotReady(dc, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, framework.SnapshotCreateTimeout)
+ Expect(err).NotTo(HaveOccurred())
+
+ By("checking the snapshot")
+ // Get new copy of the snapshot
+ snapshot, err = dc.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Get(snapshot.GetName(), metav1.GetOptions{})
+ Expect(err).NotTo(HaveOccurred())
+
+ // Get the bound snapshotContent
+ snapshotSpec := snapshot.Object["spec"].(map[string]interface{})
+ snapshotContentName := snapshotSpec["snapshotContentName"].(string)
+ snapshotContent, err := dc.Resource(snapshotContentGVR).Get(snapshotContentName, metav1.GetOptions{})
+ Expect(err).NotTo(HaveOccurred())
+
+ snapshotContentSpec := snapshotContent.Object["spec"].(map[string]interface{})
+ volumeSnapshotRef := snapshotContentSpec["volumeSnapshotRef"].(map[string]interface{})
+ persistentVolumeRef := snapshotContentSpec["persistentVolumeRef"].(map[string]interface{})
+
+ // Check SnapshotContent properties
+ By("checking the SnapshotContent")
+ Expect(snapshotContentSpec["snapshotClassName"]).To(Equal(vsc.GetName()))
+ Expect(volumeSnapshotRef["name"]).To(Equal(snapshot.GetName()))
+ Expect(volumeSnapshotRef["namespace"]).To(Equal(snapshot.GetNamespace()))
+ Expect(persistentVolumeRef["name"]).To(Equal(pv.Name))
+ })
}
// WaitForSnapshotReady waits for a VolumeSnapshot to be ready to use or until timeout occurs, whichever comes first.
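Such a readiness poll can be written against the dynamic client. The following is a minimal sketch, not the actual implementation, assuming the v1alpha1 snapshot CRD exposes a boolean status.readyToUse field; all names below are illustrative.

package testsuites

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/dynamic"
)

// waitForSnapshotReadySketch polls the snapshot object until its status
// reports readyToUse=true, tolerating transient Get errors, and returns
// wait.ErrWaitTimeout once the deadline passes.
func waitForSnapshotReadySketch(dc dynamic.Interface, gvr schema.GroupVersionResource, ns, name string, poll, timeout time.Duration) error {
	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		snapshot, err := dc.Resource(gvr).Namespace(ns).Get(name, metav1.GetOptions{})
		if err != nil {
			// Keep polling through transient API errors.
			return false, nil
		}
		status, ok := snapshot.Object["status"].(map[string]interface{})
		if !ok {
			// Status not populated yet.
			return false, nil
		}
		ready, _ := status["readyToUse"].(bool)
		return ready, nil
	})
}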
diff --git a/test/e2e/storage/testsuites/subpath.go b/test/e2e/storage/testsuites/subpath.go index 3e35e2ecda2..adfb87e78aa 100644 --- a/test/e2e/storage/testsuites/subpath.go +++ b/test/e2e/storage/testsuites/subpath.go @@ -71,346 +71,351 @@ func (s *subPathTestSuite) getTestSuiteInfo() TestSuiteInfo { return s.tsInfo } -func (s *subPathTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver TestDriver) { -} +func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) { + type local struct { + config *PerTestConfig + testCleanup func() -func createSubPathTestInput(pattern testpatterns.TestPattern, resource subPathTestResource) subPathTestInput { - driver := resource.driver - dInfo := driver.GetDriverInfo() - f := dInfo.Config.Framework - subPath := f.Namespace.Name - subPathDir := filepath.Join(volumePath, subPath) - - return subPathTestInput{ - f: f, - subPathDir: subPathDir, - filePathInSubpath: filepath.Join(volumePath, fileName), - filePathInVolume: filepath.Join(subPathDir, fileName), - volType: resource.volType, - pod: resource.pod, - formatPod: resource.formatPod, - volSource: resource.genericVolumeTestResource.volSource, - roVol: resource.roVolSource, + resource *genericVolumeTestResource + roVolSource *v1.VolumeSource + pod *v1.Pod + formatPod *v1.Pod + subPathDir string + filePathInSubpath string + filePathInVolume string } -} + var l local -func (s *subPathTestSuite) execTest(driver TestDriver, pattern testpatterns.TestPattern) { - Context(getTestNameStr(s, pattern), func() { - var ( - resource subPathTestResource - input subPathTestInput - needsCleanup bool - ) + // No preconditions to test. Normally they would be in a BeforeEach here. - BeforeEach(func() { - needsCleanup = false - // Skip unsupported tests to avoid unnecessary resource initialization - skipUnsupportedTest(s, driver, pattern) - needsCleanup = true + // This intentionally comes after checking the preconditions because it + // registers its own BeforeEach which creates the namespace. Beware that it + // also registers an AfterEach which renders f unusable. Any code using + // f must run inside an It or Context callback. + f := framework.NewDefaultFramework("provisioning") - // Setup test resource for driver and testpattern - resource = subPathTestResource{} - resource.setupResource(driver, pattern) + init := func() { + l = local{} - // Create test input - input = createSubPathTestInput(pattern, resource) - }) + // Now do the more expensive test initialization. 
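+ // Because this runs inside each It rather than in a BeforeEach, a skipped test never pays for driver setup or volume creation.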
+ l.config, l.testCleanup = driver.PrepareTest(f) + l.resource = createGenericVolumeTestResource(driver, l.config, pattern) - AfterEach(func() { - if needsCleanup { - resource.cleanupResource(driver, pattern) + // Setup subPath test dependent resource + volType := pattern.VolType + switch volType { + case testpatterns.InlineVolume: + if iDriver, ok := driver.(InlineVolumeTestDriver); ok { + l.roVolSource = iDriver.GetVolumeSource(true, pattern.FsType, l.resource.volume) } - }) - - testSubPath(&input) - }) -} - -type subPathTestResource struct { - genericVolumeTestResource - - roVolSource *v1.VolumeSource - pod *v1.Pod - formatPod *v1.Pod -} - -var _ TestResource = &subPathTestResource{} - -func (s *subPathTestResource) setupResource(driver TestDriver, pattern testpatterns.TestPattern) { - s.driver = driver - dInfo := s.driver.GetDriverInfo() - f := dInfo.Config.Framework - fsType := pattern.FsType - volType := pattern.VolType - - // Setup generic test resource - s.genericVolumeTestResource.setupResource(driver, pattern) - - // Setup subPath test dependent resource - switch volType { - case testpatterns.InlineVolume: - if iDriver, ok := driver.(InlineVolumeTestDriver); ok { - s.roVolSource = iDriver.GetVolumeSource(true, fsType, s.genericVolumeTestResource.driverTestResource) + case testpatterns.PreprovisionedPV: + l.roVolSource = &v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: l.resource.pvc.Name, + ReadOnly: true, + }, + } + case testpatterns.DynamicPV: + l.roVolSource = &v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: l.resource.pvc.Name, + ReadOnly: true, + }, + } + default: + framework.Failf("SubPath test doesn't support: %s", volType) } - case testpatterns.PreprovisionedPV: - s.roVolSource = &v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: s.genericVolumeTestResource.pvc.Name, - ReadOnly: true, - }, - } - case testpatterns.DynamicPV: - s.roVolSource = &v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: s.genericVolumeTestResource.pvc.Name, - ReadOnly: true, - }, - } - default: - framework.Failf("SubPath test doesn't support: %s", volType) + + subPath := f.Namespace.Name + l.pod = SubpathTestPod(f, subPath, l.resource.volType, l.resource.volSource, true) + l.pod.Spec.NodeName = l.config.ClientNodeName + l.pod.Spec.NodeSelector = l.config.ClientNodeSelector + + l.formatPod = volumeFormatPod(f, l.resource.volSource) + l.formatPod.Spec.NodeName = l.config.ClientNodeName + l.formatPod.Spec.NodeSelector = l.config.ClientNodeSelector + + l.subPathDir = filepath.Join(volumePath, subPath) + l.filePathInSubpath = filepath.Join(volumePath, fileName) + l.filePathInVolume = filepath.Join(l.subPathDir, fileName) } - subPath := f.Namespace.Name - config := dInfo.Config - s.pod = SubpathTestPod(f, subPath, s.volType, s.volSource, true) - s.pod.Spec.NodeName = config.ClientNodeName - s.pod.Spec.NodeSelector = config.ClientNodeSelector + cleanup := func() { + if l.pod != nil { + By("Deleting pod") + err := framework.DeletePodWithWait(f, f.ClientSet, l.pod) + Expect(err).ToNot(HaveOccurred(), "while deleting pod") + l.pod = nil + } - s.formatPod = volumeFormatPod(f, s.volSource) - s.formatPod.Spec.NodeName = config.ClientNodeName - s.formatPod.Spec.NodeSelector = config.ClientNodeSelector -} + if l.resource != nil { + l.resource.cleanupResource() + l.resource = nil + } -func (s *subPathTestResource) cleanupResource(driver 
TestDriver, pattern testpatterns.TestPattern) { - dInfo := driver.GetDriverInfo() - f := dInfo.Config.Framework + if l.testCleanup != nil { + l.testCleanup() + l.testCleanup = nil + } + } - // Cleanup subPath test dependent resource - By("Deleting pod") - err := framework.DeletePodWithWait(f, f.ClientSet, s.pod) - Expect(err).ToNot(HaveOccurred(), "while deleting pod") - - // Cleanup generic test resource - s.genericVolumeTestResource.cleanupResource(driver, pattern) -} - -type subPathTestInput struct { - f *framework.Framework - subPathDir string - filePathInSubpath string - filePathInVolume string - volType string - pod *v1.Pod - formatPod *v1.Pod - volSource *v1.VolumeSource - roVol *v1.VolumeSource -} - -func testSubPath(input *subPathTestInput) { It("should support non-existent path", func() { + init() + defer cleanup() + // Write the file in the subPath from init container 1 - setWriteCommand(input.filePathInSubpath, &input.pod.Spec.InitContainers[1]) + setWriteCommand(l.filePathInSubpath, &l.pod.Spec.InitContainers[1]) // Read it from outside the subPath from container 1 - testReadFile(input.f, input.filePathInVolume, input.pod, 1) + testReadFile(f, l.filePathInVolume, l.pod, 1) }) It("should support existing directory", func() { + init() + defer cleanup() + // Create the directory - setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s", input.subPathDir)) + setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s", l.subPathDir)) // Write the file in the subPath from init container 1 - setWriteCommand(input.filePathInSubpath, &input.pod.Spec.InitContainers[1]) + setWriteCommand(l.filePathInSubpath, &l.pod.Spec.InitContainers[1]) // Read it from outside the subPath from container 1 - testReadFile(input.f, input.filePathInVolume, input.pod, 1) + testReadFile(f, l.filePathInVolume, l.pod, 1) }) It("should support existing single file", func() { + init() + defer cleanup() + // Create the file in the init container - setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s; echo \"mount-tester new file\" > %s", input.subPathDir, input.filePathInVolume)) + setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s; echo \"mount-tester new file\" > %s", l.subPathDir, l.filePathInVolume)) // Read it from inside the subPath from container 0 - testReadFile(input.f, input.filePathInSubpath, input.pod, 0) + testReadFile(f, l.filePathInSubpath, l.pod, 0) }) It("should support file as subpath", func() { - // Create the file in the init container - setInitCommand(input.pod, fmt.Sprintf("echo %s > %s", input.f.Namespace.Name, input.subPathDir)) + init() + defer cleanup() - TestBasicSubpath(input.f, input.f.Namespace.Name, input.pod) + // Create the file in the init container + setInitCommand(l.pod, fmt.Sprintf("echo %s > %s", f.Namespace.Name, l.subPathDir)) + + TestBasicSubpath(f, f.Namespace.Name, l.pod) }) It("should fail if subpath directory is outside the volume [Slow]", func() { + init() + defer cleanup() + // Create the subpath outside the volume - setInitCommand(input.pod, fmt.Sprintf("ln -s /bin %s", input.subPathDir)) + setInitCommand(l.pod, fmt.Sprintf("ln -s /bin %s", l.subPathDir)) // Pod should fail - testPodFailSubpath(input.f, input.pod, false) + testPodFailSubpath(f, l.pod, false) }) It("should fail if subpath file is outside the volume [Slow]", func() { + init() + defer cleanup() + // Create the subpath outside the volume - setInitCommand(input.pod, fmt.Sprintf("ln -s /bin/sh %s", input.subPathDir)) + setInitCommand(l.pod, fmt.Sprintf("ln -s /bin/sh %s", l.subPathDir)) // Pod should fail - 
testPodFailSubpath(input.f, input.pod, false) + testPodFailSubpath(f, l.pod, false) }) It("should fail if non-existent subpath is outside the volume [Slow]", func() { + init() + defer cleanup() + // Create the subpath outside the volume - setInitCommand(input.pod, fmt.Sprintf("ln -s /bin/notanexistingpath %s", input.subPathDir)) + setInitCommand(l.pod, fmt.Sprintf("ln -s /bin/notanexistingpath %s", l.subPathDir)) // Pod should fail - testPodFailSubpath(input.f, input.pod, false) + testPodFailSubpath(f, l.pod, false) }) It("should fail if subpath with backstepping is outside the volume [Slow]", func() { + init() + defer cleanup() + // Create the subpath outside the volume - setInitCommand(input.pod, fmt.Sprintf("ln -s ../ %s", input.subPathDir)) + setInitCommand(l.pod, fmt.Sprintf("ln -s ../ %s", l.subPathDir)) // Pod should fail - testPodFailSubpath(input.f, input.pod, false) + testPodFailSubpath(f, l.pod, false) }) It("should support creating multiple subpath from same volumes [Slow]", func() { + init() + defer cleanup() + subpathDir1 := filepath.Join(volumePath, "subpath1") subpathDir2 := filepath.Join(volumePath, "subpath2") filepath1 := filepath.Join("/test-subpath1", fileName) filepath2 := filepath.Join("/test-subpath2", fileName) - setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s; mkdir -p %s", subpathDir1, subpathDir2)) + setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s; mkdir -p %s", subpathDir1, subpathDir2)) - addSubpathVolumeContainer(&input.pod.Spec.Containers[0], v1.VolumeMount{ + addSubpathVolumeContainer(&l.pod.Spec.Containers[0], v1.VolumeMount{ Name: volumeName, MountPath: "/test-subpath1", SubPath: "subpath1", }) - addSubpathVolumeContainer(&input.pod.Spec.Containers[0], v1.VolumeMount{ + addSubpathVolumeContainer(&l.pod.Spec.Containers[0], v1.VolumeMount{ Name: volumeName, MountPath: "/test-subpath2", SubPath: "subpath2", }) // Write the files from container 0 and instantly read them back - addMultipleWrites(&input.pod.Spec.Containers[0], filepath1, filepath2) - testMultipleReads(input.f, input.pod, 0, filepath1, filepath2) + addMultipleWrites(&l.pod.Spec.Containers[0], filepath1, filepath2) + testMultipleReads(f, l.pod, 0, filepath1, filepath2) }) It("should support restarting containers using directory as subpath [Slow]", func() { - // Create the directory - setInitCommand(input.pod, fmt.Sprintf("mkdir -p %v; touch %v", input.subPathDir, probeFilePath)) + init() + defer cleanup() - testPodContainerRestart(input.f, input.pod) + // Create the directory + setInitCommand(l.pod, fmt.Sprintf("mkdir -p %v; touch %v", l.subPathDir, probeFilePath)) + + testPodContainerRestart(f, l.pod) }) It("should support restarting containers using file as subpath [Slow]", func() { - // Create the file - setInitCommand(input.pod, fmt.Sprintf("touch %v; touch %v", input.subPathDir, probeFilePath)) + init() + defer cleanup() - testPodContainerRestart(input.f, input.pod) + // Create the file + setInitCommand(l.pod, fmt.Sprintf("touch %v; touch %v", l.subPathDir, probeFilePath)) + + testPodContainerRestart(f, l.pod) }) It("should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow]", func() { - testSubpathReconstruction(input.f, input.pod, false) + init() + defer cleanup() + + testSubpathReconstruction(f, l.pod, false) }) It("should unmount if pod is force deleted while kubelet is down [Disruptive][Slow]", func() { - if strings.HasPrefix(input.volType, "hostPath") || strings.HasPrefix(input.volType, "csi-hostpath") { + init() + defer cleanup() + + if 
strings.HasPrefix(l.resource.volType, "hostPath") || strings.HasPrefix(l.resource.volType, "csi-hostpath") { // TODO: This skip should be removed once #61446 is fixed - framework.Skipf("%s volume type does not support reconstruction, skipping", input.volType) + framework.Skipf("%s volume type does not support reconstruction, skipping", l.resource.volType) } - testSubpathReconstruction(input.f, input.pod, true) + + testSubpathReconstruction(f, l.pod, true) }) It("should support readOnly directory specified in the volumeMount", func() { + init() + defer cleanup() + // Create the directory - setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s", input.subPathDir)) + setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s", l.subPathDir)) // Write the file in the volume from init container 2 - setWriteCommand(input.filePathInVolume, &input.pod.Spec.InitContainers[2]) + setWriteCommand(l.filePathInVolume, &l.pod.Spec.InitContainers[2]) // Read it from inside the subPath from container 0 - input.pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true - testReadFile(input.f, input.filePathInSubpath, input.pod, 0) + l.pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true + testReadFile(f, l.filePathInSubpath, l.pod, 0) }) It("should support readOnly file specified in the volumeMount", func() { + init() + defer cleanup() + // Create the file - setInitCommand(input.pod, fmt.Sprintf("touch %s", input.subPathDir)) + setInitCommand(l.pod, fmt.Sprintf("touch %s", l.subPathDir)) // Write the file in the volume from init container 2 - setWriteCommand(input.subPathDir, &input.pod.Spec.InitContainers[2]) + setWriteCommand(l.subPathDir, &l.pod.Spec.InitContainers[2]) // Read it from inside the subPath from container 0 - input.pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true - testReadFile(input.f, volumePath, input.pod, 0) + l.pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true + testReadFile(f, volumePath, l.pod, 0) }) It("should support existing directories when readOnly specified in the volumeSource", func() { - if input.roVol == nil { - framework.Skipf("Volume type %v doesn't support readOnly source", input.volType) + init() + defer cleanup() + if l.roVolSource == nil { + framework.Skipf("Volume type %v doesn't support readOnly source", l.resource.volType) } - pod := input.pod.DeepCopy() + origpod := l.pod.DeepCopy() // Create the directory - setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s", input.subPathDir)) + setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s", l.subPathDir)) // Write the file in the subPath from init container 1 - setWriteCommand(input.filePathInSubpath, &input.pod.Spec.InitContainers[1]) + setWriteCommand(l.filePathInSubpath, &l.pod.Spec.InitContainers[1]) // Read it from inside the subPath from container 0 - testReadFile(input.f, input.filePathInSubpath, input.pod, 0) + testReadFile(f, l.filePathInSubpath, l.pod, 0) // Reset the pod - input.pod = pod + l.pod = origpod // Set volume source to read only - input.pod.Spec.Volumes[0].VolumeSource = *input.roVol + l.pod.Spec.Volumes[0].VolumeSource = *l.roVolSource // Read it from inside the subPath from container 0 - testReadFile(input.f, input.filePathInSubpath, input.pod, 0) + testReadFile(f, l.filePathInSubpath, l.pod, 0) }) It("should verify container cannot write to subpath readonly volumes", func() { - if input.roVol == nil { - framework.Skipf("Volume type %v doesn't support readOnly source", input.volType) + init() + defer cleanup() + if l.roVolSource == nil { + framework.Skipf("Volume type %v doesn't support readOnly source", 
l.resource.volType)
}
// Format the volume while it's writable
- formatVolume(input.f, input.formatPod)
+ formatVolume(f, l.formatPod)
// Set volume source to read only
- input.pod.Spec.Volumes[0].VolumeSource = *input.roVol
+ l.pod.Spec.Volumes[0].VolumeSource = *l.roVolSource
// Write the file in the volume from container 0
- setWriteCommand(input.subPathDir, &input.pod.Spec.Containers[0])
+ setWriteCommand(l.subPathDir, &l.pod.Spec.Containers[0])
// Pod should fail
- testPodFailSubpath(input.f, input.pod, true)
+ testPodFailSubpath(f, l.pod, true)
})
It("should be able to unmount after the subpath directory is deleted", func() {
- // Change volume container to busybox so we can exec later
- input.pod.Spec.Containers[1].Image = imageutils.GetE2EImage(imageutils.BusyBox)
- input.pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
+ init()
+ defer cleanup()
- By(fmt.Sprintf("Creating pod %s", input.pod.Name))
- removeUnusedContainers(input.pod)
- pod, err := input.f.ClientSet.CoreV1().Pods(input.f.Namespace.Name).Create(input.pod)
+ // Change volume container to busybox so we can exec later
+ l.pod.Spec.Containers[1].Image = imageutils.GetE2EImage(imageutils.BusyBox)
+ l.pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
+
+ By(fmt.Sprintf("Creating pod %s", l.pod.Name))
+ removeUnusedContainers(l.pod)
+ pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(l.pod)
Expect(err).ToNot(HaveOccurred(), "while creating pod")
defer func() {
By(fmt.Sprintf("Deleting pod %s", pod.Name))
- framework.DeletePodWithWait(input.f, input.f.ClientSet, pod)
+ framework.DeletePodWithWait(f, f.ClientSet, pod)
}()
// Wait for pod to be running; use the created pod, which has all server-populated fields
- err = framework.WaitForPodRunningInNamespace(input.f.ClientSet, pod)
+ err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod)
Expect(err).ToNot(HaveOccurred(), "while waiting for pod to be running")
// Exec into container that mounted the volume, delete subpath directory
- rmCmd := fmt.Sprintf("rm -rf %s", input.subPathDir)
- _, err = podContainerExec(pod, 1, rmCmd)
+ rmCmd := fmt.Sprintf("rm -rf %s", l.subPathDir)
+ _, err = podContainerExec(pod, 1, rmCmd)
Expect(err).ToNot(HaveOccurred(), "while removing subpath directory")
// Delete pod (from defer) and wait for it to be successfully deleted
diff --git a/test/e2e/storage/testsuites/testdriver.go b/test/e2e/storage/testsuites/testdriver.go
index 4e35afc2da8..d5b3bf58ba0 100644
--- a/test/e2e/storage/testsuites/testdriver.go
+++ b/test/e2e/storage/testsuites/testdriver.go
@@ -25,35 +25,53 @@ import (
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
)
-// TestDriver represents an interface for a driver to be tested in TestSuite
+// TestDriver represents an interface for a driver to be tested in TestSuite.
+// Except for GetDriverInfo, all methods will be called at test runtime and thus
+// can use framework.Skipf, framework.Failf, Gomega assertions, etc.
type TestDriver interface {
- // GetDriverInfo returns DriverInfo for the TestDriver
+ // GetDriverInfo returns DriverInfo for the TestDriver. This must be static
+ // information.
GetDriverInfo() *DriverInfo - // CreateDriver creates all driver resources that is required for TestDriver method - // except CreateVolume - CreateDriver() - // CreateDriver cleanup all the resources that is created in CreateDriver - CleanupDriver() - // SkipUnsupportedTest skips test in Testpattern is not suitable to test with the TestDriver + + // SkipUnsupportedTest skips test if Testpattern is not + // suitable to test with the TestDriver. It gets called after + // parsing parameters of the test suite and before the + // framework is initialized. Cheap tests that just check + // parameters like the cloud provider can and should be + // done in SkipUnsupportedTest to avoid setting up more + // expensive resources like framework.Framework. Tests that + // depend on a connection to the cluster can be done in + // PrepareTest once the framework is ready. SkipUnsupportedTest(testpatterns.TestPattern) + + // PrepareTest is called at test execution time each time a new test case is about to start. + // It sets up all necessary resources and returns the per-test configuration + // plus a cleanup function that frees all allocated resources. + PrepareTest(f *framework.Framework) (*PerTestConfig, func()) +} + +// TestVolume is the result of PreprovisionedVolumeTestDriver.CreateVolume. +// The only common functionality is to delete it. Individual driver interfaces +// have additional methods that work with volumes created by them. +type TestVolume interface { + DeleteVolume() } // PreprovisionedVolumeTestDriver represents an interface for a TestDriver that has pre-provisioned volume type PreprovisionedVolumeTestDriver interface { TestDriver - // CreateVolume creates a pre-provisioned volume. - CreateVolume(testpatterns.TestVolType) interface{} - // DeleteVolume deletes a volume that is created in CreateVolume - DeleteVolume(testpatterns.TestVolType, interface{}) + // CreateVolume creates a pre-provisioned volume of the desired volume type. + CreateVolume(config *PerTestConfig, volumeType testpatterns.TestVolType) TestVolume } // InlineVolumeTestDriver represents an interface for a TestDriver that supports InlineVolume type InlineVolumeTestDriver interface { PreprovisionedVolumeTestDriver + // GetVolumeSource returns a volumeSource for inline volume. // It will set readOnly and fsType to the volumeSource, if TestDriver supports both of them. // It will return nil, if the TestDriver doesn't support either of the parameters. - GetVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.VolumeSource + GetVolumeSource(readOnly bool, fsType string, testVolume TestVolume) *v1.VolumeSource } // PreprovisionedPVTestDriver represents an interface for a TestDriver that supports PreprovisionedPV @@ -62,8 +80,7 @@ type PreprovisionedPVTestDriver interface { // GetPersistentVolumeSource returns a PersistentVolumeSource with volume node affinity for pre-provisioned Persistent Volume. // It will set readOnly and fsType to the PersistentVolumeSource, if TestDriver supports both of them. // It will return nil, if the TestDriver doesn't support either of the parameters. - // Volume node affinity is optional, it will be nil for volumes which does not have volume node affinity. 
- GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) + GetPersistentVolumeSource(readOnly bool, fsType string, testVolume TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) } // DynamicPVTestDriver represents an interface for a TestDriver that supports DynamicPV @@ -72,7 +89,7 @@ type DynamicPVTestDriver interface { // GetDynamicProvisionStorageClass returns a StorageClass dynamic provision Persistent Volume. // It will set fsType to the StorageClass, if TestDriver supports it. // It will return nil, if the TestDriver doesn't support it. - GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass + GetDynamicProvisionStorageClass(config *PerTestConfig, fsType string) *storagev1.StorageClass // GetClaimSize returns the size of the volume that is to be provisioned ("5Gi", "1Mi"). // The size must be chosen so that the resulting volume is large enough for all @@ -85,7 +102,7 @@ type SnapshottableTestDriver interface { TestDriver // GetSnapshotClass returns a SnapshotClass to create snapshot. // It will return nil, if the TestDriver doesn't support it. - GetSnapshotClass() *unstructured.Unstructured + GetSnapshotClass(config *PerTestConfig) *unstructured.Unstructured } // Capability represents a feature that a volume plugin supports @@ -106,7 +123,7 @@ const ( CapMultiPODs Capability = "multipods" ) -// DriverInfo represents a combination of parameters to be used in implementation of TestDriver +// DriverInfo represents static information about a TestDriver. type DriverInfo struct { Name string // Name of the driver FeatureTag string // FeatureTag for the driver @@ -116,14 +133,15 @@ type DriverInfo struct { SupportedMountOption sets.String // Map of string for supported mount option RequiredMountOption sets.String // Map of string for required mount option (Optional) Capabilities map[Capability]bool // Map that represents plugin capabilities - - Config TestConfig // Test configuration for the current test. } -// TestConfig represents parameters that control test execution. -// They can still be modified after defining tests, for example -// in a BeforeEach or when creating the driver. -type TestConfig struct { +// PerTestConfig represents parameters that control test execution. +// One instance gets allocated for each test and is then passed +// via pointer to functions involved in the test. +type PerTestConfig struct { + // The test driver for the test. + Driver TestDriver + // Some short word that gets inserted into dynamically // generated entities (pods, paths) as first part of the name // to make debugging easier. Can be the same for different @@ -148,8 +166,9 @@ type TestConfig struct { // the configuration that then has to be used to run tests. // The values above are ignored for such tests. 
ServerConfig *framework.VolumeTestConfig
-
- // TopologyEnabled indicates that the Topology feature gate
- // should be enabled in external-provisioner
- TopologyEnabled bool
+}
+
+// GetUniqueDriverName returns a unique driver name that can be used in tests running in parallel.
+func (config *PerTestConfig) GetUniqueDriverName() string {
+ return config.Driver.GetDriverInfo().Name + "-" + config.Framework.UniqueName
}
diff --git a/test/e2e/storage/testsuites/volume_io.go b/test/e2e/storage/testsuites/volume_io.go
index ad32f84fa5f..1ec29ba5a07 100644
--- a/test/e2e/storage/testsuites/volume_io.go
+++ b/test/e2e/storage/testsuites/volume_io.go
@@ -74,87 +74,65 @@ func (t *volumeIOTestSuite) getTestSuiteInfo() TestSuiteInfo {
return t.tsInfo
}
-func (t *volumeIOTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver TestDriver) {
-}
+func (t *volumeIOTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
+ type local struct {
+ config *PerTestConfig
+ testCleanup func()
-func createVolumeIOTestInput(pattern testpatterns.TestPattern, resource genericVolumeTestResource) volumeIOTestInput {
- var fsGroup *int64
- driver := resource.driver
- dInfo := driver.GetDriverInfo()
- f := dInfo.Config.Framework
- fileSizes := createFileSizes(dInfo.MaxFileSize)
- volSource := resource.volSource
+ resource *genericVolumeTestResource
+ }
+ var (
+ dInfo = driver.GetDriverInfo()
+ l local
+ )
- if volSource == nil {
- framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name)
+ // No preconditions to test. Normally they would be in a BeforeEach here.
+
+ // This intentionally comes after checking the preconditions because it
+ // registers its own BeforeEach which creates the namespace. Beware that it
+ // also registers an AfterEach which renders f unusable. Any code using
+ // f must run inside an It or Context callback.
+ f := framework.NewDefaultFramework("volumeio")
+
+ init := func() {
+ l = local{}
+
+ // Now do the more expensive test initialization.
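+ // cleanup() tears these down in reverse order of creation: first the generic volume resource, then the driver's per-test resources.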
+ l.config, l.testCleanup = driver.PrepareTest(f) + l.resource = createGenericVolumeTestResource(driver, l.config, pattern) + if l.resource.volSource == nil { + framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name) + } } - if dInfo.Capabilities[CapFsGroup] { - fsGroupVal := int64(1234) - fsGroup = &fsGroupVal + cleanup := func() { + if l.resource != nil { + l.resource.cleanupResource() + l.resource = nil + } + + if l.testCleanup != nil { + l.testCleanup() + l.testCleanup = nil + } } - return volumeIOTestInput{ - f: f, - name: dInfo.Name, - config: &dInfo.Config, - volSource: *volSource, - testFile: fmt.Sprintf("%s_io_test_%s", dInfo.Name, f.Namespace.Name), - podSec: v1.PodSecurityContext{ - FSGroup: fsGroup, - }, - fileSizes: fileSizes, - } -} - -func (t *volumeIOTestSuite) execTest(driver TestDriver, pattern testpatterns.TestPattern) { - Context(getTestNameStr(t, pattern), func() { - var ( - resource genericVolumeTestResource - input volumeIOTestInput - needsCleanup bool - ) - - BeforeEach(func() { - needsCleanup = false - // Skip unsupported tests to avoid unnecessary resource initialization - skipUnsupportedTest(t, driver, pattern) - needsCleanup = true - - // Setup test resource for driver and testpattern - resource = genericVolumeTestResource{} - resource.setupResource(driver, pattern) - - // Create test input - input = createVolumeIOTestInput(pattern, resource) - }) - - AfterEach(func() { - if needsCleanup { - resource.cleanupResource(driver, pattern) - } - }) - - execTestVolumeIO(&input) - }) -} - -type volumeIOTestInput struct { - f *framework.Framework - name string - config *TestConfig - volSource v1.VolumeSource - testFile string - podSec v1.PodSecurityContext - fileSizes []int64 -} - -func execTestVolumeIO(input *volumeIOTestInput) { It("should write files of various sizes, verify size, validate content [Slow]", func() { - f := input.f - cs := f.ClientSet + init() + defer cleanup() - err := testVolumeIO(f, cs, convertTestConfig(input.config), input.volSource, &input.podSec, input.testFile, input.fileSizes) + cs := f.ClientSet + fileSizes := createFileSizes(dInfo.MaxFileSize) + testFile := fmt.Sprintf("%s_io_test_%s", dInfo.Name, f.Namespace.Name) + var fsGroup *int64 + if dInfo.Capabilities[CapFsGroup] { + fsGroupVal := int64(1234) + fsGroup = &fsGroupVal + } + podSec := v1.PodSecurityContext{ + FSGroup: fsGroup, + } + err := testVolumeIO(f, cs, convertTestConfig(l.config), *l.resource.volSource, &podSec, testFile, fileSizes) Expect(err).NotTo(HaveOccurred()) }) } diff --git a/test/e2e/storage/testsuites/volumemode.go b/test/e2e/storage/testsuites/volumemode.go index 6b3ea843488..b90fddb9b44 100644 --- a/test/e2e/storage/testsuites/volumemode.go +++ b/test/e2e/storage/testsuites/volumemode.go @@ -26,6 +26,7 @@ import ( storagev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" + clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/utils" @@ -61,316 +62,253 @@ func (t *volumeModeTestSuite) getTestSuiteInfo() TestSuiteInfo { return t.tsInfo } -func (t *volumeModeTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver TestDriver) { -} +func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) { + type local struct { + config *PerTestConfig + testCleanup func() -func createVolumeModeTestInput(pattern 
testpatterns.TestPattern, resource volumeModeTestResource) volumeModeTestInput { - driver := resource.driver - dInfo := driver.GetDriverInfo() - f := dInfo.Config.Framework - - return volumeModeTestInput{ - f: f, - sc: resource.sc, - pvc: resource.pvc, - pv: resource.pv, - testVolType: pattern.VolType, - nodeName: dInfo.Config.ClientNodeName, - volMode: pattern.VolMode, - isBlockSupported: dInfo.Capabilities[CapBlock], + cs clientset.Interface + ns *v1.Namespace + sc *storagev1.StorageClass + pvc *v1.PersistentVolumeClaim + pv *v1.PersistentVolume + volume TestVolume } -} - -func getVolumeModeTestFunc(pattern testpatterns.TestPattern, driver TestDriver) func(*volumeModeTestInput) { - dInfo := driver.GetDriverInfo() - isBlockSupported := dInfo.Capabilities[CapBlock] - volMode := pattern.VolMode - volType := pattern.VolType - - switch volType { - case testpatterns.PreprovisionedPV: - if volMode == v1.PersistentVolumeBlock && !isBlockSupported { - return testVolumeModeFailForPreprovisionedPV - } - return testVolumeModeSuccessForPreprovisionedPV - case testpatterns.DynamicPV: - if volMode == v1.PersistentVolumeBlock && !isBlockSupported { - return testVolumeModeFailForDynamicPV - } - return testVolumeModeSuccessForDynamicPV - default: - framework.Failf("Volume mode test doesn't support volType: %v", volType) - } - return nil -} - -func (t *volumeModeTestSuite) execTest(driver TestDriver, pattern testpatterns.TestPattern) { - Context(getTestNameStr(t, pattern), func() { - var ( - resource volumeModeTestResource - input volumeModeTestInput - testFunc func(*volumeModeTestInput) - needsCleanup bool - ) - - testFunc = getVolumeModeTestFunc(pattern, driver) - - BeforeEach(func() { - needsCleanup = false - // Skip unsupported tests to avoid unnecessary resource initialization - skipUnsupportedTest(t, driver, pattern) - needsCleanup = true - - // Setup test resource for driver and testpattern - resource = volumeModeTestResource{} - resource.setupResource(driver, pattern) - - // Create test input - input = createVolumeModeTestInput(pattern, resource) - }) - - AfterEach(func() { - if needsCleanup { - resource.cleanupResource(driver, pattern) - } - }) - - testFunc(&input) - }) -} - -type volumeModeTestResource struct { - driver TestDriver - - sc *storagev1.StorageClass - pvc *v1.PersistentVolumeClaim - pv *v1.PersistentVolume - - driverTestResource interface{} -} - -var _ TestResource = &volumeModeTestResource{} - -func (s *volumeModeTestResource) setupResource(driver TestDriver, pattern testpatterns.TestPattern) { - s.driver = driver - dInfo := driver.GetDriverInfo() - f := dInfo.Config.Framework - ns := f.Namespace - fsType := pattern.FsType - volBindMode := storagev1.VolumeBindingImmediate - volMode := pattern.VolMode - volType := pattern.VolType - var ( - scName string - pvSource *v1.PersistentVolumeSource - volumeNodeAffinity *v1.VolumeNodeAffinity + dInfo = driver.GetDriverInfo() + l local ) - // Create volume for pre-provisioned volume tests - s.driverTestResource = CreateVolume(driver, volType) + // No preconditions to test. Normally they would be in a BeforeEach here. 
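+ // Which Its get registered is decided statically below, based on the test pattern and the driver's block-volume capability.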
- switch volType { - case testpatterns.PreprovisionedPV: - if volMode == v1.PersistentVolumeBlock { - scName = fmt.Sprintf("%s-%s-sc-for-block", ns.Name, dInfo.Name) - } else if volMode == v1.PersistentVolumeFilesystem { - scName = fmt.Sprintf("%s-%s-sc-for-file", ns.Name, dInfo.Name) - } - if pDriver, ok := driver.(PreprovisionedPVTestDriver); ok { - pvSource, volumeNodeAffinity = pDriver.GetPersistentVolumeSource(false, fsType, s.driverTestResource) - if pvSource == nil { - framework.Skipf("Driver %q does not define PersistentVolumeSource - skipping", dInfo.Name) + // This intentionally comes after checking the preconditions because it + // registers its own BeforeEach which creates the namespace. Beware that it + // also registers an AfterEach which renders f unusable. Any code using + // f must run inside an It or Context callback. + f := framework.NewDefaultFramework("volumemode") + + init := func() { + l = local{} + l.ns = f.Namespace + l.cs = f.ClientSet + + // Now do the more expensive test initialization. + l.config, l.testCleanup = driver.PrepareTest(f) + + fsType := pattern.FsType + volBindMode := storagev1.VolumeBindingImmediate + + var ( + scName string + pvSource *v1.PersistentVolumeSource + volumeNodeAffinity *v1.VolumeNodeAffinity + ) + + // Create volume for pre-provisioned volume tests + l.volume = CreateVolume(driver, l.config, pattern.VolType) + + switch pattern.VolType { + case testpatterns.PreprovisionedPV: + if pattern.VolMode == v1.PersistentVolumeBlock { + scName = fmt.Sprintf("%s-%s-sc-for-block", l.ns.Name, dInfo.Name) + } else if pattern.VolMode == v1.PersistentVolumeFilesystem { + scName = fmt.Sprintf("%s-%s-sc-for-file", l.ns.Name, dInfo.Name) } + if pDriver, ok := driver.(PreprovisionedPVTestDriver); ok { + pvSource, volumeNodeAffinity = pDriver.GetPersistentVolumeSource(false, fsType, l.volume) + if pvSource == nil { + framework.Skipf("Driver %q does not define PersistentVolumeSource - skipping", dInfo.Name) + } - sc, pvConfig, pvcConfig := generateConfigsForPreprovisionedPVTest(scName, volBindMode, volMode, *pvSource, volumeNodeAffinity) - s.sc = sc - s.pv = framework.MakePersistentVolume(pvConfig) - s.pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns.Name) + storageClass, pvConfig, pvcConfig := generateConfigsForPreprovisionedPVTest(scName, volBindMode, pattern.VolMode, *pvSource, volumeNodeAffinity) + l.sc = storageClass + l.pv = framework.MakePersistentVolume(pvConfig) + l.pvc = framework.MakePersistentVolumeClaim(pvcConfig, l.ns.Name) + } + case testpatterns.DynamicPV: + if dDriver, ok := driver.(DynamicPVTestDriver); ok { + l.sc = dDriver.GetDynamicProvisionStorageClass(l.config, fsType) + if l.sc == nil { + framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name) + } + l.sc.VolumeBindingMode = &volBindMode + + claimSize := dDriver.GetClaimSize() + l.pvc = getClaim(claimSize, l.ns.Name) + l.pvc.Spec.StorageClassName = &l.sc.Name + l.pvc.Spec.VolumeMode = &pattern.VolMode + } + default: + framework.Failf("Volume mode test doesn't support: %s", pattern.VolType) + } + } + + cleanup := func() { + if l.pv != nil || l.pvc != nil { + By("Deleting pv and pvc") + errs := framework.PVPVCCleanup(f.ClientSet, f.Namespace.Name, l.pv, l.pvc) + if len(errs) > 0 { + framework.Logf("Failed to delete PV and/or PVC: %v", utilerrors.NewAggregate(errs)) + } + l.pv = nil + l.pvc = nil + } + + if l.sc != nil { + By("Deleting sc") + deleteStorageClass(f.ClientSet, l.sc.Name) + l.sc = nil + } + + if l.volume != nil { + 
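// Pre-provisioned volumes are owned by the test and must be removed explicitly via the TestVolume interface.
+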
l.volume.DeleteVolume() + l.volume = nil + } + + if l.testCleanup != nil { + l.testCleanup() + l.testCleanup = nil + } + } + + // We register different tests depending on the driver + isBlockSupported := dInfo.Capabilities[CapBlock] + switch pattern.VolType { + case testpatterns.PreprovisionedPV: + if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported { + It("should fail to create pod by failing to mount volume", func() { + init() + defer cleanup() + + var err error + + By("Creating sc") + l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc) + Expect(err).NotTo(HaveOccurred()) + + By("Creating pv and pvc") + l.pv, err = l.cs.CoreV1().PersistentVolumes().Create(l.pv) + Expect(err).NotTo(HaveOccurred()) + + // Prebind pv + l.pvc.Spec.VolumeName = l.pv.Name + l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc) + Expect(err).NotTo(HaveOccurred()) + + framework.ExpectNoError(framework.WaitOnPVandPVC(l.cs, l.ns.Name, l.pv, l.pvc)) + + By("Creating pod") + pod, err := framework.CreateSecPodWithNodeName(l.cs, l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc}, + false, "", false, false, framework.SELinuxLabel, + nil, l.config.ClientNodeName, framework.PodStartTimeout) + defer func() { + framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod)) + }() + Expect(err).To(HaveOccurred()) + }) + } else { + It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() { + init() + defer cleanup() + + var err error + + By("Creating sc") + l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc) + Expect(err).NotTo(HaveOccurred()) + + By("Creating pv and pvc") + l.pv, err = l.cs.CoreV1().PersistentVolumes().Create(l.pv) + Expect(err).NotTo(HaveOccurred()) + + // Prebind pv + l.pvc.Spec.VolumeName = l.pv.Name + l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc) + Expect(err).NotTo(HaveOccurred()) + + framework.ExpectNoError(framework.WaitOnPVandPVC(l.cs, l.ns.Name, l.pv, l.pvc)) + + By("Creating pod") + pod, err := framework.CreateSecPodWithNodeName(l.cs, l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc}, + false, "", false, false, framework.SELinuxLabel, + nil, l.config.ClientNodeName, framework.PodStartTimeout) + defer func() { + framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod)) + }() + Expect(err).NotTo(HaveOccurred()) + + By("Checking if persistent volume exists as expected volume mode") + utils.CheckVolumeModeOfPath(pod, pattern.VolMode, "/mnt/volume1") + + By("Checking if read/write to persistent volume works properly") + utils.CheckReadWriteToPath(pod, pattern.VolMode, "/mnt/volume1") + }) + // TODO(mkimuram): Add more tests } case testpatterns.DynamicPV: - if dDriver, ok := driver.(DynamicPVTestDriver); ok { - s.sc = dDriver.GetDynamicProvisionStorageClass(fsType) - if s.sc == nil { - framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name) - } - s.sc.VolumeBindingMode = &volBindMode + if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported { + It("should fail in binding dynamic provisioned PV to PVC", func() { + init() + defer cleanup() - claimSize := dDriver.GetClaimSize() - s.pvc = getClaim(claimSize, ns.Name) - s.pvc.Spec.StorageClassName = &s.sc.Name - s.pvc.Spec.VolumeMode = &volMode + var err error + + By("Creating sc") + l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc) + Expect(err).NotTo(HaveOccurred()) + + By("Creating pv and pvc") + l.pvc, err =
l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc) + Expect(err).NotTo(HaveOccurred()) + + err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, l.cs, l.pvc.Namespace, l.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout) + Expect(err).To(HaveOccurred()) + }) + } else { + It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() { + init() + defer cleanup() + + var err error + + By("Creating sc") + l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc) + Expect(err).NotTo(HaveOccurred()) + + By("Creating pv and pvc") + l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc) + Expect(err).NotTo(HaveOccurred()) + + err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, l.cs, l.pvc.Namespace, l.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout) + Expect(err).NotTo(HaveOccurred()) + + l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.pvc.Namespace).Get(l.pvc.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + l.pv, err = l.cs.CoreV1().PersistentVolumes().Get(l.pvc.Spec.VolumeName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("Creating pod") + pod, err := framework.CreateSecPodWithNodeName(l.cs, l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc}, + false, "", false, false, framework.SELinuxLabel, + nil, l.config.ClientNodeName, framework.PodStartTimeout) + defer func() { + framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod)) + }() + Expect(err).NotTo(HaveOccurred()) + + By("Checking if persistent volume exists as expected volume mode") + utils.CheckVolumeModeOfPath(pod, pattern.VolMode, "/mnt/volume1") + + By("Checking if read/write to persistent volume works properly") + utils.CheckReadWriteToPath(pod, pattern.VolMode, "/mnt/volume1") + }) + // TODO(mkimuram): Add more tests } default: - framework.Failf("Volume mode test doesn't support: %s", volType) - } -} - -func (s *volumeModeTestResource) cleanupResource(driver TestDriver, pattern testpatterns.TestPattern) { - dInfo := driver.GetDriverInfo() - f := dInfo.Config.Framework - cs := f.ClientSet - ns := f.Namespace - volType := pattern.VolType - - By("Deleting pv and pvc") - errs := framework.PVPVCCleanup(cs, ns.Name, s.pv, s.pvc) - if len(errs) > 0 { - framework.Failf("Failed to delete PV and/or PVC: %v", utilerrors.NewAggregate(errs)) - } - By("Deleting sc") - if s.sc != nil { - deleteStorageClass(cs, s.sc.Name) + framework.Failf("Volume mode test doesn't support volType: %v", pattern.VolType) } - // Cleanup volume for pre-provisioned volume tests - DeleteVolume(driver, volType, s.driverTestResource) -} - -type volumeModeTestInput struct { - f *framework.Framework - sc *storagev1.StorageClass - pvc *v1.PersistentVolumeClaim - pv *v1.PersistentVolume - testVolType testpatterns.TestVolType - nodeName string - volMode v1.PersistentVolumeMode - isBlockSupported bool -} - -func testVolumeModeFailForPreprovisionedPV(input *volumeModeTestInput) { - It("should fail to create pod by failing to mount volume", func() { - f := input.f - cs := f.ClientSet - ns := f.Namespace - var err error - - By("Creating sc") - input.sc, err = cs.StorageV1().StorageClasses().Create(input.sc) - Expect(err).NotTo(HaveOccurred()) - - By("Creating pv and pvc") - input.pv, err = cs.CoreV1().PersistentVolumes().Create(input.pv) - Expect(err).NotTo(HaveOccurred()) - - // Prebind pv - input.pvc.Spec.VolumeName = input.pv.Name - input.pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(input.pvc) - 
Expect(err).NotTo(HaveOccurred()) - - framework.ExpectNoError(framework.WaitOnPVandPVC(cs, ns.Name, input.pv, input.pvc)) - - By("Creating pod") - pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{input.pvc}, - false, "", false, false, framework.SELinuxLabel, - nil, input.nodeName, framework.PodStartTimeout) - defer func() { - framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod)) - }() - Expect(err).To(HaveOccurred()) - }) -} - -func testVolumeModeSuccessForPreprovisionedPV(input *volumeModeTestInput) { - It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() { - f := input.f - cs := f.ClientSet - ns := f.Namespace - var err error - - By("Creating sc") - input.sc, err = cs.StorageV1().StorageClasses().Create(input.sc) - Expect(err).NotTo(HaveOccurred()) - - By("Creating pv and pvc") - input.pv, err = cs.CoreV1().PersistentVolumes().Create(input.pv) - Expect(err).NotTo(HaveOccurred()) - - // Prebind pv - input.pvc.Spec.VolumeName = input.pv.Name - input.pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(input.pvc) - Expect(err).NotTo(HaveOccurred()) - - framework.ExpectNoError(framework.WaitOnPVandPVC(cs, ns.Name, input.pv, input.pvc)) - - By("Creating pod") - pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{input.pvc}, - false, "", false, false, framework.SELinuxLabel, - nil, input.nodeName, framework.PodStartTimeout) - defer func() { - framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod)) - }() - Expect(err).NotTo(HaveOccurred()) - - By("Checking if persistent volume exists as expected volume mode") - utils.CheckVolumeModeOfPath(pod, input.volMode, "/mnt/volume1") - - By("Checking if read/write to persistent volume works properly") - utils.CheckReadWriteToPath(pod, input.volMode, "/mnt/volume1") - }) - // TODO(mkimuram): Add more tests -} - -func testVolumeModeFailForDynamicPV(input *volumeModeTestInput) { - It("should fail in binding dynamic provisioned PV to PVC", func() { - f := input.f - cs := f.ClientSet - ns := f.Namespace - var err error - - By("Creating sc") - input.sc, err = cs.StorageV1().StorageClasses().Create(input.sc) - Expect(err).NotTo(HaveOccurred()) - - By("Creating pv and pvc") - input.pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(input.pvc) - Expect(err).NotTo(HaveOccurred()) - - err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, input.pvc.Namespace, input.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout) - Expect(err).To(HaveOccurred()) - }) -} - -func testVolumeModeSuccessForDynamicPV(input *volumeModeTestInput) { - It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() { - f := input.f - cs := f.ClientSet - ns := f.Namespace - var err error - - By("Creating sc") - input.sc, err = cs.StorageV1().StorageClasses().Create(input.sc) - Expect(err).NotTo(HaveOccurred()) - - By("Creating pv and pvc") - input.pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(input.pvc) - Expect(err).NotTo(HaveOccurred()) - - err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, input.pvc.Namespace, input.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout) - Expect(err).NotTo(HaveOccurred()) - - input.pvc, err = cs.CoreV1().PersistentVolumeClaims(input.pvc.Namespace).Get(input.pvc.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - - input.pv, err = 
cs.CoreV1().PersistentVolumes().Get(input.pvc.Spec.VolumeName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - - By("Creating pod") - pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{input.pvc}, - false, "", false, false, framework.SELinuxLabel, - nil, input.nodeName, framework.PodStartTimeout) - defer func() { - framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod)) - }() - Expect(err).NotTo(HaveOccurred()) - - By("Checking if persistent volume exists as expected volume mode") - utils.CheckVolumeModeOfPath(pod, input.volMode, "/mnt/volume1") - - By("Checking if read/write to persistent volume works properly") - utils.CheckReadWriteToPath(pod, input.volMode, "/mnt/volume1") - }) - // TODO(mkimuram): Add more tests } func generateConfigsForPreprovisionedPVTest(scName string, volBindMode storagev1.VolumeBindingMode, diff --git a/test/e2e/storage/testsuites/volumes.go b/test/e2e/storage/testsuites/volumes.go index 57222178eb0..3820295dfa6 100644 --- a/test/e2e/storage/testsuites/volumes.go +++ b/test/e2e/storage/testsuites/volumes.go @@ -89,101 +89,80 @@ func skipExecTest(driver TestDriver) { } } -func createVolumesTestInput(pattern testpatterns.TestPattern, resource genericVolumeTestResource) volumesTestInput { - var fsGroup *int64 - driver := resource.driver - dInfo := driver.GetDriverInfo() - f := dInfo.Config.Framework - volSource := resource.volSource +func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) { + type local struct { + config *PerTestConfig + testCleanup func() - if volSource == nil { - framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name) + resource *genericVolumeTestResource + } + var dInfo = driver.GetDriverInfo() + var l local + + // No preconditions to test. Normally they would be in a BeforeEach here. + + // This intentionally comes after checking the preconditions because it + // registers its own BeforeEach which creates the namespace. Beware that it + // also registers an AfterEach which renders f unusable. Any code using + // f must run inside an It or Context callback. + f := framework.NewDefaultFramework("volumeio") + + init := func() { + l = local{} + + // Now do the more expensive test initialization. 
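+		// PrepareTest takes over what CreateDriver/CleanupDriver used to do in
+		// BeforeEach/AfterEach: it performs the per-test driver setup and hands
+		// back the per-test config plus a cleanup callback. Both are stored in
+		// l so that cleanup() can invoke the callback once the test's own
+		// resources have been removed.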
+ l.config, l.testCleanup = driver.PrepareTest(f) + l.resource = createGenericVolumeTestResource(driver, l.config, pattern) + if l.resource.volSource == nil { + framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name) + } } - if dInfo.Capabilities[CapFsGroup] { - fsGroupVal := int64(1234) - fsGroup = &fsGroupVal + cleanup := func() { + if l.resource != nil { + l.resource.cleanupResource() + l.resource = nil + } + + if l.testCleanup != nil { + l.testCleanup() + l.testCleanup = nil + } } - return volumesTestInput{ - f: f, - name: dInfo.Name, - config: &dInfo.Config, - fsGroup: fsGroup, - resource: resource, - fsType: pattern.FsType, - tests: []framework.VolumeTest{ + It("should be mountable", func() { + skipPersistenceTest(driver) + init() + defer func() { + framework.VolumeTestCleanup(f, convertTestConfig(l.config)) + cleanup() + }() + + tests := []framework.VolumeTest{ { - Volume: *volSource, + Volume: *l.resource.volSource, File: "index.html", // Must match content ExpectedContent: fmt.Sprintf("Hello from %s from namespace %s", dInfo.Name, f.Namespace.Name), }, - }, - } -} - -func (t *volumesTestSuite) execTest(driver TestDriver, pattern testpatterns.TestPattern) { - Context(getTestNameStr(t, pattern), func() { - var ( - resource genericVolumeTestResource - input volumesTestInput - needsCleanup bool - ) - - BeforeEach(func() { - needsCleanup = false - // Skip unsupported tests to avoid unnecessary resource initialization - skipUnsupportedTest(t, driver, pattern) - needsCleanup = true - - // Setup test resource for driver and testpattern - resource = genericVolumeTestResource{} - resource.setupResource(driver, pattern) - - // Create test input - input = createVolumesTestInput(pattern, resource) - }) - - AfterEach(func() { - if needsCleanup { - resource.cleanupResource(driver, pattern) - } - }) - - testVolumes(&input) + } + config := convertTestConfig(l.config) + framework.InjectHtml(f.ClientSet, config, tests[0].Volume, tests[0].ExpectedContent) + var fsGroup *int64 + if dInfo.Capabilities[CapFsGroup] { + fsGroupVal := int64(1234) + fsGroup = &fsGroupVal + } + framework.TestVolumeClient(f.ClientSet, config, fsGroup, pattern.FsType, tests) }) -} -type volumesTestInput struct { - f *framework.Framework - name string - config *TestConfig - fsGroup *int64 - fsType string - tests []framework.VolumeTest - resource genericVolumeTestResource -} - -func testVolumes(input *volumesTestInput) { - It("should be mountable", func() { - f := input.f - cs := f.ClientSet - defer framework.VolumeTestCleanup(f, convertTestConfig(input.config)) - - skipPersistenceTest(input.resource.driver) - - volumeTest := input.tests - config := convertTestConfig(input.config) - framework.InjectHtml(cs, config, volumeTest[0].Volume, volumeTest[0].ExpectedContent) - framework.TestVolumeClient(cs, config, input.fsGroup, input.fsType, input.tests) - }) It("should allow exec of files on the volume", func() { - f := input.f - skipExecTest(input.resource.driver) + skipExecTest(driver) + init() + defer cleanup() - testScriptInPod(f, input.resource.volType, input.resource.volSource, input.resource.driver.GetDriverInfo().Config.ClientNodeSelector) + testScriptInPod(f, l.resource.volType, l.resource.volSource, l.config.ClientNodeSelector) }) } diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go index ffc6d86bf02..291fb45497b 100644 --- a/test/e2e/storage/volume_provisioning.go +++ b/test/e2e/storage/volume_provisioning.go @@ -212,21 +212,22 @@ func 
testZonalDelayedBinding(c clientset.Interface, ns string, specifyAllowedTop action := "creating claims with class with waitForFirstConsumer" suffix := "delayed" var topoZone string - class := newStorageClass(test, ns, suffix) + test.Client = c + test.Class = newStorageClass(test, ns, suffix) if specifyAllowedTopology { action += " and allowedTopologies" suffix += "-topo" topoZone = getRandomClusterZone(c) - addSingleZoneAllowedTopologyToStorageClass(c, class, topoZone) + addSingleZoneAllowedTopologyToStorageClass(c, test.Class, topoZone) } By(action) var claims []*v1.PersistentVolumeClaim for i := 0; i < pvcCount; i++ { claim := newClaim(test, ns, suffix) - claim.Spec.StorageClassName = &class.Name + claim.Spec.StorageClassName = &test.Class.Name claims = append(claims, claim) } - pvs, node := testsuites.TestBindingWaitForFirstConsumerMultiPVC(test, c, claims, class, nil /* node selector */, false /* expect unschedulable */) + pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(claims, nil /* node selector */, false /* expect unschedulable */) if node == nil { framework.Failf("unexpected nil node found") } @@ -439,10 +440,11 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { By("Testing " + test.Name) suffix := fmt.Sprintf("%d", i) - class := newStorageClass(test, ns, suffix) - claim := newClaim(test, ns, suffix) - claim.Spec.StorageClassName = &class.Name - testsuites.TestDynamicProvisioning(test, c, claim, class) + test.Client = c + test.Class = newStorageClass(test, ns, suffix) + test.Claim = newClaim(test, ns, suffix) + test.Claim.Spec.StorageClassName = &test.Class.Name + test.TestDynamicProvisioning() } // Run the last test with storage.k8s.io/v1beta1 on pvc @@ -454,9 +456,11 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { Expect(err).NotTo(HaveOccurred()) defer deleteStorageClass(c, class.Name) - claim := newClaim(*betaTest, ns, "beta") - claim.Spec.StorageClassName = &(class.Name) - testsuites.TestDynamicProvisioning(*betaTest, c, claim, nil) + betaTest.Client = c + betaTest.Class = nil + betaTest.Claim = newClaim(*betaTest, ns, "beta") + betaTest.Claim.Spec.StorageClassName = &(class.Name) + (*betaTest).TestDynamicProvisioning() } }) @@ -464,6 +468,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { framework.SkipUnlessProviderIs("gce", "gke") test := testsuites.StorageClassTest{ + Client: c, Name: "HDD PD on GCE/GKE", CloudProviders: []string{"gce", "gke"}, Provisioner: "kubernetes.io/gce-pd", @@ -478,12 +483,12 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{}) }, } - class := newStorageClass(test, ns, "reclaimpolicy") + test.Class = newStorageClass(test, ns, "reclaimpolicy") retain := v1.PersistentVolumeReclaimRetain - class.ReclaimPolicy = &retain - claim := newClaim(test, ns, "reclaimpolicy") - claim.Spec.StorageClassName = &class.Name - pv := testsuites.TestDynamicProvisioning(test, c, claim, class) + test.Class.ReclaimPolicy = &retain + test.Claim = newClaim(test, ns, "reclaimpolicy") + test.Claim.Spec.StorageClassName = &test.Class.Name + pv := test.TestDynamicProvisioning() By(fmt.Sprintf("waiting for the provisioned PV %q to enter phase %s", pv.Name, v1.VolumeReleased)) framework.ExpectNoError(framework.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 1*time.Second, 30*time.Second)) @@ -717,17 +722,18 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { By("creating a StorageClass") test := 
testsuites.StorageClassTest{ + Client: c, Name: "external provisioner test", Provisioner: externalPluginName, ClaimSize: "1500Mi", ExpectedSize: "1500Mi", } - class := newStorageClass(test, ns, "external") - claim := newClaim(test, ns, "external") - claim.Spec.StorageClassName = &(class.Name) + test.Class = newStorageClass(test, ns, "external") + test.Claim = newClaim(test, ns, "external") + test.Claim.Spec.StorageClassName = &test.Class.Name By("creating a claim with an external provisioning annotation") - testsuites.TestDynamicProvisioning(test, c, claim, class) + test.TestDynamicProvisioning() }) }) @@ -737,13 +743,14 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { By("creating a claim with no annotation") test := testsuites.StorageClassTest{ + Client: c, Name: "default", ClaimSize: "2Gi", ExpectedSize: "2Gi", } - claim := newClaim(test, ns, "default") - testsuites.TestDynamicProvisioning(test, c, claim, nil) + test.Claim = newClaim(test, ns, "default") + test.TestDynamicProvisioning() }) // Modifying the default storage class can be disruptive to other tests that depend on it @@ -816,6 +823,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { serverUrl := "http://" + pod.Status.PodIP + ":8081" By("creating a StorageClass") test := testsuites.StorageClassTest{ + Client: c, Name: "Gluster Dynamic provisioner test", Provisioner: "kubernetes.io/glusterfs", ClaimSize: "2Gi", ExpectedSize: "2Gi", Parameters: map[string]string{"resturl": serverUrl}, } suffix := fmt.Sprintf("glusterdptest") - class := newStorageClass(test, ns, suffix) + test.Class = newStorageClass(test, ns, suffix) By("creating a claim object with a suffix for gluster dynamic provisioner") - claim := newClaim(test, ns, suffix) - claim.Spec.StorageClassName = &class.Name + test.Claim = newClaim(test, ns, suffix) + test.Claim.Spec.StorageClassName = &test.Class.Name - testsuites.TestDynamicProvisioning(test, c, claim, class) + test.TestDynamicProvisioning() }) }) @@ -928,12 +936,13 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { } By("creating a claim with class with allowedTopologies set") suffix := "topology" - class := newStorageClass(test, ns, suffix) + test.Client = c + test.Class = newStorageClass(test, ns, suffix) zone := getRandomClusterZone(c) - addSingleZoneAllowedTopologyToStorageClass(c, class, zone) - claim := newClaim(test, ns, suffix) - claim.Spec.StorageClassName = &class.Name - pv := testsuites.TestDynamicProvisioning(test, c, claim, class) + addSingleZoneAllowedTopologyToStorageClass(c, test.Class, zone) + test.Claim = newClaim(test, ns, suffix) + test.Claim.Spec.StorageClassName = &test.Class.Name + pv := test.TestDynamicProvisioning() checkZoneFromLabelAndAffinity(pv, zone, true) } })
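After this change a dynamic-provisioning test is configured entirely on the StorageClassTest value and then run as a method on it. A minimal sketch of the new call pattern, mirroring the hunks above (the "example" suffix and the sizes are illustrative, not taken from the patch):

	test := testsuites.StorageClassTest{
		Client:       c, // clientset used by the provisioning checks
		Name:         "example",
		ClaimSize:    "2Gi",
		ExpectedSize: "2Gi",
	}
	test.Class = newStorageClass(test, ns, "example")
	test.Claim = newClaim(test, ns, "example")
	test.Claim.Spec.StorageClassName = &test.Class.Name
	pv := test.TestDynamicProvisioning() // returns the provisioned PV for further checks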