diff --git a/test/e2e/framework/volume/BUILD b/test/e2e/framework/volume/BUILD
index 171c9d61ed6..eb698bba139 100644
--- a/test/e2e/framework/volume/BUILD
+++ b/test/e2e/framework/volume/BUILD
@@ -12,12 +12,13 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
+        "//staging/src/k8s.io/client-go/util/exec:go_default_library",
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
-        "//test/e2e/storage/utils:go_default_library",
         "//test/utils/image:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
         "//vendor/github.com/onsi/gomega:go_default_library",
+        "//vendor/k8s.io/utils/exec:go_default_library",
     ],
 )
diff --git a/test/e2e/framework/volume/fixtures.go b/test/e2e/framework/volume/fixtures.go
index 3fe46fd2831..f19c5690d10 100644
--- a/test/e2e/framework/volume/fixtures.go
+++ b/test/e2e/framework/volume/fixtures.go
@@ -52,10 +52,11 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
     clientset "k8s.io/client-go/kubernetes"
+    clientexec "k8s.io/client-go/util/exec"
     "k8s.io/kubernetes/test/e2e/framework"
     e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
-    "k8s.io/kubernetes/test/e2e/storage/utils"
     imageutils "k8s.io/kubernetes/test/utils/image"
+    uexec "k8s.io/utils/exec"

     "github.com/onsi/ginkgo"
     "github.com/onsi/gomega"
@@ -462,7 +463,7 @@ func testVolumeContent(f *framework.Framework, pod *v1.Pod, fsGroup *int64, fsTy
             framework.ExpectNoError(err, "failed: finding the contents of the block device %s.", deviceName)

             // Check that it's a real block device
-            utils.CheckVolumeModeOfPath(f, pod, test.Mode, deviceName)
+            CheckVolumeModeOfPath(f, pod, test.Mode, deviceName)
         } else {
             // Filesystem: check content
             fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
@@ -472,7 +473,7 @@ func testVolumeContent(f *framework.Framework, pod *v1.Pod, fsGroup *int64, fsTy

             // Check that a directory has been mounted
             dirName := filepath.Dir(fileName)
-            utils.CheckVolumeModeOfPath(f, pod, test.Mode, dirName)
+            CheckVolumeModeOfPath(f, pod, test.Mode, dirName)

             if !framework.NodeOSDistroIs("windows") {
                 // Filesystem: check fsgroup
@@ -698,3 +699,71 @@ func GetLinuxLabel() *v1.SELinuxOptions {
     return &v1.SELinuxOptions{
         Level: "s0:c0,c1"}
 }
+
+// CheckVolumeModeOfPath checks the mode of the volume at the given path
+func CheckVolumeModeOfPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
+    if volMode == v1.PersistentVolumeBlock {
+        // Check if block exists
+        VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -b %s", path))
+
+        // Double check that it's not a directory
+        VerifyExecInPodFail(f, pod, fmt.Sprintf("test -d %s", path), 1)
+    } else {
+        // Check if directory exists
+        VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -d %s", path))
+
+        // Double check that it's not a block device
+        VerifyExecInPodFail(f, pod, fmt.Sprintf("test -b %s", path), 1)
+    }
+}
+
+// PodExec runs f.ExecCommandInContainerWithFullOutput to execute a shell cmd in the target pod.
+// TODO: put this under e2epod once https://github.com/kubernetes/kubernetes/issues/81245
+// is resolved. Otherwise there will be a dependency issue.
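+//
+// Illustrative usage (a sketch; /mnt/volume1 is a hypothetical path):
+//
+//	stdout, stderr, err := PodExec(f, pod, "test -b /mnt/volume1 && echo block")
+//
+// On Windows nodes the command runs via `powershell /c`; everywhere else it
+// runs via `/bin/sh -c`, in the pod's first container.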
+func PodExec(f *framework.Framework, pod *v1.Pod, shExec string) (string, string, error) {
+    if framework.NodeOSDistroIs("windows") {
+        return f.ExecCommandInContainerWithFullOutput(pod.Name, pod.Spec.Containers[0].Name, "powershell", "/c", shExec)
+    }
+    return f.ExecCommandInContainerWithFullOutput(pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", shExec)
+}
+
+// VerifyExecInPodSucceed verifies that a shell cmd in the target pod succeeds
+// TODO: put this under e2epod once https://github.com/kubernetes/kubernetes/issues/81245
+// is resolved. Otherwise there will be a dependency issue.
+func VerifyExecInPodSucceed(f *framework.Framework, pod *v1.Pod, shExec string) {
+    stdout, stderr, err := PodExec(f, pod, shExec)
+    if err != nil {
+        if exiterr, ok := err.(uexec.CodeExitError); ok {
+            exitCode := exiterr.ExitStatus()
+            framework.ExpectNoError(err,
+                "%q should succeed, but failed with exit code %d and error message %q\nstdout: %s\nstderr: %s",
+                shExec, exitCode, exiterr, stdout, stderr)
+        } else {
+            framework.ExpectNoError(err,
+                "%q should succeed, but failed with error message %q\nstdout: %s\nstderr: %s",
+                shExec, err, stdout, stderr)
+        }
+    }
+}
+
+// VerifyExecInPodFail verifies that a shell cmd in the target pod fails with the given exit code
+// TODO: put this under e2epod once https://github.com/kubernetes/kubernetes/issues/81245
+// is resolved. Otherwise there will be a dependency issue.
+func VerifyExecInPodFail(f *framework.Framework, pod *v1.Pod, shExec string, exitCode int) {
+    stdout, stderr, err := PodExec(f, pod, shExec)
+    if err != nil {
+        if exiterr, ok := err.(clientexec.ExitError); ok {
+            actualExitCode := exiterr.ExitStatus()
+            framework.ExpectEqual(actualExitCode, exitCode,
+                "%q should fail with exit code %d, but failed with exit code %d and error message %q\nstdout: %s\nstderr: %s",
+                shExec, exitCode, actualExitCode, exiterr, stdout, stderr)
+        } else {
+            framework.ExpectNoError(err,
+                "%q should fail with exit code %d, but failed with error message %q\nstdout: %s\nstderr: %s",
+                shExec, exitCode, err, stdout, stderr)
+        }
+    }
+    framework.ExpectError(err, "%q should fail with exit code %d, but exited without error", shExec, exitCode)
+}
diff --git a/test/e2e/storage/BUILD b/test/e2e/storage/BUILD
index 6123ed8d2d6..4e723de7742 100644
--- a/test/e2e/storage/BUILD
+++ b/test/e2e/storage/BUILD
@@ -89,7 +89,7 @@ go_library(
         "//test/e2e/framework/testfiles:go_default_library",
         "//test/e2e/framework/volume:go_default_library",
         "//test/e2e/storage/drivers:go_default_library",
-        "//test/e2e/storage/testpatterns:go_default_library",
+        "//test/e2e/storage/framework:go_default_library",
         "//test/e2e/storage/testsuites:go_default_library",
         "//test/e2e/storage/utils:go_default_library",
         "//test/utils/image:go_default_library",
@@ -117,8 +117,8 @@ filegroup(
         ":package-srcs",
         "//test/e2e/storage/drivers:all-srcs",
         "//test/e2e/storage/external:all-srcs",
+        "//test/e2e/storage/framework:all-srcs",
         "//test/e2e/storage/podlogs:all-srcs",
-        "//test/e2e/storage/testpatterns:all-srcs",
         "//test/e2e/storage/testsuites:all-srcs",
         "//test/e2e/storage/utils:all-srcs",
         "//test/e2e/storage/vsphere:all-srcs",
diff --git a/test/e2e/storage/csi_mock_volume.go b/test/e2e/storage/csi_mock_volume.go
index e3a40e64e48..320da7b77c0 100644
--- a/test/e2e/storage/csi_mock_volume.go
+++ b/test/e2e/storage/csi_mock_volume.go
@@ -52,7 +52,7 @@ import (
     e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
     e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
     "k8s.io/kubernetes/test/e2e/storage/drivers"
-    "k8s.io/kubernetes/test/e2e/storage/testpatterns"
+    storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
     "k8s.io/kubernetes/test/e2e/storage/testsuites"
     "k8s.io/kubernetes/test/e2e/storage/utils"
     imageutils "k8s.io/kubernetes/test/utils/image"
@@ -121,13 +121,13 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
     type mockDriverSetup struct {
         cs           clientset.Interface
-        config       *testsuites.PerTestConfig
+        config       *storageframework.PerTestConfig
         testCleanups []func()
         pods         []*v1.Pod
         pvcs         []*v1.PersistentVolumeClaim
         sc           map[string]*storagev1.StorageClass
         vsc          map[string]*unstructured.Unstructured
-        driver       testsuites.TestDriver
+        driver       storageframework.TestDriver
         provisioner  string
         tp           testParameters
     }
@@ -189,7 +189,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
     createPod := func(ephemeral bool) (class *storagev1.StorageClass, claim *v1.PersistentVolumeClaim, pod *v1.Pod) {
         ginkgo.By("Creating pod")
         var sc *storagev1.StorageClass
-        if dDriver, ok := m.driver.(testsuites.DynamicPVTestDriver); ok {
+        if dDriver, ok := m.driver.(storageframework.DynamicPVTestDriver); ok {
             sc = dDriver.GetDynamicProvisionStorageClass(m.config, "")
         }
         scTest := testsuites.StorageClassTest{
@@ -238,7 +238,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
         ginkgo.By("Creating pod with fsGroup")
         nodeSelection := m.config.ClientNodeSelection
         var sc *storagev1.StorageClass
-        if dDriver, ok := m.driver.(testsuites.DynamicPVTestDriver); ok {
+        if dDriver, ok := m.driver.(storageframework.DynamicPVTestDriver); ok {
             sc = dDriver.GetDynamicProvisionStorageClass(m.config, "")
         }
         scTest := testsuites.StorageClassTest{
@@ -296,7 +296,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {

         for _, vsc := range m.vsc {
             ginkgo.By(fmt.Sprintf("Deleting volumesnapshotclass %s", vsc.GetName()))
-            m.config.Framework.DynamicClient.Resource(testsuites.SnapshotClassGVR).Delete(context.TODO(), vsc.GetName(), metav1.DeleteOptions{})
+            m.config.Framework.DynamicClient.Resource(utils.SnapshotClassGVR).Delete(context.TODO(), vsc.GetName(), metav1.DeleteOptions{})
         }
         ginkgo.By("Cleaning up resources")
         for _, cleanupFunc := range m.testCleanups {
@@ -1247,7 +1247,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
                 enableSnapshot:  true,
                 javascriptHooks: scripts,
             })
-            sDriver, ok := m.driver.(testsuites.SnapshottableTestDriver)
+            sDriver, ok := m.driver.(storageframework.SnapshottableTestDriver)
             if !ok {
                 e2eskipper.Skipf("mock driver %s does not support snapshots -- skipping",
                     m.driver.GetDriverInfo().Name)
@@ -1257,7 +1257,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
             defer cleanup()

             var sc *storagev1.StorageClass
-            if dDriver, ok := m.driver.(testsuites.DynamicPVTestDriver); ok {
+            if dDriver, ok := m.driver.(storageframework.DynamicPVTestDriver); ok {
                 sc = dDriver.GetDynamicProvisionStorageClass(m.config, "")
             }
             ginkgo.By("Creating storage class")
@@ -1272,7 +1272,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {

             ginkgo.By("Creating snapshot")
             // TODO: Test VolumeSnapshots with Retain policy
-            snapshotClass, snapshot := testsuites.CreateSnapshot(sDriver, m.config, testpatterns.DynamicSnapshotDelete, claim.Name, claim.Namespace, f.Timeouts)
+            snapshotClass, snapshot := storageframework.CreateSnapshot(sDriver, m.config, storageframework.DynamicSnapshotDelete, claim.Name, claim.Namespace, f.Timeouts)
             framework.ExpectNoError(err, "failed to create snapshot")
             m.vsc[snapshotClass.GetName()] = snapshotClass
             volumeSnapshotName := snapshot.GetName()
@@ -1306,19 +1306,19 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
             }

             ginkgo.By(fmt.Sprintf("Get VolumeSnapshotContent bound to VolumeSnapshot %s", snapshot.GetName()))
-            snapshotContent := testsuites.GetSnapshotContentFromSnapshot(m.config.Framework.DynamicClient, snapshot)
+            snapshotContent := utils.GetSnapshotContentFromSnapshot(m.config.Framework.DynamicClient, snapshot)
             volumeSnapshotContentName := snapshotContent.GetName()

             ginkgo.By(fmt.Sprintf("Verify VolumeSnapshotContent %s contains finalizer %s", snapshot.GetName(), volumeSnapshotContentFinalizer))
-            err = utils.WaitForGVRFinalizer(ctx, m.config.Framework.DynamicClient, testsuites.SnapshotContentGVR, volumeSnapshotContentName, "", volumeSnapshotContentFinalizer, 1*time.Millisecond, 1*time.Minute)
+            err = utils.WaitForGVRFinalizer(ctx, m.config.Framework.DynamicClient, utils.SnapshotContentGVR, volumeSnapshotContentName, "", volumeSnapshotContentFinalizer, 1*time.Millisecond, 1*time.Minute)
             framework.ExpectNoError(err)

             ginkgo.By(fmt.Sprintf("Delete VolumeSnapshotContent %s", snapshotContent.GetName()))
-            err = m.config.Framework.DynamicClient.Resource(testsuites.SnapshotContentGVR).Delete(ctx, snapshotContent.GetName(), metav1.DeleteOptions{})
+            err = m.config.Framework.DynamicClient.Resource(utils.SnapshotContentGVR).Delete(ctx, snapshotContent.GetName(), metav1.DeleteOptions{})
             framework.ExpectNoError(err, "Failed to delete snapshotcontent: %v", err)

             ginkgo.By("Get VolumeSnapshotContent from API server and verify deletion timestamp is set")
-            snapshotContent, err = m.config.Framework.DynamicClient.Resource(testsuites.SnapshotContentGVR).Get(context.TODO(), snapshotContent.GetName(), metav1.GetOptions{})
+            snapshotContent, err = m.config.Framework.DynamicClient.Resource(utils.SnapshotContentGVR).Get(context.TODO(), snapshotContent.GetName(), metav1.GetOptions{})
             framework.ExpectNoError(err)

             if snapshotContent.GetDeletionTimestamp() == nil {
@@ -1332,15 +1332,15 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
             }

             ginkgo.By(fmt.Sprintf("Verify VolumeSnapshot %s contains finalizer %s", snapshot.GetName(), volumeSnapshotBoundFinalizer))
-            err = utils.WaitForGVRFinalizer(ctx, m.config.Framework.DynamicClient, testsuites.SnapshotGVR, volumeSnapshotName, f.Namespace.Name, volumeSnapshotBoundFinalizer, 1*time.Millisecond, 1*time.Minute)
+            err = utils.WaitForGVRFinalizer(ctx, m.config.Framework.DynamicClient, utils.SnapshotGVR, volumeSnapshotName, f.Namespace.Name, volumeSnapshotBoundFinalizer, 1*time.Millisecond, 1*time.Minute)
             framework.ExpectNoError(err)

             ginkgo.By("Delete VolumeSnapshot")
-            err = testsuites.DeleteAndWaitSnapshot(m.config.Framework.DynamicClient, f.Namespace.Name, volumeSnapshotName, framework.Poll, framework.SnapshotDeleteTimeout)
+            err = utils.DeleteAndWaitSnapshot(m.config.Framework.DynamicClient, f.Namespace.Name, volumeSnapshotName, framework.Poll, framework.SnapshotDeleteTimeout)
             framework.ExpectNoError(err, fmt.Sprintf("failed to delete VolumeSnapshot %s", volumeSnapshotName))

             ginkgo.By(fmt.Sprintf("Wait for VolumeSnapshotContent %s to be deleted", volumeSnapshotContentName))
-            err = utils.WaitForGVRDeletion(m.config.Framework.DynamicClient, testsuites.SnapshotContentGVR, volumeSnapshotContentName, framework.Poll, framework.SnapshotDeleteTimeout)
+            err = utils.WaitForGVRDeletion(m.config.Framework.DynamicClient, utils.SnapshotContentGVR, volumeSnapshotContentName, framework.Poll, framework.SnapshotDeleteTimeout)
             framework.ExpectNoError(err, fmt.Sprintf("failed to delete VolumeSnapshotContent %s", volumeSnapshotContentName))
         })
     }
@@ -1462,19 +1462,19 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {

             // Create the subdirectory to ensure that fsGroup propagates
             createDirectory := fmt.Sprintf("mkdir %s", dirName)
-            _, _, err = utils.PodExec(f, pod, createDirectory)
+            _, _, err = e2evolume.PodExec(f, pod, createDirectory)
             framework.ExpectNoError(err, "failed: creating the directory: %s", err)

             // Inject the contents onto the mount
             createFile := fmt.Sprintf("echo '%s' > '%s'; sync", "filecontents", fileName)
-            _, _, err = utils.PodExec(f, pod, createFile)
+            _, _, err = e2evolume.PodExec(f, pod, createFile)
             framework.ExpectNoError(err, "failed: writing the contents: %s", err)

             // Delete the created file. This step is mandatory, as the mock driver
             // won't clean up the contents automatically.
             defer func() {
                 delete := fmt.Sprintf("rm -fr %s", dirName)
-                _, _, err = utils.PodExec(f, pod, delete)
+                _, _, err = e2evolume.PodExec(f, pod, delete)
                 framework.ExpectNoError(err, "failed: deleting the directory: %s", err)
             }()
diff --git a/test/e2e/storage/csi_volumes.go b/test/e2e/storage/csi_volumes.go
index b2030b4069f..33cd4f897aa 100644
--- a/test/e2e/storage/csi_volumes.go
+++ b/test/e2e/storage/csi_volumes.go
@@ -18,6 +18,7 @@ package storage

 import (
     "k8s.io/kubernetes/test/e2e/storage/drivers"
+    storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
     "k8s.io/kubernetes/test/e2e/storage/testsuites"
     "k8s.io/kubernetes/test/e2e/storage/utils"

@@ -25,7 +26,7 @@ import (
 )

 // List of testDrivers to be executed in below loop
-var csiTestDrivers = []func() testsuites.TestDriver{
+var csiTestDrivers = []func() storageframework.TestDriver{
     drivers.InitHostPathCSIDriver,
     drivers.InitGcePDCSIDriver,
     // Don't run tests with mock driver (drivers.InitMockCSIDriver), it does not provide persistent storage.
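For orientation, the registration loop that the next hunk migrates looks roughly like this after the change — a sketch built only from identifiers visible in this diff, with myInitDriver standing in as a hypothetical func() storageframework.TestDriver:

    var _ = utils.SIGDescribe("My CSI driver", func() {
        curDriver := myInitDriver()
        ginkgo.Context(storageframework.GetDriverNameWithFeatureTags(curDriver), func() {
            storageframework.DefineTestSuites(curDriver, testsuites.CSISuites)
        })
    })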
@@ -36,8 +37,8 @@ var _ = utils.SIGDescribe("CSI Volumes", func() {
     for _, initDriver := range csiTestDrivers {
         curDriver := initDriver()

-        ginkgo.Context(testsuites.GetDriverNameWithFeatureTags(curDriver), func() {
-            testsuites.DefineTestSuite(curDriver, testsuites.CSISuites)
+        ginkgo.Context(storageframework.GetDriverNameWithFeatureTags(curDriver), func() {
+            storageframework.DefineTestSuites(curDriver, testsuites.CSISuites)
         })
     }
 })
diff --git a/test/e2e/storage/drivers/BUILD b/test/e2e/storage/drivers/BUILD
index 56a87c5d490..5ea5a55dd69 100644
--- a/test/e2e/storage/drivers/BUILD
+++ b/test/e2e/storage/drivers/BUILD
@@ -29,8 +29,7 @@ go_library(
         "//test/e2e/framework/pv:go_default_library",
         "//test/e2e/framework/skipper:go_default_library",
         "//test/e2e/framework/volume:go_default_library",
-        "//test/e2e/storage/testpatterns:go_default_library",
-        "//test/e2e/storage/testsuites:go_default_library",
+        "//test/e2e/storage/framework:go_default_library",
         "//test/e2e/storage/utils:go_default_library",
         "//test/e2e/storage/vsphere:go_default_library",
         "//test/utils/image:go_default_library",
diff --git a/test/e2e/storage/drivers/csi.go b/test/e2e/storage/drivers/csi.go
index 03ec35e557c..901ccc33657 100644
--- a/test/e2e/storage/drivers/csi.go
+++ b/test/e2e/storage/drivers/csi.go
@@ -57,8 +57,7 @@ import (
     e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
     e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
-    "k8s.io/kubernetes/test/e2e/storage/testpatterns"
-    "k8s.io/kubernetes/test/e2e/storage/testsuites"
+    storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
     "k8s.io/kubernetes/test/e2e/storage/utils"
 )

@@ -71,18 +70,18 @@ const (

 // hostpathCSI
 type hostpathCSIDriver struct {
-    driverInfo       testsuites.DriverInfo
+    driverInfo       storageframework.DriverInfo
     manifests        []string
     cleanupHandle    framework.CleanupActionHandle
     volumeAttributes []map[string]string
 }

-func initHostPathCSIDriver(name string, capabilities map[testsuites.Capability]bool, volumeAttributes []map[string]string, manifests ...string) testsuites.TestDriver {
+func initHostPathCSIDriver(name string, capabilities map[storageframework.Capability]bool, volumeAttributes []map[string]string, manifests ...string) storageframework.TestDriver {
     return &hostpathCSIDriver{
-        driverInfo: testsuites.DriverInfo{
+        driverInfo: storageframework.DriverInfo{
             Name:        name,
             FeatureTag:  "",
-            MaxFileSize: testpatterns.FileSizeMedium,
+            MaxFileSize: storageframework.FileSizeMedium,
             SupportedFsType: sets.NewString(
                 "", // Default fsType
             ),
@@ -90,11 +89,11 @@ func initHostPathCSIDriver(name string, capabilities map[testsuites.Capability]b
                 Min: "1Mi",
             },
             Capabilities: capabilities,
-            StressTestOptions: &testsuites.StressTestOptions{
+            StressTestOptions: &storageframework.StressTestOptions{
                 NumPods:     10,
                 NumRestarts: 10,
             },
-            VolumeSnapshotStressTestOptions: &testsuites.VolumeSnapshotStressTestOptions{
+            VolumeSnapshotStressTestOptions: &storageframework.VolumeSnapshotStressTestOptions{
                 NumPods:      10,
                 NumSnapshots: 10,
             },
@@ -104,22 +103,22 @@ func initHostPathCSIDriver(name string, capabilities map[testsuites.Capability]b
     }
 }

-var _ testsuites.TestDriver = &hostpathCSIDriver{}
-var _ testsuites.DynamicPVTestDriver = &hostpathCSIDriver{}
-var _ testsuites.SnapshottableTestDriver = &hostpathCSIDriver{}
-var _ testsuites.EphemeralTestDriver = &hostpathCSIDriver{}
+var _ storageframework.TestDriver = &hostpathCSIDriver{}
+var _ storageframework.DynamicPVTestDriver = &hostpathCSIDriver{}
+var _ storageframework.SnapshottableTestDriver = &hostpathCSIDriver{}
+var _ storageframework.EphemeralTestDriver = &hostpathCSIDriver{}

 // InitHostPathCSIDriver returns hostpathCSIDriver that implements TestDriver interface
-func InitHostPathCSIDriver() testsuites.TestDriver {
-    capabilities := map[testsuites.Capability]bool{
-        testsuites.CapPersistence:         true,
-        testsuites.CapSnapshotDataSource:  true,
-        testsuites.CapMultiPODs:           true,
-        testsuites.CapBlock:               true,
-        testsuites.CapPVCDataSource:       true,
-        testsuites.CapControllerExpansion: true,
-        testsuites.CapSingleNodeVolume:    true,
-        testsuites.CapVolumeLimits:        true,
+func InitHostPathCSIDriver() storageframework.TestDriver {
+    capabilities := map[storageframework.Capability]bool{
+        storageframework.CapPersistence:         true,
+        storageframework.CapSnapshotDataSource:  true,
+        storageframework.CapMultiPODs:           true,
+        storageframework.CapBlock:               true,
+        storageframework.CapPVCDataSource:       true,
+        storageframework.CapControllerExpansion: true,
+        storageframework.CapSingleNodeVolume:    true,
+        storageframework.CapVolumeLimits:        true,
     }
     return initHostPathCSIDriver("csi-hostpath",
         capabilities,
@@ -141,56 +140,56 @@ func InitHostPathCSIDriver() testsuites.TestDriver {
     )
 }

-func (h *hostpathCSIDriver) GetDriverInfo() *testsuites.DriverInfo {
+func (h *hostpathCSIDriver) GetDriverInfo() *storageframework.DriverInfo {
     return &h.driverInfo
 }

-func (h *hostpathCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
-    if pattern.VolType == testpatterns.CSIInlineVolume && len(h.volumeAttributes) == 0 {
+func (h *hostpathCSIDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
+    if pattern.VolType == storageframework.CSIInlineVolume && len(h.volumeAttributes) == 0 {
         e2eskipper.Skipf("%s has no volume attributes defined, doesn't support ephemeral inline volumes", h.driverInfo.Name)
     }
 }

-func (h *hostpathCSIDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
+func (h *hostpathCSIDriver) GetDynamicProvisionStorageClass(config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass {
     provisioner := config.GetUniqueDriverName()
     parameters := map[string]string{}
     ns := config.Framework.Namespace.Name
     suffix := fmt.Sprintf("%s-sc", provisioner)

-    return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
+    return storageframework.GetStorageClass(provisioner, parameters, nil, ns, suffix)
 }

-func (h *hostpathCSIDriver) GetVolume(config *testsuites.PerTestConfig, volumeNumber int) (map[string]string, bool, bool) {
+func (h *hostpathCSIDriver) GetVolume(config *storageframework.PerTestConfig, volumeNumber int) (map[string]string, bool, bool) {
     return h.volumeAttributes[volumeNumber%len(h.volumeAttributes)], false /* not shared */, false /* read-write */
 }

-func (h *hostpathCSIDriver) GetCSIDriverName(config *testsuites.PerTestConfig) string {
+func (h *hostpathCSIDriver) GetCSIDriverName(config *storageframework.PerTestConfig) string {
     return config.GetUniqueDriverName()
 }

-func (h *hostpathCSIDriver) GetSnapshotClass(config *testsuites.PerTestConfig) *unstructured.Unstructured {
+func (h *hostpathCSIDriver) GetSnapshotClass(config *storageframework.PerTestConfig) *unstructured.Unstructured {
     snapshotter := config.GetUniqueDriverName()
     parameters := map[string]string{}
     ns := config.Framework.Namespace.Name
     suffix := fmt.Sprintf("%s-vsc", snapshotter)

-    return testsuites.GetSnapshotClass(snapshotter, parameters, ns, suffix)
+    return utils.GenerateSnapshotClassSpec(snapshotter, parameters, ns, suffix)
 }

-func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
+func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) {
     // Create secondary namespace which will be used for creating driver
     driverNamespace := utils.CreateDriverNamespace(f)
     ns2 := driverNamespace.Name
     ns1 := f.Namespace.Name

     ginkgo.By(fmt.Sprintf("deploying %s driver", h.driverInfo.Name))
-    cancelLogging := testsuites.StartPodLogs(f, driverNamespace)
+    cancelLogging := utils.StartPodLogs(f, driverNamespace)
     cs := f.ClientSet

     // The hostpath CSI driver only works when everything runs on the same node.
     node, err := e2enode.GetRandomReadySchedulableNode(cs)
     framework.ExpectNoError(err)
-    config := &testsuites.PerTestConfig{
+    config := &storageframework.PerTestConfig{
         Driver:    h,
         Prefix:    "hostpath",
         Framework: f,
@@ -243,7 +242,7 @@ func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.Per

 // mockCSI
 type mockCSIDriver struct {
-    driverInfo      testsuites.DriverInfo
+    driverInfo      storageframework.DriverInfo
     manifests       []string
     podInfo         *bool
     storageCapacity *bool
@@ -275,12 +274,12 @@ type CSIMockDriverOpts struct {
     FSGroupPolicy *storagev1.FSGroupPolicy
 }

-var _ testsuites.TestDriver = &mockCSIDriver{}
-var _ testsuites.DynamicPVTestDriver = &mockCSIDriver{}
-var _ testsuites.SnapshottableTestDriver = &mockCSIDriver{}
+var _ storageframework.TestDriver = &mockCSIDriver{}
+var _ storageframework.DynamicPVTestDriver = &mockCSIDriver{}
+var _ storageframework.SnapshottableTestDriver = &mockCSIDriver{}

 // InitMockCSIDriver returns a mockCSIDriver that implements TestDriver interface
-func InitMockCSIDriver(driverOpts CSIMockDriverOpts) testsuites.TestDriver {
+func InitMockCSIDriver(driverOpts CSIMockDriverOpts) storageframework.TestDriver {
     driverManifests := []string{
         "test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
         "test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml",
@@ -308,18 +307,18 @@ func InitMockCSIDriver(driverOpts CSIMockDriverOpts) testsuites.TestDriver {
     }

     return &mockCSIDriver{
-        driverInfo: testsuites.DriverInfo{
+        driverInfo: storageframework.DriverInfo{
             Name:        "csi-mock",
             FeatureTag:  "",
-            MaxFileSize: testpatterns.FileSizeMedium,
+            MaxFileSize: storageframework.FileSizeMedium,
             SupportedFsType: sets.NewString(
                 "", // Default fsType
             ),
-            Capabilities: map[testsuites.Capability]bool{
-                testsuites.CapPersistence:  false,
-                testsuites.CapFsGroup:      false,
-                testsuites.CapExec:         false,
-                testsuites.CapVolumeLimits: true,
+            Capabilities: map[storageframework.Capability]bool{
+                storageframework.CapPersistence:  false,
+                storageframework.CapFsGroup:      false,
+                storageframework.CapExec:         false,
+                storageframework.CapVolumeLimits: true,
             },
         },
         manifests: driverManifests,
@@ -336,45 +335,45 @@ func InitMockCSIDriver(driverOpts CSIMockDriverOpts) testsuites.TestDriver {
     }
 }

-func (m *mockCSIDriver) GetDriverInfo() *testsuites.DriverInfo {
+func (m *mockCSIDriver) GetDriverInfo() *storageframework.DriverInfo {
     return &m.driverInfo
 }

-func (m *mockCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
+func (m *mockCSIDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
 }

-func (m *mockCSIDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
+func (m *mockCSIDriver) GetDynamicProvisionStorageClass(config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass {
     provisioner := config.GetUniqueDriverName()
     parameters := map[string]string{}
     ns := config.Framework.Namespace.Name
     suffix := fmt.Sprintf("%s-sc", provisioner)

-    return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
+    return storageframework.GetStorageClass(provisioner, parameters, nil, ns, suffix)
 }

-func (m *mockCSIDriver) GetSnapshotClass(config *testsuites.PerTestConfig) *unstructured.Unstructured {
+func (m *mockCSIDriver) GetSnapshotClass(config *storageframework.PerTestConfig) *unstructured.Unstructured {
     parameters := map[string]string{}
     snapshotter := m.driverInfo.Name + "-" + config.Framework.UniqueName
     ns := config.Framework.Namespace.Name
     suffix := fmt.Sprintf("%s-vsc", snapshotter)

-    return testsuites.GetSnapshotClass(snapshotter, parameters, ns, suffix)
+    return utils.GenerateSnapshotClassSpec(snapshotter, parameters, ns, suffix)
 }

-func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
+func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) {
     // Create secondary namespace which will be used for creating driver
     driverNamespace := utils.CreateDriverNamespace(f)
     ns2 := driverNamespace.Name
     ns1 := f.Namespace.Name

     ginkgo.By("deploying csi mock driver")
-    cancelLogging := testsuites.StartPodLogs(f, driverNamespace)
+    cancelLogging := utils.StartPodLogs(f, driverNamespace)
     cs := f.ClientSet

     // pods should be scheduled on the node
     node, err := e2enode.GetRandomReadySchedulableNode(cs)
     framework.ExpectNoError(err)
-    config := &testsuites.PerTestConfig{
+    config := &storageframework.PerTestConfig{
         Driver:    m,
         Prefix:    "mock",
         Framework: f,
@@ -481,21 +480,21 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTest

 // gce-pd
 type gcePDCSIDriver struct {
-    driverInfo    testsuites.DriverInfo
+    driverInfo    storageframework.DriverInfo
     cleanupHandle framework.CleanupActionHandle
 }

-var _ testsuites.TestDriver = &gcePDCSIDriver{}
-var _ testsuites.DynamicPVTestDriver = &gcePDCSIDriver{}
-var _ testsuites.SnapshottableTestDriver = &gcePDCSIDriver{}
+var _ storageframework.TestDriver = &gcePDCSIDriver{}
+var _ storageframework.DynamicPVTestDriver = &gcePDCSIDriver{}
+var _ storageframework.SnapshottableTestDriver = &gcePDCSIDriver{}

 // InitGcePDCSIDriver returns gcePDCSIDriver that implements TestDriver interface
-func InitGcePDCSIDriver() testsuites.TestDriver {
+func InitGcePDCSIDriver() storageframework.TestDriver {
     return &gcePDCSIDriver{
-        driverInfo: testsuites.DriverInfo{
+        driverInfo: storageframework.DriverInfo{
             Name:        GCEPDCSIDriverName,
             FeatureTag:  "[Serial]",
-            MaxFileSize: testpatterns.FileSizeMedium,
+            MaxFileSize: storageframework.FileSizeMedium,
             SupportedSizeRange: e2evolume.SizeRange{
                 Min: "5Gi",
             },
@@ -507,27 +506,27 @@ func InitGcePDCSIDriver() testsuites.TestDriver {
                 "xfs",
             ),
             SupportedMountOption: sets.NewString("debug", "nouid32"),
-            Capabilities: map[testsuites.Capability]bool{
-                testsuites.CapPersistence: true,
-                testsuites.CapBlock:       true,
-                testsuites.CapFsGroup:     true,
-                testsuites.CapExec:        true,
-                testsuites.CapMultiPODs:   true,
+            Capabilities: map[storageframework.Capability]bool{
+                storageframework.CapPersistence: true,
+                storageframework.CapBlock:       true,
+                storageframework.CapFsGroup:     true,
+                storageframework.CapExec:        true,
+                storageframework.CapMultiPODs:   true,
                 // GCE supports volume limits, but the test creates a large
                 // number of volumes and times out test suites.
-                testsuites.CapVolumeLimits:        false,
-                testsuites.CapTopology:            true,
-                testsuites.CapControllerExpansion: true,
-                testsuites.CapNodeExpansion:       true,
-                testsuites.CapSnapshotDataSource:  true,
+                storageframework.CapVolumeLimits:        false,
+                storageframework.CapTopology:            true,
+                storageframework.CapControllerExpansion: true,
+                storageframework.CapNodeExpansion:       true,
+                storageframework.CapSnapshotDataSource:  true,
             },
             RequiredAccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
             TopologyKeys:        []string{GCEPDCSIZoneTopologyKey},
-            StressTestOptions: &testsuites.StressTestOptions{
+            StressTestOptions: &storageframework.StressTestOptions{
                 NumPods:     10,
                 NumRestarts: 10,
             },
-            VolumeSnapshotStressTestOptions: &testsuites.VolumeSnapshotStressTestOptions{
+            VolumeSnapshotStressTestOptions: &storageframework.VolumeSnapshotStressTestOptions{
                 // GCE only allows for one snapshot per volume to be created at a time,
                 // which can cause test timeouts. We reduce the likelihood of test timeouts
                 // by increasing the number of pods (and volumes) and reducing the number
@@ -539,11 +538,11 @@ func InitGcePDCSIDriver() testsuites.TestDriver {
     }
 }

-func (g *gcePDCSIDriver) GetDriverInfo() *testsuites.DriverInfo {
+func (g *gcePDCSIDriver) GetDriverInfo() *storageframework.DriverInfo {
     return &g.driverInfo
 }

-func (g *gcePDCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
+func (g *gcePDCSIDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
     e2eskipper.SkipUnlessProviderIs("gce", "gke")
     if pattern.FsType == "xfs" {
         e2eskipper.SkipUnlessNodeOSDistroIs("ubuntu", "custom")
@@ -553,7 +552,7 @@ func (g *gcePDCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
     }
 }

-func (g *gcePDCSIDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
+func (g *gcePDCSIDriver) GetDynamicProvisionStorageClass(config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass {
     ns := config.Framework.Namespace.Name
     provisioner := g.driverInfo.Name
     suffix := fmt.Sprintf("%s-sc", g.driverInfo.Name)
@@ -564,26 +563,26 @@ func (g *gcePDCSIDriver) GetDynamicProvisionStorageClass(config *testsuites.PerT
     }
     delayedBinding := storagev1.VolumeBindingWaitForFirstConsumer

-    return testsuites.GetStorageClass(provisioner, parameters, &delayedBinding, ns, suffix)
+    return storageframework.GetStorageClass(provisioner, parameters, &delayedBinding, ns, suffix)
 }

-func (g *gcePDCSIDriver) GetSnapshotClass(config *testsuites.PerTestConfig) *unstructured.Unstructured {
+func (g *gcePDCSIDriver) GetSnapshotClass(config *storageframework.PerTestConfig) *unstructured.Unstructured {
     parameters := map[string]string{}
     snapshotter := g.driverInfo.Name
     ns := config.Framework.Namespace.Name
     suffix := fmt.Sprintf("%s-vsc", snapshotter)

-    return testsuites.GetSnapshotClass(snapshotter, parameters, ns, suffix)
+    return utils.GenerateSnapshotClassSpec(snapshotter, parameters, ns, suffix)
 }

-func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
+func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) {
     ginkgo.By("deploying csi gce-pd driver")
     // Create secondary namespace which will be used for creating driver
     driverNamespace := utils.CreateDriverNamespace(f)
     ns2 := driverNamespace.Name
     ns1 := f.Namespace.Name

-    cancelLogging := testsuites.StartPodLogs(f, driverNamespace)
+    cancelLogging := utils.StartPodLogs(f, driverNamespace)
     // It would be safer to rename the gcePD driver, but that
     // hasn't been done before either and attempts to do so now led to
     // errors during driver registration; therefore it is disabled
@@ -592,7 +591,7 @@ func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTes
     // These are the options which would have to be used:
     // o := utils.PatchCSIOptions{
     //     OldDriverName:            g.driverInfo.Name,
-    //     NewDriverName:            testsuites.GetUniqueDriverName(g),
+    //     NewDriverName:            storageframework.GetUniqueDriverName(g),
     //     DriverContainerName:      "gce-driver",
     //     ProvisionerContainerName: "csi-external-provisioner",
     // }
@@ -638,7 +637,7 @@ func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTes
     }
     g.cleanupHandle = framework.AddCleanupAction(cleanupFunc)

-    return &testsuites.PerTestConfig{
+    return &storageframework.PerTestConfig{
         Driver:    g,
         Prefix:    "gcepd",
         Framework: f,
diff --git a/test/e2e/storage/drivers/in_tree.go b/test/e2e/storage/drivers/in_tree.go
index d44d970b84d..0ea765e52fe 100644
--- a/test/e2e/storage/drivers/in_tree.go
+++ b/test/e2e/storage/drivers/in_tree.go
@@ -60,8 +60,7 @@ import (
     e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
     e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
     e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
-    "k8s.io/kubernetes/test/e2e/storage/testpatterns"
-    "k8s.io/kubernetes/test/e2e/storage/testsuites"
+    storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
     "k8s.io/kubernetes/test/e2e/storage/utils"
     vspheretest "k8s.io/kubernetes/test/e2e/storage/vsphere"
     imageutils "k8s.io/kubernetes/test/utils/image"
@@ -77,7 +76,7 @@
 type nfsDriver struct {
     externalProvisionerPod *v1.Pod
     externalPluginName     string

-    driverInfo testsuites.DriverInfo
+    driverInfo storageframework.DriverInfo
 }

 type nfsVolume struct {
@@ -86,19 +85,19 @@ type nfsVolume struct {
     f *framework.Framework
 }

-var _ testsuites.TestDriver = &nfsDriver{}
-var _ testsuites.PreprovisionedVolumeTestDriver = &nfsDriver{}
-var _ testsuites.InlineVolumeTestDriver = &nfsDriver{}
-var _ testsuites.PreprovisionedPVTestDriver = &nfsDriver{}
-var _ testsuites.DynamicPVTestDriver = &nfsDriver{}
+var _ storageframework.TestDriver = &nfsDriver{}
+var _ storageframework.PreprovisionedVolumeTestDriver = &nfsDriver{}
+var _ storageframework.InlineVolumeTestDriver = &nfsDriver{}
+var _ storageframework.PreprovisionedPVTestDriver = &nfsDriver{}
+var _ storageframework.DynamicPVTestDriver = &nfsDriver{}

 // InitNFSDriver returns nfsDriver that implements TestDriver interface
-func InitNFSDriver() testsuites.TestDriver {
+func InitNFSDriver() storageframework.TestDriver {
     return &nfsDriver{
-        driverInfo: testsuites.DriverInfo{
+        driverInfo: storageframework.DriverInfo{
             Name:             "nfs",
             InTreePluginName: "kubernetes.io/nfs",
-            MaxFileSize:      testpatterns.FileSizeLarge,
+            MaxFileSize:      storageframework.FileSizeLarge,
             SupportedSizeRange: e2evolume.SizeRange{
                 Min: "1Gi",
             },
@@ -107,24 +106,24 @@ func InitNFSDriver() testsuites.TestDriver {
             ),
             SupportedMountOption: sets.NewString("proto=tcp", "relatime"),
             RequiredMountOption:  sets.NewString("vers=4.1"),
-            Capabilities: map[testsuites.Capability]bool{
-                testsuites.CapPersistence: true,
-                testsuites.CapExec:        true,
-                testsuites.CapRWX:         true,
-                testsuites.CapMultiPODs:   true,
+            Capabilities: map[storageframework.Capability]bool{
+                storageframework.CapPersistence: true,
+                storageframework.CapExec:        true,
+                storageframework.CapRWX:         true,
+                storageframework.CapMultiPODs:   true,
             },
         },
     }
 }

-func (n *nfsDriver) GetDriverInfo() *testsuites.DriverInfo {
+func (n *nfsDriver) GetDriverInfo() *storageframework.DriverInfo {
     return &n.driverInfo
 }

-func (n *nfsDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
+func (n *nfsDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
 }

-func (n *nfsDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
+func (n *nfsDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
     nv, ok := e2evolume.(*nfsVolume)
     framework.ExpectEqual(ok, true, "Failed to cast test volume to NFS test volume")
     return &v1.VolumeSource{
@@ -136,7 +135,7 @@ func (n *nfsDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume test
     }
 }

-func (n *nfsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
+func (n *nfsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
     nv, ok := e2evolume.(*nfsVolume)
     framework.ExpectEqual(ok, true, "Failed to cast test volume to NFS test volume")
     return &v1.PersistentVolumeSource{
@@ -148,16 +147,16 @@ func (n *nfsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2ev
     }, nil
 }

-func (n *nfsDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
+func (n *nfsDriver) GetDynamicProvisionStorageClass(config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass {
     provisioner := n.externalPluginName
     parameters := map[string]string{"mountOptions": "vers=4.1"}
     ns := config.Framework.Namespace.Name
     suffix := fmt.Sprintf("%s-sc", n.driverInfo.Name)

-    return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
+    return storageframework.GetStorageClass(provisioner, parameters, nil, ns, suffix)
 }

-func (n *nfsDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
+func (n *nfsDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) {
     cs := f.ClientSet
     ns := f.Namespace
     n.externalPluginName = fmt.Sprintf("example.com/nfs-%s", ns.Name)
@@ -176,7 +175,7 @@ func (n *nfsDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConf
     ginkgo.By("creating an external dynamic provisioner pod")
     n.externalProvisionerPod = utils.StartExternalProvisioner(cs, ns.Name, n.externalPluginName)

-    return &testsuites.PerTestConfig{
+    return &storageframework.PerTestConfig{
         Driver:    n,
         Prefix:    "nfs",
         Framework: f,
@@ -187,7 +186,7 @@ func (n *nfsDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConf
     }
 }

-func (n *nfsDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
+func (n *nfsDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
     f := config.Framework
     cs := f.ClientSet
     ns := f.Namespace
@@ -196,9 +195,9 @@ func (n *nfsDriver) CreateVolume(config *testsuites.PerTestConfig, volType testp
     // and startExternalProvisioner creates a pod for DynamicPV.
     // Therefore, we need a different PrepareTest logic for volType.
     switch volType {
-    case testpatterns.InlineVolume:
+    case storageframework.InlineVolume:
         fallthrough
-    case testpatterns.PreprovisionedPV:
+    case storageframework.PreprovisionedPV:
         c, serverPod, serverHost := e2evolume.NewNFSServer(cs, ns.Name, []string{})
         config.ServerConfig = &c
         return &nfsVolume{
@@ -206,7 +205,7 @@ func (n *nfsDriver) CreateVolume(config *testsuites.PerTestConfig, volType testp
             serverPod: serverPod,
             f:         f,
         }
-    case testpatterns.DynamicPV:
+    case storageframework.DynamicPV:
         // Do nothing
     default:
         framework.Failf("Unsupported volType:%v is specified", volType)
@@ -220,7 +219,7 @@ func (v *nfsVolume) DeleteVolume() {

 // Gluster
 type glusterFSDriver struct {
-    driverInfo testsuites.DriverInfo
+    driverInfo storageframework.DriverInfo
 }

 type glusterVolume struct {
@@ -229,43 +228,43 @@ type glusterVolume struct {
     f *framework.Framework
 }

-var _ testsuites.TestDriver = &glusterFSDriver{}
-var _ testsuites.PreprovisionedVolumeTestDriver = &glusterFSDriver{}
-var _ testsuites.InlineVolumeTestDriver = &glusterFSDriver{}
-var _ testsuites.PreprovisionedPVTestDriver = &glusterFSDriver{}
+var _ storageframework.TestDriver = &glusterFSDriver{}
+var _ storageframework.PreprovisionedVolumeTestDriver = &glusterFSDriver{}
+var _ storageframework.InlineVolumeTestDriver = &glusterFSDriver{}
+var _ storageframework.PreprovisionedPVTestDriver = &glusterFSDriver{}

 // InitGlusterFSDriver returns glusterFSDriver that implements TestDriver interface
-func InitGlusterFSDriver() testsuites.TestDriver {
+func InitGlusterFSDriver() storageframework.TestDriver {
     return &glusterFSDriver{
-        driverInfo: testsuites.DriverInfo{
+        driverInfo: storageframework.DriverInfo{
             Name:             "gluster",
             InTreePluginName: "kubernetes.io/glusterfs",
-            MaxFileSize:      testpatterns.FileSizeMedium,
+            MaxFileSize:      storageframework.FileSizeMedium,
             SupportedSizeRange: e2evolume.SizeRange{
                 Min: "1Gi",
             },
             SupportedFsType: sets.NewString(
                 "", // Default fsType
             ),
-            Capabilities: map[testsuites.Capability]bool{
-                testsuites.CapPersistence: true,
-                testsuites.CapExec:        true,
-                testsuites.CapRWX:         true,
-                testsuites.CapMultiPODs:   true,
+            Capabilities: map[storageframework.Capability]bool{
+                storageframework.CapPersistence: true,
+                storageframework.CapExec:        true,
+                storageframework.CapRWX:         true,
+                storageframework.CapMultiPODs:   true,
             },
         },
     }
 }

-func (g *glusterFSDriver) GetDriverInfo() *testsuites.DriverInfo {
+func (g *glusterFSDriver) GetDriverInfo() *storageframework.DriverInfo {
     return &g.driverInfo
 }

-func (g *glusterFSDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
+func (g *glusterFSDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
     e2eskipper.SkipUnlessNodeOSDistroIs("gci", "ubuntu", "custom")
 }

-func (g *glusterFSDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
+func (g *glusterFSDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
     gv, ok := e2evolume.(*glusterVolume)
     framework.ExpectEqual(ok, true, "Failed to cast test volume to Gluster test volume")

@@ -280,7 +279,7 @@ func (g *glusterFSDriver) GetVolumeSource(readOnly bool, fsType string, e2evolum
     }
 }

-func (g *glusterFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
+func (g *glusterFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
     gv, ok := e2evolume.(*glusterVolume)
     framework.ExpectEqual(ok, true, "Failed to cast test volume to Gluster test volume")

@@ -295,15 +294,15 @@ func (g *glusterFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string
     }, nil
 }

-func (g *glusterFSDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
-    return &testsuites.PerTestConfig{
+func (g *glusterFSDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) {
+    return &storageframework.PerTestConfig{
         Driver:    g,
         Prefix:    "gluster",
         Framework: f,
     }, func() {}
 }

-func (g *glusterFSDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
+func (g *glusterFSDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
     f := config.Framework
     cs := f.ClientSet
     ns := f.Namespace
@@ -354,7 +353,7 @@ func (v *glusterVolume) DeleteVolume() {

 // iSCSI
 // The iscsiadm utility and iscsi target kernel modules must be installed on all nodes.
 type iSCSIDriver struct {
-    driverInfo testsuites.DriverInfo
+    driverInfo storageframework.DriverInfo
 }
 type iSCSIVolume struct {
     serverPod *v1.Pod
@@ -363,44 +362,44 @@ type iSCSIVolume struct {
     iqn string
 }

-var _ testsuites.TestDriver = &iSCSIDriver{}
-var _ testsuites.PreprovisionedVolumeTestDriver = &iSCSIDriver{}
-var _ testsuites.InlineVolumeTestDriver = &iSCSIDriver{}
-var _ testsuites.PreprovisionedPVTestDriver = &iSCSIDriver{}
+var _ storageframework.TestDriver = &iSCSIDriver{}
+var _ storageframework.PreprovisionedVolumeTestDriver = &iSCSIDriver{}
+var _ storageframework.InlineVolumeTestDriver = &iSCSIDriver{}
+var _ storageframework.PreprovisionedPVTestDriver = &iSCSIDriver{}

 // InitISCSIDriver returns iSCSIDriver that implements TestDriver interface
-func InitISCSIDriver() testsuites.TestDriver {
+func InitISCSIDriver() storageframework.TestDriver {
     return &iSCSIDriver{
-        driverInfo: testsuites.DriverInfo{
+        driverInfo: storageframework.DriverInfo{
             Name:             "iscsi",
             InTreePluginName: "kubernetes.io/iscsi",
             FeatureTag:       "[Feature:Volumes]",
-            MaxFileSize:      testpatterns.FileSizeMedium,
+            MaxFileSize:      storageframework.FileSizeMedium,
             SupportedFsType: sets.NewString(
                 "", // Default fsType
                 "ext4",
             ),
             TopologyKeys: []string{v1.LabelHostname},
-            Capabilities: map[testsuites.Capability]bool{
-                testsuites.CapPersistence: true,
-                testsuites.CapFsGroup:     true,
-                testsuites.CapBlock:       true,
-                testsuites.CapExec:        true,
-                testsuites.CapMultiPODs:   true,
-                testsuites.CapTopology:    true,
+            Capabilities: map[storageframework.Capability]bool{
+                storageframework.CapPersistence: true,
+                storageframework.CapFsGroup:     true,
+                storageframework.CapBlock:       true,
+                storageframework.CapExec:        true,
+                storageframework.CapMultiPODs:   true,
+                storageframework.CapTopology:    true,
             },
         },
     }
 }

-func (i *iSCSIDriver) GetDriverInfo() *testsuites.DriverInfo {
+func (i *iSCSIDriver) GetDriverInfo() *storageframework.DriverInfo {
     return &i.driverInfo
 }

-func (i *iSCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
+func (i *iSCSIDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
 }

-func (i *iSCSIDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
+func (i *iSCSIDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
     iv, ok := e2evolume.(*iSCSIVolume)
     framework.ExpectEqual(ok, true, "Failed to cast test volume to iSCSI test volume")
@@ -418,7 +417,7 @@ func (i *iSCSIDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume te
     return &volSource
 }

-func (i *iSCSIDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
+func (i *iSCSIDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
     iv, ok := e2evolume.(*iSCSIVolume)
     framework.ExpectEqual(ok, true, "Failed to cast test volume to iSCSI test volume")
@@ -436,15 +435,15 @@ func (i *iSCSIDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2
     return &pvSource, nil
 }

-func (i *iSCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
-    return &testsuites.PerTestConfig{
+func (i *iSCSIDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) {
+    return &storageframework.PerTestConfig{
         Driver:    i,
         Prefix:    "iscsi",
         Framework: f,
     }, func() {}
 }

-func (i *iSCSIDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
+func (i *iSCSIDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
     f := config.Framework
     cs := f.ClientSet
     ns := f.Namespace
@@ -529,7 +528,7 @@ func (v *iSCSIVolume) DeleteVolume() {

 // Ceph RBD
 type rbdDriver struct {
-    driverInfo testsuites.DriverInfo
+    driverInfo storageframework.DriverInfo
 }

 type rbdVolume struct {
@@ -539,19 +538,19 @@ type rbdVolume struct {
     f *framework.Framework
 }

-var _ testsuites.TestDriver = &rbdDriver{}
-var _ testsuites.PreprovisionedVolumeTestDriver = &rbdDriver{}
-var _ testsuites.InlineVolumeTestDriver = &rbdDriver{}
-var _ testsuites.PreprovisionedPVTestDriver = &rbdDriver{}
+var _ storageframework.TestDriver = &rbdDriver{}
+var _ storageframework.PreprovisionedVolumeTestDriver = &rbdDriver{}
+var _ storageframework.InlineVolumeTestDriver = &rbdDriver{}
+var _ storageframework.PreprovisionedPVTestDriver = &rbdDriver{}

 // InitRbdDriver returns rbdDriver that implements TestDriver interface
-func InitRbdDriver() testsuites.TestDriver {
+func InitRbdDriver() storageframework.TestDriver {
     return &rbdDriver{
-        driverInfo: testsuites.DriverInfo{
+        driverInfo: storageframework.DriverInfo{
             Name:             "rbd",
             InTreePluginName: "kubernetes.io/rbd",
             FeatureTag:       "[Feature:Volumes][Serial]",
-            MaxFileSize:      testpatterns.FileSizeMedium,
+            MaxFileSize:      storageframework.FileSizeMedium,
             SupportedSizeRange: e2evolume.SizeRange{
                 Min: "1Gi",
             },
@@ -559,25 +558,25 @@ func InitRbdDriver() testsuites.TestDriver {
                 "", // Default fsType
                 "ext4",
             ),
-            Capabilities: map[testsuites.Capability]bool{
-                testsuites.CapPersistence: true,
-                testsuites.CapFsGroup:     true,
-                testsuites.CapBlock:       true,
-                testsuites.CapExec:        true,
-                testsuites.CapMultiPODs:   true,
+            Capabilities: map[storageframework.Capability]bool{
+                storageframework.CapPersistence: true,
+                storageframework.CapFsGroup:     true,
+                storageframework.CapBlock:       true,
+                storageframework.CapExec:        true,
+                storageframework.CapMultiPODs:   true,
             },
         },
     }
 }

-func (r *rbdDriver) GetDriverInfo() *testsuites.DriverInfo {
+func (r *rbdDriver) GetDriverInfo() *storageframework.DriverInfo {
     return &r.driverInfo
 }

-func (r *rbdDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
+func (r *rbdDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
 }

-func (r *rbdDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
+func (r *rbdDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
     rv, ok := e2evolume.(*rbdVolume)
     framework.ExpectEqual(ok, true, "Failed to cast test volume to RBD test volume")

@@ -599,7 +598,7 @@ func (r *rbdDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume test
     return &volSource
 }

-func (r *rbdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
+func (r *rbdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
     rv, ok := e2evolume.(*rbdVolume)
     framework.ExpectEqual(ok, true, "Failed to cast test volume to RBD test volume")

@@ -625,15 +624,15 @@ func (r *rbdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2ev
     return &pvSource, nil
 }

-func (r *rbdDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
-    return &testsuites.PerTestConfig{
+func (r *rbdDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) {
+    return &storageframework.PerTestConfig{
         Driver:    r,
         Prefix:    "rbd",
         Framework: f,
     }, func() {}
 }

-func (r *rbdDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
+func (r *rbdDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
     f := config.Framework
     cs := f.ClientSet
     ns := f.Namespace
@@ -654,7 +653,7 @@ func (v *rbdVolume) DeleteVolume() {

 // Ceph
 type cephFSDriver struct {
-    driverInfo testsuites.DriverInfo
+    driverInfo storageframework.DriverInfo
 }

 type cephVolume struct {
@@ -664,43 +663,43 @@ type cephVolume struct {
     f *framework.Framework
 }

-var _ testsuites.TestDriver = &cephFSDriver{}
-var _ testsuites.PreprovisionedVolumeTestDriver = &cephFSDriver{}
-var _ testsuites.InlineVolumeTestDriver = &cephFSDriver{}
-var _ testsuites.PreprovisionedPVTestDriver = &cephFSDriver{}
+var _ storageframework.TestDriver = &cephFSDriver{}
+var _ storageframework.PreprovisionedVolumeTestDriver = &cephFSDriver{}
+var _ storageframework.InlineVolumeTestDriver = &cephFSDriver{}
+var _ storageframework.PreprovisionedPVTestDriver = &cephFSDriver{}

 // InitCephFSDriver returns cephFSDriver that implements TestDriver interface
-func InitCephFSDriver() testsuites.TestDriver {
+func InitCephFSDriver() storageframework.TestDriver {
     return &cephFSDriver{
-        driverInfo: testsuites.DriverInfo{
+        driverInfo: storageframework.DriverInfo{
             Name:             "ceph",
             InTreePluginName: "kubernetes.io/cephfs",
             FeatureTag:       "[Feature:Volumes][Serial]",
-            MaxFileSize:      testpatterns.FileSizeMedium,
+            MaxFileSize:      storageframework.FileSizeMedium,
             SupportedSizeRange: e2evolume.SizeRange{
                 Min: "1Gi",
             },
             SupportedFsType: sets.NewString(
                 "", // Default fsType
             ),
-            Capabilities: map[testsuites.Capability]bool{
-                testsuites.CapPersistence: true,
-                testsuites.CapExec:        true,
-                testsuites.CapRWX:         true,
-                testsuites.CapMultiPODs:   true,
+            Capabilities: map[storageframework.Capability]bool{
+                storageframework.CapPersistence: true,
+                storageframework.CapExec:        true,
+                storageframework.CapRWX:         true,
+                storageframework.CapMultiPODs:   true,
             },
         },
     }
 }

-func (c *cephFSDriver) GetDriverInfo() *testsuites.DriverInfo {
+func (c *cephFSDriver) GetDriverInfo() *storageframework.DriverInfo {
     return &c.driverInfo
 }

-func (c *cephFSDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
+func (c *cephFSDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
 }

-func (c *cephFSDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
+func (c *cephFSDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
     cv, ok := e2evolume.(*cephVolume)
     framework.ExpectEqual(ok, true, "Failed to cast test volume to Ceph test volume")

@@ -716,7 +715,7 @@ func (c *cephFSDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume t
     }
 }

-func (c *cephFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
+func (c *cephFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
     cv, ok := e2evolume.(*cephVolume)
     framework.ExpectEqual(ok, true, "Failed to cast test volume to Ceph test volume")

@@ -735,15 +734,15 @@ func (c *cephFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e
     }, nil
 }

-func (c *cephFSDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
-    return &testsuites.PerTestConfig{
+func (c *cephFSDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) {
+    return &storageframework.PerTestConfig{
         Driver:    c,
         Prefix:    "cephfs",
         Framework: f,
     }, func() {}
 }

-func (c *cephFSDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
+func (c *cephFSDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
     f := config.Framework
     cs := f.ClientSet
     ns := f.Namespace
@@ -764,42 +763,42 @@ func (v *cephVolume) DeleteVolume() {

 // Hostpath
 type hostPathDriver struct {
-    driverInfo testsuites.DriverInfo
+    driverInfo storageframework.DriverInfo
 }

-var _ testsuites.TestDriver = &hostPathDriver{}
-var _ testsuites.PreprovisionedVolumeTestDriver = &hostPathDriver{}
-var _ testsuites.InlineVolumeTestDriver = &hostPathDriver{}
+var _ storageframework.TestDriver = &hostPathDriver{}
+var _ storageframework.PreprovisionedVolumeTestDriver = &hostPathDriver{}
+var _ storageframework.InlineVolumeTestDriver = &hostPathDriver{}

 // InitHostPathDriver returns hostPathDriver that implements TestDriver interface
-func InitHostPathDriver() testsuites.TestDriver {
+func InitHostPathDriver() storageframework.TestDriver {
     return &hostPathDriver{
-        driverInfo: testsuites.DriverInfo{
+        driverInfo: storageframework.DriverInfo{
             Name:             "hostPath",
             InTreePluginName: "kubernetes.io/host-path",
-            MaxFileSize:      testpatterns.FileSizeMedium,
+            MaxFileSize:      storageframework.FileSizeMedium,
             SupportedFsType: sets.NewString(
                 "", // Default fsType
             ),
             TopologyKeys: []string{v1.LabelHostname},
-            Capabilities: map[testsuites.Capability]bool{
-                testsuites.CapPersistence:      true,
-                testsuites.CapMultiPODs:        true,
-                testsuites.CapSingleNodeVolume: true,
-                testsuites.CapTopology:         true,
+            Capabilities: map[storageframework.Capability]bool{
+                storageframework.CapPersistence:      true,
+                storageframework.CapMultiPODs:        true,
+                storageframework.CapSingleNodeVolume: true,
+                storageframework.CapTopology:         true,
             },
         },
     }
 }

-func (h *hostPathDriver) GetDriverInfo() *testsuites.DriverInfo {
+func (h *hostPathDriver) GetDriverInfo() *storageframework.DriverInfo {
     return &h.driverInfo
 }

-func (h *hostPathDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
+func (h *hostPathDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
 }

-func (h *hostPathDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
+func (h *hostPathDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
     // hostPath doesn't support readOnly volume
     if readOnly {
         return nil
@@ -811,15 +810,15 @@ func (h *hostPathDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume
     }
 }

-func (h *hostPathDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
-    return &testsuites.PerTestConfig{
+func (h *hostPathDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) {
+    return &storageframework.PerTestConfig{
         Driver:    h,
         Prefix:    "hostpath",
         Framework: f,
     }, func() {}
 }

-func (h *hostPathDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
+func (h *hostPathDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
     f := config.Framework
     cs := f.ClientSet

@@ -832,7 +831,7 @@ func (h *hostPathDriver) CreateVolume(config *testsuites.PerTestConfig, volType

 // HostPathSymlink
 type hostPathSymlinkDriver struct {
-    driverInfo testsuites.DriverInfo
+    driverInfo storageframework.DriverInfo
 }

 type hostPathSymlinkVolume struct {
@@ -842,39 +841,39 @@ type hostPathSymlinkVolume struct {
     f *framework.Framework
 }

-var _ testsuites.TestDriver = &hostPathSymlinkDriver{}
-var _ testsuites.PreprovisionedVolumeTestDriver = &hostPathSymlinkDriver{}
-var _ testsuites.InlineVolumeTestDriver = &hostPathSymlinkDriver{}
+var _ storageframework.TestDriver = &hostPathSymlinkDriver{}
+var _ storageframework.PreprovisionedVolumeTestDriver = &hostPathSymlinkDriver{}
+var _ storageframework.InlineVolumeTestDriver = &hostPathSymlinkDriver{}

 // InitHostPathSymlinkDriver returns hostPathSymlinkDriver that implements TestDriver interface
-func InitHostPathSymlinkDriver() testsuites.TestDriver {
+func InitHostPathSymlinkDriver() storageframework.TestDriver {
     return &hostPathSymlinkDriver{
-        driverInfo: testsuites.DriverInfo{
+        driverInfo: storageframework.DriverInfo{
             Name:             "hostPathSymlink",
             InTreePluginName: "kubernetes.io/host-path",
-            MaxFileSize:      testpatterns.FileSizeMedium,
+            MaxFileSize:      storageframework.FileSizeMedium,
             SupportedFsType: sets.NewString(
                 "", // Default fsType
             ),
             TopologyKeys: []string{v1.LabelHostname},
-            Capabilities: map[testsuites.Capability]bool{
-                testsuites.CapPersistence:      true,
-                testsuites.CapMultiPODs:        true,
-                testsuites.CapSingleNodeVolume: true,
-                testsuites.CapTopology:         true,
+            Capabilities: map[storageframework.Capability]bool{
+                storageframework.CapPersistence:      true,
+                storageframework.CapMultiPODs:        true,
+                storageframework.CapSingleNodeVolume: true,
+                storageframework.CapTopology:         true,
             },
         },
     }
 }

-func (h *hostPathSymlinkDriver) GetDriverInfo() *testsuites.DriverInfo {
+func (h *hostPathSymlinkDriver) GetDriverInfo() *storageframework.DriverInfo {
     return &h.driverInfo
 }

-func (h *hostPathSymlinkDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
+func (h *hostPathSymlinkDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
 }

-func (h *hostPathSymlinkDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
+func (h *hostPathSymlinkDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
     hv, ok := e2evolume.(*hostPathSymlinkVolume)
     framework.ExpectEqual(ok, true, "Failed to cast test volume to Hostpath Symlink test volume")

@@ -889,15 +888,15 @@ func (h *hostPathSymlinkDriver) GetVolumeSource(readOnly bool, fsType string, e2
     }
 }

-func (h *hostPathSymlinkDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
-    return &testsuites.PerTestConfig{
+func (h *hostPathSymlinkDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) {
+    return &storageframework.PerTestConfig{
         Driver:    h,
         Prefix:    "hostpathsymlink",
         Framework: f,
     }, func() {}
 }

-func (h *hostPathSymlinkDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
+func (h *hostPathSymlinkDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
     f := config.Framework
     cs := f.ClientSet

@@ -984,39 +983,39 @@ func (v *hostPathSymlinkVolume) DeleteVolume() {

 // emptydir
 type emptydirDriver struct {
-    driverInfo testsuites.DriverInfo
+    driverInfo storageframework.DriverInfo
 }

-var _ testsuites.TestDriver = &emptydirDriver{}
-var _ testsuites.PreprovisionedVolumeTestDriver = &emptydirDriver{}
-var _ testsuites.InlineVolumeTestDriver = &emptydirDriver{}
+var _ storageframework.TestDriver = &emptydirDriver{}
+var _ storageframework.PreprovisionedVolumeTestDriver = &emptydirDriver{}
+var _ storageframework.InlineVolumeTestDriver = &emptydirDriver{}

 // InitEmptydirDriver returns emptydirDriver that implements TestDriver interface
-func InitEmptydirDriver() testsuites.TestDriver {
+func InitEmptydirDriver() storageframework.TestDriver {
     return &emptydirDriver{
-        driverInfo: testsuites.DriverInfo{
+        driverInfo: storageframework.DriverInfo{
             Name:             "emptydir",
             InTreePluginName: "kubernetes.io/empty-dir",
-            MaxFileSize:      testpatterns.FileSizeMedium,
+            MaxFileSize:      storageframework.FileSizeMedium,
             SupportedFsType: sets.NewString(
                 "", // Default fsType
             ),
-            Capabilities: map[testsuites.Capability]bool{
-                testsuites.CapExec:             true,
-                testsuites.CapSingleNodeVolume: true,
+            Capabilities: map[storageframework.Capability]bool{
+                storageframework.CapExec:             true,
+                storageframework.CapSingleNodeVolume: true,
             },
         },
     }
 }

-func (e *emptydirDriver) GetDriverInfo() *testsuites.DriverInfo {
+func (e *emptydirDriver) GetDriverInfo() *storageframework.DriverInfo {
     return &e.driverInfo
 }

-func (e *emptydirDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
+func (e *emptydirDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
 }

-func (e *emptydirDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource {
+func (e *emptydirDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
     // emptydir doesn't support readOnly volume
     if readOnly {
         return nil
@@ -1026,12 +1025,12 @@ func (e *emptydirDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume
     }
 }

-func (e *emptydirDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
+func (e *emptydirDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
     return nil
 }

-func (e *emptydirDriver)
PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { - return &testsuites.PerTestConfig{ +func (e *emptydirDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) { + return &storageframework.PerTestConfig{ Driver: e, Prefix: "emptydir", Framework: f, @@ -1044,7 +1043,7 @@ func (e *emptydirDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTes // and that the usual OpenStack authentication env. variables are set // (OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME at least). type cinderDriver struct { - driverInfo testsuites.DriverInfo + driverInfo storageframework.DriverInfo } type cinderVolume struct { @@ -1052,19 +1051,19 @@ type cinderVolume struct { volumeID string } -var _ testsuites.TestDriver = &cinderDriver{} -var _ testsuites.PreprovisionedVolumeTestDriver = &cinderDriver{} -var _ testsuites.InlineVolumeTestDriver = &cinderDriver{} -var _ testsuites.PreprovisionedPVTestDriver = &cinderDriver{} -var _ testsuites.DynamicPVTestDriver = &cinderDriver{} +var _ storageframework.TestDriver = &cinderDriver{} +var _ storageframework.PreprovisionedVolumeTestDriver = &cinderDriver{} +var _ storageframework.InlineVolumeTestDriver = &cinderDriver{} +var _ storageframework.PreprovisionedPVTestDriver = &cinderDriver{} +var _ storageframework.DynamicPVTestDriver = &cinderDriver{} // InitCinderDriver returns cinderDriver that implements TestDriver interface -func InitCinderDriver() testsuites.TestDriver { +func InitCinderDriver() storageframework.TestDriver { return &cinderDriver{ - driverInfo: testsuites.DriverInfo{ + driverInfo: storageframework.DriverInfo{ Name: "cinder", InTreePluginName: "kubernetes.io/cinder", - MaxFileSize: testpatterns.FileSizeMedium, + MaxFileSize: storageframework.FileSizeMedium, SupportedSizeRange: e2evolume.SizeRange{ Min: "1Gi", }, @@ -1072,29 +1071,29 @@ func InitCinderDriver() testsuites.TestDriver { "", // Default fsType ), TopologyKeys: []string{v1.LabelFailureDomainBetaZone}, - Capabilities: map[testsuites.Capability]bool{ - testsuites.CapPersistence: true, - testsuites.CapFsGroup: true, - testsuites.CapExec: true, - testsuites.CapBlock: true, + Capabilities: map[storageframework.Capability]bool{ + storageframework.CapPersistence: true, + storageframework.CapFsGroup: true, + storageframework.CapExec: true, + storageframework.CapBlock: true, // Cinder supports volume limits, but the test creates large // number of volumes and times out test suites. 
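The Capabilities map used by these drivers is how each one declares, data-style, which optional behaviors it supports; suites consult the map instead of hard-coding per-driver knowledge. A minimal sketch of such a check — the helper name is hypothetical, only the map and the Skipf call come from the packages in this diff:

import (
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
)

// skipUnlessCapability is a hypothetical helper: it skips the current test
// unless the driver advertises the given capability in its DriverInfo.
func skipUnlessCapability(driver storageframework.TestDriver, c storageframework.Capability) {
	dInfo := driver.GetDriverInfo()
	if !dInfo.Capabilities[c] {
		e2eskipper.Skipf("Driver %q does not support %q - skipping", dInfo.Name, c)
	}
}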
- testsuites.CapVolumeLimits: false, - testsuites.CapTopology: true, + storageframework.CapVolumeLimits: false, + storageframework.CapTopology: true, }, }, } } -func (c *cinderDriver) GetDriverInfo() *testsuites.DriverInfo { +func (c *cinderDriver) GetDriverInfo() *storageframework.DriverInfo { return &c.driverInfo } -func (c *cinderDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { +func (c *cinderDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) { e2eskipper.SkipUnlessProviderIs("openstack") } -func (c *cinderDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource { +func (c *cinderDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource { cv, ok := e2evolume.(*cinderVolume) framework.ExpectEqual(ok, true, "Failed to cast test volume to Cinder test volume") @@ -1110,7 +1109,7 @@ func (c *cinderDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume t return &volSource } -func (c *cinderDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { +func (c *cinderDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { cv, ok := e2evolume.(*cinderVolume) framework.ExpectEqual(ok, true, "Failed to cast test volume to Cinder test volume") @@ -1126,7 +1125,7 @@ func (c *cinderDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e return &pvSource, nil } -func (c *cinderDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass { +func (c *cinderDriver) GetDynamicProvisionStorageClass(config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass { provisioner := "kubernetes.io/cinder" parameters := map[string]string{} if fsType != "" { @@ -1135,18 +1134,18 @@ func (c *cinderDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTes ns := config.Framework.Namespace.Name suffix := fmt.Sprintf("%s-sc", c.driverInfo.Name) - return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix) + return storageframework.GetStorageClass(provisioner, parameters, nil, ns, suffix) } -func (c *cinderDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { - return &testsuites.PerTestConfig{ +func (c *cinderDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) { + return &storageframework.PerTestConfig{ Driver: c, Prefix: "cinder", Framework: f, }, func() {} } -func (c *cinderDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume { +func (c *cinderDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { f := config.Framework ns := f.Namespace @@ -1214,21 +1213,21 @@ func (v *cinderVolume) DeleteVolume() { // GCE type gcePdDriver struct { - driverInfo testsuites.DriverInfo + driverInfo storageframework.DriverInfo } type gcePdVolume struct { volumeName string } -var _ testsuites.TestDriver = &gcePdDriver{} -var _ testsuites.PreprovisionedVolumeTestDriver = &gcePdDriver{} -var _ testsuites.InlineVolumeTestDriver = &gcePdDriver{} -var _ testsuites.PreprovisionedPVTestDriver = &gcePdDriver{} -var _ testsuites.DynamicPVTestDriver = &gcePdDriver{} +var _ storageframework.TestDriver = &gcePdDriver{} +var _ 
storageframework.PreprovisionedVolumeTestDriver = &gcePdDriver{} +var _ storageframework.InlineVolumeTestDriver = &gcePdDriver{} +var _ storageframework.PreprovisionedPVTestDriver = &gcePdDriver{} +var _ storageframework.DynamicPVTestDriver = &gcePdDriver{} // InitGcePdDriver returns gcePdDriver that implements TestDriver interface -func InitGcePdDriver() testsuites.TestDriver { +func InitGcePdDriver() storageframework.TestDriver { supportedTypes := sets.NewString( "", // Default fsType "ext2", @@ -1237,28 +1236,28 @@ func InitGcePdDriver() testsuites.TestDriver { "xfs", ) return &gcePdDriver{ - driverInfo: testsuites.DriverInfo{ + driverInfo: storageframework.DriverInfo{ Name: "gcepd", InTreePluginName: "kubernetes.io/gce-pd", - MaxFileSize: testpatterns.FileSizeMedium, + MaxFileSize: storageframework.FileSizeMedium, SupportedSizeRange: e2evolume.SizeRange{ Min: "1Gi", }, SupportedFsType: supportedTypes, SupportedMountOption: sets.NewString("debug", "nouid32"), TopologyKeys: []string{v1.LabelFailureDomainBetaZone}, - Capabilities: map[testsuites.Capability]bool{ - testsuites.CapPersistence: true, - testsuites.CapFsGroup: true, - testsuites.CapBlock: true, - testsuites.CapExec: true, - testsuites.CapMultiPODs: true, - testsuites.CapControllerExpansion: true, - testsuites.CapNodeExpansion: true, + Capabilities: map[storageframework.Capability]bool{ + storageframework.CapPersistence: true, + storageframework.CapFsGroup: true, + storageframework.CapBlock: true, + storageframework.CapExec: true, + storageframework.CapMultiPODs: true, + storageframework.CapControllerExpansion: true, + storageframework.CapNodeExpansion: true, // GCE supports volume limits, but the test creates large // number of volumes and times out test suites. - testsuites.CapVolumeLimits: false, - testsuites.CapTopology: true, + storageframework.CapVolumeLimits: false, + storageframework.CapTopology: true, }, }, } @@ -1268,46 +1267,46 @@ func InitGcePdDriver() testsuites.TestDriver { // In current test structure, it first initialize the driver and then set up // the new framework, so we cannot get the correct OS here and select which file system is supported. // So here uses a separate Windows in-tree gce pd driver -func InitWindowsGcePdDriver() testsuites.TestDriver { +func InitWindowsGcePdDriver() storageframework.TestDriver { supportedTypes := sets.NewString( "ntfs", ) return &gcePdDriver{ - driverInfo: testsuites.DriverInfo{ + driverInfo: storageframework.DriverInfo{ Name: "windows-gcepd", InTreePluginName: "kubernetes.io/gce-pd", - MaxFileSize: testpatterns.FileSizeMedium, + MaxFileSize: storageframework.FileSizeMedium, SupportedSizeRange: e2evolume.SizeRange{ Min: "1Gi", }, SupportedFsType: supportedTypes, TopologyKeys: []string{v1.LabelZoneFailureDomain}, - Capabilities: map[testsuites.Capability]bool{ - testsuites.CapControllerExpansion: false, - testsuites.CapPersistence: true, - testsuites.CapExec: true, - testsuites.CapMultiPODs: true, + Capabilities: map[storageframework.Capability]bool{ + storageframework.CapControllerExpansion: false, + storageframework.CapPersistence: true, + storageframework.CapExec: true, + storageframework.CapMultiPODs: true, // GCE supports volume limits, but the test creates large // number of volumes and times out test suites. 
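Every driver's PrepareTest now returns a *storageframework.PerTestConfig plus a cleanup function, so call sites stay uniform across drivers. A sketch of the calling convention, assuming an already-constructed framework.Framework f; the wrapper function is illustrative:

import (
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/storage/drivers"
)

func prepareGcePd(f *framework.Framework) func() {
	driver := drivers.InitGcePdDriver()
	config, cleanup := driver.PrepareTest(f)
	// config carries the driver, a log prefix, and any node selection the
	// driver set up; suites thread it through volume and storage-class calls.
	_ = config
	return cleanup // the caller defers this to tear down per-test state
}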
- testsuites.CapVolumeLimits: false, - testsuites.CapTopology: true, + storageframework.CapVolumeLimits: false, + storageframework.CapTopology: true, }, }, } } -func (g *gcePdDriver) GetDriverInfo() *testsuites.DriverInfo { +func (g *gcePdDriver) GetDriverInfo() *storageframework.DriverInfo { return &g.driverInfo } -func (g *gcePdDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { +func (g *gcePdDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) { e2eskipper.SkipUnlessProviderIs("gce", "gke") if pattern.FeatureTag == "[sig-windows]" { e2eskipper.SkipUnlessNodeOSDistroIs("windows") } } -func (g *gcePdDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource { +func (g *gcePdDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource { gv, ok := e2evolume.(*gcePdVolume) framework.ExpectEqual(ok, true, "Failed to cast test volume to GCE PD test volume") volSource := v1.VolumeSource{ @@ -1322,7 +1321,7 @@ func (g *gcePdDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume te return &volSource } -func (g *gcePdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { +func (g *gcePdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { gv, ok := e2evolume.(*gcePdVolume) framework.ExpectEqual(ok, true, "Failed to cast test volume to GCE PD test volume") pvSource := v1.PersistentVolumeSource{ @@ -1337,7 +1336,7 @@ func (g *gcePdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2 return &pvSource, nil } -func (g *gcePdDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass { +func (g *gcePdDriver) GetDynamicProvisionStorageClass(config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass { provisioner := "kubernetes.io/gce-pd" parameters := map[string]string{} if fsType != "" { @@ -1347,11 +1346,11 @@ func (g *gcePdDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTest suffix := fmt.Sprintf("%s-sc", g.driverInfo.Name) delayedBinding := storagev1.VolumeBindingWaitForFirstConsumer - return testsuites.GetStorageClass(provisioner, parameters, &delayedBinding, ns, suffix) + return storageframework.GetStorageClass(provisioner, parameters, &delayedBinding, ns, suffix) } -func (g *gcePdDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { - config := &testsuites.PerTestConfig{ +func (g *gcePdDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) { + config := &storageframework.PerTestConfig{ Driver: g, Prefix: "gcepd", Framework: f, @@ -1368,9 +1367,9 @@ func (g *gcePdDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestCo } -func (g *gcePdDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume { +func (g *gcePdDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { zone := getInlineVolumeZone(config.Framework) - if volType == testpatterns.InlineVolume { + if volType == storageframework.InlineVolume { // PD will be created in framework.TestContext.CloudConfig.Zone zone, // so pods should be also scheduled there. 
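GetDynamicProvisionStorageClass follows a pattern shared by all the cloud drivers in this file: pick the in-tree provisioner, optionally set fsType, and delegate unique naming to storageframework.GetStorageClass. A sketch with illustrative parameter values:

import (
	storagev1 "k8s.io/api/storage/v1"
	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
)

func exampleStorageClass(config *storageframework.PerTestConfig) *storagev1.StorageClass {
	ns := config.Framework.Namespace.Name
	delayedBinding := storagev1.VolumeBindingWaitForFirstConsumer
	// Passing nil instead of &delayedBinding leaves the API default
	// (Immediate) binding mode in effect.
	return storageframework.GetStorageClass("kubernetes.io/gce-pd",
		map[string]string{"fsType": "ext4"}, &delayedBinding, ns, "gcepd-sc")
}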
config.ClientNodeSelection = e2epod.NodeSelection{ @@ -1393,7 +1392,7 @@ func (v *gcePdVolume) DeleteVolume() { // vSphere type vSphereDriver struct { - driverInfo testsuites.DriverInfo + driverInfo storageframework.DriverInfo } type vSphereVolume struct { @@ -1401,19 +1400,19 @@ type vSphereVolume struct { nodeInfo *vspheretest.NodeInfo } -var _ testsuites.TestDriver = &vSphereDriver{} -var _ testsuites.PreprovisionedVolumeTestDriver = &vSphereDriver{} -var _ testsuites.InlineVolumeTestDriver = &vSphereDriver{} -var _ testsuites.PreprovisionedPVTestDriver = &vSphereDriver{} -var _ testsuites.DynamicPVTestDriver = &vSphereDriver{} +var _ storageframework.TestDriver = &vSphereDriver{} +var _ storageframework.PreprovisionedVolumeTestDriver = &vSphereDriver{} +var _ storageframework.InlineVolumeTestDriver = &vSphereDriver{} +var _ storageframework.PreprovisionedPVTestDriver = &vSphereDriver{} +var _ storageframework.DynamicPVTestDriver = &vSphereDriver{} // InitVSphereDriver returns vSphereDriver that implements TestDriver interface -func InitVSphereDriver() testsuites.TestDriver { +func InitVSphereDriver() storageframework.TestDriver { return &vSphereDriver{ - driverInfo: testsuites.DriverInfo{ + driverInfo: storageframework.DriverInfo{ Name: "vsphere", InTreePluginName: "kubernetes.io/vsphere-volume", - MaxFileSize: testpatterns.FileSizeMedium, + MaxFileSize: storageframework.FileSizeMedium, SupportedSizeRange: e2evolume.SizeRange{ Min: "1Gi", }, @@ -1422,25 +1421,25 @@ func InitVSphereDriver() testsuites.TestDriver { "ext4", ), TopologyKeys: []string{v1.LabelFailureDomainBetaZone}, - Capabilities: map[testsuites.Capability]bool{ - testsuites.CapPersistence: true, - testsuites.CapFsGroup: true, - testsuites.CapExec: true, - testsuites.CapMultiPODs: true, - testsuites.CapTopology: true, + Capabilities: map[storageframework.Capability]bool{ + storageframework.CapPersistence: true, + storageframework.CapFsGroup: true, + storageframework.CapExec: true, + storageframework.CapMultiPODs: true, + storageframework.CapTopology: true, }, }, } } -func (v *vSphereDriver) GetDriverInfo() *testsuites.DriverInfo { +func (v *vSphereDriver) GetDriverInfo() *storageframework.DriverInfo { return &v.driverInfo } -func (v *vSphereDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { +func (v *vSphereDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) { e2eskipper.SkipUnlessProviderIs("vsphere") } -func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource { +func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource { vsv, ok := e2evolume.(*vSphereVolume) framework.ExpectEqual(ok, true, "Failed to cast test volume to vSphere test volume") @@ -1460,7 +1459,7 @@ func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume return &volSource } -func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { +func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { vsv, ok := e2evolume.(*vSphereVolume) framework.ExpectEqual(ok, true, "Failed to cast test volume to vSphere test volume") @@ -1480,7 +1479,7 @@ func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string, return &pvSource, nil } -func (v 
*vSphereDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass { +func (v *vSphereDriver) GetDynamicProvisionStorageClass(config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass { provisioner := "kubernetes.io/vsphere-volume" parameters := map[string]string{} if fsType != "" { @@ -1489,18 +1488,18 @@ func (v *vSphereDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTe ns := config.Framework.Namespace.Name suffix := fmt.Sprintf("%s-sc", v.driverInfo.Name) - return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix) + return storageframework.GetStorageClass(provisioner, parameters, nil, ns, suffix) } -func (v *vSphereDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { - return &testsuites.PerTestConfig{ +func (v *vSphereDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) { + return &storageframework.PerTestConfig{ Driver: v, Prefix: "vsphere", Framework: f, }, func() {} } -func (v *vSphereDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume { +func (v *vSphereDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { f := config.Framework vspheretest.Bootstrap(f) nodeInfo := vspheretest.GetReadySchedulableRandomNodeInfo() @@ -1518,26 +1517,26 @@ func (v *vSphereVolume) DeleteVolume() { // Azure Disk type azureDiskDriver struct { - driverInfo testsuites.DriverInfo + driverInfo storageframework.DriverInfo } type azureDiskVolume struct { volumeName string } -var _ testsuites.TestDriver = &azureDiskDriver{} -var _ testsuites.PreprovisionedVolumeTestDriver = &azureDiskDriver{} -var _ testsuites.InlineVolumeTestDriver = &azureDiskDriver{} -var _ testsuites.PreprovisionedPVTestDriver = &azureDiskDriver{} -var _ testsuites.DynamicPVTestDriver = &azureDiskDriver{} +var _ storageframework.TestDriver = &azureDiskDriver{} +var _ storageframework.PreprovisionedVolumeTestDriver = &azureDiskDriver{} +var _ storageframework.InlineVolumeTestDriver = &azureDiskDriver{} +var _ storageframework.PreprovisionedPVTestDriver = &azureDiskDriver{} +var _ storageframework.DynamicPVTestDriver = &azureDiskDriver{} // InitAzureDiskDriver returns azureDiskDriver that implements TestDriver interface -func InitAzureDiskDriver() testsuites.TestDriver { +func InitAzureDiskDriver() storageframework.TestDriver { return &azureDiskDriver{ - driverInfo: testsuites.DriverInfo{ + driverInfo: storageframework.DriverInfo{ Name: "azure-disk", InTreePluginName: "kubernetes.io/azure-disk", - MaxFileSize: testpatterns.FileSizeMedium, + MaxFileSize: storageframework.FileSizeMedium, SupportedSizeRange: e2evolume.SizeRange{ Min: "1Gi", }, @@ -1547,30 +1546,30 @@ func InitAzureDiskDriver() testsuites.TestDriver { "xfs", ), TopologyKeys: []string{v1.LabelFailureDomainBetaZone}, - Capabilities: map[testsuites.Capability]bool{ - testsuites.CapPersistence: true, - testsuites.CapFsGroup: true, - testsuites.CapBlock: true, - testsuites.CapExec: true, - testsuites.CapMultiPODs: true, + Capabilities: map[storageframework.Capability]bool{ + storageframework.CapPersistence: true, + storageframework.CapFsGroup: true, + storageframework.CapBlock: true, + storageframework.CapExec: true, + storageframework.CapMultiPODs: true, // Azure supports volume limits, but the test creates large // number of volumes and times out test suites. 
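The `var _ storageframework.X = &driver{}` lines sprinkled through this file are compile-time assertions: they cost nothing at runtime but break the build the moment a driver drifts out of an interface, which is exactly the failure mode a package move like this one risks. A self-contained toy showing the idiom, with greeter/english standing in for the real interfaces and drivers:

package main

import "fmt"

type greeter interface{ Greet() string }

type english struct{}

func (english) Greet() string { return "hello" }

// Compile-time assertion: the build fails here if english ever stops
// implementing greeter, long before any test runs.
var _ greeter = english{}

func main() { fmt.Println(english{}.Greet()) }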
- testsuites.CapVolumeLimits: false, - testsuites.CapTopology: true, + storageframework.CapVolumeLimits: false, + storageframework.CapTopology: true, }, }, } } -func (a *azureDiskDriver) GetDriverInfo() *testsuites.DriverInfo { +func (a *azureDiskDriver) GetDriverInfo() *storageframework.DriverInfo { return &a.driverInfo } -func (a *azureDiskDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { +func (a *azureDiskDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) { e2eskipper.SkipUnlessProviderIs("azure") } -func (a *azureDiskDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource { +func (a *azureDiskDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource { av, ok := e2evolume.(*azureDiskVolume) framework.ExpectEqual(ok, true, "Failed to cast test volume to Azure test volume") diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):] @@ -1590,7 +1589,7 @@ func (a *azureDiskDriver) GetVolumeSource(readOnly bool, fsType string, e2evolum return &volSource } -func (a *azureDiskDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { +func (a *azureDiskDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { av, ok := e2evolume.(*azureDiskVolume) framework.ExpectEqual(ok, true, "Failed to cast test volume to Azure test volume") @@ -1611,7 +1610,7 @@ func (a *azureDiskDriver) GetPersistentVolumeSource(readOnly bool, fsType string return &pvSource, nil } -func (a *azureDiskDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass { +func (a *azureDiskDriver) GetDynamicProvisionStorageClass(config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass { provisioner := "kubernetes.io/azure-disk" parameters := map[string]string{} if fsType != "" { @@ -1621,21 +1620,21 @@ func (a *azureDiskDriver) GetDynamicProvisionStorageClass(config *testsuites.Per suffix := fmt.Sprintf("%s-sc", a.driverInfo.Name) delayedBinding := storagev1.VolumeBindingWaitForFirstConsumer - return testsuites.GetStorageClass(provisioner, parameters, &delayedBinding, ns, suffix) + return storageframework.GetStorageClass(provisioner, parameters, &delayedBinding, ns, suffix) } -func (a *azureDiskDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { - return &testsuites.PerTestConfig{ +func (a *azureDiskDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) { + return &storageframework.PerTestConfig{ Driver: a, Prefix: "azure", Framework: f, }, func() {} } -func (a *azureDiskDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume { +func (a *azureDiskDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { ginkgo.By("creating a test azure disk volume") zone := getInlineVolumeZone(config.Framework) - if volType == testpatterns.InlineVolume { + if volType == storageframework.InlineVolume { // PD will be created in framework.TestContext.CloudConfig.Zone zone, // so pods should be also scheduled there. 
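The CreateVolume implementations for GCE, Azure and AWS all end with the same zone-pinning step: an inline volume is created in framework.TestContext.CloudConfig.Zone, so the test pod must be steered to that zone. The exact NodeSelection fields are elided in the hunks above; one plausible shape, offered only as an assumption, is a zone label selector mirroring the TopologyKeys these drivers already declare:

import (
	v1 "k8s.io/api/core/v1"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
)

// pinToZone is a hypothetical helper; the field population is an assumption,
// not the elided code from this patch.
func pinToZone(config *storageframework.PerTestConfig, volType storageframework.TestVolType, zone string) {
	if volType == storageframework.InlineVolume {
		config.ClientNodeSelection = e2epod.NodeSelection{
			Selector: map[string]string{v1.LabelFailureDomainBetaZone: zone},
		}
	}
}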
config.ClientNodeSelection = e2epod.NodeSelection{ @@ -1657,27 +1656,27 @@ func (v *azureDiskVolume) DeleteVolume() { // AWS type awsDriver struct { - driverInfo testsuites.DriverInfo + driverInfo storageframework.DriverInfo } type awsVolume struct { volumeName string } -var _ testsuites.TestDriver = &awsDriver{} +var _ storageframework.TestDriver = &awsDriver{} -var _ testsuites.PreprovisionedVolumeTestDriver = &awsDriver{} -var _ testsuites.InlineVolumeTestDriver = &awsDriver{} -var _ testsuites.PreprovisionedPVTestDriver = &awsDriver{} -var _ testsuites.DynamicPVTestDriver = &awsDriver{} +var _ storageframework.PreprovisionedVolumeTestDriver = &awsDriver{} +var _ storageframework.InlineVolumeTestDriver = &awsDriver{} +var _ storageframework.PreprovisionedPVTestDriver = &awsDriver{} +var _ storageframework.DynamicPVTestDriver = &awsDriver{} // InitAwsDriver returns awsDriver that implements TestDriver interface -func InitAwsDriver() testsuites.TestDriver { +func InitAwsDriver() storageframework.TestDriver { return &awsDriver{ - driverInfo: testsuites.DriverInfo{ + driverInfo: storageframework.DriverInfo{ Name: "aws", InTreePluginName: "kubernetes.io/aws-ebs", - MaxFileSize: testpatterns.FileSizeMedium, + MaxFileSize: storageframework.FileSizeMedium, SupportedSizeRange: e2evolume.SizeRange{ Min: "1Gi", }, @@ -1689,32 +1688,32 @@ func InitAwsDriver() testsuites.TestDriver { ), SupportedMountOption: sets.NewString("debug", "nouid32"), TopologyKeys: []string{v1.LabelFailureDomainBetaZone}, - Capabilities: map[testsuites.Capability]bool{ - testsuites.CapPersistence: true, - testsuites.CapFsGroup: true, - testsuites.CapBlock: true, - testsuites.CapExec: true, - testsuites.CapMultiPODs: true, - testsuites.CapControllerExpansion: true, - testsuites.CapNodeExpansion: true, + Capabilities: map[storageframework.Capability]bool{ + storageframework.CapPersistence: true, + storageframework.CapFsGroup: true, + storageframework.CapBlock: true, + storageframework.CapExec: true, + storageframework.CapMultiPODs: true, + storageframework.CapControllerExpansion: true, + storageframework.CapNodeExpansion: true, // AWS supports volume limits, but the test creates large // number of volumes and times out test suites. 
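Once a driver satisfies the storageframework interfaces, wiring it into ginkgo is a short pairing of GetDriverNameWithFeatureTags with DefineTestSuites (the external.go hunk later in this diff shows the same pattern verbatim). A sketch for an in-tree driver; where the registration lives and which suite list applies are assumptions here:

import (
	"github.com/onsi/ginkgo"
	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
	"k8s.io/kubernetes/test/e2e/storage/drivers"
	"k8s.io/kubernetes/test/e2e/storage/testsuites"
)

func defineAwsTests() {
	driver := drivers.InitAwsDriver()
	ginkgo.Describe("AWS "+storageframework.GetDriverNameWithFeatureTags(driver), func() {
		// The suite list is illustrative; CSISuites is the one used by the
		// external-driver registration in this diff.
		storageframework.DefineTestSuites(driver, testsuites.CSISuites)
	})
}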
- testsuites.CapVolumeLimits: false, - testsuites.CapTopology: true, + storageframework.CapVolumeLimits: false, + storageframework.CapTopology: true, }, }, } } -func (a *awsDriver) GetDriverInfo() *testsuites.DriverInfo { +func (a *awsDriver) GetDriverInfo() *storageframework.DriverInfo { return &a.driverInfo } -func (a *awsDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { +func (a *awsDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) { e2eskipper.SkipUnlessProviderIs("aws") } -func (a *awsDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) *v1.VolumeSource { +func (a *awsDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource { av, ok := e2evolume.(*awsVolume) framework.ExpectEqual(ok, true, "Failed to cast test volume to AWS test volume") volSource := v1.VolumeSource{ @@ -1729,7 +1728,7 @@ func (a *awsDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume test return &volSource } -func (a *awsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { +func (a *awsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { av, ok := e2evolume.(*awsVolume) framework.ExpectEqual(ok, true, "Failed to cast test volume to AWS test volume") pvSource := v1.PersistentVolumeSource{ @@ -1744,7 +1743,7 @@ func (a *awsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2ev return &pvSource, nil } -func (a *awsDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass { +func (a *awsDriver) GetDynamicProvisionStorageClass(config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass { provisioner := "kubernetes.io/aws-ebs" parameters := map[string]string{} if fsType != "" { @@ -1754,11 +1753,11 @@ func (a *awsDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestCo suffix := fmt.Sprintf("%s-sc", a.driverInfo.Name) delayedBinding := storagev1.VolumeBindingWaitForFirstConsumer - return testsuites.GetStorageClass(provisioner, parameters, &delayedBinding, ns, suffix) + return storageframework.GetStorageClass(provisioner, parameters, &delayedBinding, ns, suffix) } -func (a *awsDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { - config := &testsuites.PerTestConfig{ +func (a *awsDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) { + config := &storageframework.PerTestConfig{ Driver: a, Prefix: "aws", Framework: f, @@ -1774,9 +1773,9 @@ func (a *awsDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConf return config, func() {} } -func (a *awsDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume { +func (a *awsDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { zone := getInlineVolumeZone(config.Framework) - if volType == testpatterns.InlineVolume { + if volType == storageframework.InlineVolume { // PD will be created in framework.TestContext.CloudConfig.Zone zone, // so pods should be also scheduled there. 
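GetPersistentVolumeSource deliberately returns a pair: the PV source plus an optional node affinity — nil for the cloud drivers here, populated by the local driver further below. A sketch, under stated assumptions, of how a suite folds both into a PersistentVolume spec (capacity, access modes and the rest of the spec are elided):

import (
	v1 "k8s.io/api/core/v1"
	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
)

// pvSpecFor is a hypothetical helper built only from the interface shown
// in this diff.
func pvSpecFor(d storageframework.PreprovisionedPVTestDriver, vol storageframework.TestVolume) v1.PersistentVolumeSpec {
	pvSource, nodeAffinity := d.GetPersistentVolumeSource(false /* readOnly */, "" /* default fsType */, vol)
	return v1.PersistentVolumeSpec{
		PersistentVolumeSource: *pvSource,
		NodeAffinity:           nodeAffinity, // nil unless the driver pins volumes to nodes
	}
}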
config.ClientNodeSelection = e2epod.NodeSelection{ @@ -1799,7 +1798,7 @@ func (v *awsVolume) DeleteVolume() { // local type localDriver struct { - driverInfo testsuites.DriverInfo + driverInfo storageframework.DriverInfo node *v1.Node hostExec utils.HostExec // volumeType represents local volume type we are testing, e.g. tmpfs, @@ -1815,22 +1814,22 @@ type localVolume struct { var ( // capabilities - defaultLocalVolumeCapabilities = map[testsuites.Capability]bool{ - testsuites.CapPersistence: true, - testsuites.CapFsGroup: true, - testsuites.CapBlock: false, - testsuites.CapExec: true, - testsuites.CapMultiPODs: true, - testsuites.CapSingleNodeVolume: true, + defaultLocalVolumeCapabilities = map[storageframework.Capability]bool{ + storageframework.CapPersistence: true, + storageframework.CapFsGroup: true, + storageframework.CapBlock: false, + storageframework.CapExec: true, + storageframework.CapMultiPODs: true, + storageframework.CapSingleNodeVolume: true, } - localVolumeCapabitilies = map[utils.LocalVolumeType]map[testsuites.Capability]bool{ + localVolumeCapabitilies = map[utils.LocalVolumeType]map[storageframework.Capability]bool{ utils.LocalVolumeBlock: { - testsuites.CapPersistence: true, - testsuites.CapFsGroup: true, - testsuites.CapBlock: true, - testsuites.CapExec: true, - testsuites.CapMultiPODs: true, - testsuites.CapSingleNodeVolume: true, + storageframework.CapPersistence: true, + storageframework.CapFsGroup: true, + storageframework.CapBlock: true, + storageframework.CapExec: true, + storageframework.CapMultiPODs: true, + storageframework.CapSingleNodeVolume: true, }, } // fstype @@ -1843,16 +1842,16 @@ var ( ), } // max file size - defaultLocalVolumeMaxFileSize = testpatterns.FileSizeSmall + defaultLocalVolumeMaxFileSize = storageframework.FileSizeSmall localVolumeMaxFileSizes = map[utils.LocalVolumeType]int64{} ) -var _ testsuites.TestDriver = &localDriver{} -var _ testsuites.PreprovisionedVolumeTestDriver = &localDriver{} -var _ testsuites.PreprovisionedPVTestDriver = &localDriver{} +var _ storageframework.TestDriver = &localDriver{} +var _ storageframework.PreprovisionedVolumeTestDriver = &localDriver{} +var _ storageframework.PreprovisionedPVTestDriver = &localDriver{} // InitLocalDriverWithVolumeType initializes the local driver based on the volume type. 
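Unlike the other drivers, local volumes come in several flavors, so the init function is a factory: InitLocalDriverWithVolumeType captures the volume type (plus its capability and file-size overrides) and returns a constructor to invoke when the test actually runs. A usage sketch; the wrapper function is illustrative:

import (
	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
	"k8s.io/kubernetes/test/e2e/storage/drivers"
	"k8s.io/kubernetes/test/e2e/storage/utils"
)

func exampleLocalDriver() storageframework.TestDriver {
	newDriver := drivers.InitLocalDriverWithVolumeType(utils.LocalVolumeBlock)
	return newDriver() // constructed lazily, once the ginkgo node executes
}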
-func InitLocalDriverWithVolumeType(volumeType utils.LocalVolumeType) func() testsuites.TestDriver { +func InitLocalDriverWithVolumeType(volumeType utils.LocalVolumeType) func() storageframework.TestDriver { maxFileSize := defaultLocalVolumeMaxFileSize if maxFileSizeByVolType, ok := localVolumeMaxFileSizes[volumeType]; ok { maxFileSize = maxFileSizeByVolType @@ -1865,7 +1864,7 @@ func InitLocalDriverWithVolumeType(volumeType utils.LocalVolumeType) func() test if capabilitiesByType, ok := localVolumeCapabitilies[volumeType]; ok { capabilities = capabilitiesByType } - return func() testsuites.TestDriver { + return func() storageframework.TestDriver { // custom tag to distinguish from tests of other volume types featureTag := fmt.Sprintf("[LocalVolumeType: %s]", volumeType) // For GCE Local SSD volumes, we must run serially @@ -1873,7 +1872,7 @@ func InitLocalDriverWithVolumeType(volumeType utils.LocalVolumeType) func() test featureTag += " [Serial]" } return &localDriver{ - driverInfo: testsuites.DriverInfo{ + driverInfo: storageframework.DriverInfo{ Name: "local", InTreePluginName: "kubernetes.io/local-volume", FeatureTag: featureTag, @@ -1886,14 +1885,14 @@ func InitLocalDriverWithVolumeType(volumeType utils.LocalVolumeType) func() test } } -func (l *localDriver) GetDriverInfo() *testsuites.DriverInfo { +func (l *localDriver) GetDriverInfo() *storageframework.DriverInfo { return &l.driverInfo } -func (l *localDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { +func (l *localDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) { } -func (l *localDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { +func (l *localDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) { var err error l.node, err = e2enode.GetRandomReadySchedulableNode(f.ClientSet) framework.ExpectNoError(err) @@ -1915,7 +1914,7 @@ func (l *localDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestCo } } - return &testsuites.PerTestConfig{ + return &storageframework.PerTestConfig{ Driver: l, Prefix: "local", Framework: f, @@ -1925,9 +1924,9 @@ func (l *localDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestCo } } -func (l *localDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume { +func (l *localDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { switch volType { - case testpatterns.PreprovisionedPV: + case storageframework.PreprovisionedPV: node := l.node // assign this to schedule pod on this node config.ClientNodeSelection = e2epod.NodeSelection{Name: node.Name} @@ -1971,7 +1970,7 @@ func (l *localDriver) nodeAffinityForNode(node *v1.Node) *v1.VolumeNodeAffinity } } -func (l *localDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { +func (l *localDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { lv, ok := e2evolume.(*localVolume) framework.ExpectEqual(ok, true, "Failed to cast test volume to local test volume") return &v1.PersistentVolumeSource{ diff --git a/test/e2e/storage/external/BUILD b/test/e2e/storage/external/BUILD index 2dd0d19e770..76efd4dd5b7 100644 --- a/test/e2e/storage/external/BUILD +++ b/test/e2e/storage/external/BUILD @@ -19,7 +19,7 @@ go_library( 
"//test/e2e/framework/pod:go_default_library", "//test/e2e/framework/skipper:go_default_library", "//test/e2e/framework/volume:go_default_library", - "//test/e2e/storage/testpatterns:go_default_library", + "//test/e2e/storage/framework:go_default_library", "//test/e2e/storage/testsuites:go_default_library", "//test/e2e/storage/utils:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", @@ -36,7 +36,7 @@ go_test( deps = [ "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//test/e2e/framework/volume:go_default_library", - "//test/e2e/storage/testsuites:go_default_library", + "//test/e2e/storage/framework:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", ], ) diff --git a/test/e2e/storage/external/external.go b/test/e2e/storage/external/external.go index 4b13b9da9a0..0a1180b8cec 100644 --- a/test/e2e/storage/external/external.go +++ b/test/e2e/storage/external/external.go @@ -39,7 +39,7 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" - "k8s.io/kubernetes/test/e2e/storage/testpatterns" + storageframework "k8s.io/kubernetes/test/e2e/storage/framework" "k8s.io/kubernetes/test/e2e/storage/testsuites" "k8s.io/kubernetes/test/e2e/storage/utils" @@ -55,7 +55,7 @@ type driverDefinition struct { // for details. The only field with a non-zero default is the list of // supported file systems (SupportedFsType): it is set so that tests using // the default file system are enabled. - DriverInfo testsuites.DriverInfo + DriverInfo storageframework.DriverInfo // StorageClass must be set to enable dynamic provisioning tests. // The default is to not run those tests. @@ -171,9 +171,9 @@ func AddDriverDefinition(filename string) error { return errors.Errorf("%q: DriverInfo.Name not set", filename) } - description := "External Storage " + testsuites.GetDriverNameWithFeatureTags(driver) + description := "External Storage " + storageframework.GetDriverNameWithFeatureTags(driver) ginkgo.Describe(description, func() { - testsuites.DefineTestSuite(driver, testsuites.CSISuites) + storageframework.DefineTestSuites(driver, testsuites.CSISuites) }) return nil @@ -189,7 +189,7 @@ func loadDriverDefinition(filename string) (*driverDefinition, error) { } // Some reasonable defaults follow. driver := &driverDefinition{ - DriverInfo: testsuites.DriverInfo{ + DriverInfo: storageframework.DriverInfo{ SupportedFsType: sets.NewString( "", // Default fsType ), @@ -206,20 +206,20 @@ func loadDriverDefinition(filename string) (*driverDefinition, error) { return driver, nil } -var _ testsuites.TestDriver = &driverDefinition{} +var _ storageframework.TestDriver = &driverDefinition{} // We have to implement the interface because dynamic PV may or may // not be supported. driverDefinition.SkipUnsupportedTest checks that // based on the actual driver definition. -var _ testsuites.DynamicPVTestDriver = &driverDefinition{} +var _ storageframework.DynamicPVTestDriver = &driverDefinition{} // Same for snapshotting. -var _ testsuites.SnapshottableTestDriver = &driverDefinition{} +var _ storageframework.SnapshottableTestDriver = &driverDefinition{} // And for ephemeral volumes. 
-var _ testsuites.EphemeralTestDriver = &driverDefinition{} +var _ storageframework.EphemeralTestDriver = &driverDefinition{} -var _ testsuites.CustomTimeoutsTestDriver = &driverDefinition{} +var _ storageframework.CustomTimeoutsTestDriver = &driverDefinition{} // runtime.DecodeInto needs a runtime.Object but doesn't do any // deserialization of it and therefore none of the methods below need @@ -234,21 +234,21 @@ func (d *driverDefinition) GetObjectKind() schema.ObjectKind { return nil } -func (d *driverDefinition) GetDriverInfo() *testsuites.DriverInfo { +func (d *driverDefinition) GetDriverInfo() *storageframework.DriverInfo { return &d.DriverInfo } -func (d *driverDefinition) SkipUnsupportedTest(pattern testpatterns.TestPattern) { +func (d *driverDefinition) SkipUnsupportedTest(pattern storageframework.TestPattern) { supported := false // TODO (?): add support for more volume types switch pattern.VolType { case "": supported = true - case testpatterns.DynamicPV: + case storageframework.DynamicPV: if d.StorageClass.FromName || d.StorageClass.FromFile != "" || d.StorageClass.FromExistingClassName != "" { supported = true } - case testpatterns.CSIInlineVolume: + case storageframework.CSIInlineVolume: supported = len(d.InlineVolumes) != 0 } if !supported { @@ -257,7 +257,7 @@ func (d *driverDefinition) SkipUnsupportedTest(pattern testpatterns.TestPattern) } -func (d *driverDefinition) GetDynamicProvisionStorageClass(e2econfig *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass { +func (d *driverDefinition) GetDynamicProvisionStorageClass(e2econfig *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass { var ( sc *storagev1.StorageClass err error @@ -295,7 +295,7 @@ func (d *driverDefinition) GetDynamicProvisionStorageClass(e2econfig *testsuites // reconsidered if we eventually need to move in-tree storage tests out. 
sc.Parameters["csi.storage.k8s.io/fstype"] = fsType } - return testsuites.CopyStorageClass(sc, f.Namespace.Name, "e2e-sc") + return storageframework.CopyStorageClass(sc, f.Namespace.Name, "e2e-sc") } func (d *driverDefinition) GetTimeouts() *framework.TimeoutContext { @@ -348,7 +348,7 @@ func loadSnapshotClass(filename string) (*unstructured.Unstructured, error) { return snapshotClass, nil } -func (d *driverDefinition) GetSnapshotClass(e2econfig *testsuites.PerTestConfig) *unstructured.Unstructured { +func (d *driverDefinition) GetSnapshotClass(e2econfig *storageframework.PerTestConfig) *unstructured.Unstructured { if !d.SnapshotClass.FromName && d.SnapshotClass.FromFile == "" && d.SnapshotClass.FromExistingClassName == "" { e2eskipper.Skipf("Driver %q does not support snapshotting - skipping", d.DriverInfo.Name) } @@ -363,7 +363,7 @@ func (d *driverDefinition) GetSnapshotClass(e2econfig *testsuites.PerTestConfig) case d.SnapshotClass.FromName: // Do nothing (just use empty parameters) case d.SnapshotClass.FromExistingClassName != "": - snapshotClass, err := f.DynamicClient.Resource(testsuites.SnapshotClassGVR).Get(context.TODO(), d.SnapshotClass.FromExistingClassName, metav1.GetOptions{}) + snapshotClass, err := f.DynamicClient.Resource(utils.SnapshotClassGVR).Get(context.TODO(), d.SnapshotClass.FromExistingClassName, metav1.GetOptions{}) framework.ExpectNoError(err, "getting snapshot class %s", d.SnapshotClass.FromExistingClassName) if params, ok := snapshotClass.Object["parameters"].(map[string]interface{}); ok { @@ -390,10 +390,10 @@ func (d *driverDefinition) GetSnapshotClass(e2econfig *testsuites.PerTestConfig) } } - return testsuites.GetSnapshotClass(snapshotter, parameters, ns, suffix) + return utils.GenerateSnapshotClassSpec(snapshotter, parameters, ns, suffix) } -func (d *driverDefinition) GetVolume(e2econfig *testsuites.PerTestConfig, volumeNumber int) (map[string]string, bool, bool) { +func (d *driverDefinition) GetVolume(e2econfig *storageframework.PerTestConfig, volumeNumber int) (map[string]string, bool, bool) { if len(d.InlineVolumes) == 0 { e2eskipper.Skipf("%s does not have any InlineVolumeAttributes defined", d.DriverInfo.Name) } @@ -401,12 +401,12 @@ func (d *driverDefinition) GetVolume(e2econfig *testsuites.PerTestConfig, volume return e2evolume.Attributes, e2evolume.Shared, e2evolume.ReadOnly } -func (d *driverDefinition) GetCSIDriverName(e2econfig *testsuites.PerTestConfig) string { +func (d *driverDefinition) GetCSIDriverName(e2econfig *storageframework.PerTestConfig) string { return d.DriverInfo.Name } -func (d *driverDefinition) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { - e2econfig := &testsuites.PerTestConfig{ +func (d *driverDefinition) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) { + e2econfig := &storageframework.PerTestConfig{ Driver: d, Prefix: "external", Framework: f, diff --git a/test/e2e/storage/external/external_test.go b/test/e2e/storage/external/external_test.go index 82be68370db..95035067a00 100644 --- a/test/e2e/storage/external/external_test.go +++ b/test/e2e/storage/external/external_test.go @@ -23,12 +23,12 @@ import ( "k8s.io/apimachinery/pkg/util/sets" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" - "k8s.io/kubernetes/test/e2e/storage/testsuites" + storageframework "k8s.io/kubernetes/test/e2e/storage/framework" ) func TestDriverParameter(t *testing.T) { expected := &driverDefinition{ - DriverInfo: testsuites.DriverInfo{ + DriverInfo: storageframework.DriverInfo{ Name: 
"foo.example.com", SupportedFsType: sets.NewString( "", // Default fsType diff --git a/test/e2e/storage/external/testdata/example.yaml b/test/e2e/storage/external/testdata/example.yaml new file mode 100644 index 00000000000..7d485e0a3aa --- /dev/null +++ b/test/e2e/storage/external/testdata/example.yaml @@ -0,0 +1,21 @@ +StorageClass: + FromExistingClassName: example +DriverInfo: + Name: example + RequiredAccessModes: + - ReadWriteOnce + Capabilities: + persistence: true + multipods: true + exec: true + block: true + fsGroup: true + topology: true + controllerExpansion: true + nodeExpansion: true + volumeLimits: false + snapshotDataSource: true + StressTestOptions: + NumPods: 10 + NumRestarts: 20 + NumSnapshots: 10 diff --git a/test/e2e/storage/framework/BUILD b/test/e2e/storage/framework/BUILD new file mode 100644 index 00000000000..e25c9c24747 --- /dev/null +++ b/test/e2e/storage/framework/BUILD @@ -0,0 +1,51 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "driver_operations.go", + "snapshot_resource.go", + "testconfig.go", + "testdriver.go", + "testpattern.go", + "testsuite.go", + "volume_resource.go", + ], + importpath = "k8s.io/kubernetes/test/e2e/storage/framework", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/api/storage/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/storage/names:go_default_library", + "//test/e2e/framework:go_default_library", + "//test/e2e/framework/pod:go_default_library", + "//test/e2e/framework/pv:go_default_library", + "//test/e2e/framework/skipper:go_default_library", + "//test/e2e/framework/volume:go_default_library", + "//test/e2e/storage/utils:go_default_library", + "//vendor/github.com/onsi/ginkgo:go_default_library", + "//vendor/github.com/pkg/errors:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/test/e2e/storage/testsuites/driveroperations.go b/test/e2e/storage/framework/driver_operations.go similarity index 70% rename from test/e2e/storage/testsuites/driveroperations.go rename to test/e2e/storage/framework/driver_operations.go index 0ec560da73e..79c5dac44ec 100644 --- a/test/e2e/storage/testsuites/driveroperations.go +++ b/test/e2e/storage/framework/driver_operations.go @@ -14,17 +14,15 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package testsuites +package framework import ( "fmt" storagev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apiserver/pkg/storage/names" "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/storage/testpatterns" ) // GetDriverNameWithFeatureTags returns driver name with feature tags @@ -38,15 +36,13 @@ func GetDriverNameWithFeatureTags(driver TestDriver) string { } // CreateVolume creates volume for test unless dynamicPV or CSI ephemeral inline volume test -func CreateVolume(driver TestDriver, config *PerTestConfig, volType testpatterns.TestVolType) TestVolume { +func CreateVolume(driver TestDriver, config *PerTestConfig, volType TestVolType) TestVolume { switch volType { - case testpatterns.InlineVolume, testpatterns.PreprovisionedPV: + case InlineVolume, PreprovisionedPV: if pDriver, ok := driver.(PreprovisionedVolumeTestDriver); ok { return pDriver.CreateVolume(config, volType) } - case testpatterns.CSIInlineVolume, - testpatterns.GenericEphemeralVolume, - testpatterns.DynamicPV: + case CSIInlineVolume, GenericEphemeralVolume, DynamicPV: // No need to create volume default: framework.Failf("Invalid volType specified: %v", volType) @@ -91,29 +87,3 @@ func GetStorageClass( VolumeBindingMode: bindingMode, } } - -// GetSnapshotClass constructs a new SnapshotClass instance -// with a unique name that is based on namespace + suffix. -func GetSnapshotClass( - snapshotter string, - parameters map[string]string, - ns string, - suffix string, -) *unstructured.Unstructured { - snapshotClass := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "kind": "VolumeSnapshotClass", - "apiVersion": snapshotAPIVersion, - "metadata": map[string]interface{}{ - // Name must be unique, so let's base it on namespace name and use GenerateName - // TODO(#96234): Remove unnecessary suffix. - "name": names.SimpleNameGenerator.GenerateName(ns + "-" + suffix), - }, - "driver": snapshotter, - "parameters": parameters, - "deletionPolicy": "Delete", - }, - } - - return snapshotClass -} diff --git a/test/e2e/storage/framework/snapshot_resource.go b/test/e2e/storage/framework/snapshot_resource.go new file mode 100644 index 00000000000..b8d01b0440d --- /dev/null +++ b/test/e2e/storage/framework/snapshot_resource.go @@ -0,0 +1,333 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package framework + +import ( + "context" + "fmt" + + "github.com/onsi/ginkgo" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/types" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/utils" +) + +// SnapshotResource represents a snapshot class, a snapshot and its bound snapshot contents for a specific test case +type SnapshotResource struct { + Config *PerTestConfig + Pattern TestPattern + + Vs *unstructured.Unstructured + Vscontent *unstructured.Unstructured + Vsclass *unstructured.Unstructured +} + +// CreateSnapshot creates a VolumeSnapshotClass with given SnapshotDeletionPolicy and a VolumeSnapshot +// from the VolumeSnapshotClass using a dynamic client. +// Returns the unstructured VolumeSnapshotClass and VolumeSnapshot objects. +func CreateSnapshot(sDriver SnapshottableTestDriver, config *PerTestConfig, pattern TestPattern, pvcName string, pvcNamespace string, timeouts *framework.TimeoutContext) (*unstructured.Unstructured, *unstructured.Unstructured) { + defer ginkgo.GinkgoRecover() + var err error + if pattern.SnapshotType != DynamicCreatedSnapshot && pattern.SnapshotType != PreprovisionedCreatedSnapshot { + err = fmt.Errorf("SnapshotType must be set to either DynamicCreatedSnapshot or PreprovisionedCreatedSnapshot") + framework.ExpectNoError(err) + } + dc := config.Framework.DynamicClient + + ginkgo.By("creating a SnapshotClass") + sclass := sDriver.GetSnapshotClass(config) + if sclass == nil { + framework.Failf("Failed to get snapshot class based on test config") + } + sclass.Object["deletionPolicy"] = pattern.SnapshotDeletionPolicy.String() + + sclass, err = dc.Resource(utils.SnapshotClassGVR).Create(context.TODO(), sclass, metav1.CreateOptions{}) + framework.ExpectNoError(err) + + sclass, err = dc.Resource(utils.SnapshotClassGVR).Get(context.TODO(), sclass.GetName(), metav1.GetOptions{}) + framework.ExpectNoError(err) + + ginkgo.By("creating a dynamic VolumeSnapshot") + // prepare a dynamically provisioned volume snapshot with certain data + snapshot := getSnapshot(pvcName, pvcNamespace, sclass.GetName()) + + snapshot, err = dc.Resource(utils.SnapshotGVR).Namespace(snapshot.GetNamespace()).Create(context.TODO(), snapshot, metav1.CreateOptions{}) + framework.ExpectNoError(err) + + return sclass, snapshot +} + +// CreateSnapshotResource creates a snapshot resource for the current test. It knows how to deal with +// different test pattern snapshot provisioning and deletion policy +func CreateSnapshotResource(sDriver SnapshottableTestDriver, config *PerTestConfig, pattern TestPattern, pvcName string, pvcNamespace string, timeouts *framework.TimeoutContext) *SnapshotResource { + var err error + r := SnapshotResource{ + Config: config, + Pattern: pattern, + } + r.Vsclass, r.Vs = CreateSnapshot(sDriver, config, pattern, pvcName, pvcNamespace, timeouts) + + dc := r.Config.Framework.DynamicClient + + r.Vscontent = utils.GetSnapshotContentFromSnapshot(dc, r.Vs) + + if pattern.SnapshotType == PreprovisionedCreatedSnapshot { + // prepare a pre-provisioned VolumeSnapshotContent with certain data + // Because this could be run with an external CSI driver, we have no way + // to pre-provision the snapshot as we normally would using their API. 
+ // We instead dynamically take a snapshot (above step), delete the old snapshot, + // and create another snapshot using the first snapshot's snapshot handle. + + ginkgo.By("updating the snapshot content deletion policy to retain") + r.Vscontent.Object["spec"].(map[string]interface{})["deletionPolicy"] = "Retain" + + r.Vscontent, err = dc.Resource(utils.SnapshotContentGVR).Update(context.TODO(), r.Vscontent, metav1.UpdateOptions{}) + framework.ExpectNoError(err) + + ginkgo.By("recording the volume handle and snapshotHandle") + snapshotHandle := r.Vscontent.Object["status"].(map[string]interface{})["snapshotHandle"].(string) + framework.Logf("Recording snapshot handle: %s", snapshotHandle) + csiDriverName := r.Vsclass.Object["driver"].(string) + + // If the deletion policy on the vscontent is Retain: + // when the vs is deleted, the vscontent will not be deleted; + // when the vscontent is manually deleted, the underlying snapshot resource will not be deleted either. + // We exploit this to create a snapshot resource from which we can create a pre-provisioned snapshot. + ginkgo.By("deleting the snapshot and snapshot content") + err = dc.Resource(utils.SnapshotGVR).Namespace(r.Vs.GetNamespace()).Delete(context.TODO(), r.Vs.GetName(), metav1.DeleteOptions{}) + if apierrors.IsNotFound(err) { + err = nil + } + framework.ExpectNoError(err) + + ginkgo.By("checking the Snapshot has been deleted") + err = utils.WaitForNamespacedGVRDeletion(dc, utils.SnapshotGVR, r.Vs.GetName(), r.Vs.GetNamespace(), framework.Poll, timeouts.SnapshotDelete) + framework.ExpectNoError(err) + + err = dc.Resource(utils.SnapshotContentGVR).Delete(context.TODO(), r.Vscontent.GetName(), metav1.DeleteOptions{}) + if apierrors.IsNotFound(err) { + err = nil + } + framework.ExpectNoError(err) + + ginkgo.By("checking the Snapshot content has been deleted") + err = utils.WaitForGVRDeletion(dc, utils.SnapshotContentGVR, r.Vscontent.GetName(), framework.Poll, timeouts.SnapshotDelete) + framework.ExpectNoError(err) + + ginkgo.By("creating a snapshot content with the snapshot handle") + uuid := uuid.NewUUID() + + snapName := getPreProvisionedSnapshotName(uuid) + snapcontentName := getPreProvisionedSnapshotContentName(uuid) + + r.Vscontent = getPreProvisionedSnapshotContent(snapcontentName, snapName, pvcNamespace, snapshotHandle, pattern.SnapshotDeletionPolicy.String(), csiDriverName) + r.Vscontent, err = dc.Resource(utils.SnapshotContentGVR).Create(context.TODO(), r.Vscontent, metav1.CreateOptions{}) + framework.ExpectNoError(err) + + ginkgo.By("creating a snapshot with that snapshot content") + r.Vs = getPreProvisionedSnapshot(snapName, pvcNamespace, snapcontentName) + r.Vs, err = dc.Resource(utils.SnapshotGVR).Namespace(r.Vs.GetNamespace()).Create(context.TODO(), r.Vs, metav1.CreateOptions{}) + framework.ExpectNoError(err) + + err = utils.WaitForSnapshotReady(dc, r.Vs.GetNamespace(), r.Vs.GetName(), framework.Poll, timeouts.SnapshotCreate) + framework.ExpectNoError(err) + + ginkgo.By("getting the snapshot and snapshot content") + r.Vs, err = dc.Resource(utils.SnapshotGVR).Namespace(r.Vs.GetNamespace()).Get(context.TODO(), r.Vs.GetName(), metav1.GetOptions{}) + framework.ExpectNoError(err) + + r.Vscontent, err = dc.Resource(utils.SnapshotContentGVR).Get(context.TODO(), r.Vscontent.GetName(), metav1.GetOptions{}) + framework.ExpectNoError(err) + } + return &r +} + +// CleanupResource cleans up the snapshot resource, tolerating "not found" errors +func (sr *SnapshotResource) CleanupResource(timeouts *framework.TimeoutContext) error { + var err error
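+	// Resources are released in dependency order: the VolumeSnapshot first (after making sure the bound content's +	// deletionPolicy is Delete so the backend snapshot gets released), then any remaining VolumeSnapshotContent, +	// and finally the VolumeSnapshotClass.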
+	var cleanupErrs []error + + dc := sr.Config.Framework.DynamicClient + + if sr.Vs != nil { + framework.Logf("deleting snapshot %q/%q", sr.Vs.GetNamespace(), sr.Vs.GetName()) + + sr.Vs, err = dc.Resource(utils.SnapshotGVR).Namespace(sr.Vs.GetNamespace()).Get(context.TODO(), sr.Vs.GetName(), metav1.GetOptions{}) + switch { + case err == nil: + snapshotStatus := sr.Vs.Object["status"].(map[string]interface{}) + snapshotContentName := snapshotStatus["boundVolumeSnapshotContentName"].(string) + framework.Logf("received snapshotStatus %v", snapshotStatus) + framework.Logf("snapshotContentName %s", snapshotContentName) + + boundVsContent, err := dc.Resource(utils.SnapshotContentGVR).Get(context.TODO(), snapshotContentName, metav1.GetOptions{}) + switch { + case err == nil: + if boundVsContent.Object["spec"].(map[string]interface{})["deletionPolicy"] != "Delete" { + // The purpose of this block is to prevent physical snapshotContent leaks. + // We must update the SnapshotContent to have the Delete deletion policy, + // or else the physical snapshot content will be leaked. + boundVsContent.Object["spec"].(map[string]interface{})["deletionPolicy"] = "Delete" + boundVsContent, err = dc.Resource(utils.SnapshotContentGVR).Update(context.TODO(), boundVsContent, metav1.UpdateOptions{}) + framework.ExpectNoError(err) + } + err = dc.Resource(utils.SnapshotGVR).Namespace(sr.Vs.GetNamespace()).Delete(context.TODO(), sr.Vs.GetName(), metav1.DeleteOptions{}) + if apierrors.IsNotFound(err) { + err = nil + } + framework.ExpectNoError(err) + + err = utils.WaitForGVRDeletion(dc, utils.SnapshotContentGVR, boundVsContent.GetName(), framework.Poll, timeouts.SnapshotDelete) + framework.ExpectNoError(err) + + case apierrors.IsNotFound(err): + // the volume snapshot is not bound to snapshot content yet + err = dc.Resource(utils.SnapshotGVR).Namespace(sr.Vs.GetNamespace()).Delete(context.TODO(), sr.Vs.GetName(), metav1.DeleteOptions{}) + if apierrors.IsNotFound(err) { + err = nil + } + framework.ExpectNoError(err) + + err = utils.WaitForNamespacedGVRDeletion(dc, utils.SnapshotGVR, sr.Vs.GetName(), sr.Vs.GetNamespace(), framework.Poll, timeouts.SnapshotDelete) + framework.ExpectNoError(err) + default: + cleanupErrs = append(cleanupErrs, err) + } + case apierrors.IsNotFound(err): + // Hope that the underlying snapshot content and resource are gone already + default: + cleanupErrs = append(cleanupErrs, err) + } + } + if sr.Vscontent != nil { + framework.Logf("deleting snapshot content %q", sr.Vscontent.GetName()) + + sr.Vscontent, err = dc.Resource(utils.SnapshotContentGVR).Get(context.TODO(), sr.Vscontent.GetName(), metav1.GetOptions{}) + switch { + case err == nil: + if sr.Vscontent.Object["spec"].(map[string]interface{})["deletionPolicy"] != "Delete" { + // The purpose of this block is to prevent physical snapshotContent leaks. + // We must update the SnapshotContent to have the Delete deletion policy, + // or else the physical snapshot content will be leaked.
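+			// (Same safeguard as in the VolumeSnapshot branch above.)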
+ sr.Vscontent.Object["spec"].(map[string]interface{})["deletionPolicy"] = "Delete" + sr.Vscontent, err = dc.Resource(utils.SnapshotContentGVR).Update(context.TODO(), sr.Vscontent, metav1.UpdateOptions{}) + framework.ExpectNoError(err) + } + err = dc.Resource(utils.SnapshotContentGVR).Delete(context.TODO(), sr.Vscontent.GetName(), metav1.DeleteOptions{}) + if apierrors.IsNotFound(err) { + err = nil + } + framework.ExpectNoError(err) + + err = utils.WaitForGVRDeletion(dc, utils.SnapshotContentGVR, sr.Vscontent.GetName(), framework.Poll, timeouts.SnapshotDelete) + framework.ExpectNoError(err) + case apierrors.IsNotFound(err): + // Hope the underlying physical snapshot resource has been deleted already + default: + cleanupErrs = append(cleanupErrs, err) + } + } + if sr.Vsclass != nil { + framework.Logf("deleting snapshot class %q", sr.Vsclass.GetName()) + // typically this snapshot class has already been deleted + err = dc.Resource(utils.SnapshotClassGVR).Delete(context.TODO(), sr.Vsclass.GetName(), metav1.DeleteOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + framework.Failf("Error deleting snapshot class %q. Error: %v", sr.Vsclass.GetName(), err) + } + err = utils.WaitForGVRDeletion(dc, utils.SnapshotClassGVR, sr.Vsclass.GetName(), framework.Poll, timeouts.SnapshotDelete) + framework.ExpectNoError(err) + } + return utilerrors.NewAggregate(cleanupErrs) +} + +func getSnapshot(claimName string, ns, snapshotClassName string) *unstructured.Unstructured { + snapshot := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "VolumeSnapshot", + "apiVersion": utils.SnapshotAPIVersion, + "metadata": map[string]interface{}{ + "generateName": "snapshot-", + "namespace": ns, + }, + "spec": map[string]interface{}{ + "volumeSnapshotClassName": snapshotClassName, + "source": map[string]interface{}{ + "persistentVolumeClaimName": claimName, + }, + }, + }, + } + + return snapshot +} +func getPreProvisionedSnapshot(snapName, ns, snapshotContentName string) *unstructured.Unstructured { + snapshot := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "VolumeSnapshot", + "apiVersion": utils.SnapshotAPIVersion, + "metadata": map[string]interface{}{ + "name": snapName, + "namespace": ns, + }, + "spec": map[string]interface{}{ + "source": map[string]interface{}{ + "volumeSnapshotContentName": snapshotContentName, + }, + }, + }, + } + + return snapshot +} +func getPreProvisionedSnapshotContent(snapcontentName, snapshotName, snapshotNamespace, snapshotHandle, deletionPolicy, csiDriverName string) *unstructured.Unstructured { + snapshotContent := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "VolumeSnapshotContent", + "apiVersion": utils.SnapshotAPIVersion, + "metadata": map[string]interface{}{ + "name": snapcontentName, + }, + "spec": map[string]interface{}{ + "source": map[string]interface{}{ + "snapshotHandle": snapshotHandle, + }, + "volumeSnapshotRef": map[string]interface{}{ + "name": snapshotName, + "namespace": snapshotNamespace, + }, + "driver": csiDriverName, + "deletionPolicy": deletionPolicy, + }, + }, + } + + return snapshotContent +} + +func getPreProvisionedSnapshotContentName(uuid types.UID) string { + return fmt.Sprintf("pre-provisioned-snapcontent-%s", string(uuid)) +} + +func getPreProvisionedSnapshotName(uuid types.UID) string { + return fmt.Sprintf("pre-provisioned-snapshot-%s", string(uuid)) +} diff --git a/test/e2e/storage/framework/testconfig.go b/test/e2e/storage/framework/testconfig.go new file mode 100644 index 
00000000000..8a56b71501b --- /dev/null +++ b/test/e2e/storage/framework/testconfig.go @@ -0,0 +1,78 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" +) + +// PerTestConfig represents parameters that control test execution. +// One instance gets allocated for each test and is then passed +// via pointer to functions involved in the test. +type PerTestConfig struct { + // The test driver for the test. + Driver TestDriver + + // Some short word that gets inserted into dynamically + // generated entities (pods, paths) as first part of the name + // to make debugging easier. Can be the same for different + // tests inside the test suite. + Prefix string + + // The framework instance allocated for the current test. + Framework *framework.Framework + + // If non-empty, Pods using a volume will be scheduled + // according to the NodeSelection. Otherwise Kubernetes will + // pick a node. + ClientNodeSelection e2epod.NodeSelection + + // Some test drivers initialize a storage server. This is + // the configuration that then has to be used to run tests. + // The values above are ignored for such tests. + ServerConfig *e2evolume.TestConfig + + // Some drivers run in their own namespace + DriverNamespace *v1.Namespace +} + +// GetUniqueDriverName returns a unique driver name that can be used in parallel tests +func (config *PerTestConfig) GetUniqueDriverName() string { + return config.Driver.GetDriverInfo().Name + "-" + config.Framework.UniqueName +} + +// ConvertTestConfig returns a framework test config with the +// parameters specified for the testsuite or (if available) the +// dynamically created config for the volume server. +// +// This is done because TestConfig is the public API for +// the storage testsuites whereas volume.TestConfig is merely +// an implementation detail. It contains fields that have no effect, +// which makes it unsuitable for use in the public API. +func ConvertTestConfig(in *PerTestConfig) e2evolume.TestConfig { + if in.ServerConfig != nil { + return *in.ServerConfig + } + + return e2evolume.TestConfig{ + Namespace: in.Framework.Namespace.Name, + Prefix: in.Prefix, + ClientNodeSelection: in.ClientNodeSelection, + } +} diff --git a/test/e2e/storage/testsuites/testdriver.go b/test/e2e/storage/framework/testdriver.go similarity index 86% rename from test/e2e/storage/testsuites/testdriver.go rename to test/e2e/storage/framework/testdriver.go index 5618264d92b..4e80b3e97ae 100644 --- a/test/e2e/storage/testsuites/testdriver.go +++ b/test/e2e/storage/framework/testdriver.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License.
*/ -package testsuites +package framework import ( v1 "k8s.io/api/core/v1" @@ -22,9 +22,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/kubernetes/test/e2e/framework" - e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" - "k8s.io/kubernetes/test/e2e/storage/testpatterns" ) // TestDriver represents an interface for a driver to be tested in TestSuite. @@ -44,7 +42,7 @@ type TestDriver interface { // expensive resources like framework.Framework. Tests that // depend on a connection to the cluster can be done in // PrepareTest once the framework is ready. - SkipUnsupportedTest(testpatterns.TestPattern) + SkipUnsupportedTest(TestPattern) // PrepareTest is called at test execution time each time a new test case is about to start. // It sets up all necessary resources and returns the per-test configuration @@ -63,7 +61,7 @@ type TestVolume interface { type PreprovisionedVolumeTestDriver interface { TestDriver // CreateVolume creates a pre-provisioned volume of the desired volume type. - CreateVolume(config *PerTestConfig, volumeType testpatterns.TestVolType) TestVolume + CreateVolume(config *PerTestConfig, volumeType TestVolType) TestVolume } // InlineVolumeTestDriver represents an interface for a TestDriver that supports InlineVolume @@ -135,7 +133,8 @@ type CustomTimeoutsTestDriver interface { GetTimeouts() *framework.TimeoutContext } -func getDriverTimeouts(driver TestDriver) *framework.TimeoutContext { +// GetDriverTimeouts returns the timeouts for the driver's operations; drivers implementing CustomTimeoutsTestDriver can provide their own +func GetDriverTimeouts(driver TestDriver) *framework.TimeoutContext { if d, ok := driver.(CustomTimeoutsTestDriver); ok { return d.GetTimeouts() } @@ -225,38 +224,3 @@ type VolumeSnapshotStressTestOptions struct { // Number of snapshots to create for each volume. NumSnapshots int } - -// PerTestConfig represents parameters that control test execution. -// One instance gets allocated for each test and is then passed -// via pointer to functions involved in the test. -type PerTestConfig struct { - // The test driver for the test. - Driver TestDriver - - // Some short word that gets inserted into dynamically - // generated entities (pods, paths) as first part of the name - // to make debugging easier. Can be the same for different - // tests inside the test suite. - Prefix string - - // The framework instance allocated for the current test. - Framework *framework.Framework - - // If non-empty, Pods using a volume will be scheduled - // according to the NodeSelection. Otherwise Kubernetes will - // pick a node. - ClientNodeSelection e2epod.NodeSelection - - // Some test drivers initialize a storage server. This is - // the configuration that then has to be used to run tests. - // The values above are ignored for such tests.
- ServerConfig *e2evolume.TestConfig - - // Some drivers run in their own namespace - DriverNamespace *v1.Namespace -} - -// GetUniqueDriverName returns unique driver name that can be used parallelly in tests -func (config *PerTestConfig) GetUniqueDriverName() string { - return config.Driver.GetDriverInfo().Name + "-" + config.Framework.UniqueName -} diff --git a/test/e2e/storage/testpatterns/testpattern.go b/test/e2e/storage/framework/testpattern.go similarity index 99% rename from test/e2e/storage/testpatterns/testpattern.go rename to test/e2e/storage/framework/testpattern.go index cf45318a034..8ee2e3c2207 100644 --- a/test/e2e/storage/testpatterns/testpattern.go +++ b/test/e2e/storage/framework/testpattern.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package testpatterns +package framework import ( v1 "k8s.io/api/core/v1" diff --git a/test/e2e/storage/framework/testsuite.go b/test/e2e/storage/framework/testsuite.go new file mode 100644 index 00000000000..9ea99485a74 --- /dev/null +++ b/test/e2e/storage/framework/testsuite.go @@ -0,0 +1,129 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import ( + "fmt" + + "github.com/onsi/ginkgo" + + "k8s.io/kubernetes/test/e2e/framework" + e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" + e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" +) + +// TestSuite represents an interface for a set of tests which works with TestDriver. +// Each testsuite should implement this interface. +// Apart from GetTestSuiteInfo(), the functions should not be called directly; instead, +// use RegisterTests() to register the tests in the standard way. +type TestSuite interface { + GetTestSuiteInfo() TestSuiteInfo + // DefineTests defines tests of the testpattern for the driver. + // Called inside a Ginkgo context that reflects the current driver and test pattern, + // so the test suite can define tests directly with ginkgo.It.
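+	// A typical implementation looks roughly like this (illustrative sketch; mySuite is hypothetical): +	// +	//	func (s *mySuite) DefineTests(driver TestDriver, pattern TestPattern) { +	//		ginkgo.It("should store data", func() { /* test body */ }) +	//	}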
+ DefineTests(TestDriver, TestPattern) + // SkipUnsupportedTests skips the test suite based on the given TestDriver and TestPattern. + // The testsuite should check whether the given pattern and driver work for the whole testsuite; + // testcase-specific checks should happen inside DefineTests. + SkipUnsupportedTests(TestDriver, TestPattern) +} + +// RegisterTests registers the given driver + pattern combination for the TestSuite. +// This function actually registers the tests inside the testsuite. +func RegisterTests(suite TestSuite, driver TestDriver, pattern TestPattern) { + tsInfo := suite.GetTestSuiteInfo() + testName := fmt.Sprintf("[Testpattern: %s]%s %s%s", pattern.Name, pattern.FeatureTag, tsInfo.Name, tsInfo.FeatureTag) + ginkgo.Context(testName, func() { + ginkgo.BeforeEach(func() { + // skip all invalid combinations of driver and pattern + SkipInvalidDriverPatternCombination(driver, pattern) + // skip pattern and driver combinations that this specific TestSuite does not support + suite.SkipUnsupportedTests(driver, pattern) + }) + // actually define the tests; + // at this step the testsuite no longer needs to worry about whether the pattern and driver + // fit the whole testsuite, but driver & pattern checks + // might still be needed for specific independent test cases. + suite.DefineTests(driver, pattern) + }) +} + +// DefineTestSuites defines tests for all testpatterns and all testsuites for a driver +func DefineTestSuites(driver TestDriver, tsInits []func() TestSuite) { + for _, testSuiteInit := range tsInits { + suite := testSuiteInit() + for _, pattern := range suite.GetTestSuiteInfo().TestPatterns { + RegisterTests(suite, driver, pattern) + } + } +} + +// TestSuiteInfo represents a set of parameters for TestSuite +type TestSuiteInfo struct { + Name string // name of the TestSuite + FeatureTag string // featureTag for the TestSuite + TestPatterns []TestPattern // Slice of TestPattern for the TestSuite + SupportedSizeRange e2evolume.SizeRange // Size range supported by the test suite +} + +// SkipInvalidDriverPatternCombination skips tests if the combination of driver and testpattern +// is not compatible. This function is called in RegisterTests() to make +// sure all the testsuites we define are valid. +// +// Whether it needs to be skipped is checked by the following steps: +// 0. Check with driver SkipUnsupportedTest +// 1. Check whether volType is supported by the driver from its interface +// 2. Check whether fsType is supported +// +// Test suites can also skip tests inside their own SkipUnsupportedTests function or in +// individual tests. +func SkipInvalidDriverPatternCombination(driver TestDriver, pattern TestPattern) { + dInfo := driver.GetDriverInfo() + var isSupported bool + + // 0. Check with driver-specific logic + driver.SkipUnsupportedTest(pattern) + + // 1. Check whether volType is supported by the driver from its interface + switch pattern.VolType { + case InlineVolume: + _, isSupported = driver.(InlineVolumeTestDriver) + case PreprovisionedPV: + _, isSupported = driver.(PreprovisionedPVTestDriver) + case DynamicPV, GenericEphemeralVolume: + _, isSupported = driver.(DynamicPVTestDriver) + case CSIInlineVolume: + _, isSupported = driver.(EphemeralTestDriver) + default: + isSupported = false + } + + if !isSupported { + e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType) + } + + // 2.
Check whether fsType is supported + if !dInfo.SupportedFsType.Has(pattern.FsType) { + e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.FsType) + } + if pattern.FsType == "xfs" && framework.NodeOSDistroIs("windows") { + e2eskipper.Skipf("Distro doesn't support xfs -- skipping") + } + if pattern.FsType == "ntfs" && !framework.NodeOSDistroIs("windows") { + e2eskipper.Skipf("Distro %s doesn't support ntfs -- skipping", framework.TestContext.NodeOSDistro) + } +} diff --git a/test/e2e/storage/framework/volume_resource.go b/test/e2e/storage/framework/volume_resource.go new file mode 100644 index 00000000000..85cabd107e3 --- /dev/null +++ b/test/e2e/storage/framework/volume_resource.go @@ -0,0 +1,317 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import ( + "context" + "fmt" + "time" + + "github.com/onsi/ginkgo" + "github.com/pkg/errors" + v1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/kubernetes/test/e2e/framework" + e2epv "k8s.io/kubernetes/test/e2e/framework/pv" + e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" + e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" + storageutils "k8s.io/kubernetes/test/e2e/storage/utils" +) + +// VolumeResource is a generic implementation of TestResource that will be usable +// in most TestSuites. +// See volume_io.go or volumes.go in test/e2e/storage/testsuites/ for how to use this resource. +// Also, see subpath.go in the same directory for how to extend and use it. +type VolumeResource struct { + Config *PerTestConfig + Pattern TestPattern + VolSource *v1.VolumeSource + Pvc *v1.PersistentVolumeClaim + Pv *v1.PersistentVolume + Sc *storagev1.StorageClass + + Volume TestVolume +} + +// CreateVolumeResource constructs a VolumeResource for the current test. It knows how to deal with +// different test pattern volume types.
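+// Callers own the returned resource and are expected to release it again, e.g. (illustrative sketch): +// +//	r := CreateVolumeResource(driver, config, pattern, testVolumeSizeRange) +//	defer func() { framework.ExpectNoError(r.CleanupResource()) }()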
+func CreateVolumeResource(driver TestDriver, config *PerTestConfig, pattern TestPattern, testVolumeSizeRange e2evolume.SizeRange) *VolumeResource { + r := VolumeResource{ + Config: config, + Pattern: pattern, + } + dInfo := driver.GetDriverInfo() + f := config.Framework + cs := f.ClientSet + + // Create volume for pre-provisioned volume tests + r.Volume = CreateVolume(driver, config, pattern.VolType) + + switch pattern.VolType { + case InlineVolume: + framework.Logf("Creating resource for inline volume") + if iDriver, ok := driver.(InlineVolumeTestDriver); ok { + r.VolSource = iDriver.GetVolumeSource(false, pattern.FsType, r.Volume) + } + case PreprovisionedPV: + framework.Logf("Creating resource for pre-provisioned PV") + if pDriver, ok := driver.(PreprovisionedPVTestDriver); ok { + pvSource, volumeNodeAffinity := pDriver.GetPersistentVolumeSource(false, pattern.FsType, r.Volume) + if pvSource != nil { + r.Pv, r.Pvc = createPVCPV(f, dInfo.Name, pvSource, volumeNodeAffinity, pattern.VolMode, dInfo.RequiredAccessModes) + r.VolSource = storageutils.CreateVolumeSource(r.Pvc.Name, false /* readOnly */) + } + } + case DynamicPV, GenericEphemeralVolume: + framework.Logf("Creating resource for dynamic PV") + if dDriver, ok := driver.(DynamicPVTestDriver); ok { + var err error + driverVolumeSizeRange := dDriver.GetDriverInfo().SupportedSizeRange + claimSize, err := storageutils.GetSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange) + framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, driverVolumeSizeRange) + framework.Logf("Using claimSize:%s, test suite supported size:%v, driver(%s) supported size:%v ", claimSize, testVolumeSizeRange, dDriver.GetDriverInfo().Name, driverVolumeSizeRange) + r.Sc = dDriver.GetDynamicProvisionStorageClass(r.Config, pattern.FsType) + + if pattern.BindingMode != "" { + r.Sc.VolumeBindingMode = &pattern.BindingMode + } + r.Sc.AllowVolumeExpansion = &pattern.AllowExpansion + + ginkgo.By("creating a StorageClass " + r.Sc.Name) + + r.Sc, err = cs.StorageV1().StorageClasses().Create(context.TODO(), r.Sc, metav1.CreateOptions{}) + framework.ExpectNoError(err) + + switch pattern.VolType { + case DynamicPV: + r.Pv, r.Pvc = createPVCPVFromDynamicProvisionSC( + f, dInfo.Name, claimSize, r.Sc, pattern.VolMode, dInfo.RequiredAccessModes) + r.VolSource = storageutils.CreateVolumeSource(r.Pvc.Name, false /* readOnly */) + case GenericEphemeralVolume: + driverVolumeSizeRange := dDriver.GetDriverInfo().SupportedSizeRange + claimSize, err := storageutils.GetSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange) + framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, driverVolumeSizeRange) + r.VolSource = createEphemeralVolumeSource(r.Sc.Name, dInfo.RequiredAccessModes, claimSize, false /* readOnly */) + } + } + case CSIInlineVolume: + framework.Logf("Creating resource for CSI ephemeral inline volume") + if eDriver, ok := driver.(EphemeralTestDriver); ok { + attributes, _, _ := eDriver.GetVolume(config, 0) + r.VolSource = &v1.VolumeSource{ + CSI: &v1.CSIVolumeSource{ + Driver: eDriver.GetCSIDriverName(config), + VolumeAttributes: attributes, + }, + } + } + default: + framework.Failf("VolumeResource doesn't support: %s", pattern.VolType) + } + + if r.VolSource == nil { + e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType) + } + + return &r +}
+ +func createEphemeralVolumeSource(scName string, accessModes []v1.PersistentVolumeAccessMode, claimSize string, readOnly bool) *v1.VolumeSource { + if len(accessModes) == 0 { + accessModes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce} + } + return &v1.VolumeSource{ + Ephemeral: &v1.EphemeralVolumeSource{ + VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{ + Spec: v1.PersistentVolumeClaimSpec{ + StorageClassName: &scName, + AccessModes: accessModes, + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceStorage: resource.MustParse(claimSize), + }, + }, + }, + }, + ReadOnly: readOnly, + }, + } +} + +// CleanupResource cleans up VolumeResource +func (r *VolumeResource) CleanupResource() error { + f := r.Config.Framework + var cleanUpErrs []error + if r.Pvc != nil || r.Pv != nil { + switch r.Pattern.VolType { + case PreprovisionedPV: + ginkgo.By("Deleting pv and pvc") + if errs := e2epv.PVPVCCleanup(f.ClientSet, f.Namespace.Name, r.Pv, r.Pvc); len(errs) != 0 { + framework.Failf("Failed to delete PVC or PV: %v", utilerrors.NewAggregate(errs)) + } + case DynamicPV: + ginkgo.By("Deleting pvc") + // We only delete the PVC so that the PV (and disk) can be cleaned up by the dynamic provisioner + if r.Pv != nil && r.Pv.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimDelete { + framework.Failf("Test framework does not currently support Dynamically Provisioned Persistent Volume %v specified with reclaim policy that isn't %v", + r.Pv.Name, v1.PersistentVolumeReclaimDelete) + } + if r.Pvc != nil { + cs := f.ClientSet + pv := r.Pv + if pv == nil && r.Pvc.Name != "" { + // This happens for late binding. Check whether we have a volume now that we need to wait for. + pvc, err := cs.CoreV1().PersistentVolumeClaims(r.Pvc.Namespace).Get(context.TODO(), r.Pvc.Name, metav1.GetOptions{}) + switch { + case err == nil: + if pvc.Spec.VolumeName != "" { + pv, err = cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{}) + if err != nil { + cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err, "Failed to find PV %v", pvc.Spec.VolumeName)) + } + } + case apierrors.IsNotFound(err): + // Without the PVC, we cannot locate the corresponding PV. Let's + // hope that it is gone.
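+					// (With delayed binding the claim may legitimately never have been bound, +					// in which case there is no PV to wait for.)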
+ default: + cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err, "Failed to find PVC %v", r.Pvc.Name)) + } + } + + err := e2epv.DeletePersistentVolumeClaim(f.ClientSet, r.Pvc.Name, f.Namespace.Name) + if err != nil { + cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err, "Failed to delete PVC %v", r.Pvc.Name)) + } + + if pv != nil { + err = e2epv.WaitForPersistentVolumeDeleted(f.ClientSet, pv.Name, 5*time.Second, 5*time.Minute) + if err != nil { + cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err, + "Persistent Volume %v not deleted by dynamic provisioner", pv.Name)) + } + } + } + default: + framework.Failf("Found PVC (%v) or PV (%v) but not running Preprovisioned or Dynamic test pattern", r.Pvc, r.Pv) + } + } + + if r.Sc != nil { + ginkgo.By("Deleting sc") + if err := storageutils.DeleteStorageClass(f.ClientSet, r.Sc.Name); err != nil { + cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err, "Failed to delete StorageClass %v", r.Sc.Name)) + } + } + + // Cleanup volume for pre-provisioned volume tests + if r.Volume != nil { + if err := storageutils.TryFunc(r.Volume.DeleteVolume); err != nil { + cleanUpErrs = append(cleanUpErrs, errors.Wrap(err, "Failed to delete Volume")) + } + } + return utilerrors.NewAggregate(cleanUpErrs) +} + +func createPVCPV( + f *framework.Framework, + name string, + pvSource *v1.PersistentVolumeSource, + volumeNodeAffinity *v1.VolumeNodeAffinity, + volMode v1.PersistentVolumeMode, + accessModes []v1.PersistentVolumeAccessMode, +) (*v1.PersistentVolume, *v1.PersistentVolumeClaim) { + pvConfig := e2epv.PersistentVolumeConfig{ + NamePrefix: fmt.Sprintf("%s-", name), + StorageClassName: f.Namespace.Name, + PVSource: *pvSource, + NodeAffinity: volumeNodeAffinity, + AccessModes: accessModes, + } + + pvcConfig := e2epv.PersistentVolumeClaimConfig{ + StorageClassName: &f.Namespace.Name, + AccessModes: accessModes, + } + + if volMode != "" { + pvConfig.VolumeMode = &volMode + pvcConfig.VolumeMode = &volMode + } + + framework.Logf("Creating PVC and PV") + pv, pvc, err := e2epv.CreatePVCPV(f.ClientSet, pvConfig, pvcConfig, f.Namespace.Name, false) + framework.ExpectNoError(err, "PVC, PV creation failed") + + err = e2epv.WaitOnPVandPVC(f.ClientSet, f.Timeouts, f.Namespace.Name, pv, pvc) + framework.ExpectNoError(err, "PVC, PV failed to bind") + + return pv, pvc +} + +func createPVCPVFromDynamicProvisionSC( + f *framework.Framework, + name string, + claimSize string, + sc *storagev1.StorageClass, + volMode v1.PersistentVolumeMode, + accessModes []v1.PersistentVolumeAccessMode, +) (*v1.PersistentVolume, *v1.PersistentVolumeClaim) { + cs := f.ClientSet + ns := f.Namespace.Name + + ginkgo.By("creating a claim") + pvcCfg := e2epv.PersistentVolumeClaimConfig{ + NamePrefix: name, + ClaimSize: claimSize, + StorageClassName: &(sc.Name), + AccessModes: accessModes, + VolumeMode: &volMode, + } + + pvc := e2epv.MakePersistentVolumeClaim(pvcCfg, ns) + + var err error + pvc, err = e2epv.CreatePVC(cs, ns, pvc) + framework.ExpectNoError(err) + + if !isDelayedBinding(sc) { + err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, f.Timeouts.ClaimProvision) + framework.ExpectNoError(err) + } + + pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) + framework.ExpectNoError(err) + + var pv *v1.PersistentVolume + if !isDelayedBinding(sc) { + pv, err = cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{}) + framework.ExpectNoError(err) + } + + return 
pv, pvc +} + +func isDelayedBinding(sc *storagev1.StorageClass) bool { + if sc.VolumeBindingMode != nil { + return *sc.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer + } + return false +} diff --git a/test/e2e/storage/host_path_type.go b/test/e2e/storage/host_path_type.go index ef2e9143dab..0abe44570f4 100644 --- a/test/e2e/storage/host_path_type.go +++ b/test/e2e/storage/host_path_type.go @@ -28,6 +28,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2eevents "k8s.io/kubernetes/test/e2e/framework/events" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/storage/utils" imageutils "k8s.io/kubernetes/test/utils/image" @@ -263,7 +264,7 @@ var _ = utils.SIGDescribe("HostPathType Character Device [Slow]", func() { targetCharDev = path.Join(hostBaseDir, "achardev") ginkgo.By("Create a character device for further testing") cmd := fmt.Sprintf("mknod %s c 89 1", path.Join(mountBaseDir, "achardev")) - stdout, stderr, err := utils.PodExec(f, basePod, cmd) + stdout, stderr, err := e2evolume.PodExec(f, basePod, cmd) framework.ExpectNoError(err, "command: %q, stdout: %s\nstderr: %s", cmd, stdout, stderr) }) @@ -332,7 +333,7 @@ var _ = utils.SIGDescribe("HostPathType Block Device [Slow]", func() { targetBlockDev = path.Join(hostBaseDir, "ablkdev") ginkgo.By("Create a block device for further testing") cmd := fmt.Sprintf("mknod %s b 89 1", path.Join(mountBaseDir, "ablkdev")) - stdout, stderr, err := utils.PodExec(f, basePod, cmd) + stdout, stderr, err := e2evolume.PodExec(f, basePod, cmd) framework.ExpectNoError(err, "command %q: stdout: %s\nstderr: %s", cmd, stdout, stderr) }) diff --git a/test/e2e/storage/in_tree_volumes.go b/test/e2e/storage/in_tree_volumes.go index 3c557c66b4d..b5572837d0e 100644 --- a/test/e2e/storage/in_tree_volumes.go +++ b/test/e2e/storage/in_tree_volumes.go @@ -19,12 +19,13 @@ package storage import ( "github.com/onsi/ginkgo" "k8s.io/kubernetes/test/e2e/storage/drivers" + storageframework "k8s.io/kubernetes/test/e2e/storage/framework" "k8s.io/kubernetes/test/e2e/storage/testsuites" "k8s.io/kubernetes/test/e2e/storage/utils" ) // List of testDrivers to be executed in below loop -var testDrivers = []func() testsuites.TestDriver{ +var testDrivers = []func() storageframework.TestDriver{ drivers.InitNFSDriver, drivers.InitGlusterFSDriver, drivers.InitISCSIDriver, @@ -54,8 +55,8 @@ var _ = utils.SIGDescribe("In-tree Volumes", func() { for _, initDriver := range testDrivers { curDriver := initDriver() - ginkgo.Context(testsuites.GetDriverNameWithFeatureTags(curDriver), func() { - testsuites.DefineTestSuite(curDriver, testsuites.BaseSuites) + ginkgo.Context(storageframework.GetDriverNameWithFeatureTags(curDriver), func() { + storageframework.DefineTestSuites(curDriver, testsuites.BaseSuites) }) } }) diff --git a/test/e2e/storage/persistent_volumes-local.go b/test/e2e/storage/persistent_volumes-local.go index 241ff497626..fb4e2ee09cc 100644 --- a/test/e2e/storage/persistent_volumes-local.go +++ b/test/e2e/storage/persistent_volumes-local.go @@ -45,6 +45,7 @@ import ( e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset" + e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/storage/utils" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -1077,7 +1078,7 @@ func testReadFileContent(f *framework.Framework, testFileDir string, testFile 
st // Execute a read or write command in a pod. // Fail on error func podRWCmdExec(f *framework.Framework, pod *v1.Pod, cmd string) string { - stdout, stderr, err := utils.PodExec(f, pod, cmd) + stdout, stderr, err := e2evolume.PodExec(f, pod, cmd) framework.Logf("podRWCmdExec cmd: %q, out: %q, stderr: %q, err: %v", cmd, stdout, stderr, err) framework.ExpectNoError(err) return stdout diff --git a/test/e2e/storage/testpatterns/BUILD b/test/e2e/storage/testpatterns/BUILD deleted file mode 100644 index 1374bf7e04c..00000000000 --- a/test/e2e/storage/testpatterns/BUILD +++ /dev/null @@ -1,27 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["testpattern.go"], - importpath = "k8s.io/kubernetes/test/e2e/storage/testpatterns", - visibility = ["//visibility:public"], - deps = [ - "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/api/storage/v1:go_default_library", - "//test/e2e/framework/volume:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/test/e2e/storage/testsuites/BUILD b/test/e2e/storage/testsuites/BUILD index 508772790ab..d7cca9c0f09 100644 --- a/test/e2e/storage/testsuites/BUILD +++ b/test/e2e/storage/testsuites/BUILD @@ -1,11 +1,10 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ "base.go", "disruptive.go", - "driveroperations.go", "ephemeral.go", "fsgroupchangepolicy.go", "multivolume.go", @@ -13,7 +12,6 @@ go_library( "snapshottable.go", "snapshottable_stress.go", "subpath.go", - "testdriver.go", "topology.go", "volume_expand.go", "volume_io.go", @@ -36,14 +34,10 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/storage/names:go_default_library", "//staging/src/k8s.io/client-go/dynamic:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/component-base/metrics/testutil:go_default_library", @@ -57,13 +51,11 @@ go_library( "//test/e2e/framework/pv:go_default_library", "//test/e2e/framework/skipper:go_default_library", "//test/e2e/framework/volume:go_default_library", - "//test/e2e/storage/podlogs:go_default_library", - "//test/e2e/storage/testpatterns:go_default_library", + "//test/e2e/storage/framework:go_default_library", "//test/e2e/storage/utils:go_default_library", "//test/utils/image:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/github.com/onsi/gomega:go_default_library", - 
"//vendor/github.com/pkg/errors:go_default_library", "//vendor/k8s.io/utils/pointer:go_default_library", ], ) @@ -81,16 +73,3 @@ filegroup( tags = ["automanaged"], visibility = ["//visibility:public"], ) - -go_test( - name = "go_default_test", - srcs = [ - "api_test.go", - "base_test.go", - ], - embed = [":go_default_library"], - deps = [ - "//test/e2e/framework/volume:go_default_library", - "//test/e2e/storage/testpatterns:go_default_library", - ], -) diff --git a/test/e2e/storage/testsuites/api_test.go b/test/e2e/storage/testsuites/api_test.go deleted file mode 100644 index 53f60579f97..00000000000 --- a/test/e2e/storage/testsuites/api_test.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package testsuites_test is used intentionally to ensure that the -// code below only has access to exported names. It doesn't have any -// actual test. That the custom volume test suite defined below -// compile is the test. -// -// It's needed because we don't have any in-tree volume test -// suite implementations that aren't in the "testuites" package itself. -// We don't need this for the "TestDriver" interface because there -// we have implementations in a separate package. -package testsuites_test - -import ( - e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" - "k8s.io/kubernetes/test/e2e/storage/testpatterns" - "k8s.io/kubernetes/test/e2e/storage/testsuites" -) - -type fakeSuite struct { -} - -func (f *fakeSuite) GetTestSuiteInfo() testsuites.TestSuiteInfo { - return testsuites.TestSuiteInfo{ - Name: "fake", - FeatureTag: "", - TestPatterns: []testpatterns.TestPattern{testpatterns.DefaultFsDynamicPV}, - SupportedSizeRange: e2evolume.SizeRange{Min: "1Mi", Max: "1Gi"}, - } -} - -func (f *fakeSuite) DefineTests(testsuites.TestDriver, testpatterns.TestPattern) { -} - -func (f *fakeSuite) SkipRedundantSuite(testsuites.TestDriver, testpatterns.TestPattern) { -} - -var _ testsuites.TestSuite = &fakeSuite{} diff --git a/test/e2e/storage/testsuites/base.go b/test/e2e/storage/testsuites/base.go index c4cb285185c..46efe308330 100644 --- a/test/e2e/storage/testsuites/base.go +++ b/test/e2e/storage/testsuites/base.go @@ -19,41 +19,20 @@ package testsuites import ( "context" "flag" - "fmt" - "math" - "regexp" "strings" - "time" - "github.com/onsi/ginkgo" - "github.com/pkg/errors" - - v1 "k8s.io/api/core/v1" - storagev1 "k8s.io/api/storage/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/types" - utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" clientset "k8s.io/client-go/kubernetes" "k8s.io/component-base/metrics/testutil" csitrans "k8s.io/csi-translation-lib" "k8s.io/kubernetes/test/e2e/framework" e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics" - e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2eskipper 
"k8s.io/kubernetes/test/e2e/framework/skipper" - e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" - "k8s.io/kubernetes/test/e2e/storage/podlogs" - "k8s.io/kubernetes/test/e2e/storage/testpatterns" + storageframework "k8s.io/kubernetes/test/e2e/storage/framework" ) -var ( - migratedPlugins *string - minValidSize = "1Ki" - maxValidSize = "10Ei" -) +var migratedPlugins *string func init() { migratedPlugins = flag.String("storage.migratedPlugins", "", "comma separated list of in-tree plugin names of form 'kubernetes.io/{pluginName}' migrated to CSI") @@ -73,7 +52,7 @@ type migrationOpCheck struct { } // BaseSuites is a list of storage test suites that work for in-tree and CSI drivers -var BaseSuites = []func() TestSuite{ +var BaseSuites = []func() storageframework.TestSuite{ InitVolumesTestSuite, InitVolumeIOTestSuite, InitVolumeModeTestSuite, @@ -95,586 +74,6 @@ var CSISuites = append(BaseSuites, InitSnapshottableStressTestSuite, ) -// TestSuite represents an interface for a set of tests which works with TestDriver -type TestSuite interface { - // GetTestSuiteInfo returns the TestSuiteInfo for this TestSuite - GetTestSuiteInfo() TestSuiteInfo - // DefineTests defines tests of the testpattern for the driver. - // Called inside a Ginkgo context that reflects the current driver and test pattern, - // so the test suite can define tests directly with ginkgo.It. - DefineTests(TestDriver, testpatterns.TestPattern) - // SkipRedundantSuite will skip the test suite based on the given TestPattern and TestDriver - SkipRedundantSuite(TestDriver, testpatterns.TestPattern) -} - -// TestSuiteInfo represents a set of parameters for TestSuite -type TestSuiteInfo struct { - Name string // name of the TestSuite - FeatureTag string // featureTag for the TestSuite - TestPatterns []testpatterns.TestPattern // Slice of TestPattern for the TestSuite - SupportedSizeRange e2evolume.SizeRange // Size range supported by the test suite -} - -func getTestNameStr(suite TestSuite, pattern testpatterns.TestPattern) string { - tsInfo := suite.GetTestSuiteInfo() - return fmt.Sprintf("[Testpattern: %s]%s %s%s", pattern.Name, pattern.FeatureTag, tsInfo.Name, tsInfo.FeatureTag) -} - -// DefineTestSuite defines tests for all testpatterns and all testSuites for a driver -func DefineTestSuite(driver TestDriver, tsInits []func() TestSuite) { - for _, testSuiteInit := range tsInits { - suite := testSuiteInit() - for _, pattern := range suite.GetTestSuiteInfo().TestPatterns { - p := pattern - ginkgo.Context(getTestNameStr(suite, p), func() { - ginkgo.BeforeEach(func() { - // Skip unsupported tests to avoid unnecessary resource initialization - suite.SkipRedundantSuite(driver, p) - skipUnsupportedTest(driver, p) - }) - suite.DefineTests(driver, p) - }) - } - } -} - -// skipUnsupportedTest will skip tests if the combination of driver, and testpattern -// is not suitable to be tested. -// Whether it needs to be skipped is checked by following steps: -// 1. Check if Whether SnapshotType is supported by driver from its interface -// 2. Check if Whether volType is supported by driver from its interface -// 3. Check if fsType is supported -// 4. Check with driver specific logic -// -// Test suites can also skip tests inside their own DefineTests function or in -// individual tests. -func skipUnsupportedTest(driver TestDriver, pattern testpatterns.TestPattern) { - dInfo := driver.GetDriverInfo() - var isSupported bool - - // 0. Check with driver specific logic - driver.SkipUnsupportedTest(pattern) - - // 1. 
Check if Whether volType is supported by driver from its interface - switch pattern.VolType { - case testpatterns.InlineVolume: - _, isSupported = driver.(InlineVolumeTestDriver) - case testpatterns.PreprovisionedPV: - _, isSupported = driver.(PreprovisionedPVTestDriver) - case testpatterns.DynamicPV, testpatterns.GenericEphemeralVolume: - _, isSupported = driver.(DynamicPVTestDriver) - case testpatterns.CSIInlineVolume: - _, isSupported = driver.(EphemeralTestDriver) - default: - isSupported = false - } - - if !isSupported { - e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType) - } - - // 2. Check if fsType is supported - if !dInfo.SupportedFsType.Has(pattern.FsType) { - e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.FsType) - } - if pattern.FsType == "xfs" && framework.NodeOSDistroIs("windows") { - e2eskipper.Skipf("Distro doesn't support xfs -- skipping") - } - if pattern.FsType == "ntfs" && !framework.NodeOSDistroIs("windows") { - e2eskipper.Skipf("Distro %s doesn't support ntfs -- skipping", framework.TestContext.NodeOSDistro) - } -} - -// VolumeResource is a generic implementation of TestResource that wil be able to -// be used in most of TestSuites. -// See volume_io.go or volumes.go in test/e2e/storage/testsuites/ for how to use this resource. -// Also, see subpath.go in the same directory for how to extend and use it. -type VolumeResource struct { - Config *PerTestConfig - Pattern testpatterns.TestPattern - VolSource *v1.VolumeSource - Pvc *v1.PersistentVolumeClaim - Pv *v1.PersistentVolume - Sc *storagev1.StorageClass - - Volume TestVolume -} - -// CreateVolumeResource constructs a VolumeResource for the current test. It knows how to deal with -// different test pattern volume types. 
-func CreateVolumeResource(driver TestDriver, config *PerTestConfig, pattern testpatterns.TestPattern, testVolumeSizeRange e2evolume.SizeRange) *VolumeResource { - r := VolumeResource{ - Config: config, - Pattern: pattern, - } - dInfo := driver.GetDriverInfo() - f := config.Framework - cs := f.ClientSet - - // Create volume for pre-provisioned volume tests - r.Volume = CreateVolume(driver, config, pattern.VolType) - - switch pattern.VolType { - case testpatterns.InlineVolume: - framework.Logf("Creating resource for inline volume") - if iDriver, ok := driver.(InlineVolumeTestDriver); ok { - r.VolSource = iDriver.GetVolumeSource(false, pattern.FsType, r.Volume) - } - case testpatterns.PreprovisionedPV: - framework.Logf("Creating resource for pre-provisioned PV") - if pDriver, ok := driver.(PreprovisionedPVTestDriver); ok { - pvSource, volumeNodeAffinity := pDriver.GetPersistentVolumeSource(false, pattern.FsType, r.Volume) - if pvSource != nil { - r.Pv, r.Pvc = createPVCPV(f, dInfo.Name, pvSource, volumeNodeAffinity, pattern.VolMode, dInfo.RequiredAccessModes) - r.VolSource = createVolumeSource(r.Pvc.Name, false /* readOnly */) - } - } - case testpatterns.DynamicPV, testpatterns.GenericEphemeralVolume: - framework.Logf("Creating resource for dynamic PV") - if dDriver, ok := driver.(DynamicPVTestDriver); ok { - var err error - driverVolumeSizeRange := dDriver.GetDriverInfo().SupportedSizeRange - claimSize, err := getSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange) - framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, driverVolumeSizeRange) - framework.Logf("Using claimSize:%s, test suite supported size:%v, driver(%s) supported size:%v ", - claimSize, testVolumeSizeRange, dDriver.GetDriverInfo().Name, driverVolumeSizeRange) - r.Sc = dDriver.GetDynamicProvisionStorageClass(r.Config, pattern.FsType) - - if pattern.BindingMode != "" { - r.Sc.VolumeBindingMode = &pattern.BindingMode - } - r.Sc.AllowVolumeExpansion = &pattern.AllowExpansion - - ginkgo.By("creating a StorageClass " + r.Sc.Name) - - r.Sc, err = cs.StorageV1().StorageClasses().Create(context.TODO(), r.Sc, metav1.CreateOptions{}) - framework.ExpectNoError(err) - - switch pattern.VolType { - case testpatterns.DynamicPV: - r.Pv, r.Pvc = createPVCPVFromDynamicProvisionSC( - f, dInfo.Name, claimSize, r.Sc, pattern.VolMode, dInfo.RequiredAccessModes) - r.VolSource = createVolumeSource(r.Pvc.Name, false /* readOnly */) - case testpatterns.GenericEphemeralVolume: - driverVolumeSizeRange := dDriver.GetDriverInfo().SupportedSizeRange - claimSize, err := getSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange) - framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, driverVolumeSizeRange) - r.VolSource = createEphemeralVolumeSource(r.Sc.Name, dInfo.RequiredAccessModes, claimSize, false /* readOnly */) - } - } - case testpatterns.CSIInlineVolume: - framework.Logf("Creating resource for CSI ephemeral inline volume") - if eDriver, ok := driver.(EphemeralTestDriver); ok { - attributes, _, _ := eDriver.GetVolume(config, 0) - r.VolSource = &v1.VolumeSource{ - CSI: &v1.CSIVolumeSource{ - Driver: eDriver.GetCSIDriverName(config), - VolumeAttributes: attributes, - }, - } - } - default: - framework.Failf("VolumeResource doesn't support: %s", pattern.VolType) - } - - if r.VolSource == nil { - e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType) - } - 
- return &r -} - -func createVolumeSource(pvcName string, readOnly bool) *v1.VolumeSource { - return &v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: pvcName, - ReadOnly: readOnly, - }, - } -} - -func createEphemeralVolumeSource(scName string, accessModes []v1.PersistentVolumeAccessMode, claimSize string, readOnly bool) *v1.VolumeSource { - if len(accessModes) == 0 { - accessModes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce} - } - return &v1.VolumeSource{ - Ephemeral: &v1.EphemeralVolumeSource{ - VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{ - Spec: v1.PersistentVolumeClaimSpec{ - StorageClassName: &scName, - AccessModes: accessModes, - Resources: v1.ResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceStorage: resource.MustParse(claimSize), - }, - }, - }, - }, - ReadOnly: readOnly, - }, - } -} - -// CleanupResource cleans up VolumeResource -func (r *VolumeResource) CleanupResource() error { - f := r.Config.Framework - var cleanUpErrs []error - if r.Pvc != nil || r.Pv != nil { - switch r.Pattern.VolType { - case testpatterns.PreprovisionedPV: - ginkgo.By("Deleting pv and pvc") - if errs := e2epv.PVPVCCleanup(f.ClientSet, f.Namespace.Name, r.Pv, r.Pvc); len(errs) != 0 { - framework.Failf("Failed to delete PVC or PV: %v", utilerrors.NewAggregate(errs)) - } - case testpatterns.DynamicPV: - ginkgo.By("Deleting pvc") - // We only delete the PVC so that PV (and disk) can be cleaned up by dynamic provisioner - if r.Pv != nil && r.Pv.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimDelete { - framework.Failf("Test framework does not currently support Dynamically Provisioned Persistent Volume %v specified with reclaim policy that isnt %v", - r.Pv.Name, v1.PersistentVolumeReclaimDelete) - } - if r.Pvc != nil { - cs := f.ClientSet - pv := r.Pv - if pv == nil && r.Pvc.Name != "" { - // This happens for late binding. Check whether we have a volume now that we need to wait for. - pvc, err := cs.CoreV1().PersistentVolumeClaims(r.Pvc.Namespace).Get(context.TODO(), r.Pvc.Name, metav1.GetOptions{}) - switch { - case err == nil: - if pvc.Spec.VolumeName != "" { - pv, err = cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{}) - if err != nil { - cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err, "Failed to find PV %v", pvc.Spec.VolumeName)) - } - } - case apierrors.IsNotFound(err): - // Without the PVC, we cannot locate the corresponding PV. Let's - // hope that it is gone. 
- default: - cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err, "Failed to find PVC %v", r.Pvc.Name)) - } - } - - err := e2epv.DeletePersistentVolumeClaim(f.ClientSet, r.Pvc.Name, f.Namespace.Name) - if err != nil { - cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err, "Failed to delete PVC %v", r.Pvc.Name)) - } - - if pv != nil { - err = e2epv.WaitForPersistentVolumeDeleted(f.ClientSet, pv.Name, 5*time.Second, 5*time.Minute) - if err != nil { - cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err, - "Persistent Volume %v not deleted by dynamic provisioner", pv.Name)) - } - } - } - default: - framework.Failf("Found PVC (%v) or PV (%v) but not running Preprovisioned or Dynamic test pattern", r.Pvc, r.Pv) - } - } - - if r.Sc != nil { - ginkgo.By("Deleting sc") - if err := deleteStorageClass(f.ClientSet, r.Sc.Name); err != nil { - cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err, "Failed to delete StorageClass %v", r.Sc.Name)) - } - } - - // Cleanup volume for pre-provisioned volume tests - if r.Volume != nil { - if err := tryFunc(r.Volume.DeleteVolume); err != nil { - cleanUpErrs = append(cleanUpErrs, errors.Wrap(err, "Failed to delete Volume")) - } - } - return utilerrors.NewAggregate(cleanUpErrs) -} - -func createPVCPV( - f *framework.Framework, - name string, - pvSource *v1.PersistentVolumeSource, - volumeNodeAffinity *v1.VolumeNodeAffinity, - volMode v1.PersistentVolumeMode, - accessModes []v1.PersistentVolumeAccessMode, -) (*v1.PersistentVolume, *v1.PersistentVolumeClaim) { - pvConfig := e2epv.PersistentVolumeConfig{ - NamePrefix: fmt.Sprintf("%s-", name), - StorageClassName: f.Namespace.Name, - PVSource: *pvSource, - NodeAffinity: volumeNodeAffinity, - AccessModes: accessModes, - } - - pvcConfig := e2epv.PersistentVolumeClaimConfig{ - StorageClassName: &f.Namespace.Name, - AccessModes: accessModes, - } - - if volMode != "" { - pvConfig.VolumeMode = &volMode - pvcConfig.VolumeMode = &volMode - } - - framework.Logf("Creating PVC and PV") - pv, pvc, err := e2epv.CreatePVCPV(f.ClientSet, pvConfig, pvcConfig, f.Namespace.Name, false) - framework.ExpectNoError(err, "PVC, PV creation failed") - - err = e2epv.WaitOnPVandPVC(f.ClientSet, f.Timeouts, f.Namespace.Name, pv, pvc) - framework.ExpectNoError(err, "PVC, PV failed to bind") - - return pv, pvc -} - -func createPVCPVFromDynamicProvisionSC( - f *framework.Framework, - name string, - claimSize string, - sc *storagev1.StorageClass, - volMode v1.PersistentVolumeMode, - accessModes []v1.PersistentVolumeAccessMode, -) (*v1.PersistentVolume, *v1.PersistentVolumeClaim) { - cs := f.ClientSet - ns := f.Namespace.Name - - ginkgo.By("creating a claim") - pvcCfg := e2epv.PersistentVolumeClaimConfig{ - NamePrefix: name, - ClaimSize: claimSize, - StorageClassName: &(sc.Name), - AccessModes: accessModes, - VolumeMode: &volMode, - } - - pvc := e2epv.MakePersistentVolumeClaim(pvcCfg, ns) - - var err error - pvc, err = e2epv.CreatePVC(cs, ns, pvc) - framework.ExpectNoError(err) - - if !isDelayedBinding(sc) { - err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, f.Timeouts.ClaimProvision) - framework.ExpectNoError(err) - } - - pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) - framework.ExpectNoError(err) - - var pv *v1.PersistentVolume - if !isDelayedBinding(sc) { - pv, err = cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{}) - framework.ExpectNoError(err) - } - - return pv, pvc -} - -func 
isDelayedBinding(sc *storagev1.StorageClass) bool {
- if sc.VolumeBindingMode != nil {
- return *sc.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer
- }
- return false
-}
-
-// deleteStorageClass deletes the passed in StorageClass and catches errors other than "Not Found"
-func deleteStorageClass(cs clientset.Interface, className string) error {
- err := cs.StorageV1().StorageClasses().Delete(context.TODO(), className, metav1.DeleteOptions{})
- if err != nil && !apierrors.IsNotFound(err) {
- return err
- }
- return nil
-}
-
-// convertTestConfig returns a framework test config with the
-// parameters specified for the testsuite or (if available) the
-// dynamically created config for the volume server.
-//
-// This is done because TestConfig is the public API for
-// the testsuites package whereas volume.TestConfig is merely
-// an implementation detail. It contains fields that have no effect,
-// which makes it unsuitable for use in the testsuites public API.
-func convertTestConfig(in *PerTestConfig) e2evolume.TestConfig {
- if in.ServerConfig != nil {
- return *in.ServerConfig
- }
-
- return e2evolume.TestConfig{
- Namespace: in.Framework.Namespace.Name,
- Prefix: in.Prefix,
- ClientNodeSelection: in.ClientNodeSelection,
- }
-}
-
-// getSizeRangesIntersection takes two instances of storage size ranges and determines the
-// intersection of the intervals (if it exists) and returns the minimum of the intersection
-// to be used as the claim size for the test.
-// If a value is not set, there is no minimum or maximum size limitation and we set a default size for it.
-func getSizeRangesIntersection(first e2evolume.SizeRange, second e2evolume.SizeRange) (string, error) {
- var firstMin, firstMax, secondMin, secondMax resource.Quantity
- var err error
-
- // if SizeRange is not set, assign a minimum or maximum size
- if len(first.Min) == 0 {
- first.Min = minValidSize
- }
- if len(first.Max) == 0 {
- first.Max = maxValidSize
- }
- if len(second.Min) == 0 {
- second.Min = minValidSize
- }
- if len(second.Max) == 0 {
- second.Max = maxValidSize
- }
-
- if firstMin, err = resource.ParseQuantity(first.Min); err != nil {
- return "", err
- }
- if firstMax, err = resource.ParseQuantity(first.Max); err != nil {
- return "", err
- }
- if secondMin, err = resource.ParseQuantity(second.Min); err != nil {
- return "", err
- }
- if secondMax, err = resource.ParseQuantity(second.Max); err != nil {
- return "", err
- }
-
- interSectionStart := math.Max(float64(firstMin.Value()), float64(secondMin.Value()))
- intersectionEnd := math.Min(float64(firstMax.Value()), float64(secondMax.Value()))
-
- // the minimum of the intersection shall be returned as the claim size
- var intersectionMin resource.Quantity
-
- if intersectionEnd-interSectionStart >= 0 { // have intersection
- intersectionMin = *resource.NewQuantity(int64(interSectionStart), "BinarySI") // convert value to BinarySI format. E.g.
5Gi - // return the minimum of the intersection as the claim size - return intersectionMin.String(), nil - } - return "", fmt.Errorf("intersection of size ranges %+v, %+v is null", first, second) -} - -func getSnapshot(claimName string, ns, snapshotClassName string) *unstructured.Unstructured { - snapshot := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "kind": "VolumeSnapshot", - "apiVersion": snapshotAPIVersion, - "metadata": map[string]interface{}{ - "generateName": "snapshot-", - "namespace": ns, - }, - "spec": map[string]interface{}{ - "volumeSnapshotClassName": snapshotClassName, - "source": map[string]interface{}{ - "persistentVolumeClaimName": claimName, - }, - }, - }, - } - - return snapshot -} -func getPreProvisionedSnapshot(snapName, ns, snapshotContentName string) *unstructured.Unstructured { - snapshot := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "kind": "VolumeSnapshot", - "apiVersion": snapshotAPIVersion, - "metadata": map[string]interface{}{ - "name": snapName, - "namespace": ns, - }, - "spec": map[string]interface{}{ - "source": map[string]interface{}{ - "volumeSnapshotContentName": snapshotContentName, - }, - }, - }, - } - - return snapshot -} -func getPreProvisionedSnapshotContent(snapcontentName, snapshotName, snapshotNamespace, snapshotHandle, deletionPolicy, csiDriverName string) *unstructured.Unstructured { - snapshotContent := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "kind": "VolumeSnapshotContent", - "apiVersion": snapshotAPIVersion, - "metadata": map[string]interface{}{ - "name": snapcontentName, - }, - "spec": map[string]interface{}{ - "source": map[string]interface{}{ - "snapshotHandle": snapshotHandle, - }, - "volumeSnapshotRef": map[string]interface{}{ - "name": snapshotName, - "namespace": snapshotNamespace, - }, - "driver": csiDriverName, - "deletionPolicy": deletionPolicy, - }, - }, - } - - return snapshotContent -} - -func getPreProvisionedSnapshotContentName(uuid types.UID) string { - return fmt.Sprintf("pre-provisioned-snapcontent-%s", string(uuid)) -} - -func getPreProvisionedSnapshotName(uuid types.UID) string { - return fmt.Sprintf("pre-provisioned-snapshot-%s", string(uuid)) -} - -// StartPodLogs begins capturing log output and events from current -// and future pods running in the namespace of the framework. That -// ends when the returned cleanup function is called. -// -// The output goes to log files (when using --report-dir, as in the -// CI) or the output stream (otherwise). -func StartPodLogs(f *framework.Framework, driverNamespace *v1.Namespace) func() { - ctx, cancel := context.WithCancel(context.Background()) - cs := f.ClientSet - - ns := driverNamespace.Name - - to := podlogs.LogOutput{ - StatusWriter: ginkgo.GinkgoWriter, - } - if framework.TestContext.ReportDir == "" { - to.LogWriter = ginkgo.GinkgoWriter - } else { - test := ginkgo.CurrentGinkgoTestDescription() - // Clean up each individual component text such that - // it contains only characters that are valid as file - // name. - reg := regexp.MustCompile("[^a-zA-Z0-9_-]+") - var components []string - for _, component := range test.ComponentTexts { - components = append(components, reg.ReplaceAllString(component, "_")) - } - // We end the prefix with a slash to ensure that all logs - // end up in a directory named after the current test. - // - // Each component name maps to a directory. 
This
- // avoids cluttering the root artifact directory and
- // keeps each directory name smaller (the full test
- // name at one point exceeded 256 characters, which was
- // too much for some filesystems).
- to.LogPathPrefix = framework.TestContext.ReportDir + "/" +
- strings.Join(components, "/") + "/"
- }
- podlogs.CopyAllLogs(ctx, cs, ns, to)
-
- // pod events are something that the framework already collects itself
- // after a failed test. Logging them live is only useful for interactive
- // debugging, not when we collect reports.
- if framework.TestContext.ReportDir == "" {
- podlogs.WatchPods(ctx, cs, ns, ginkgo.GinkgoWriter)
- }
-
- return cancel
-}
-
 func getVolumeOpsFromMetricsForPlugin(ms testutil.Metrics, pluginName string) opCounts {
 totOps := opCounts{}
@@ -826,23 +225,9 @@ func (moc *migrationOpCheck) validateMigrationVolumeOpCounts() {
 }

 // Skip skipVolTypes patterns if the driver supports dynamic provisioning
-func skipVolTypePatterns(pattern testpatterns.TestPattern, driver TestDriver, skipVolTypes map[testpatterns.TestVolType]bool) {
- _, supportsProvisioning := driver.(DynamicPVTestDriver)
+func skipVolTypePatterns(pattern storageframework.TestPattern, driver storageframework.TestDriver, skipVolTypes map[storageframework.TestVolType]bool) {
+ _, supportsProvisioning := driver.(storageframework.DynamicPVTestDriver)
 if supportsProvisioning && skipVolTypes[pattern.VolType] {
 e2eskipper.Skipf("Driver supports dynamic provisioning, skipping %s pattern", pattern.VolType)
 }
 }
-
-func tryFunc(f func()) error {
- var err error
- if f == nil {
- return nil
- }
- defer func() {
- if recoverError := recover(); recoverError != nil {
- err = fmt.Errorf("%v", recoverError)
- }
- }()
- f()
- return err
-}
diff --git a/test/e2e/storage/testsuites/disruptive.go b/test/e2e/storage/testsuites/disruptive.go
index aff514119ae..6b98eea532e 100644
--- a/test/e2e/storage/testsuites/disruptive.go
+++ b/test/e2e/storage/testsuites/disruptive.go
@@ -25,64 +25,67 @@ import (
 "k8s.io/kubernetes/test/e2e/framework"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
- e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
- "k8s.io/kubernetes/test/e2e/storage/testpatterns"
+ storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
 "k8s.io/kubernetes/test/e2e/storage/utils"
+ storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
 )

 type disruptiveTestSuite struct {
- tsInfo TestSuiteInfo
+ tsInfo storageframework.TestSuiteInfo
 }

-var _ TestSuite = &disruptiveTestSuite{}
-
-// InitDisruptiveTestSuite returns subPathTestSuite that implements TestSuite interface
-func InitDisruptiveTestSuite() TestSuite {
+// InitCustomDisruptiveTestSuite returns disruptiveTestSuite that implements TestSuite interface
+// using custom test patterns
+func InitCustomDisruptiveTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite {
 return &disruptiveTestSuite{
- tsInfo: TestSuiteInfo{
- Name: "disruptive",
- FeatureTag: "[Disruptive][LinuxOnly]",
- TestPatterns: []testpatterns.TestPattern{
- // FSVolMode is already covered in subpath testsuite
- testpatterns.DefaultFsInlineVolume,
- testpatterns.FsVolModePreprovisionedPV,
- testpatterns.FsVolModeDynamicPV,
- testpatterns.BlockVolModePreprovisionedPV,
- testpatterns.BlockVolModeDynamicPV,
- },
+ tsInfo: storageframework.TestSuiteInfo{
+ Name: "disruptive",
+ FeatureTag: "[Disruptive][LinuxOnly]",
+ TestPatterns: patterns,
 },
 }
 }
-func (s 
*disruptiveTestSuite) GetTestSuiteInfo() TestSuiteInfo {
+
+// InitDisruptiveTestSuite returns disruptiveTestSuite that implements TestSuite interface
+// using test suite default patterns
+func InitDisruptiveTestSuite() storageframework.TestSuite {
+ testPatterns := []storageframework.TestPattern{
+ // FSVolMode is already covered in subpath testsuite
+ storageframework.DefaultFsInlineVolume,
+ storageframework.FsVolModePreprovisionedPV,
+ storageframework.FsVolModeDynamicPV,
+ storageframework.BlockVolModePreprovisionedPV,
+ storageframework.BlockVolModeDynamicPV,
+ }
+ return InitCustomDisruptiveTestSuite(testPatterns)
+}
+
+func (s *disruptiveTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo {
 return s.tsInfo
 }

-func (s *disruptiveTestSuite) SkipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) {
- skipVolTypePatterns(pattern, driver, testpatterns.NewVolTypeMap(testpatterns.PreprovisionedPV))
+func (s *disruptiveTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
+ skipVolTypePatterns(pattern, driver, storageframework.NewVolTypeMap(storageframework.PreprovisionedPV))
 }

-func (s *disruptiveTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) {
+func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
 type local struct {
- config *PerTestConfig
+ config *storageframework.PerTestConfig
 driverCleanup func()

 cs clientset.Interface
 ns *v1.Namespace

 // VolumeResource contains pv, pvc, sc, etc., owns cleaning that up
- resource *VolumeResource
+ resource *storageframework.VolumeResource
 pod *v1.Pod
 }
 var l local

- // No preconditions to test. Normally they would be in a BeforeEach here.
-
- // This intentionally comes after checking the preconditions because it
- // registers its own BeforeEach which creates the namespace. Beware that it
- // also registers an AfterEach which renders f unusable. Any code using
+ // Beware that it also registers an AfterEach which renders f unusable. Any code using
 // f must run inside an It or Context callback.
- f := framework.NewFrameworkWithCustomTimeouts("disruptive", getDriverTimeouts(driver))
+ f := framework.NewFrameworkWithCustomTimeouts("disruptive", storageframework.GetDriverTimeouts(driver))

 init := func() {
 l = local{}

@@ -92,12 +95,8 @@ func (s *disruptiveTestSuite) DefineTests(driver TestDriver, pattern testpattern
 // Now do the more expensive test initialization. 
l.config, l.driverCleanup = driver.PrepareTest(f) - if pattern.VolMode == v1.PersistentVolumeBlock && !driver.GetDriverInfo().Capabilities[CapBlock] { - e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", driver.GetDriverInfo().Name, pattern.VolMode) - } - testVolumeSizeRange := s.GetTestSuiteInfo().SupportedSizeRange - l.resource = CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange) + l.resource = storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange) } cleanup := func() { @@ -115,7 +114,7 @@ func (s *disruptiveTestSuite) DefineTests(driver TestDriver, pattern testpattern l.resource = nil } - errs = append(errs, tryFunc(l.driverCleanup)) + errs = append(errs, storageutils.TryFunc(l.driverCleanup)) l.driverCleanup = nil framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource") } @@ -155,7 +154,7 @@ func (s *disruptiveTestSuite) DefineTests(driver TestDriver, pattern testpattern var err error var pvcs []*v1.PersistentVolumeClaim var inlineSources []*v1.VolumeSource - if pattern.VolType == testpatterns.InlineVolume { + if pattern.VolType == storageframework.InlineVolume { inlineSources = append(inlineSources, l.resource.VolSource) } else { pvcs = append(pvcs, l.resource.Pvc) diff --git a/test/e2e/storage/testsuites/ephemeral.go b/test/e2e/storage/testsuites/ephemeral.go index fc77ba7332b..c1d24485a12 100644 --- a/test/e2e/storage/testsuites/ephemeral.go +++ b/test/e2e/storage/testsuites/ephemeral.go @@ -34,82 +34,74 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" - "k8s.io/kubernetes/test/e2e/storage/testpatterns" + storageframework "k8s.io/kubernetes/test/e2e/storage/framework" storageutils "k8s.io/kubernetes/test/e2e/storage/utils" ) type ephemeralTestSuite struct { - tsInfo TestSuiteInfo + tsInfo storageframework.TestSuiteInfo } -var _ TestSuite = &ephemeralTestSuite{} - -// InitEphemeralTestSuite returns ephemeralTestSuite that implements TestSuite interface -func InitEphemeralTestSuite() TestSuite { - genericLateBinding := testpatterns.DefaultFsGenericEphemeralVolume - genericLateBinding.Name += " (late-binding)" - genericLateBinding.BindingMode = storagev1.VolumeBindingWaitForFirstConsumer - - genericImmediateBinding := testpatterns.DefaultFsGenericEphemeralVolume - genericImmediateBinding.Name += " (immediate-binding)" - genericImmediateBinding.BindingMode = storagev1.VolumeBindingImmediate - - patterns := []testpatterns.TestPattern{ - testpatterns.DefaultFsCSIEphemeralVolume, - genericLateBinding, - genericImmediateBinding, - } - +// InitCustomEphemeralTestSuite returns ephemeralTestSuite that implements TestSuite interface +// using custom test patterns +func InitCustomEphemeralTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite { return &ephemeralTestSuite{ - tsInfo: TestSuiteInfo{ + tsInfo: storageframework.TestSuiteInfo{ Name: "ephemeral", TestPatterns: patterns, }, } } -func (p *ephemeralTestSuite) GetTestSuiteInfo() TestSuiteInfo { +// InitEphemeralTestSuite returns ephemeralTestSuite that implements TestSuite interface +// using test suite default patterns +func InitEphemeralTestSuite() storageframework.TestSuite { + genericLateBinding := storageframework.DefaultFsGenericEphemeralVolume + genericLateBinding.Name += " (late-binding)" + genericLateBinding.BindingMode = storagev1.VolumeBindingWaitForFirstConsumer + + genericImmediateBinding := 
storageframework.DefaultFsGenericEphemeralVolume + genericImmediateBinding.Name += " (immediate-binding)" + genericImmediateBinding.BindingMode = storagev1.VolumeBindingImmediate + + patterns := []storageframework.TestPattern{ + storageframework.DefaultFsCSIEphemeralVolume, + genericLateBinding, + genericImmediateBinding, + } + + return InitCustomEphemeralTestSuite(patterns) +} + +func (p *ephemeralTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo { return p.tsInfo } -func (p *ephemeralTestSuite) SkipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) { +func (p *ephemeralTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { } -func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) { +func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { type local struct { - config *PerTestConfig + config *storageframework.PerTestConfig driverCleanup func() testCase *EphemeralTest - resource *VolumeResource + resource *storageframework.VolumeResource } var ( - dInfo = driver.GetDriverInfo() - eDriver EphemeralTestDriver + eDriver storageframework.EphemeralTestDriver l local ) - ginkgo.BeforeEach(func() { - ok := false - switch pattern.VolType { - case testpatterns.CSIInlineVolume: - eDriver, ok = driver.(EphemeralTestDriver) - case testpatterns.GenericEphemeralVolume: - _, ok = driver.(DynamicPVTestDriver) - } - if !ok { - e2eskipper.Skipf("Driver %s doesn't support %q volumes -- skipping", dInfo.Name, pattern.VolType) - } - }) - - // This intentionally comes after checking the preconditions because it - // registers its own BeforeEach which creates the namespace. Beware that it - // also registers an AfterEach which renders f unusable. Any code using + // Beware that it also registers an AfterEach which renders f unusable. Any code using // f must run inside an It or Context callback. - f := framework.NewFrameworkWithCustomTimeouts("ephemeral", getDriverTimeouts(driver)) + f := framework.NewFrameworkWithCustomTimeouts("ephemeral", storageframework.GetDriverTimeouts(driver)) init := func() { - if pattern.VolType == testpatterns.GenericEphemeralVolume { + if pattern.VolType == storageframework.CSIInlineVolume { + eDriver, _ = driver.(storageframework.EphemeralTestDriver) + } + if pattern.VolType == storageframework.GenericEphemeralVolume { enabled, err := GenericEphemeralVolumesEnabled(f.ClientSet, f.Timeouts, f.Namespace.Name) framework.ExpectNoError(err, "check GenericEphemeralVolume feature") if !enabled { @@ -121,10 +113,10 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns // Now do the more expensive test initialization. 
l.config, l.driverCleanup = driver.PrepareTest(f) - l.resource = CreateVolumeResource(driver, l.config, pattern, e2evolume.SizeRange{}) + l.resource = storageframework.CreateVolumeResource(driver, l.config, pattern, e2evolume.SizeRange{}) switch pattern.VolType { - case testpatterns.CSIInlineVolume: + case storageframework.CSIInlineVolume: l.testCase = &EphemeralTest{ Client: l.config.Framework.ClientSet, Timeouts: f.Timeouts, @@ -135,7 +127,7 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns return eDriver.GetVolume(l.config, volumeNumber) }, } - case testpatterns.GenericEphemeralVolume: + case storageframework.GenericEphemeralVolume: l.testCase = &EphemeralTest{ Client: l.config.Framework.ClientSet, Timeouts: f.Timeouts, @@ -149,7 +141,7 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns cleanup := func() { var cleanUpErrs []error cleanUpErrs = append(cleanUpErrs, l.resource.CleanupResource()) - cleanUpErrs = append(cleanUpErrs, tryFunc(l.driverCleanup)) + cleanUpErrs = append(cleanUpErrs, storageutils.TryFunc(l.driverCleanup)) err := utilerrors.NewAggregate(cleanUpErrs) framework.ExpectNoError(err, "while cleaning up") } @@ -160,7 +152,7 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns l.testCase.ReadOnly = true l.testCase.RunningPodCheck = func(pod *v1.Pod) interface{} { - storageutils.VerifyExecInPodSucceed(f, pod, "mount | grep /mnt/test | grep ro,") + e2evolume.VerifyExecInPodSucceed(f, pod, "mount | grep /mnt/test | grep ro,") return nil } l.testCase.TestEphemeral() @@ -172,7 +164,7 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns l.testCase.ReadOnly = false l.testCase.RunningPodCheck = func(pod *v1.Pod) interface{} { - storageutils.VerifyExecInPodSucceed(f, pod, "mount | grep /mnt/test | grep rw,") + e2evolume.VerifyExecInPodSucceed(f, pod, "mount | grep /mnt/test | grep rw,") return nil } l.testCase.TestEphemeral() @@ -205,8 +197,8 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns // visible in the other. if !readOnly && !shared { ginkgo.By("writing data in one pod and checking for it in the second") - storageutils.VerifyExecInPodSucceed(f, pod, "touch /mnt/test-0/hello-world") - storageutils.VerifyExecInPodSucceed(f, pod2, "[ ! -f /mnt/test-0/hello-world ]") + e2evolume.VerifyExecInPodSucceed(f, pod, "touch /mnt/test-0/hello-world") + e2evolume.VerifyExecInPodSucceed(f, pod2, "[ ! 
-f /mnt/test-0/hello-world ]") } defer StopPodAndDependents(f.ClientSet, f.Timeouts, pod2) @@ -218,7 +210,7 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns ginkgo.It("should support multiple inline ephemeral volumes", func() { if pattern.BindingMode == storagev1.VolumeBindingImmediate && - pattern.VolType == testpatterns.GenericEphemeralVolume { + pattern.VolType == storageframework.GenericEphemeralVolume { e2eskipper.Skipf("Multiple generic ephemeral volumes with immediate binding may cause pod startup failures when the volumes get created in separate topology segments.") } diff --git a/test/e2e/storage/testsuites/fsgroupchangepolicy.go b/test/e2e/storage/testsuites/fsgroupchangepolicy.go index 0d44c6da7f9..999d0e9ed58 100644 --- a/test/e2e/storage/testsuites/fsgroupchangepolicy.go +++ b/test/e2e/storage/testsuites/fsgroupchangepolicy.go @@ -27,7 +27,7 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" - "k8s.io/kubernetes/test/e2e/storage/testpatterns" + storageframework "k8s.io/kubernetes/test/e2e/storage/framework" storageutils "k8s.io/kubernetes/test/e2e/storage/utils" utilpointer "k8s.io/utils/pointer" ) @@ -42,19 +42,17 @@ const ( ) type fsGroupChangePolicyTestSuite struct { - tsInfo TestSuiteInfo + tsInfo storageframework.TestSuiteInfo } -var _ TestSuite = &fsGroupChangePolicyTestSuite{} +var _ storageframework.TestSuite = &fsGroupChangePolicyTestSuite{} -// InitFsGroupChangePolicyTestSuite returns fsGroupChangePolicyTestSuite that implements TestSuite interface -func InitFsGroupChangePolicyTestSuite() TestSuite { +// InitCustomFsGroupChangePolicyTestSuite returns fsGroupChangePolicyTestSuite that implements TestSuite interface +func InitCustomFsGroupChangePolicyTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite { return &fsGroupChangePolicyTestSuite{ - tsInfo: TestSuiteInfo{ - Name: "fsgroupchangepolicy", - TestPatterns: []testpatterns.TestPattern{ - testpatterns.DefaultFsDynamicPV, - }, + tsInfo: storageframework.TestSuiteInfo{ + Name: "fsgroupchangepolicy", + TestPatterns: patterns, SupportedSizeRange: e2evolume.SizeRange{ Min: "1Mi", }, @@ -62,47 +60,51 @@ func InitFsGroupChangePolicyTestSuite() TestSuite { } } -func (s *fsGroupChangePolicyTestSuite) GetTestSuiteInfo() TestSuiteInfo { +// InitFsGroupChangePolicyTestSuite returns fsGroupChangePolicyTestSuite that implements TestSuite interface +func InitFsGroupChangePolicyTestSuite() storageframework.TestSuite { + patterns := []storageframework.TestPattern{ + storageframework.DefaultFsDynamicPV, + } + return InitCustomFsGroupChangePolicyTestSuite(patterns) +} + +func (s *fsGroupChangePolicyTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo { return s.tsInfo } -func (s *fsGroupChangePolicyTestSuite) SkipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) { - skipVolTypePatterns(pattern, driver, testpatterns.NewVolTypeMap(testpatterns.CSIInlineVolume, testpatterns.GenericEphemeralVolume)) +func (s *fsGroupChangePolicyTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { + skipVolTypePatterns(pattern, driver, storageframework.NewVolTypeMap(storageframework.CSIInlineVolume, storageframework.GenericEphemeralVolume)) + dInfo := driver.GetDriverInfo() + if !dInfo.Capabilities[storageframework.CapFsGroup] { + e2eskipper.Skipf("Driver %q does not support FsGroup - skipping", 
dInfo.Name) + } + + if pattern.VolMode == v1.PersistentVolumeBlock { + e2eskipper.Skipf("Test does not support non-filesystem volume mode - skipping") + } + + if pattern.VolType != storageframework.DynamicPV { + e2eskipper.Skipf("Suite %q does not support %v", s.tsInfo.Name, pattern.VolType) + } + + _, ok := driver.(storageframework.DynamicPVTestDriver) + if !ok { + e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType) + } } -func (s *fsGroupChangePolicyTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) { +func (s *fsGroupChangePolicyTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { type local struct { - config *PerTestConfig + config *storageframework.PerTestConfig driverCleanup func() - driver TestDriver - resource *VolumeResource + driver storageframework.TestDriver + resource *storageframework.VolumeResource } var l local - ginkgo.BeforeEach(func() { - dInfo := driver.GetDriverInfo() - if !dInfo.Capabilities[CapFsGroup] { - e2eskipper.Skipf("Driver %q does not support FsGroup - skipping", dInfo.Name) - } - if pattern.VolMode == v1.PersistentVolumeBlock { - e2eskipper.Skipf("Test does not support non-filesystem volume mode - skipping") - } - - if pattern.VolType != testpatterns.DynamicPV { - e2eskipper.Skipf("Suite %q does not support %v", s.tsInfo.Name, pattern.VolType) - } - - _, ok := driver.(DynamicPVTestDriver) - if !ok { - e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType) - } - }) - - // This intentionally comes after checking the preconditions because it - // registers its own BeforeEach which creates the namespace. Beware that it - // also registers an AfterEach which renders f unusable. Any code using + // Beware that it also registers an AfterEach which renders f unusable. Any code using // f must run inside an It or Context callback. 
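
The comment above flags a subtle lifecycle constraint: NewFrameworkWithCustomTimeouts registers its own ginkgo callbacks, so `f` is only populated once a spec actually runs. A minimal sketch of that constraint, using a hypothetical suite body (none of this code is part of the patch):

import (
	"github.com/onsi/ginkgo"
	"k8s.io/kubernetes/test/e2e/framework"
	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
)

func defineExampleTests(driver storageframework.TestDriver) {
	f := framework.NewFrameworkWithCustomTimeouts("example", storageframework.GetDriverTimeouts(driver))

	// WRONG here: the framework's BeforeEach has not run yet, so
	// f.Namespace is still nil at definition time.
	// ns := f.Namespace.Name

	ginkgo.It("uses the framework", func() {
		// Correct: by the time an It body executes, the namespace exists.
		framework.Logf("running in namespace %s", f.Namespace.Name)
	})
}
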
- f := framework.NewFrameworkWithCustomTimeouts("fsgroupchangepolicy", getDriverTimeouts(driver)) + f := framework.NewFrameworkWithCustomTimeouts("fsgroupchangepolicy", storageframework.GetDriverTimeouts(driver)) init := func() { e2eskipper.SkipIfNodeOSDistroIs("windows") @@ -110,7 +112,7 @@ func (s *fsGroupChangePolicyTestSuite) DefineTests(driver TestDriver, pattern te l.driver = driver l.config, l.driverCleanup = driver.PrepareTest(f) testVolumeSizeRange := s.GetTestSuiteInfo().SupportedSizeRange - l.resource = CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange) + l.resource = storageframework.CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange) } cleanup := func() { @@ -123,7 +125,7 @@ func (s *fsGroupChangePolicyTestSuite) DefineTests(driver TestDriver, pattern te } if l.driverCleanup != nil { - errs = append(errs, tryFunc(l.driverCleanup)) + errs = append(errs, storageutils.TryFunc(l.driverCleanup)) l.driverCleanup = nil } @@ -252,15 +254,15 @@ func createPodAndVerifyContentGid(f *framework.Framework, podConfig *e2epod.Conf ginkgo.By(fmt.Sprintf("Creating a sub-directory and file, and verifying their ownership is %s", podFsGroup)) cmd := fmt.Sprintf("touch %s", rootDirFilePath) var err error - _, _, err = storageutils.PodExec(f, pod, cmd) + _, _, err = e2evolume.PodExec(f, pod, cmd) framework.ExpectNoError(err) storageutils.VerifyFilePathGidInPod(f, rootDirFilePath, podFsGroup, pod) cmd = fmt.Sprintf("mkdir %s", subdir) - _, _, err = storageutils.PodExec(f, pod, cmd) + _, _, err = e2evolume.PodExec(f, pod, cmd) framework.ExpectNoError(err) cmd = fmt.Sprintf("touch %s", subDirFilePath) - _, _, err = storageutils.PodExec(f, pod, cmd) + _, _, err = e2evolume.PodExec(f, pod, cmd) framework.ExpectNoError(err) storageutils.VerifyFilePathGidInPod(f, subDirFilePath, podFsGroup, pod) return pod diff --git a/test/e2e/storage/testsuites/multivolume.go b/test/e2e/storage/testsuites/multivolume.go index 7a2fb934ba8..5961477693b 100644 --- a/test/e2e/storage/testsuites/multivolume.go +++ b/test/e2e/storage/testsuites/multivolume.go @@ -32,28 +32,25 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" - "k8s.io/kubernetes/test/e2e/storage/testpatterns" + storageframework "k8s.io/kubernetes/test/e2e/storage/framework" "k8s.io/kubernetes/test/e2e/storage/utils" + storageutils "k8s.io/kubernetes/test/e2e/storage/utils" imageutils "k8s.io/kubernetes/test/utils/image" ) type multiVolumeTestSuite struct { - tsInfo TestSuiteInfo + tsInfo storageframework.TestSuiteInfo } -var _ TestSuite = &multiVolumeTestSuite{} +var _ storageframework.TestSuite = &multiVolumeTestSuite{} -// InitMultiVolumeTestSuite returns multiVolumeTestSuite that implements TestSuite interface -func InitMultiVolumeTestSuite() TestSuite { +// InitCustomMultiVolumeTestSuite returns multiVolumeTestSuite that implements TestSuite interface +// using custom test patterns +func InitCustomMultiVolumeTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite { return &multiVolumeTestSuite{ - tsInfo: TestSuiteInfo{ - Name: "multiVolume [Slow]", - TestPatterns: []testpatterns.TestPattern{ - testpatterns.FsVolModePreprovisionedPV, - testpatterns.FsVolModeDynamicPV, - testpatterns.BlockVolModePreprovisionedPV, - testpatterns.BlockVolModeDynamicPV, - }, + tsInfo: storageframework.TestSuiteInfo{ + Name: "multiVolume [Slow]", + TestPatterns: patterns, SupportedSizeRange: 
e2evolume.SizeRange{ Min: "1Mi", }, @@ -61,23 +58,39 @@ func InitMultiVolumeTestSuite() TestSuite { } } -func (t *multiVolumeTestSuite) GetTestSuiteInfo() TestSuiteInfo { +// InitMultiVolumeTestSuite returns multiVolumeTestSuite that implements TestSuite interface +// using test suite default patterns +func InitMultiVolumeTestSuite() storageframework.TestSuite { + patterns := []storageframework.TestPattern{ + storageframework.FsVolModePreprovisionedPV, + storageframework.FsVolModeDynamicPV, + storageframework.BlockVolModePreprovisionedPV, + storageframework.BlockVolModeDynamicPV, + } + return InitCustomMultiVolumeTestSuite(patterns) +} + +func (t *multiVolumeTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo { return t.tsInfo } -func (t *multiVolumeTestSuite) SkipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) { - skipVolTypePatterns(pattern, driver, testpatterns.NewVolTypeMap(testpatterns.PreprovisionedPV)) +func (t *multiVolumeTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { + dInfo := driver.GetDriverInfo() + skipVolTypePatterns(pattern, driver, storageframework.NewVolTypeMap(storageframework.PreprovisionedPV)) + if pattern.VolMode == v1.PersistentVolumeBlock && !dInfo.Capabilities[storageframework.CapBlock] { + e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolMode) + } } -func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) { +func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { type local struct { - config *PerTestConfig + config *storageframework.PerTestConfig driverCleanup func() cs clientset.Interface ns *v1.Namespace - driver TestDriver - resources []*VolumeResource + driver storageframework.TestDriver + resources []*storageframework.VolumeResource migrationCheck *migrationOpCheck } @@ -86,18 +99,9 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter l local ) - ginkgo.BeforeEach(func() { - // Check preconditions. - if pattern.VolMode == v1.PersistentVolumeBlock && !dInfo.Capabilities[CapBlock] { - e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolMode) - } - }) - - // This intentionally comes after checking the preconditions because it - // registers its own BeforeEach which creates the namespace. Beware that it - // also registers an AfterEach which renders f unusable. Any code using + // Beware that it also registers an AfterEach which renders f unusable. Any code using // f must run inside an It or Context callback. 
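
Before the test bodies, it is worth noting how the Init/InitCustom split above is meant to be consumed: driver-specific test files compose a slice of suite constructors. A sketch under that assumption (the harness that iterates the slice is not shown in this patch; only the constructor signatures below come from it):

var exampleSuites = []func() storageframework.TestSuite{
	// Default pattern sets, exactly as before this patch.
	testsuites.InitMultiVolumeTestSuite,
	testsuites.InitDisruptiveTestSuite,
	// A trimmed-down variant via the new custom constructor.
	func() storageframework.TestSuite {
		return testsuites.InitCustomMultiVolumeTestSuite([]storageframework.TestPattern{
			storageframework.FsVolModeDynamicPV,
		})
	},
}
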
- f := framework.NewFrameworkWithCustomTimeouts("multivolume", getDriverTimeouts(driver))
+ f := framework.NewFrameworkWithCustomTimeouts("multivolume", storageframework.GetDriverTimeouts(driver))

 init := func() {
 l = local{}
@@ -116,7 +120,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 errs = append(errs, resource.CleanupResource())
 }

- errs = append(errs, tryFunc(l.driverCleanup))
+ errs = append(errs, storageutils.TryFunc(l.driverCleanup))
 l.driverCleanup = nil
 framework.ExpectNoError(errors.NewAggregate(errs), "while cleanup resource")
 l.migrationCheck.validateMigrationVolumeOpCounts()
@@ -131,7 +135,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 // Currently, multiple volumes are not generally available for pre-provisioned volume,
 // because containerized storage servers, such as iSCSI and rbd, are just returning
 // a static volume inside container, not actually creating a new volume per request.
- if pattern.VolType == testpatterns.PreprovisionedPV {
+ if pattern.VolType == storageframework.PreprovisionedPV {
 e2eskipper.Skipf("This test doesn't work with pre-provisioned volume -- skipping")
 }

@@ -143,7 +147,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 for i := 0; i < numVols; i++ {
 testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
- resource := CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
+ resource := storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
 l.resources = append(l.resources, resource)
 pvcs = append(pvcs, resource.Pvc)
 }
@@ -161,7 +165,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 // Currently, multiple volumes are not generally available for pre-provisioned volume,
 // because containerized storage servers, such as iSCSI and rbd, are just returning
 // a static volume inside container, not actually creating a new volume per request. 
- if pattern.VolType == testpatterns.PreprovisionedPV {
+ if pattern.VolType == storageframework.PreprovisionedPV {
 e2eskipper.Skipf("This test doesn't work with pre-provisioned volume -- skipping")
 }

@@ -169,8 +173,8 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 defer cleanup()

 // Check different-node test requirement
- if l.driver.GetDriverInfo().Capabilities[CapSingleNodeVolume] {
- e2eskipper.Skipf("Driver %s only supports %v -- skipping", l.driver.GetDriverInfo().Name, CapSingleNodeVolume)
+ if l.driver.GetDriverInfo().Capabilities[storageframework.CapSingleNodeVolume] {
+ e2eskipper.Skipf("Driver %s only supports %v -- skipping", l.driver.GetDriverInfo().Name, storageframework.CapSingleNodeVolume)
 }
 nodes, err := e2enode.GetReadySchedulableNodes(l.cs)
 framework.ExpectNoError(err)
@@ -193,7 +197,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter

 for i := 0; i < numVols; i++ {
 testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
- resource := CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
+ resource := storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
 l.resources = append(l.resources, resource)
 pvcs = append(pvcs, resource.Pvc)
 }
@@ -215,7 +219,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 // Currently, multiple volumes are not generally available for pre-provisioned volume,
 // because containerized storage servers, such as iSCSI and rbd, are just returning
 // a static volume inside container, not actually creating a new volume per request.
- if pattern.VolType == testpatterns.PreprovisionedPV {
+ if pattern.VolType == storageframework.PreprovisionedPV {
 e2eskipper.Skipf("This test doesn't work with pre-provisioned volume -- skipping")
 }

@@ -232,7 +236,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 curPattern.VolMode = v1.PersistentVolumeFilesystem
 }
 testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
- resource := CreateVolumeResource(driver, l.config, curPattern, testVolumeSizeRange)
+ resource := storageframework.CreateVolumeResource(driver, l.config, curPattern, testVolumeSizeRange)
 l.resources = append(l.resources, resource)
 pvcs = append(pvcs, resource.Pvc)
 }
@@ -254,7 +258,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 // Currently, multiple volumes are not generally available for pre-provisioned volume,
 // because containerized storage servers, such as iSCSI and rbd, are just returning
 // a static volume inside container, not actually creating a new volume per request. 
- if pattern.VolType == testpatterns.PreprovisionedPV { + if pattern.VolType == storageframework.PreprovisionedPV { e2eskipper.Skipf("This test doesn't work with pre-provisioned volume -- skipping") } @@ -262,8 +266,8 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter defer cleanup() // Check different-node test requirement - if l.driver.GetDriverInfo().Capabilities[CapSingleNodeVolume] { - e2eskipper.Skipf("Driver %s only supports %v -- skipping", l.driver.GetDriverInfo().Name, CapSingleNodeVolume) + if l.driver.GetDriverInfo().Capabilities[storageframework.CapSingleNodeVolume] { + e2eskipper.Skipf("Driver %s only supports %v -- skipping", l.driver.GetDriverInfo().Name, storageframework.CapSingleNodeVolume) } nodes, err := e2enode.GetReadySchedulableNodes(l.cs) framework.ExpectNoError(err) @@ -291,7 +295,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter curPattern.VolMode = v1.PersistentVolumeFilesystem } testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange - resource := CreateVolumeResource(driver, l.config, curPattern, testVolumeSizeRange) + resource := storageframework.CreateVolumeResource(driver, l.config, curPattern, testVolumeSizeRange) l.resources = append(l.resources, resource) pvcs = append(pvcs, resource.Pvc) } @@ -311,13 +315,13 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter numPods := 2 - if !l.driver.GetDriverInfo().Capabilities[CapMultiPODs] { + if !l.driver.GetDriverInfo().Capabilities[storageframework.CapMultiPODs] { e2eskipper.Skipf("Driver %q does not support multiple concurrent pods - skipping", dInfo.Name) } // Create volume testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange - resource := CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange) + resource := storageframework.CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange) l.resources = append(l.resources, resource) // Test access to the volume from pods on different node @@ -336,13 +340,13 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter numPods := 2 - if !l.driver.GetDriverInfo().Capabilities[CapMultiPODs] { + if !l.driver.GetDriverInfo().Capabilities[storageframework.CapMultiPODs] { e2eskipper.Skipf("Driver %q does not support multiple concurrent pods - skipping", dInfo.Name) } // Create volume testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange - resource := CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange) + resource := storageframework.CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange) l.resources = append(l.resources, resource) // Initialize the volume with a filesystem - it's going to be mounted as read-only below. 
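
A side note on the curPattern manipulation in the hunks above: TestPattern is a plain value type, so assignment copies the struct, and mutating the copy (for example forcing filesystem mode onto a block pattern) leaves the package-level defaults untouched. A minimal sketch using names from this patch:

// Derive a filesystem-mode variant of a default block pattern; the
// package-level storageframework.BlockVolModeDynamicPV is unaffected.
curPattern := storageframework.BlockVolModeDynamicPV
curPattern.Name += " (as filesystem)"
curPattern.VolMode = v1.PersistentVolumeFilesystem

The ephemeral suite's late-binding and immediate-binding variants earlier in this patch rely on the same copy-and-tweak trick.
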
@@ -364,8 +368,8 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter

 numPods := 2

- if !l.driver.GetDriverInfo().Capabilities[CapRWX] {
- e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", l.driver.GetDriverInfo().Name, CapRWX)
+ if !l.driver.GetDriverInfo().Capabilities[storageframework.CapRWX] {
+ e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", l.driver.GetDriverInfo().Name, storageframework.CapRWX)
 }

 // Check different-node test requirement
@@ -387,7 +391,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter

 // Create volume
 testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
- resource := CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange)
+ resource := storageframework.CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange)
 l.resources = append(l.resources, resource)

 // Test access to the volume from pods on different node
@@ -420,7 +424,7 @@ func testAccessMultipleVolumes(f *framework.Framework, cs clientset.Interface, n
 index := i + 1
 path := fmt.Sprintf("/mnt/volume%d", index)
 ginkgo.By(fmt.Sprintf("Checking if the volume%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
- utils.CheckVolumeModeOfPath(f, pod, *pvc.Spec.VolumeMode, path)
+ e2evolume.CheckVolumeModeOfPath(f, pod, *pvc.Spec.VolumeMode, path)

 if readSeedBase > 0 {
 ginkgo.By(fmt.Sprintf("Checking if read from the volume%d works properly", index))
@@ -521,7 +525,7 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 for i, pod := range pods {
 index := i + 1
 ginkgo.By(fmt.Sprintf("Checking if the volume in pod%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
- utils.CheckVolumeModeOfPath(f, pod, *pvc.Spec.VolumeMode, path)
+ e2evolume.CheckVolumeModeOfPath(f, pod, *pvc.Spec.VolumeMode, path)

 if readOnly {
 ginkgo.By("Skipping volume content checks, volume is read-only")
@@ -557,7 +561,7 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 index := i + 1
 // index of pod and index of pvc match, because pods are created in the same order above
 ginkgo.By(fmt.Sprintf("Rechecking if the volume in pod%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
- utils.CheckVolumeModeOfPath(f, pod, *pvc.Spec.VolumeMode, "/mnt/volume1")
+ e2evolume.CheckVolumeModeOfPath(f, pod, *pvc.Spec.VolumeMode, "/mnt/volume1")

 if readOnly {
 ginkgo.By("Skipping volume content checks, volume is read-only")
diff --git a/test/e2e/storage/testsuites/provisioning.go b/test/e2e/storage/testsuites/provisioning.go
index d973e4fa9a8..e46f4e5fb84 100644
--- a/test/e2e/storage/testsuites/provisioning.go
+++ b/test/e2e/storage/testsuites/provisioning.go
@@ -38,7 +38,8 @@ import (
 e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
 e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
- "k8s.io/kubernetes/test/e2e/storage/testpatterns"
+ storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
+ storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
 )

 // StorageClassTest represents parameters to be used by provisioning tests. 
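
One consumer of the newly imported storageutils package shows up later in this file's diff: claim-size selection now goes through storageutils.GetSizeRangesIntersection, the helper moved out of this package. A sketch of its contract with illustrative values (not taken from any real driver); an unset Min or Max falls back to the package-wide default bound:

// Returns the low end of the overlap between two size ranges.
claimSize, err := storageutils.GetSizeRangesIntersection(
	e2evolume.SizeRange{Min: "1Mi"},              // suite: at least 1Mi
	e2evolume.SizeRange{Min: "5Gi", Max: "10Ti"}, // driver: 5Gi..10Ti
)
// err == nil, claimSize == "5Gi"; disjoint ranges return an error instead.
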
@@ -62,21 +63,16 @@ type StorageClassTest struct {
 }

 type provisioningTestSuite struct {
- tsInfo TestSuiteInfo
+ tsInfo storageframework.TestSuiteInfo
 }

-var _ TestSuite = &provisioningTestSuite{}
-
-// InitProvisioningTestSuite returns provisioningTestSuite that implements TestSuite interface
-func InitProvisioningTestSuite() TestSuite {
+// InitCustomProvisioningTestSuite returns provisioningTestSuite that implements TestSuite interface
+// using custom test patterns
+func InitCustomProvisioningTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite {
 return &provisioningTestSuite{
- tsInfo: TestSuiteInfo{
- Name: "provisioning",
- TestPatterns: []testpatterns.TestPattern{
- testpatterns.DefaultFsDynamicPV,
- testpatterns.BlockVolModeDynamicPV,
- testpatterns.NtfsDynamicPV,
- },
+ tsInfo: storageframework.TestSuiteInfo{
+ Name: "provisioning",
+ TestPatterns: patterns,
 SupportedSizeRange: e2evolume.SizeRange{
 Min: "1Mi",
 },
@@ -84,16 +80,35 @@ func InitProvisioningTestSuite() TestSuite {
 }
 }

-func (p *provisioningTestSuite) GetTestSuiteInfo() TestSuiteInfo {
+// InitProvisioningTestSuite returns provisioningTestSuite that implements TestSuite interface
+// using test suite default patterns
+func InitProvisioningTestSuite() storageframework.TestSuite {
+ patterns := []storageframework.TestPattern{
+ storageframework.DefaultFsDynamicPV,
+ storageframework.BlockVolModeDynamicPV,
+ storageframework.NtfsDynamicPV,
+ }
+ return InitCustomProvisioningTestSuite(patterns)
+}
+
+func (p *provisioningTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo {
 return p.tsInfo
 }

-func (p *provisioningTestSuite) SkipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) {
+func (p *provisioningTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
+ // Check preconditions.
+ if pattern.VolType != storageframework.DynamicPV {
+ e2eskipper.Skipf("Suite %q does not support %v", p.tsInfo.Name, pattern.VolType)
+ }
+ dInfo := driver.GetDriverInfo()
+ if pattern.VolMode == v1.PersistentVolumeBlock && !dInfo.Capabilities[storageframework.CapBlock] {
+ e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolMode)
+ }
 }

-func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) {
+func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
 type local struct {
- config *PerTestConfig
+ config *storageframework.PerTestConfig
 driverCleanup func()
 testCase *StorageClassTest
@@ -106,42 +121,24 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte
 }
 var (
 dInfo = driver.GetDriverInfo()
- dDriver DynamicPVTestDriver
+ dDriver storageframework.DynamicPVTestDriver
 l local
 )

- ginkgo.BeforeEach(func() {
- // Check preconditions.
- if pattern.VolType != testpatterns.DynamicPV {
- e2eskipper.Skipf("Suite %q does not support %v", p.tsInfo.Name, pattern.VolType)
- }
- if pattern.VolMode == v1.PersistentVolumeBlock && !dInfo.Capabilities[CapBlock] {
- e2eskipper.Skipf("Driver %q does not support block volumes - skipping", dInfo.Name)
- }
-
- ok := false
- dDriver, ok = driver.(DynamicPVTestDriver)
- if !ok {
- e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType)
- }
- })
-
- // This intentionally comes after checking the preconditions because it
- // registers its own BeforeEach which creates the namespace. 
Beware that it - // also registers an AfterEach which renders f unusable. Any code using + // Beware that it also registers an AfterEach which renders f unusable. Any code using // f must run inside an It or Context callback. - f := framework.NewFrameworkWithCustomTimeouts("provisioning", getDriverTimeouts(driver)) + f := framework.NewFrameworkWithCustomTimeouts("provisioning", storageframework.GetDriverTimeouts(driver)) init := func() { l = local{} - + dDriver, _ = driver.(storageframework.DynamicPVTestDriver) // Now do the more expensive test initialization. l.config, l.driverCleanup = driver.PrepareTest(f) l.migrationCheck = newMigrationOpCheck(f.ClientSet, dInfo.InTreePluginName) l.cs = l.config.Framework.ClientSet testVolumeSizeRange := p.GetTestSuiteInfo().SupportedSizeRange driverVolumeSizeRange := dDriver.GetDriverInfo().SupportedSizeRange - claimSize, err := getSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange) + claimSize, err := storageutils.GetSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange) framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, driverVolumeSizeRange) l.sc = dDriver.GetDynamicProvisionStorageClass(l.config, pattern.FsType) @@ -172,7 +169,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte } cleanup := func() { - err := tryFunc(l.driverCleanup) + err := storageutils.TryFunc(l.driverCleanup) l.driverCleanup = nil framework.ExpectNoError(err, "while cleaning up driver") @@ -198,14 +195,14 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte }) ginkgo.It("should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]", func() { - if !dInfo.Capabilities[CapSnapshotDataSource] { + if !dInfo.Capabilities[storageframework.CapSnapshotDataSource] { e2eskipper.Skipf("Driver %q does not support populate data from snapshot - skipping", dInfo.Name) } if !dInfo.SupportedFsType.Has(pattern.FsType) { e2eskipper.Skipf("Driver %q does not support %q fs type - skipping", dInfo.Name, pattern.FsType) } - sDriver, ok := driver.(SnapshottableTestDriver) + sDriver, ok := driver.(storageframework.SnapshottableTestDriver) if !ok { framework.Failf("Driver %q has CapSnapshotDataSource but does not implement SnapshottableTestDriver", dInfo.Name) } @@ -214,7 +211,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte defer cleanup() dc := l.config.Framework.DynamicClient - testConfig := convertTestConfig(l.config) + testConfig := storageframework.ConvertTestConfig(l.config) expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name) dataSource, cleanupFunc := prepareSnapshotDataSourceForProvisioning(f, testConfig, l.config, pattern, l.cs, dc, l.pvc, l.sc, sDriver, pattern.VolMode, expectedContent) defer cleanupFunc() @@ -224,7 +221,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte ginkgo.By("checking whether the created volume has the pre-populated data") tests := []e2evolume.Test{ { - Volume: *createVolumeSource(claim.Name, false /* readOnly */), + Volume: *storageutils.CreateVolumeSource(claim.Name, false /* readOnly */), Mode: pattern.VolMode, File: "index.html", ExpectedContent: expectedContent, @@ -236,13 +233,13 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte }) ginkgo.It("should provision storage with pvc data source", func() { - if !dInfo.Capabilities[CapPVCDataSource] { + if 
!dInfo.Capabilities[storageframework.CapPVCDataSource] { e2eskipper.Skipf("Driver %q does not support cloning - skipping", dInfo.Name) } init() defer cleanup() - testConfig := convertTestConfig(l.config) + testConfig := storageframework.ConvertTestConfig(l.config) expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name) dataSource, dataSourceCleanup := preparePVCDataSourceForProvisioning(f, testConfig, l.cs, l.sourcePVC, l.sc, pattern.VolMode, expectedContent) defer dataSourceCleanup() @@ -252,7 +249,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte ginkgo.By("checking whether the created volume has the pre-populated data") tests := []e2evolume.Test{ { - Volume: *createVolumeSource(claim.Name, false /* readOnly */), + Volume: *storageutils.CreateVolumeSource(claim.Name, false /* readOnly */), Mode: pattern.VolMode, File: "index.html", ExpectedContent: expectedContent, @@ -265,17 +262,17 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte ginkgo.It("should provision storage with pvc data source in parallel [Slow]", func() { // Test cloning a single volume multiple times. - if !dInfo.Capabilities[CapPVCDataSource] { + if !dInfo.Capabilities[storageframework.CapPVCDataSource] { e2eskipper.Skipf("Driver %q does not support cloning - skipping", dInfo.Name) } - if pattern.VolMode == v1.PersistentVolumeBlock && !dInfo.Capabilities[CapBlock] { + if pattern.VolMode == v1.PersistentVolumeBlock && !dInfo.Capabilities[storageframework.CapBlock] { e2eskipper.Skipf("Driver %q does not support block volumes - skipping", dInfo.Name) } init() defer cleanup() - testConfig := convertTestConfig(l.config) + testConfig := storageframework.ConvertTestConfig(l.config) expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name) dataSource, dataSourceCleanup := preparePVCDataSourceForProvisioning(f, testConfig, l.cs, l.sourcePVC, l.sc, pattern.VolMode, expectedContent) defer dataSourceCleanup() @@ -300,7 +297,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte ginkgo.By(fmt.Sprintf("checking whether the created volume %d has the pre-populated data", i)) tests := []e2evolume.Test{ { - Volume: *createVolumeSource(claim.Name, false /* readOnly */), + Volume: *storageutils.CreateVolumeSource(claim.Name, false /* readOnly */), Mode: pattern.VolMode, File: "index.html", ExpectedContent: expectedContent, @@ -567,7 +564,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P class, err := t.Client.StorageV1().StorageClasses().Create(context.TODO(), t.Class, metav1.CreateOptions{}) framework.ExpectNoError(err) defer func() { - err = deleteStorageClass(t.Client, class.Name) + err = storageutils.DeleteStorageClass(t.Client, class.Name) framework.ExpectNoError(err, "While deleting storage class") }() @@ -788,13 +785,13 @@ func verifyPVCsPending(client clientset.Interface, pvcs []*v1.PersistentVolumeCl func prepareSnapshotDataSourceForProvisioning( f *framework.Framework, config e2evolume.TestConfig, - perTestConfig *PerTestConfig, - pattern testpatterns.TestPattern, + perTestConfig *storageframework.PerTestConfig, + pattern storageframework.TestPattern, client clientset.Interface, dynamicClient dynamic.Interface, initClaim *v1.PersistentVolumeClaim, class *storagev1.StorageClass, - sDriver SnapshottableTestDriver, + sDriver storageframework.SnapshottableTestDriver, mode v1.PersistentVolumeMode, injectContent string, ) (*v1.TypedLocalObjectReference, func()) { @@ 
-812,7 +809,7 @@ func prepareSnapshotDataSourceForProvisioning( // write namespace to the /mnt/test (= the volume). tests := []e2evolume.Test{ { - Volume: *createVolumeSource(updatedClaim.Name, false /* readOnly */), + Volume: *storageutils.CreateVolumeSource(updatedClaim.Name, false /* readOnly */), Mode: mode, File: "index.html", ExpectedContent: injectContent, @@ -820,7 +817,7 @@ func prepareSnapshotDataSourceForProvisioning( } e2evolume.InjectContent(f, config, nil, "", tests) - snapshotResource := CreateSnapshotResource(sDriver, perTestConfig, pattern, updatedClaim.GetName(), updatedClaim.GetNamespace(), f.Timeouts) + snapshotResource := storageframework.CreateSnapshotResource(sDriver, perTestConfig, pattern, updatedClaim.GetName(), updatedClaim.GetNamespace(), f.Timeouts) group := "snapshot.storage.k8s.io" dataSourceRef := &v1.TypedLocalObjectReference{ @@ -871,7 +868,7 @@ func preparePVCDataSourceForProvisioning( tests := []e2evolume.Test{ { - Volume: *createVolumeSource(sourcePVC.Name, false /* readOnly */), + Volume: *storageutils.CreateVolumeSource(sourcePVC.Name, false /* readOnly */), Mode: mode, File: "index.html", ExpectedContent: injectContent, diff --git a/test/e2e/storage/testsuites/snapshottable.go b/test/e2e/storage/testsuites/snapshottable.go index 0a98a99cc69..43732c6f6bf 100644 --- a/test/e2e/storage/testsuites/snapshottable.go +++ b/test/e2e/storage/testsuites/snapshottable.go @@ -28,9 +28,6 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/client-go/dynamic" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" @@ -38,50 +35,30 @@ import ( e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" - "k8s.io/kubernetes/test/e2e/storage/testpatterns" + storageframework "k8s.io/kubernetes/test/e2e/storage/framework" "k8s.io/kubernetes/test/e2e/storage/utils" + storageutils "k8s.io/kubernetes/test/e2e/storage/utils" ) -// snapshot CRD api group -const snapshotGroup = "snapshot.storage.k8s.io" - -// snapshot CRD api version -const snapshotAPIVersion = "snapshot.storage.k8s.io/v1" - // data file name const datapath = "/mnt/test/data" -var ( - // SnapshotGVR is GroupVersionResource for volumesnapshots - SnapshotGVR = schema.GroupVersionResource{Group: snapshotGroup, Version: "v1", Resource: "volumesnapshots"} - // SnapshotClassGVR is GroupVersionResource for volumesnapshotclasses - SnapshotClassGVR = schema.GroupVersionResource{Group: snapshotGroup, Version: "v1", Resource: "volumesnapshotclasses"} - // SnapshotContentGVR is GroupVersionResource for volumesnapshotcontents - SnapshotContentGVR = schema.GroupVersionResource{Group: snapshotGroup, Version: "v1", Resource: "volumesnapshotcontents"} -) - type snapshottableTestSuite struct { - tsInfo TestSuiteInfo + tsInfo storageframework.TestSuiteInfo } -var _ TestSuite = &snapshottableTestSuite{} - var ( - sDriver SnapshottableTestDriver - dDriver DynamicPVTestDriver + sDriver storageframework.SnapshottableTestDriver + dDriver storageframework.DynamicPVTestDriver ) -// InitSnapshottableTestSuite returns snapshottableTestSuite that implements TestSuite interface -func InitSnapshottableTestSuite() TestSuite { +// InitCustomSnapshottableTestSuite returns 
snapshottableTestSuite that implements TestSuite interface +// using custom test patterns +func InitCustomSnapshottableTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite { return &snapshottableTestSuite{ - tsInfo: TestSuiteInfo{ - Name: "snapshottable", - TestPatterns: []testpatterns.TestPattern{ - testpatterns.DynamicSnapshotDelete, - testpatterns.DynamicSnapshotRetain, - testpatterns.PreprovisionedSnapshotDelete, - testpatterns.PreprovisionedSnapshotRetain, - }, + tsInfo: storageframework.TestSuiteInfo{ + Name: "snapshottable", + TestPatterns: patterns, SupportedSizeRange: e2evolume.SizeRange{ Min: "1Mi", }, @@ -90,38 +67,46 @@ func InitSnapshottableTestSuite() TestSuite { } } -func (s *snapshottableTestSuite) GetTestSuiteInfo() TestSuiteInfo { +// InitSnapshottableTestSuite returns snapshottableTestSuite that implements TestSuite interface +// using testsuite default patterns +func InitSnapshottableTestSuite() storageframework.TestSuite { + patterns := []storageframework.TestPattern{ + storageframework.DynamicSnapshotDelete, + storageframework.DynamicSnapshotRetain, + storageframework.PreprovisionedSnapshotDelete, + storageframework.PreprovisionedSnapshotRetain, + } + return InitCustomSnapshottableTestSuite(patterns) +} + +func (s *snapshottableTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo { return s.tsInfo } -func (s *snapshottableTestSuite) SkipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) { +func (s *snapshottableTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { + // Check preconditions. + dInfo := driver.GetDriverInfo() + ok := false + _, ok = driver.(storageframework.SnapshottableTestDriver) + if !dInfo.Capabilities[storageframework.CapSnapshotDataSource] || !ok { + e2eskipper.Skipf("Driver %q does not support snapshots - skipping", dInfo.Name) + } + _, ok = driver.(storageframework.DynamicPVTestDriver) + if !ok { + e2eskipper.Skipf("Driver %q does not support dynamic provisioning - skipping", driver.GetDriverInfo().Name) + } } -func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) { - ginkgo.BeforeEach(func() { - // Check preconditions. - dInfo := driver.GetDriverInfo() - ok := false - sDriver, ok = driver.(SnapshottableTestDriver) - if !dInfo.Capabilities[CapSnapshotDataSource] || !ok { - e2eskipper.Skipf("Driver %q does not support snapshots - skipping", dInfo.Name) - } - dDriver, ok = driver.(DynamicPVTestDriver) - if !ok { - e2eskipper.Skipf("Driver %q does not support dynamic provisioning - skipping", driver.GetDriverInfo().Name) - } - }) +func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { - // This intentionally comes after checking the preconditions because it - // registers its own BeforeEach which creates the namespace. Beware that it - // also registers an AfterEach which renders f unusable. Any code using + // Beware that it also registers an AfterEach which renders f unusable. Any code using // f must run inside an It or Context callback. 
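The InitCustom*/Init* constructor pairing introduced above is repeated for every suite touched by this change. A minimal sketch of the shape, using a hypothetical fooTestSuite (only the suite name and pattern list vary per file; all other identifiers are the ones this diff introduces):

func InitCustomFooTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite {
	return &fooTestSuite{
		tsInfo: storageframework.TestSuiteInfo{
			Name:               "foo",
			TestPatterns:       patterns,
			SupportedSizeRange: e2evolume.SizeRange{Min: "1Mi"},
		},
	}
}

// The default constructor just delegates with the suite's historical pattern list,
// so existing callers keep their behavior while new callers can trim or extend it.
func InitFooTestSuite() storageframework.TestSuite {
	return InitCustomFooTestSuite([]storageframework.TestPattern{
		storageframework.DefaultFsDynamicPV,
	})
}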
f := framework.NewDefaultFramework("snapshotting") ginkgo.Describe("volume snapshot controller", func() { var ( err error - config *PerTestConfig + config *storageframework.PerTestConfig driverCleanup func() cleanupSteps []func() @@ -133,6 +118,8 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt originalMntTestData string ) init := func() { + sDriver, _ = driver.(storageframework.SnapshottableTestDriver) + dDriver, _ = driver.(storageframework.DynamicPVTestDriver) cleanupSteps = make([]func(), 0) // init snap class, create a source PV, PVC, Pod cs = f.ClientSet @@ -142,11 +129,11 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt config, driverCleanup = driver.PrepareTest(f) cleanupSteps = append(cleanupSteps, driverCleanup) - var volumeResource *VolumeResource + var volumeResource *storageframework.VolumeResource cleanupSteps = append(cleanupSteps, func() { framework.ExpectNoError(volumeResource.CleanupResource()) }) - volumeResource = CreateVolumeResource(dDriver, config, pattern, s.GetTestSuiteInfo().SupportedSizeRange) + volumeResource = storageframework.CreateVolumeResource(dDriver, config, pattern, s.GetTestSuiteInfo().SupportedSizeRange) pvc = volumeResource.Pvc sc = volumeResource.Sc @@ -181,7 +168,7 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt // Depending on how far the test executed, cleanup accordingly // Execute in reverse order, similar to defer stack for i := len(cleanupSteps) - 1; i >= 0; i-- { - err := tryFunc(cleanupSteps[i]) + err := storageutils.TryFunc(cleanupSteps[i]) framework.ExpectNoError(err, "while running cleanup steps") } @@ -201,11 +188,11 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt ) ginkgo.BeforeEach(func() { - var sr *SnapshotResource + var sr *storageframework.SnapshotResource cleanupSteps = append(cleanupSteps, func() { framework.ExpectNoError(sr.CleanupResource(f.Timeouts)) }) - sr = CreateSnapshotResource(sDriver, config, pattern, pvc.GetName(), pvc.GetNamespace(), f.Timeouts) + sr = storageframework.CreateSnapshotResource(sDriver, config, pattern, pvc.GetName(), pvc.GetNamespace(), f.Timeouts) vs = sr.Vs vscontent = sr.Vscontent vsc = sr.Vsclass @@ -213,13 +200,13 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt ginkgo.It("should check snapshot fields, check restore correctly works after modifying source data, check deletion", func() { ginkgo.By("checking the snapshot") // Get new copy of the snapshot - vs, err = dc.Resource(SnapshotGVR).Namespace(vs.GetNamespace()).Get(context.TODO(), vs.GetName(), metav1.GetOptions{}) + vs, err = dc.Resource(storageutils.SnapshotGVR).Namespace(vs.GetNamespace()).Get(context.TODO(), vs.GetName(), metav1.GetOptions{}) framework.ExpectNoError(err) // Get the bound snapshotContent snapshotStatus := vs.Object["status"].(map[string]interface{}) snapshotContentName := snapshotStatus["boundVolumeSnapshotContentName"].(string) - vscontent, err = dc.Resource(SnapshotContentGVR).Get(context.TODO(), snapshotContentName, metav1.GetOptions{}) + vscontent, err = dc.Resource(storageutils.SnapshotContentGVR).Get(context.TODO(), snapshotContentName, metav1.GetOptions{}) framework.ExpectNoError(err) snapshotContentSpec := vscontent.Object["spec"].(map[string]interface{}) @@ -228,7 +215,7 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt // Check SnapshotContent properties ginkgo.By("checking the SnapshotContent") // 
PreprovisionedCreatedSnapshot does not need to set volume snapshot class name - if pattern.SnapshotType != testpatterns.PreprovisionedCreatedSnapshot { + if pattern.SnapshotType != storageframework.PreprovisionedCreatedSnapshot { framework.ExpectEqual(snapshotContentSpec["volumeSnapshotClassName"], vsc.GetName()) } framework.ExpectEqual(volumeSnapshotRef["name"], vs.GetName()) @@ -281,316 +268,20 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt framework.ExpectNoError(err) ginkgo.By("should delete the VolumeSnapshotContent according to its deletion policy") - err = DeleteAndWaitSnapshot(dc, vs.GetNamespace(), vs.GetName(), framework.Poll, f.Timeouts.SnapshotDelete) + err = storageutils.DeleteAndWaitSnapshot(dc, vs.GetNamespace(), vs.GetName(), framework.Poll, f.Timeouts.SnapshotDelete) framework.ExpectNoError(err) switch pattern.SnapshotDeletionPolicy { - case testpatterns.DeleteSnapshot: + case storageframework.DeleteSnapshot: ginkgo.By("checking the SnapshotContent has been deleted") - err = utils.WaitForGVRDeletion(dc, SnapshotContentGVR, vscontent.GetName(), framework.Poll, f.Timeouts.SnapshotDelete) + err = utils.WaitForGVRDeletion(dc, storageutils.SnapshotContentGVR, vscontent.GetName(), framework.Poll, f.Timeouts.SnapshotDelete) framework.ExpectNoError(err) - case testpatterns.RetainSnapshot: + case storageframework.RetainSnapshot: ginkgo.By("checking the SnapshotContent has not been deleted") - err = utils.WaitForGVRDeletion(dc, SnapshotContentGVR, vscontent.GetName(), 1*time.Second /* poll */, 30*time.Second /* timeout */) + err = utils.WaitForGVRDeletion(dc, storageutils.SnapshotContentGVR, vscontent.GetName(), 1*time.Second /* poll */, 30*time.Second /* timeout */) framework.ExpectError(err) } }) }) }) } - -// WaitForSnapshotReady waits for a VolumeSnapshot to be ready to use or until timeout occurs, whichever comes first. -func WaitForSnapshotReady(c dynamic.Interface, ns string, snapshotName string, poll, timeout time.Duration) error { - framework.Logf("Waiting up to %v for VolumeSnapshot %s to become ready", timeout, snapshotName) - - if successful := utils.WaitUntil(poll, timeout, func() bool { - snapshot, err := c.Resource(SnapshotGVR).Namespace(ns).Get(context.TODO(), snapshotName, metav1.GetOptions{}) - if err != nil { - framework.Logf("Failed to get snapshot %q, retrying in %v. 
Error: %v", snapshotName, poll, err) - return false - } - - status := snapshot.Object["status"] - if status == nil { - framework.Logf("VolumeSnapshot %s found but is not ready.", snapshotName) - return false - } - value := status.(map[string]interface{}) - if value["readyToUse"] == true { - framework.Logf("VolumeSnapshot %s found and is ready", snapshotName) - return true - } - - framework.Logf("VolumeSnapshot %s found but is not ready.", snapshotName) - return false - }); successful { - return nil - } - - return fmt.Errorf("VolumeSnapshot %s is not ready within %v", snapshotName, timeout) -} - -// DeleteAndWaitSnapshot deletes a VolumeSnapshot and waits for it to be deleted or until timeout occurs, whichever comes first -func DeleteAndWaitSnapshot(dc dynamic.Interface, ns string, snapshotName string, poll, timeout time.Duration) error { - var err error - ginkgo.By("deleting the snapshot") - err = dc.Resource(SnapshotGVR).Namespace(ns).Delete(context.TODO(), snapshotName, metav1.DeleteOptions{}) - if err != nil && !apierrors.IsNotFound(err) { - return err - } - - ginkgo.By("checking the Snapshot has been deleted") - err = utils.WaitForNamespacedGVRDeletion(dc, SnapshotGVR, ns, snapshotName, poll, timeout) - - return err -} - -// SnapshotResource represents a snapshot class, a snapshot and its bound snapshot contents for a specific test case -type SnapshotResource struct { - Config *PerTestConfig - Pattern testpatterns.TestPattern - - Vs *unstructured.Unstructured - Vscontent *unstructured.Unstructured - Vsclass *unstructured.Unstructured -} - -// CreateSnapshot creates a VolumeSnapshotClass with given SnapshotDeletionPolicy and a VolumeSnapshot -// from the VolumeSnapshotClass using a dynamic client. -// Returns the unstructured VolumeSnapshotClass and VolumeSnapshot objects. 
-func CreateSnapshot(sDriver SnapshottableTestDriver, config *PerTestConfig, pattern testpatterns.TestPattern, pvcName string, pvcNamespace string, timeouts *framework.TimeoutContext) (*unstructured.Unstructured, *unstructured.Unstructured) { - defer ginkgo.GinkgoRecover() - var err error - if pattern.SnapshotType != testpatterns.DynamicCreatedSnapshot && pattern.SnapshotType != testpatterns.PreprovisionedCreatedSnapshot { - err = fmt.Errorf("SnapshotType must be set to either DynamicCreatedSnapshot or PreprovisionedCreatedSnapshot") - framework.ExpectNoError(err) - } - dc := config.Framework.DynamicClient - - ginkgo.By("creating a SnapshotClass") - sclass := sDriver.GetSnapshotClass(config) - if sclass == nil { - framework.Failf("Failed to get snapshot class based on test config") - } - sclass.Object["deletionPolicy"] = pattern.SnapshotDeletionPolicy.String() - - sclass, err = dc.Resource(SnapshotClassGVR).Create(context.TODO(), sclass, metav1.CreateOptions{}) - framework.ExpectNoError(err) - - sclass, err = dc.Resource(SnapshotClassGVR).Get(context.TODO(), sclass.GetName(), metav1.GetOptions{}) - framework.ExpectNoError(err) - - ginkgo.By("creating a dynamic VolumeSnapshot") - // prepare a dynamically provisioned volume snapshot with certain data - snapshot := getSnapshot(pvcName, pvcNamespace, sclass.GetName()) - - snapshot, err = dc.Resource(SnapshotGVR).Namespace(snapshot.GetNamespace()).Create(context.TODO(), snapshot, metav1.CreateOptions{}) - framework.ExpectNoError(err) - - return sclass, snapshot -} - -// GetSnapshotContentFromSnapshot returns the VolumeSnapshotContent object Bound to a -// given VolumeSnapshot -func GetSnapshotContentFromSnapshot(dc dynamic.Interface, snapshot *unstructured.Unstructured) *unstructured.Unstructured { - defer ginkgo.GinkgoRecover() - err := WaitForSnapshotReady(dc, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, framework.SnapshotCreateTimeout) - framework.ExpectNoError(err) - - vs, err := dc.Resource(SnapshotGVR).Namespace(snapshot.GetNamespace()).Get(context.TODO(), snapshot.GetName(), metav1.GetOptions{}) - - snapshotStatus := vs.Object["status"].(map[string]interface{}) - snapshotContentName := snapshotStatus["boundVolumeSnapshotContentName"].(string) - framework.Logf("received snapshotStatus %v", snapshotStatus) - framework.Logf("snapshotContentName %s", snapshotContentName) - framework.ExpectNoError(err) - - vscontent, err := dc.Resource(SnapshotContentGVR).Get(context.TODO(), snapshotContentName, metav1.GetOptions{}) - framework.ExpectNoError(err) - - return vscontent - -} - -// CreateSnapshotResource creates a snapshot resource for the current test. It knows how to deal with -// different test pattern snapshot provisioning and deletion policy -func CreateSnapshotResource(sDriver SnapshottableTestDriver, config *PerTestConfig, pattern testpatterns.TestPattern, pvcName string, pvcNamespace string, timeouts *framework.TimeoutContext) *SnapshotResource { - var err error - r := SnapshotResource{ - Config: config, - Pattern: pattern, - } - r.Vsclass, r.Vs = CreateSnapshot(sDriver, config, pattern, pvcName, pvcNamespace, timeouts) - - dc := r.Config.Framework.DynamicClient - - r.Vscontent = GetSnapshotContentFromSnapshot(dc, r.Vs) - - if pattern.SnapshotType == testpatterns.PreprovisionedCreatedSnapshot { - // prepare a pre-provisioned VolumeSnapshotContent with certain data - // Because this could be run with an external CSI driver, we have no way - // to pre-provision the snapshot as we normally would using their API. 
- // We instead dynamically take a snapshot (above step), delete the old snapshot, - // and create another snapshot using the first snapshot's snapshot handle. - - ginkgo.By("updating the snapshot content deletion policy to retain") - r.Vscontent.Object["spec"].(map[string]interface{})["deletionPolicy"] = "Retain" - - r.Vscontent, err = dc.Resource(SnapshotContentGVR).Update(context.TODO(), r.Vscontent, metav1.UpdateOptions{}) - framework.ExpectNoError(err) - - ginkgo.By("recording the volume handle and snapshotHandle") - snapshotHandle := r.Vscontent.Object["status"].(map[string]interface{})["snapshotHandle"].(string) - framework.Logf("Recording snapshot handle: %s", snapshotHandle) - csiDriverName := r.Vsclass.Object["driver"].(string) - - // If the deletion policy is retain on vscontent: - // when vs is deleted vscontent will not be deleted - // when the vscontent is manually deleted then the underlying snapshot resource will not be deleted. - // We exploit this to create a snapshot resource from which we can create a preprovisioned snapshot - ginkgo.By("deleting the snapshot and snapshot content") - err = dc.Resource(SnapshotGVR).Namespace(r.Vs.GetNamespace()).Delete(context.TODO(), r.Vs.GetName(), metav1.DeleteOptions{}) - if apierrors.IsNotFound(err) { - err = nil - } - framework.ExpectNoError(err) - - ginkgo.By("checking the Snapshot has been deleted") - err = utils.WaitForNamespacedGVRDeletion(dc, SnapshotGVR, r.Vs.GetName(), r.Vs.GetNamespace(), framework.Poll, timeouts.SnapshotDelete) - framework.ExpectNoError(err) - - err = dc.Resource(SnapshotContentGVR).Delete(context.TODO(), r.Vscontent.GetName(), metav1.DeleteOptions{}) - if apierrors.IsNotFound(err) { - err = nil - } - framework.ExpectNoError(err) - - ginkgo.By("checking the Snapshot content has been deleted") - err = utils.WaitForGVRDeletion(dc, SnapshotContentGVR, r.Vscontent.GetName(), framework.Poll, timeouts.SnapshotDelete) - framework.ExpectNoError(err) - - ginkgo.By("creating a snapshot content with the snapshot handle") - uuid := uuid.NewUUID() - - snapName := getPreProvisionedSnapshotName(uuid) - snapcontentName := getPreProvisionedSnapshotContentName(uuid) - - r.Vscontent = getPreProvisionedSnapshotContent(snapcontentName, snapName, pvcNamespace, snapshotHandle, pattern.SnapshotDeletionPolicy.String(), csiDriverName) - r.Vscontent, err = dc.Resource(SnapshotContentGVR).Create(context.TODO(), r.Vscontent, metav1.CreateOptions{}) - framework.ExpectNoError(err) - - ginkgo.By("creating a snapshot with that snapshot content") - r.Vs = getPreProvisionedSnapshot(snapName, pvcNamespace, snapcontentName) - r.Vs, err = dc.Resource(SnapshotGVR).Namespace(r.Vs.GetNamespace()).Create(context.TODO(), r.Vs, metav1.CreateOptions{}) - framework.ExpectNoError(err) - - err = WaitForSnapshotReady(dc, r.Vs.GetNamespace(), r.Vs.GetName(), framework.Poll, timeouts.SnapshotCreate) - framework.ExpectNoError(err) - - ginkgo.By("getting the snapshot and snapshot content") - r.Vs, err = dc.Resource(SnapshotGVR).Namespace(r.Vs.GetNamespace()).Get(context.TODO(), r.Vs.GetName(), metav1.GetOptions{}) - framework.ExpectNoError(err) - - r.Vscontent, err = dc.Resource(SnapshotContentGVR).Get(context.TODO(), r.Vscontent.GetName(), metav1.GetOptions{}) - framework.ExpectNoError(err) - } - return &r -} - -// CleanupResource cleans up the snapshot resource and ignores not found errors -func (sr *SnapshotResource) CleanupResource(timeouts *framework.TimeoutContext) error { - var err error - var cleanupErrs []error - - dc := 
sr.Config.Framework.DynamicClient - - if sr.Vs != nil { - framework.Logf("deleting snapshot %q/%q", sr.Vs.GetNamespace(), sr.Vs.GetName()) - - sr.Vs, err = dc.Resource(SnapshotGVR).Namespace(sr.Vs.GetNamespace()).Get(context.TODO(), sr.Vs.GetName(), metav1.GetOptions{}) - switch { - case err == nil: - snapshotStatus := sr.Vs.Object["status"].(map[string]interface{}) - snapshotContentName := snapshotStatus["boundVolumeSnapshotContentName"].(string) - framework.Logf("received snapshotStatus %v", snapshotStatus) - framework.Logf("snapshotContentName %s", snapshotContentName) - - boundVsContent, err := dc.Resource(SnapshotContentGVR).Get(context.TODO(), snapshotContentName, metav1.GetOptions{}) - switch { - case err == nil: - if boundVsContent.Object["spec"].(map[string]interface{})["deletionPolicy"] != "Delete" { - // The purpose of this block is to prevent physical snapshotContent leaks. - // We must update the SnapshotContent to have Delete Deletion policy, - // or else the physical snapshot content will be leaked. - boundVsContent.Object["spec"].(map[string]interface{})["deletionPolicy"] = "Delete" - boundVsContent, err = dc.Resource(SnapshotContentGVR).Update(context.TODO(), boundVsContent, metav1.UpdateOptions{}) - framework.ExpectNoError(err) - } - err = dc.Resource(SnapshotGVR).Namespace(sr.Vs.GetNamespace()).Delete(context.TODO(), sr.Vs.GetName(), metav1.DeleteOptions{}) - if apierrors.IsNotFound(err) { - err = nil - } - framework.ExpectNoError(err) - - err = utils.WaitForGVRDeletion(dc, SnapshotContentGVR, boundVsContent.GetName(), framework.Poll, timeouts.SnapshotDelete) - framework.ExpectNoError(err) - - case apierrors.IsNotFound(err): - // the volume snapshot is not bound to snapshot content yet - err = dc.Resource(SnapshotGVR).Namespace(sr.Vs.GetNamespace()).Delete(context.TODO(), sr.Vs.GetName(), metav1.DeleteOptions{}) - if apierrors.IsNotFound(err) { - err = nil - } - framework.ExpectNoError(err) - - err = utils.WaitForNamespacedGVRDeletion(dc, SnapshotGVR, sr.Vs.GetName(), sr.Vs.GetNamespace(), framework.Poll, timeouts.SnapshotDelete) - framework.ExpectNoError(err) - default: - cleanupErrs = append(cleanupErrs, err) - } - case apierrors.IsNotFound(err): - // Hope that the underlying snapshot content and resource is gone already - default: - cleanupErrs = append(cleanupErrs, err) - } - } - if sr.Vscontent != nil { - framework.Logf("deleting snapshot content %q", sr.Vscontent.GetName()) - - sr.Vscontent, err = dc.Resource(SnapshotContentGVR).Get(context.TODO(), sr.Vscontent.GetName(), metav1.GetOptions{}) - switch { - case err == nil: - if sr.Vscontent.Object["spec"].(map[string]interface{})["deletionPolicy"] != "Delete" { - // The purpose of this block is to prevent physical snapshotContent leaks. - // We must update the SnapshotContent to have Delete Deletion policy, - // or else the physical snapshot content will be leaked. 
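The deletionPolicy flips in this cleanup path are the guard against leaked physical snapshots. Written with the apimachinery helper instead of chained map assertions, the same step would look roughly like this (a sketch mirroring the block here; note that after this PR the GVR constants move to storageutils):

// Force Delete so the backing snapshot is removed together with the content object.
if err := unstructured.SetNestedField(boundVsContent.Object, "Delete", "spec", "deletionPolicy"); err != nil {
	framework.Failf("failed to set deletionPolicy: %v", err)
}
boundVsContent, err = dc.Resource(SnapshotContentGVR).Update(context.TODO(), boundVsContent, metav1.UpdateOptions{})
framework.ExpectNoError(err)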
- sr.Vscontent.Object["spec"].(map[string]interface{})["deletionPolicy"] = "Delete" - sr.Vscontent, err = dc.Resource(SnapshotContentGVR).Update(context.TODO(), sr.Vscontent, metav1.UpdateOptions{}) - framework.ExpectNoError(err) - } - err = dc.Resource(SnapshotContentGVR).Delete(context.TODO(), sr.Vscontent.GetName(), metav1.DeleteOptions{}) - if apierrors.IsNotFound(err) { - err = nil - } - framework.ExpectNoError(err) - - err = utils.WaitForGVRDeletion(dc, SnapshotContentGVR, sr.Vscontent.GetName(), framework.Poll, timeouts.SnapshotDelete) - framework.ExpectNoError(err) - case apierrors.IsNotFound(err): - // Hope the underlying physical snapshot resource has been deleted already - default: - cleanupErrs = append(cleanupErrs, err) - } - } - if sr.Vsclass != nil { - framework.Logf("deleting snapshot class %q", sr.Vsclass.GetName()) - // typically this snapshot class has already been deleted - err = dc.Resource(SnapshotClassGVR).Delete(context.TODO(), sr.Vsclass.GetName(), metav1.DeleteOptions{}) - if err != nil && !apierrors.IsNotFound(err) { - framework.Failf("Error deleting snapshot class %q. Error: %v", sr.Vsclass.GetName(), err) - } - err = utils.WaitForGVRDeletion(dc, SnapshotClassGVR, sr.Vsclass.GetName(), framework.Poll, timeouts.SnapshotDelete) - framework.ExpectNoError(err) - } - return utilerrors.NewAggregate(cleanupErrs) -} diff --git a/test/e2e/storage/testsuites/snapshottable_stress.go b/test/e2e/storage/testsuites/snapshottable_stress.go index acccf953535..f006ac12f7a 100644 --- a/test/e2e/storage/testsuites/snapshottable_stress.go +++ b/test/e2e/storage/testsuites/snapshottable_stress.go @@ -33,21 +33,22 @@ import ( e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" - "k8s.io/kubernetes/test/e2e/storage/testpatterns" + storageframework "k8s.io/kubernetes/test/e2e/storage/framework" + storageutils "k8s.io/kubernetes/test/e2e/storage/utils" ) type snapshottableStressTestSuite struct { - tsInfo TestSuiteInfo + tsInfo storageframework.TestSuiteInfo } type snapshottableStressTest struct { - config *PerTestConfig - testOptions VolumeSnapshotStressTestOptions + config *storageframework.PerTestConfig + testOptions storageframework.VolumeSnapshotStressTestOptions driverCleanup func() pods []*v1.Pod - volumes []*VolumeResource - snapshots []*SnapshotResource + volumes []*storageframework.VolumeResource + snapshots []*storageframework.SnapshotResource // Because we are appending snapshot resources in parallel goroutines. 
snapshotsMutex sync.Mutex @@ -57,17 +58,13 @@ type snapshottableStressTest struct { cancel context.CancelFunc } -var _ TestSuite = &snapshottableStressTestSuite{} - -// InitSnapshottableStressTestSuite returns snapshottableStressTestSuite that implements TestSuite interface -func InitSnapshottableStressTestSuite() TestSuite { +// InitCustomSnapshottableStressTestSuite returns snapshottableStressTestSuite that implements TestSuite interface +// using custom test patterns +func InitCustomSnapshottableStressTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite { return &snapshottableStressTestSuite{ - tsInfo: TestSuiteInfo{ - Name: "snapshottable-stress", - TestPatterns: []testpatterns.TestPattern{ - testpatterns.DynamicSnapshotDelete, - testpatterns.DynamicSnapshotRetain, - }, + tsInfo: storageframework.TestSuiteInfo{ + Name: "snapshottable-stress", + TestPatterns: patterns, SupportedSizeRange: e2evolume.SizeRange{ Min: "1Mi", }, @@ -76,55 +73,58 @@ func InitSnapshottableStressTestSuite() TestSuite { } } -func (t *snapshottableStressTestSuite) GetTestSuiteInfo() TestSuiteInfo { +// InitSnapshottableStressTestSuite returns snapshottableStressTestSuite that implements TestSuite interface +// using testsuite default patterns +func InitSnapshottableStressTestSuite() storageframework.TestSuite { + patterns := []storageframework.TestPattern{ + storageframework.DynamicSnapshotDelete, + storageframework.DynamicSnapshotRetain, + } + return InitCustomSnapshottableStressTestSuite(patterns) +} + +func (t *snapshottableStressTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo { return t.tsInfo } -func (t *snapshottableStressTestSuite) SkipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) { +func (t *snapshottableStressTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { + driverInfo := driver.GetDriverInfo() + var ok bool + if driverInfo.VolumeSnapshotStressTestOptions == nil { + e2eskipper.Skipf("Driver %s doesn't specify snapshot stress test options -- skipping", driverInfo.Name) + } + if driverInfo.VolumeSnapshotStressTestOptions.NumPods <= 0 { + framework.Failf("NumPods in snapshot stress test options must be a positive integer, received: %d", driverInfo.VolumeSnapshotStressTestOptions.NumPods) + } + if driverInfo.VolumeSnapshotStressTestOptions.NumSnapshots <= 0 { + framework.Failf("NumSnapshots in snapshot stress test options must be a positive integer, received: %d", driverInfo.VolumeSnapshotStressTestOptions.NumSnapshots) + } + _, ok = driver.(storageframework.SnapshottableTestDriver) + if !driverInfo.Capabilities[storageframework.CapSnapshotDataSource] || !ok { + e2eskipper.Skipf("Driver %q doesn't implement SnapshottableTestDriver - skipping", driverInfo.Name) + } + + _, ok = driver.(storageframework.DynamicPVTestDriver) + if !ok { + e2eskipper.Skipf("Driver %s doesn't implement DynamicPVTestDriver -- skipping", driverInfo.Name) + } } -func (t *snapshottableStressTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) { +func (t *snapshottableStressTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { var ( - driverInfo *DriverInfo - snapshottableDriver SnapshottableTestDriver + driverInfo *storageframework.DriverInfo + snapshottableDriver storageframework.SnapshottableTestDriver cs clientset.Interface stressTest *snapshottableStressTest ) - // Check preconditions before setting up namespace via framework below. 
- ginkgo.BeforeEach(func() { - driverInfo = driver.GetDriverInfo() - if driverInfo.VolumeSnapshotStressTestOptions == nil { - e2eskipper.Skipf("Driver %s doesn't specify snapshot stress test options -- skipping", driverInfo.Name) - } - if driverInfo.VolumeSnapshotStressTestOptions.NumPods <= 0 { - framework.Failf("NumPods in snapshot stress test options must be a positive integer, received: %d", driverInfo.VolumeSnapshotStressTestOptions.NumPods) - } - if driverInfo.VolumeSnapshotStressTestOptions.NumSnapshots <= 0 { - framework.Failf("NumSnapshots in snapshot stress test options must be a positive integer, received: %d", driverInfo.VolumeSnapshotStressTestOptions.NumSnapshots) - } - - // Because we're initializing snapshottableDriver, both vars must exist. - ok := false - - snapshottableDriver, ok = driver.(SnapshottableTestDriver) - if !driverInfo.Capabilities[CapSnapshotDataSource] || !ok { - e2eskipper.Skipf("Driver %q doesn't implement SnapshottableTestDriver - skipping", driverInfo.Name) - } - - _, ok = driver.(DynamicPVTestDriver) - if !ok { - e2eskipper.Skipf("Driver %s doesn't implement DynamicPVTestDriver -- skipping", driverInfo.Name) - } - }) - - // This intentionally comes after checking the preconditions because it - // registers its own BeforeEach which creates the namespace. Beware that it - // also registers an AfterEach which renders f unusable. Any code using + // Beware that it also registers an AfterEach which renders f unusable. Any code using // f must run inside an It or Context callback. f := framework.NewDefaultFramework("snapshottable-stress") init := func() { + driverInfo = driver.GetDriverInfo() + snapshottableDriver, _ = driver.(storageframework.SnapshottableTestDriver) cs = f.ClientSet config, driverCleanup := driver.PrepareTest(f) ctx, cancel := context.WithCancel(context.Background()) @@ -132,8 +132,8 @@ func (t *snapshottableStressTestSuite) DefineTests(driver TestDriver, pattern te stressTest = &snapshottableStressTest{ config: config, driverCleanup: driverCleanup, - volumes: []*VolumeResource{}, - snapshots: []*SnapshotResource{}, + volumes: []*storageframework.VolumeResource{}, + snapshots: []*storageframework.SnapshotResource{}, pods: []*v1.Pod{}, testOptions: *driverInfo.VolumeSnapshotStressTestOptions, ctx: ctx, @@ -145,7 +145,7 @@ func (t *snapshottableStressTestSuite) DefineTests(driver TestDriver, pattern te for i := 0; i < stressTest.testOptions.NumPods; i++ { framework.Logf("Creating resources for pod %d/%d", i, stressTest.testOptions.NumPods-1) - volume := CreateVolumeResource(driver, stressTest.config, pattern, t.GetTestSuiteInfo().SupportedSizeRange) + volume := storageframework.CreateVolumeResource(driver, stressTest.config, pattern, t.GetTestSuiteInfo().SupportedSizeRange) stressTest.volumes = append(stressTest.volumes, volume) podConfig := e2epod.Config{ @@ -197,7 +197,7 @@ func (t *snapshottableStressTestSuite) DefineTests(driver TestDriver, pattern te for i, snapshot := range stressTest.snapshots { wg.Add(1) - go func(i int, snapshot *SnapshotResource) { + go func(i int, snapshot *storageframework.SnapshotResource) { defer ginkgo.GinkgoRecover() defer wg.Done() @@ -229,7 +229,7 @@ func (t *snapshottableStressTestSuite) DefineTests(driver TestDriver, pattern te for i, volume := range stressTest.volumes { wg.Add(1) - go func(i int, volume *VolumeResource) { + go func(i int, volume *storageframework.VolumeResource) { defer ginkgo.GinkgoRecover() defer wg.Done() @@ -242,7 +242,7 @@ func (t *snapshottableStressTestSuite) DefineTests(driver 
TestDriver, pattern te } wg.Wait() - errs = append(errs, tryFunc(stressTest.driverCleanup)) + errs = append(errs, storageutils.TryFunc(stressTest.driverCleanup)) framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resources") } @@ -275,7 +275,7 @@ func (t *snapshottableStressTestSuite) DefineTests(driver TestDriver, pattern te return default: framework.Logf("Pod-%d [%s], Iteration %d/%d", podIndex, pod.Name, snapshotIndex, stressTest.testOptions.NumSnapshots-1) - snapshot := CreateSnapshotResource(snapshottableDriver, stressTest.config, pattern, volume.Pvc.GetName(), volume.Pvc.GetNamespace(), f.Timeouts) + snapshot := storageframework.CreateSnapshotResource(snapshottableDriver, stressTest.config, pattern, volume.Pvc.GetName(), volume.Pvc.GetNamespace(), f.Timeouts) stressTest.snapshotsMutex.Lock() defer stressTest.snapshotsMutex.Unlock() stressTest.snapshots = append(stressTest.snapshots, snapshot) diff --git a/test/e2e/storage/testsuites/subpath.go b/test/e2e/storage/testsuites/subpath.go index e315b153a74..27e1c2bb2d4 100644 --- a/test/e2e/storage/testsuites/subpath.go +++ b/test/e2e/storage/testsuites/subpath.go @@ -39,8 +39,9 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" - "k8s.io/kubernetes/test/e2e/storage/testpatterns" + storageframework "k8s.io/kubernetes/test/e2e/storage/framework" "k8s.io/kubernetes/test/e2e/storage/utils" + storageutils "k8s.io/kubernetes/test/e2e/storage/utils" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -55,22 +56,16 @@ var ( ) type subPathTestSuite struct { - tsInfo TestSuiteInfo + tsInfo storageframework.TestSuiteInfo } -var _ TestSuite = &subPathTestSuite{} - -// InitSubPathTestSuite returns subPathTestSuite that implements TestSuite interface -func InitSubPathTestSuite() TestSuite { +// InitCustomSubPathTestSuite returns subPathTestSuite that implements TestSuite interface +// using custom test patterns +func InitCustomSubPathTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite { return &subPathTestSuite{ - tsInfo: TestSuiteInfo{ - Name: "subPath", - TestPatterns: []testpatterns.TestPattern{ - testpatterns.DefaultFsInlineVolume, - testpatterns.DefaultFsPreprovisionedPV, - testpatterns.DefaultFsDynamicPV, - testpatterns.NtfsDynamicPV, - }, + tsInfo: storageframework.TestSuiteInfo{ + Name: "subPath", + TestPatterns: patterns, SupportedSizeRange: e2evolume.SizeRange{ Min: "1Mi", }, @@ -78,23 +73,35 @@ func InitSubPathTestSuite() TestSuite { } } -func (s *subPathTestSuite) GetTestSuiteInfo() TestSuiteInfo { +// InitSubPathTestSuite returns subPathTestSuite that implements TestSuite interface +// using testsuite default patterns +func InitSubPathTestSuite() storageframework.TestSuite { + patterns := []storageframework.TestPattern{ + storageframework.DefaultFsInlineVolume, + storageframework.DefaultFsPreprovisionedPV, + storageframework.DefaultFsDynamicPV, + storageframework.NtfsDynamicPV, + } + return InitCustomSubPathTestSuite(patterns) +} + +func (s *subPathTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo { return s.tsInfo } -func (s *subPathTestSuite) SkipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) { - skipVolTypePatterns(pattern, driver, testpatterns.NewVolTypeMap( - testpatterns.PreprovisionedPV, - testpatterns.InlineVolume)) +func (s *subPathTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern 
storageframework.TestPattern) { + skipVolTypePatterns(pattern, driver, storageframework.NewVolTypeMap( + storageframework.PreprovisionedPV, + storageframework.InlineVolume)) } -func (s *subPathTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) { +func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { type local struct { - config *PerTestConfig + config *storageframework.PerTestConfig driverCleanup func() hostExec utils.HostExec - resource *VolumeResource + resource *storageframework.VolumeResource roVolSource *v1.VolumeSource pod *v1.Pod formatPod *v1.Pod @@ -106,13 +113,9 @@ func (s *subPathTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T } var l local - // No preconditions to test. Normally they would be in a BeforeEach here. - - // This intentionally comes after checking the preconditions because it - // registers its own BeforeEach which creates the namespace. Beware that it - // also registers an AfterEach which renders f unusable. Any code using + // Beware that it also registers an AfterEach which renders f unusable. Any code using // f must run inside an It or Context callback. - f := framework.NewFrameworkWithCustomTimeouts("provisioning", getDriverTimeouts(driver)) + f := framework.NewFrameworkWithCustomTimeouts("provisioning", storageframework.GetDriverTimeouts(driver)) init := func() { l = local{} @@ -121,24 +124,24 @@ func (s *subPathTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T l.config, l.driverCleanup = driver.PrepareTest(f) l.migrationCheck = newMigrationOpCheck(f.ClientSet, driver.GetDriverInfo().InTreePluginName) testVolumeSizeRange := s.GetTestSuiteInfo().SupportedSizeRange - l.resource = CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange) + l.resource = storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange) l.hostExec = utils.NewHostExec(f) // Setup subPath test dependent resource volType := pattern.VolType switch volType { - case testpatterns.InlineVolume: - if iDriver, ok := driver.(InlineVolumeTestDriver); ok { + case storageframework.InlineVolume: + if iDriver, ok := driver.(storageframework.InlineVolumeTestDriver); ok { l.roVolSource = iDriver.GetVolumeSource(true, pattern.FsType, l.resource.Volume) } - case testpatterns.PreprovisionedPV: + case storageframework.PreprovisionedPV: l.roVolSource = &v1.VolumeSource{ PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ ClaimName: l.resource.Pvc.Name, ReadOnly: true, }, } - case testpatterns.DynamicPV: + case storageframework.DynamicPV: l.roVolSource = &v1.VolumeSource{ PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ ClaimName: l.resource.Pvc.Name, @@ -175,7 +178,7 @@ func (s *subPathTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T l.resource = nil } - errs = append(errs, tryFunc(l.driverCleanup)) + errs = append(errs, storageutils.TryFunc(l.driverCleanup)) l.driverCleanup = nil framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource") diff --git a/test/e2e/storage/testsuites/topology.go b/test/e2e/storage/testsuites/topology.go index 857af03e450..7a23f884599 100644 --- a/test/e2e/storage/testsuites/topology.go +++ b/test/e2e/storage/testsuites/topology.go @@ -35,84 +35,85 @@ import ( e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" - 
"k8s.io/kubernetes/test/e2e/storage/testpatterns" + storageframework "k8s.io/kubernetes/test/e2e/storage/framework" + storageutils "k8s.io/kubernetes/test/e2e/storage/utils" ) type topologyTestSuite struct { - tsInfo TestSuiteInfo + tsInfo storageframework.TestSuiteInfo } type topologyTest struct { - config *PerTestConfig + config *storageframework.PerTestConfig driverCleanup func() migrationCheck *migrationOpCheck - resource VolumeResource + resource storageframework.VolumeResource pod *v1.Pod allTopologies []topology } type topology map[string]string -var _ TestSuite = &topologyTestSuite{} - -// InitTopologyTestSuite returns topologyTestSuite that implements TestSuite interface -func InitTopologyTestSuite() TestSuite { +// InitCustomTopologyTestSuite returns topologyTestSuite that implements TestSuite interface +// using custom test patterns +func InitCustomTopologyTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite { return &topologyTestSuite{ - tsInfo: TestSuiteInfo{ - Name: "topology", - TestPatterns: []testpatterns.TestPattern{ - testpatterns.TopologyImmediate, - testpatterns.TopologyDelayed, - }, + tsInfo: storageframework.TestSuiteInfo{ + Name: "topology", + TestPatterns: patterns, }, } } -func (t *topologyTestSuite) GetTestSuiteInfo() TestSuiteInfo { +// InitTopologyTestSuite returns topologyTestSuite that implements TestSuite interface +// using testsuite default patterns +func InitTopologyTestSuite() storageframework.TestSuite { + patterns := []storageframework.TestPattern{ + storageframework.TopologyImmediate, + storageframework.TopologyDelayed, + } + return InitCustomTopologyTestSuite(patterns) +} + +func (t *topologyTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo { return t.tsInfo } -func (t *topologyTestSuite) SkipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) { +func (t *topologyTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { + dInfo := driver.GetDriverInfo() + var ok bool + _, ok = driver.(storageframework.DynamicPVTestDriver) + if !ok { + e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType) + } + + if !dInfo.Capabilities[storageframework.CapTopology] { + e2eskipper.Skipf("Driver %q does not support topology - skipping", dInfo.Name) + } } -func (t *topologyTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) { +func (t *topologyTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { var ( dInfo = driver.GetDriverInfo() - dDriver DynamicPVTestDriver + dDriver storageframework.DynamicPVTestDriver cs clientset.Interface err error ) - ginkgo.BeforeEach(func() { - // Check preconditions. - ok := false - dDriver, ok = driver.(DynamicPVTestDriver) - if !ok { - e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType) - } - - if !dInfo.Capabilities[CapTopology] { - e2eskipper.Skipf("Driver %q does not support topology - skipping", dInfo.Name) - } - - }) - - // This intentionally comes after checking the preconditions because it - // registers its own BeforeEach which creates the namespace. Beware that it - // also registers an AfterEach which renders f unusable. Any code using + // Beware that it also registers an AfterEach which renders f unusable. Any code using // f must run inside an It or Context callback. 
- f := framework.NewFrameworkWithCustomTimeouts("topology", getDriverTimeouts(driver)) + f := framework.NewFrameworkWithCustomTimeouts("topology", storageframework.GetDriverTimeouts(driver)) init := func() topologyTest { - + dDriver, _ = driver.(storageframework.DynamicPVTestDriver) l := topologyTest{} // Now do the more expensive test initialization. l.config, l.driverCleanup = driver.PrepareTest(f) - l.resource = VolumeResource{ + l.resource = storageframework.VolumeResource{ Config: l.config, Pattern: pattern, } @@ -141,7 +142,7 @@ func (t *topologyTestSuite) DefineTests(driver TestDriver, pattern testpatterns. testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange driverVolumeSizeRange := dDriver.GetDriverInfo().SupportedSizeRange - claimSize, err := getSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange) + claimSize, err := storageutils.GetSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange) framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, driverVolumeSizeRange) l.resource.Pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ ClaimSize: claimSize, @@ -154,7 +155,7 @@ func (t *topologyTestSuite) DefineTests(driver TestDriver, pattern testpatterns. cleanup := func(l topologyTest) { t.CleanupResources(cs, &l) - err := tryFunc(l.driverCleanup) + err := storageutils.TryFunc(l.driverCleanup) l.driverCleanup = nil framework.ExpectNoError(err, "while cleaning up driver") diff --git a/test/e2e/storage/testsuites/volume_expand.go b/test/e2e/storage/testsuites/volume_expand.go index 04298b60cf0..f847397d757 100644 --- a/test/e2e/storage/testsuites/volume_expand.go +++ b/test/e2e/storage/testsuites/volume_expand.go @@ -34,7 +34,8 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" - "k8s.io/kubernetes/test/e2e/storage/testpatterns" + storageframework "k8s.io/kubernetes/test/e2e/storage/framework" + storageutils "k8s.io/kubernetes/test/e2e/storage/utils" ) const ( @@ -52,24 +53,16 @@ const ( ) type volumeExpandTestSuite struct { - tsInfo TestSuiteInfo + tsInfo storageframework.TestSuiteInfo } -var _ TestSuite = &volumeExpandTestSuite{} - -// InitVolumeExpandTestSuite returns volumeExpandTestSuite that implements TestSuite interface -func InitVolumeExpandTestSuite() TestSuite { +// InitCustomVolumeExpandTestSuite returns volumeExpandTestSuite that implements TestSuite interface +// using custom test patterns +func InitCustomVolumeExpandTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite { return &volumeExpandTestSuite{ - tsInfo: TestSuiteInfo{ - Name: "volume-expand", - TestPatterns: []testpatterns.TestPattern{ - testpatterns.DefaultFsDynamicPV, - testpatterns.BlockVolModeDynamicPV, - testpatterns.DefaultFsDynamicPVAllowExpansion, - testpatterns.BlockVolModeDynamicPVAllowExpansion, - testpatterns.NtfsDynamicPV, - testpatterns.NtfsDynamicPVAllowExpansion, - }, + tsInfo: storageframework.TestSuiteInfo{ + Name: "volume-expand", + TestPatterns: patterns, SupportedSizeRange: e2evolume.SizeRange{ Min: "1Gi", }, @@ -77,19 +70,41 @@ func InitVolumeExpandTestSuite() TestSuite { } } -func (v *volumeExpandTestSuite) GetTestSuiteInfo() TestSuiteInfo { +// InitVolumeExpandTestSuite returns volumeExpandTestSuite that implements TestSuite interface +// using testsuite default patterns +func InitVolumeExpandTestSuite() 
storageframework.TestSuite { + patterns := []storageframework.TestPattern{ + storageframework.DefaultFsDynamicPV, + storageframework.BlockVolModeDynamicPV, + storageframework.DefaultFsDynamicPVAllowExpansion, + storageframework.BlockVolModeDynamicPVAllowExpansion, + storageframework.NtfsDynamicPV, + storageframework.NtfsDynamicPVAllowExpansion, + } + return InitCustomVolumeExpandTestSuite(patterns) +} + +func (v *volumeExpandTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo { return v.tsInfo } -func (v *volumeExpandTestSuite) SkipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) { +func (v *volumeExpandTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { + // Check preconditions. + if !driver.GetDriverInfo().Capabilities[storageframework.CapControllerExpansion] { + e2eskipper.Skipf("Driver %q does not support volume expansion - skipping", driver.GetDriverInfo().Name) + } + // Check preconditions. + if !driver.GetDriverInfo().Capabilities[storageframework.CapBlock] && pattern.VolMode == v1.PersistentVolumeBlock { + e2eskipper.Skipf("Driver %q does not support block volume mode - skipping", driver.GetDriverInfo().Name) + } } -func (v *volumeExpandTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) { +func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { type local struct { - config *PerTestConfig + config *storageframework.PerTestConfig driverCleanup func() - resource *VolumeResource + resource *storageframework.VolumeResource pod *v1.Pod pod2 *v1.Pod @@ -97,21 +112,9 @@ func (v *volumeExpandTestSuite) DefineTests(driver TestDriver, pattern testpatte } var l local - ginkgo.BeforeEach(func() { - // Check preconditions. - if !driver.GetDriverInfo().Capabilities[CapBlock] && pattern.VolMode == v1.PersistentVolumeBlock { - e2eskipper.Skipf("Driver %q does not support block volume mode - skipping", driver.GetDriverInfo().Name) - } - if !driver.GetDriverInfo().Capabilities[CapControllerExpansion] { - e2eskipper.Skipf("Driver %q does not support volume expansion - skipping", driver.GetDriverInfo().Name) - } - }) - - // This intentionally comes after checking the preconditions because it - // registers its own BeforeEach which creates the namespace. Beware that it - // also registers an AfterEach which renders f unusable. Any code using + // Beware that it also registers an AfterEach which renders f unusable. Any code using // f must run inside an It or Context callback. 
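One payoff of the InitCustom* split: a caller interested in, say, only block-mode expansion can register the suite with a trimmed pattern list. A hypothetical caller, using pattern names declared above:

blockOnlyExpand := InitCustomVolumeExpandTestSuite([]storageframework.TestPattern{
	storageframework.BlockVolModeDynamicPV,
	storageframework.BlockVolModeDynamicPVAllowExpansion,
})
_ = blockOnlyExpand // would be handed to the suite runner together with a TestDriver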
- f := framework.NewFrameworkWithCustomTimeouts("volume-expand", getDriverTimeouts(driver)) + f := framework.NewFrameworkWithCustomTimeouts("volume-expand", storageframework.GetDriverTimeouts(driver)) init := func() { l = local{} @@ -120,7 +123,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver TestDriver, pattern testpatte l.config, l.driverCleanup = driver.PrepareTest(f) l.migrationCheck = newMigrationOpCheck(f.ClientSet, driver.GetDriverInfo().InTreePluginName) testVolumeSizeRange := v.GetTestSuiteInfo().SupportedSizeRange - l.resource = CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange) + l.resource = storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange) } cleanup := func() { @@ -144,7 +147,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver TestDriver, pattern testpatte l.resource = nil } - errs = append(errs, tryFunc(l.driverCleanup)) + errs = append(errs, storageutils.TryFunc(l.driverCleanup)) l.driverCleanup = nil framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource") l.migrationCheck.validateMigrationVolumeOpCounts() diff --git a/test/e2e/storage/testsuites/volume_io.go b/test/e2e/storage/testsuites/volume_io.go index 17064063f44..5df70a78537 100644 --- a/test/e2e/storage/testsuites/volume_io.go +++ b/test/e2e/storage/testsuites/volume_io.go @@ -40,37 +40,32 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" - "k8s.io/kubernetes/test/e2e/storage/testpatterns" - "k8s.io/kubernetes/test/e2e/storage/utils" + storageframework "k8s.io/kubernetes/test/e2e/storage/framework" + storageutils "k8s.io/kubernetes/test/e2e/storage/utils" ) // MD5 hashes of the test file corresponding to each file size. // Test files are generated in testVolumeIO() // If test file generation algorithm changes, these must be recomputed. 
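Recomputing those fixtures offline is mechanical, assuming the content is generated exactly as testVolumeIO/writeToFile below produce it: a fixed 1 KiB block repeated up to the file size. A sketch under that assumption (imports: crypto/md5, fmt, io, strings; hash.Hash is an io.Writer):

func expectedHash(fsize int64) string {
	blk := strings.Repeat("abcdefghijklmnopqrstuvwxyz123456", 32) // 1 KiB, the block testVolumeIO writes
	h := md5.New()
	for written := int64(0); written < fsize; written += int64(len(blk)) {
		io.WriteString(h, blk)
	}
	return fmt.Sprintf("%x", h.Sum(nil))
}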
var md5hashes = map[int64]string{ - testpatterns.FileSizeSmall: "5c34c2813223a7ca05a3c2f38c0d1710", - testpatterns.FileSizeMedium: "f2fa202b1ffeedda5f3a58bd1ae81104", - testpatterns.FileSizeLarge: "8d763edc71bd16217664793b5a15e403", + storageframework.FileSizeSmall: "5c34c2813223a7ca05a3c2f38c0d1710", + storageframework.FileSizeMedium: "f2fa202b1ffeedda5f3a58bd1ae81104", + storageframework.FileSizeLarge: "8d763edc71bd16217664793b5a15e403", } const mountPath = "/opt" type volumeIOTestSuite struct { - tsInfo TestSuiteInfo + tsInfo storageframework.TestSuiteInfo } -var _ TestSuite = &volumeIOTestSuite{} - -// InitVolumeIOTestSuite returns volumeIOTestSuite that implements TestSuite interface -func InitVolumeIOTestSuite() TestSuite { +// InitCustomVolumeIOTestSuite returns volumeIOTestSuite that implements TestSuite interface +// using custom test patterns +func InitCustomVolumeIOTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite { return &volumeIOTestSuite{ - tsInfo: TestSuiteInfo{ - Name: "volumeIO", - TestPatterns: []testpatterns.TestPattern{ - testpatterns.DefaultFsInlineVolume, - testpatterns.DefaultFsPreprovisionedPV, - testpatterns.DefaultFsDynamicPV, - }, + tsInfo: storageframework.TestSuiteInfo{ + Name: "volumeIO", + TestPatterns: patterns, SupportedSizeRange: e2evolume.SizeRange{ Min: "1Mi", }, @@ -78,22 +73,33 @@ func InitVolumeIOTestSuite() TestSuite { } } -func (t *volumeIOTestSuite) GetTestSuiteInfo() TestSuiteInfo { +// InitVolumeIOTestSuite returns volumeIOTestSuite that implements TestSuite interface +// using testsuite default patterns +func InitVolumeIOTestSuite() storageframework.TestSuite { + patterns := []storageframework.TestPattern{ + storageframework.DefaultFsInlineVolume, + storageframework.DefaultFsPreprovisionedPV, + storageframework.DefaultFsDynamicPV, + } + return InitCustomVolumeIOTestSuite(patterns) +} + +func (t *volumeIOTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo { return t.tsInfo } -func (t *volumeIOTestSuite) SkipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) { - skipVolTypePatterns(pattern, driver, testpatterns.NewVolTypeMap( - testpatterns.PreprovisionedPV, - testpatterns.InlineVolume)) +func (t *volumeIOTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { + skipVolTypePatterns(pattern, driver, storageframework.NewVolTypeMap( + storageframework.PreprovisionedPV, + storageframework.InlineVolume)) } -func (t *volumeIOTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) { +func (t *volumeIOTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { type local struct { - config *PerTestConfig + config *storageframework.PerTestConfig driverCleanup func() - resource *VolumeResource + resource *storageframework.VolumeResource migrationCheck *migrationOpCheck } @@ -102,13 +108,9 @@ func (t *volumeIOTestSuite) DefineTests(driver TestDriver, pattern testpatterns. l local ) - // No preconditions to test. Normally they would be in a BeforeEach here. - - // This intentionally comes after checking the preconditions because it - // registers its own BeforeEach which creates the namespace. Beware that it - // also registers an AfterEach which renders f unusable. Any code using + // Beware that it also registers an AfterEach which renders f unusable. Any code using // f must run inside an It or Context callback. 
- f := framework.NewFrameworkWithCustomTimeouts("volumeio", getDriverTimeouts(driver)) + f := framework.NewFrameworkWithCustomTimeouts("volumeio", storageframework.GetDriverTimeouts(driver)) init := func() { l = local{} @@ -118,7 +120,7 @@ func (t *volumeIOTestSuite) DefineTests(driver TestDriver, pattern testpatterns. l.migrationCheck = newMigrationOpCheck(f.ClientSet, dInfo.InTreePluginName) testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange - l.resource = CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange) + l.resource = storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange) if l.resource.VolSource == nil { e2eskipper.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name) } @@ -133,7 +135,7 @@ func (t *volumeIOTestSuite) DefineTests(driver TestDriver, pattern testpatterns. } if l.driverCleanup != nil { - errs = append(errs, tryFunc(l.driverCleanup)) + errs = append(errs, storageutils.TryFunc(l.driverCleanup)) l.driverCleanup = nil } @@ -149,23 +151,23 @@ func (t *volumeIOTestSuite) DefineTests(driver TestDriver, pattern testpatterns. fileSizes := createFileSizes(dInfo.MaxFileSize) testFile := fmt.Sprintf("%s_io_test_%s", dInfo.Name, f.Namespace.Name) var fsGroup *int64 - if !framework.NodeOSDistroIs("windows") && dInfo.Capabilities[CapFsGroup] { + if !framework.NodeOSDistroIs("windows") && dInfo.Capabilities[storageframework.CapFsGroup] { fsGroupVal := int64(1234) fsGroup = &fsGroupVal } podSec := v1.PodSecurityContext{ FSGroup: fsGroup, } - err := testVolumeIO(f, cs, convertTestConfig(l.config), *l.resource.VolSource, &podSec, testFile, fileSizes) + err := testVolumeIO(f, cs, storageframework.ConvertTestConfig(l.config), *l.resource.VolSource, &podSec, testFile, fileSizes) framework.ExpectNoError(err) }) } func createFileSizes(maxFileSize int64) []int64 { allFileSizes := []int64{ - testpatterns.FileSizeSmall, - testpatterns.FileSizeMedium, - testpatterns.FileSizeLarge, + storageframework.FileSizeSmall, + storageframework.FileSizeMedium, + storageframework.FileSizeLarge, } fileSizes := []int64{} @@ -247,9 +249,9 @@ func makePodSpec(config e2evolume.TestConfig, initCmd string, volsrc v1.VolumeSo // Write `fsize` bytes to `fpath` in the pod, using dd and the `ddInput` file. func writeToFile(f *framework.Framework, pod *v1.Pod, fpath, ddInput string, fsize int64) error { ginkgo.By(fmt.Sprintf("writing %d bytes to test file %s", fsize, fpath)) - loopCnt := fsize / testpatterns.MinFileSize - writeCmd := fmt.Sprintf("i=0; while [ $i -lt %d ]; do dd if=%s bs=%d >>%s 2>/dev/null; let i+=1; done", loopCnt, ddInput, testpatterns.MinFileSize, fpath) - stdout, stderr, err := utils.PodExec(f, pod, writeCmd) + loopCnt := fsize / storageframework.MinFileSize + writeCmd := fmt.Sprintf("i=0; while [ $i -lt %d ]; do dd if=%s bs=%d >>%s 2>/dev/null; let i+=1; done", loopCnt, ddInput, storageframework.MinFileSize, fpath) + stdout, stderr, err := e2evolume.PodExec(f, pod, writeCmd) if err != nil { return fmt.Errorf("error writing to volume using %q: %s\nstdout: %s\nstderr: %s", writeCmd, err, stdout, stderr) } @@ -259,7 +261,7 @@ func writeToFile(f *framework.Framework, pod *v1.Pod, fpath, ddInput string, fsi // Verify that the test file is the expected size and contains the expected content. 
func verifyFile(f *framework.Framework, pod *v1.Pod, fpath string, expectSize int64, ddInput string) error { ginkgo.By("verifying file size") - rtnstr, stderr, err := utils.PodExec(f, pod, fmt.Sprintf("stat -c %%s %s", fpath)) + rtnstr, stderr, err := e2evolume.PodExec(f, pod, fmt.Sprintf("stat -c %%s %s", fpath)) if err != nil || rtnstr == "" { return fmt.Errorf("unable to get file size via `stat %s`: %v\nstdout: %s\nstderr: %s", fpath, err, rtnstr, stderr) } @@ -272,7 +274,7 @@ func verifyFile(f *framework.Framework, pod *v1.Pod, fpath string, expectSize in } ginkgo.By("verifying file hash") - rtnstr, stderr, err = utils.PodExec(f, pod, fmt.Sprintf("md5sum %s | cut -d' ' -f1", fpath)) + rtnstr, stderr, err = e2evolume.PodExec(f, pod, fmt.Sprintf("md5sum %s | cut -d' ' -f1", fpath)) if err != nil { return fmt.Errorf("unable to test file hash via `md5sum %s`: %v\nstdout: %s\nstderr: %s", fpath, err, rtnstr, stderr) } @@ -293,7 +295,7 @@ func verifyFile(f *framework.Framework, pod *v1.Pod, fpath string, expectSize in // Delete `fpath` to save some disk space on host. Delete errors are logged but ignored. func deleteFile(f *framework.Framework, pod *v1.Pod, fpath string) { ginkgo.By(fmt.Sprintf("deleting test file %s...", fpath)) - stdout, stderr, err := utils.PodExec(f, pod, fmt.Sprintf("rm -f %s", fpath)) + stdout, stderr, err := e2evolume.PodExec(f, pod, fmt.Sprintf("rm -f %s", fpath)) if err != nil { // keep going, the test dir will be deleted when the volume is unmounted framework.Logf("unable to delete test file %s: %v\nerror ignored, continuing test\nstdout: %s\nstderr: %s", fpath, err, stdout, stderr) @@ -309,7 +311,7 @@ func deleteFile(f *framework.Framework, pod *v1.Pod, fpath string) { func testVolumeIO(f *framework.Framework, cs clientset.Interface, config e2evolume.TestConfig, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext, file string, fsizes []int64) (err error) { ddInput := filepath.Join(mountPath, fmt.Sprintf("%s-%s-dd_if", config.Prefix, config.Namespace)) writeBlk := strings.Repeat("abcdefghijklmnopqrstuvwxyz123456", 32) // 1KiB value - loopCnt := testpatterns.MinFileSize / int64(len(writeBlk)) + loopCnt := storageframework.MinFileSize / int64(len(writeBlk)) // initContainer cmd to create and fill dd's input file. The initContainer is used to create // the `dd` input file which is currently 1MiB. Rather than store a 1MiB go value, a loop is // used to create a 1MiB file in the target directory. 
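To make the write path concrete: writeToFile above copies the whole dd input file once per loop iteration, so loopCnt is fsize/MinFileSize. A worked instance, assuming MinFileSize is 1 MiB (1048576 bytes) and a 2 MiB target file; the paths here are illustrative only:

loopCnt := int64(2*1024*1024) / storageframework.MinFileSize // 2 iterations for a 2 MiB file
writeCmd := fmt.Sprintf(
	"i=0; while [ $i -lt %d ]; do dd if=%s bs=%d >>%s 2>/dev/null; let i+=1; done",
	loopCnt, "/opt/vol-io-dd_if", storageframework.MinFileSize, "/opt/testfile-2097152")
// expands to: i=0; while [ $i -lt 2 ]; do dd if=/opt/vol-io-dd_if bs=1048576 >>/opt/testfile-2097152 2>/dev/null; let i+=1; done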
@@ -346,8 +348,8 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config e2evolu // create files of the passed-in file sizes and verify test file size and content for _, fsize := range fsizes { // file sizes must be a multiple of `MinFileSize` - if math.Mod(float64(fsize), float64(testpatterns.MinFileSize)) != 0 { - fsize = fsize/testpatterns.MinFileSize + testpatterns.MinFileSize + if math.Mod(float64(fsize), float64(storageframework.MinFileSize)) != 0 { + fsize = (fsize/storageframework.MinFileSize + 1) * storageframework.MinFileSize } fpath := filepath.Join(mountPath, fmt.Sprintf("%s-%d", file, fsize)) defer func() { diff --git a/test/e2e/storage/testsuites/volume_stress.go b/test/e2e/storage/testsuites/volume_stress.go index eca91f0b8c8..801e8186968 100644 --- a/test/e2e/storage/testsuites/volume_stress.go +++ b/test/e2e/storage/testsuites/volume_stress.go @@ -32,84 +32,87 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" - "k8s.io/kubernetes/test/e2e/storage/testpatterns" + storageframework "k8s.io/kubernetes/test/e2e/storage/framework" + storageutils "k8s.io/kubernetes/test/e2e/storage/utils" ) type volumeStressTestSuite struct { - tsInfo TestSuiteInfo + tsInfo storageframework.TestSuiteInfo } type volumeStressTest struct { - config *PerTestConfig + config *storageframework.PerTestConfig driverCleanup func() migrationCheck *migrationOpCheck - resources []*VolumeResource + resources []*storageframework.VolumeResource pods []*v1.Pod // stop and wait for any async routines wg sync.WaitGroup ctx context.Context cancel context.CancelFunc - testOptions StressTestOptions + testOptions storageframework.StressTestOptions } -var _ TestSuite = &volumeStressTestSuite{} +var _ storageframework.TestSuite = &volumeStressTestSuite{} -// InitVolumeStressTestSuite returns volumeStressTestSuite that implements TestSuite interface -func InitVolumeStressTestSuite() TestSuite { +// InitCustomVolumeStressTestSuite returns volumeStressTestSuite that implements TestSuite interface +// using custom test patterns +func InitCustomVolumeStressTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite { return &volumeStressTestSuite{ - tsInfo: TestSuiteInfo{ - Name: "volume-stress", - TestPatterns: []testpatterns.TestPattern{ - testpatterns.DefaultFsDynamicPV, - testpatterns.BlockVolModeDynamicPV, - }, + tsInfo: storageframework.TestSuiteInfo{ + Name: "volume-stress", + TestPatterns: patterns, }, } } -func (t *volumeStressTestSuite) GetTestSuiteInfo() TestSuiteInfo { +// InitVolumeStressTestSuite returns volumeStressTestSuite that implements TestSuite interface +// using testsuite default patterns +func InitVolumeStressTestSuite() storageframework.TestSuite { + patterns := []storageframework.TestPattern{ + storageframework.DefaultFsDynamicPV, + storageframework.BlockVolModeDynamicPV, + } + return InitCustomVolumeStressTestSuite(patterns) +}
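Reviewer note: the Init/InitCustom split above is the pattern this PR applies to every suite — the default constructor just delegates to the custom one with the default pattern list. A sketch of how an out-of-tree driver could now run this suite with its own patterns (the wrapper variable and the `testsuites` import are assumptions for illustration, not part of this patch):

```go
// Hypothetical external consumer: run the stress suite against a single
// dynamically provisioned default-fs pattern instead of the suite defaults.
stressSuite := func() storageframework.TestSuite {
	return testsuites.InitCustomVolumeStressTestSuite([]storageframework.TestPattern{
		storageframework.DefaultFsDynamicPV,
	})
}
```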
+ +func (t *volumeStressTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo { return t.tsInfo } -func (t *volumeStressTestSuite) SkipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) { +func (t *volumeStressTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { + dInfo := driver.GetDriverInfo() + if dInfo.StressTestOptions == nil { + e2eskipper.Skipf("Driver %s doesn't specify stress test options -- skipping", dInfo.Name) + } + if dInfo.StressTestOptions.NumPods <= 0 { + framework.Failf("NumPods in stress test options must be a positive integer, received: %d", dInfo.StressTestOptions.NumPods) + } + if dInfo.StressTestOptions.NumRestarts <= 0 { + framework.Failf("NumRestarts in stress test options must be a positive integer, received: %d", dInfo.StressTestOptions.NumRestarts) + } + + if _, ok := driver.(storageframework.DynamicPVTestDriver); !ok { + e2eskipper.Skipf("Driver %s doesn't implement DynamicPVTestDriver -- skipping", dInfo.Name) + } + if !driver.GetDriverInfo().Capabilities[storageframework.CapBlock] && pattern.VolMode == v1.PersistentVolumeBlock { + e2eskipper.Skipf("Driver %q does not support block volume mode - skipping", dInfo.Name) + } } -func (t *volumeStressTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) { +func (t *volumeStressTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { var ( dInfo = driver.GetDriverInfo() cs clientset.Interface l *volumeStressTest ) - // Check preconditions before setting up namespace via framework below. - ginkgo.BeforeEach(func() { - if dInfo.StressTestOptions == nil { - e2eskipper.Skipf("Driver %s doesn't specify stress test options -- skipping", dInfo.Name) - } - if dInfo.StressTestOptions.NumPods <= 0 { - framework.Failf("NumPods in stress test options must be a positive integer, received: %d", dInfo.StressTestOptions.NumPods) - } - if dInfo.StressTestOptions.NumRestarts <= 0 { - framework.Failf("NumRestarts in stress test options must be a positive integer, received: %d", dInfo.StressTestOptions.NumRestarts) - } - - if _, ok := driver.(DynamicPVTestDriver); !ok { - e2eskipper.Skipf("Driver %s doesn't implement DynamicPVTestDriver -- skipping", dInfo.Name) - } - - if !driver.GetDriverInfo().Capabilities[CapBlock] && pattern.VolMode == v1.PersistentVolumeBlock { - e2eskipper.Skipf("Driver %q does not support block volume mode - skipping", dInfo.Name) - } - }) - - // This intentionally comes after checking the preconditions because it - // registers its own BeforeEach which creates the namespace. Beware that it - // also registers an AfterEach which renders f unusable. Any code using + // Beware that it also registers an AfterEach which renders f unusable. Any code using // f must run inside an It or Context callback. - f := framework.NewFrameworkWithCustomTimeouts("stress", getDriverTimeouts(driver)) + f := framework.NewFrameworkWithCustomTimeouts("stress", storageframework.GetDriverTimeouts(driver)) init := func() { cs = f.ClientSet @@ -118,7 +121,7 @@ func (t *volumeStressTestSuite) DefineTests(driver TestDriver, pattern testpatte // Now do the more expensive test initialization.
l.config, l.driverCleanup = driver.PrepareTest(f) l.migrationCheck = newMigrationOpCheck(f.ClientSet, dInfo.InTreePluginName) - l.resources = []*VolumeResource{} + l.resources = []*storageframework.VolumeResource{} l.pods = []*v1.Pod{} l.testOptions = *dInfo.StressTestOptions l.ctx, l.cancel = context.WithCancel(context.Background()) @@ -127,7 +130,7 @@ func (t *volumeStressTestSuite) DefineTests(driver TestDriver, pattern testpatte createPodsAndVolumes := func() { for i := 0; i < l.testOptions.NumPods; i++ { framework.Logf("Creating resources for pod %v/%v", i, l.testOptions.NumPods-1) - r := CreateVolumeResource(driver, l.config, pattern, t.GetTestSuiteInfo().SupportedSizeRange) + r := storageframework.CreateVolumeResource(driver, l.config, pattern, t.GetTestSuiteInfo().SupportedSizeRange) l.resources = append(l.resources, r) podConfig := e2epod.Config{ NS: f.Namespace.Name, @@ -158,7 +161,7 @@ func (t *volumeStressTestSuite) DefineTests(driver TestDriver, pattern testpatte errs = append(errs, resource.CleanupResource()) } - errs = append(errs, tryFunc(l.driverCleanup)) + errs = append(errs, storageutils.TryFunc(l.driverCleanup)) framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource") l.migrationCheck.validateMigrationVolumeOpCounts() } diff --git a/test/e2e/storage/testsuites/volumelimits.go b/test/e2e/storage/testsuites/volumelimits.go index 987f3bd2e09..772a94756fd 100644 --- a/test/e2e/storage/testsuites/volumelimits.go +++ b/test/e2e/storage/testsuites/volumelimits.go @@ -38,11 +38,12 @@ import ( e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" - "k8s.io/kubernetes/test/e2e/storage/testpatterns" + storageframework "k8s.io/kubernetes/test/e2e/storage/framework" + storageutils "k8s.io/kubernetes/test/e2e/storage/utils" ) type volumeLimitsTestSuite struct { - tsInfo TestSuiteInfo + tsInfo storageframework.TestSuiteInfo } const ( @@ -55,36 +56,44 @@ const ( csiNodeInfoTimeout = 1 * time.Minute ) -var _ TestSuite = &volumeLimitsTestSuite{} +var _ storageframework.TestSuite = &volumeLimitsTestSuite{} -// InitVolumeLimitsTestSuite returns volumeLimitsTestSuite that implements TestSuite interface -func InitVolumeLimitsTestSuite() TestSuite { +// InitCustomVolumeLimitsTestSuite returns volumeLimitsTestSuite that implements TestSuite interface +// using custom test patterns +func InitCustomVolumeLimitsTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite { return &volumeLimitsTestSuite{ - tsInfo: TestSuiteInfo{ - Name: "volumeLimits", - TestPatterns: []testpatterns.TestPattern{ - testpatterns.FsVolModeDynamicPV, - }, + tsInfo: storageframework.TestSuiteInfo{ + Name: "volumeLimits", + TestPatterns: patterns, }, } } -func (t *volumeLimitsTestSuite) GetTestSuiteInfo() TestSuiteInfo { +// InitVolumeLimitsTestSuite returns volumeLimitsTestSuite that implements TestSuite interface +// using testsuite default patterns +func InitVolumeLimitsTestSuite() storageframework.TestSuite { + patterns := []storageframework.TestPattern{ + storageframework.FsVolModeDynamicPV, + } + return InitCustomVolumeLimitsTestSuite(patterns) +} + +func (t *volumeLimitsTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo { return t.tsInfo } -func (t *volumeLimitsTestSuite) SkipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) { +func (t *volumeLimitsTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern 
storageframework.TestPattern) { } -func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) { +func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { type local struct { - config *PerTestConfig + config *storageframework.PerTestConfig testCleanup func() cs clientset.Interface ns *v1.Namespace // VolumeResource contains pv, pvc, sc, etc. of the first pod created - resource *VolumeResource + resource *storageframework.VolumeResource // All created PVCs, incl. the one in resource pvcs []*v1.PersistentVolumeClaim @@ -99,8 +108,9 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte l local ) - // No preconditions to test. Normally they would be in a BeforeEach here. - f := framework.NewFrameworkWithCustomTimeouts("volumelimits", getDriverTimeouts(driver)) + // Beware that it also registers an AfterEach which renders f unusable. Any code using + // f must run inside an It or Context callback. + f := framework.NewFrameworkWithCustomTimeouts("volumelimits", storageframework.GetDriverTimeouts(driver)) // This checks that CSIMaxVolumeLimitChecker works as expected. // A randomly chosen node should be able to handle as many CSI volumes as @@ -112,11 +122,11 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte // BEWARE: the test may create a lot of volumes and it's really slow. ginkgo.It("should support volume limits [Serial]", func() { driverInfo := driver.GetDriverInfo() - if !driverInfo.Capabilities[CapVolumeLimits] { + if !driverInfo.Capabilities[storageframework.CapVolumeLimits] { ginkgo.Skip(fmt.Sprintf("driver %s does not support volume limits", driverInfo.Name)) } - var dDriver DynamicPVTestDriver - if dDriver = driver.(DynamicPVTestDriver); dDriver == nil { + var dDriver storageframework.DynamicPVTestDriver + if dDriver = driver.(storageframework.DynamicPVTestDriver); dDriver == nil { framework.Failf("Test driver does not provide dynamically created volumes") } @@ -145,10 +155,10 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte // Create a storage class and generate a PVC. Do not instantiate the PVC yet, keep it for the last pod.
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange driverVolumeSizeRange := dDriver.GetDriverInfo().SupportedSizeRange - claimSize, err := getSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange) + claimSize, err := storageutils.GetSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange) framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, dDriver) - l.resource = CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange) + l.resource = storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange) defer func() { err := l.resource.CleanupResource() framework.ExpectNoError(err, "while cleaning up resource") @@ -306,14 +316,14 @@ func waitForAllPVCsBound(cs clientset.Interface, timeout time.Duration, pvcs []* return pvNames, nil } -func getNodeLimits(cs clientset.Interface, config *PerTestConfig, nodeName string, driverInfo *DriverInfo) (int, error) { +func getNodeLimits(cs clientset.Interface, config *storageframework.PerTestConfig, nodeName string, driverInfo *storageframework.DriverInfo) (int, error) { if len(driverInfo.InTreePluginName) == 0 { return getCSINodeLimits(cs, config, nodeName, driverInfo) } return getInTreeNodeLimits(cs, nodeName, driverInfo) } -func getInTreeNodeLimits(cs clientset.Interface, nodeName string, driverInfo *DriverInfo) (int, error) { +func getInTreeNodeLimits(cs clientset.Interface, nodeName string, driverInfo *storageframework.DriverInfo) (int, error) { node, err := cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil { return 0, err @@ -340,7 +350,7 @@ func getInTreeNodeLimits(cs clientset.Interface, nodeName string, driverInfo *Dr return int(limit.Value()), nil } -func getCSINodeLimits(cs clientset.Interface, config *PerTestConfig, nodeName string, driverInfo *DriverInfo) (int, error) { +func getCSINodeLimits(cs clientset.Interface, config *storageframework.PerTestConfig, nodeName string, driverInfo *storageframework.DriverInfo) (int, error) { // Retry with a timeout, the driver might just have been installed and kubelet takes a while to publish everything. 
var limit int err := wait.PollImmediate(2*time.Second, csiNodeInfoTimeout, func() (bool, error) { diff --git a/test/e2e/storage/testsuites/volumemode.go b/test/e2e/storage/testsuites/volumemode.go index f9229bae40c..d54c28248a7 100644 --- a/test/e2e/storage/testsuites/volumemode.go +++ b/test/e2e/storage/testsuites/volumemode.go @@ -39,8 +39,8 @@ import ( e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" - "k8s.io/kubernetes/test/e2e/storage/testpatterns" - "k8s.io/kubernetes/test/e2e/storage/utils" + storageframework "k8s.io/kubernetes/test/e2e/storage/framework" + storageutils "k8s.io/kubernetes/test/e2e/storage/utils" ) const ( @@ -49,22 +49,18 @@ const ( ) type volumeModeTestSuite struct { - tsInfo TestSuiteInfo + tsInfo storageframework.TestSuiteInfo } -var _ TestSuite = &volumeModeTestSuite{} +var _ storageframework.TestSuite = &volumeModeTestSuite{} -// InitVolumeModeTestSuite returns volumeModeTestSuite that implements TestSuite interface -func InitVolumeModeTestSuite() TestSuite { +// InitCustomVolumeModeTestSuite returns volumeModeTestSuite that implements TestSuite interface +// using custom test patterns +func InitCustomVolumeModeTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite { return &volumeModeTestSuite{ - tsInfo: TestSuiteInfo{ - Name: "volumeMode", - TestPatterns: []testpatterns.TestPattern{ - testpatterns.FsVolModePreprovisionedPV, - testpatterns.FsVolModeDynamicPV, - testpatterns.BlockVolModePreprovisionedPV, - testpatterns.BlockVolModeDynamicPV, - }, + tsInfo: storageframework.TestSuiteInfo{ + Name: "volumeMode", + TestPatterns: patterns, SupportedSizeRange: e2evolume.SizeRange{ Min: "1Mi", }, @@ -72,22 +68,34 @@ func InitVolumeModeTestSuite() TestSuite { } } -func (t *volumeModeTestSuite) GetTestSuiteInfo() TestSuiteInfo { +// InitVolumeModeTestSuite returns volumeModeTestSuite that implements TestSuite interface +// using testsuite default patterns +func InitVolumeModeTestSuite() storageframework.TestSuite { + patterns := []storageframework.TestPattern{ + storageframework.FsVolModePreprovisionedPV, + storageframework.FsVolModeDynamicPV, + storageframework.BlockVolModePreprovisionedPV, + storageframework.BlockVolModeDynamicPV, + } + return InitCustomVolumeModeTestSuite(patterns) +} + +func (t *volumeModeTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo { return t.tsInfo } -func (t *volumeModeTestSuite) SkipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) { +func (t *volumeModeTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { } -func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) { +func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { type local struct { - config *PerTestConfig + config *storageframework.PerTestConfig driverCleanup func() cs clientset.Interface ns *v1.Namespace // VolumeResource contains pv, pvc, sc, etc., owns cleaning that up - VolumeResource + storageframework.VolumeResource migrationCheck *migrationOpCheck } @@ -96,13 +104,9 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern l local ) - // No preconditions to test. Normally they would be in a BeforeEach here. 
- - // This intentionally comes after checking the preconditions because it - // registers its own BeforeEach which creates the namespace. Beware that it - // also registers an AfterEach which renders f unusable. Any code using + // Beware that it also registers an AfterEach which renders f unusable. Any code using // f must run inside an It or Context callback. - f := framework.NewFrameworkWithCustomTimeouts("volumemode", getDriverTimeouts(driver)) + f := framework.NewFrameworkWithCustomTimeouts("volumemode", storageframework.GetDriverTimeouts(driver)) init := func() { l = local{} @@ -127,22 +131,22 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern volumeNodeAffinity *v1.VolumeNodeAffinity ) - l.VolumeResource = VolumeResource{ + l.VolumeResource = storageframework.VolumeResource{ Config: l.config, Pattern: pattern, } // Create volume for pre-provisioned volume tests - l.Volume = CreateVolume(driver, l.config, pattern.VolType) + l.Volume = storageframework.CreateVolume(driver, l.config, pattern.VolType) switch pattern.VolType { - case testpatterns.PreprovisionedPV: + case storageframework.PreprovisionedPV: if pattern.VolMode == v1.PersistentVolumeBlock { scName = fmt.Sprintf("%s-%s-sc-for-block", l.ns.Name, dInfo.Name) } else if pattern.VolMode == v1.PersistentVolumeFilesystem { scName = fmt.Sprintf("%s-%s-sc-for-file", l.ns.Name, dInfo.Name) } - if pDriver, ok := driver.(PreprovisionedPVTestDriver); ok { + if pDriver, ok := driver.(storageframework.PreprovisionedPVTestDriver); ok { pvSource, volumeNodeAffinity = pDriver.GetPersistentVolumeSource(false, fsType, l.Volume) if pvSource == nil { e2eskipper.Skipf("Driver %q does not define PersistentVolumeSource - skipping", dInfo.Name) @@ -153,8 +157,8 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern l.Pv = e2epv.MakePersistentVolume(pvConfig) l.Pvc = e2epv.MakePersistentVolumeClaim(pvcConfig, l.ns.Name) } - case testpatterns.DynamicPV: - if dDriver, ok := driver.(DynamicPVTestDriver); ok { + case storageframework.DynamicPV: + if dDriver, ok := driver.(storageframework.DynamicPVTestDriver); ok { l.Sc = dDriver.GetDynamicProvisionStorageClass(l.config, fsType) if l.Sc == nil { e2eskipper.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name) @@ -162,7 +166,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern l.Sc.VolumeBindingMode = &volBindMode testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange driverVolumeSizeRange := dInfo.SupportedSizeRange - claimSize, err := getSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange) + claimSize, err := storageutils.GetSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange) framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, driverVolumeSizeRange) l.Pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ @@ -179,16 +183,16 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern cleanup := func() { var errs []error errs = append(errs, l.CleanupResource()) - errs = append(errs, tryFunc(l.driverCleanup)) + errs = append(errs, storageutils.TryFunc(l.driverCleanup)) l.driverCleanup = nil framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource") l.migrationCheck.validateMigrationVolumeOpCounts() } // We register different tests depending on the driver - isBlockSupported := dInfo.Capabilities[CapBlock] +
isBlockSupported := dInfo.Capabilities[storageframework.CapBlock] switch pattern.VolType { - case testpatterns.PreprovisionedPV: + case storageframework.PreprovisionedPV: if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported { ginkgo.It("should fail to create pod by failing to mount volume [Slow]", func() { manualInit() @@ -249,7 +253,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern }) } - case testpatterns.DynamicPV: + case storageframework.DynamicPV: if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported { ginkgo.It("should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly]", func() { manualInit() @@ -293,7 +297,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern skipTestIfBlockNotSupported(driver) init() testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange - l.VolumeResource = *CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange) + l.VolumeResource = *storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange) defer cleanup() ginkgo.By("Creating pod") @@ -350,7 +354,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern } init() testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange - l.VolumeResource = *CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange) + l.VolumeResource = *storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange) defer cleanup() ginkgo.By("Creating pod") @@ -387,7 +391,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern framework.ExpectNoError(err) ginkgo.By("Listing mounted volumes in the pod") - hostExec := utils.NewHostExec(f) + hostExec := storageutils.NewHostExec(f) defer hostExec.Cleanup() volumePaths, devicePaths, err := listPodVolumePluginDirectory(hostExec, pod, node) framework.ExpectNoError(err) @@ -469,7 +473,7 @@ func swapVolumeMode(podTemplate *v1.Pod) *v1.Pod { // Sample output: // /var/lib/kubelet/pods/a4717a30-000a-4081-a7a8-f51adf280036/volumes/kubernetes.io~secret/default-token-rphdt // /var/lib/kubelet/pods/4475b7a3-4a55-4716-9119-fd0053d9d4a6/volumeDevices/kubernetes.io~aws-ebs/pvc-5f9f80f5-c90b-4586-9966-83f91711e1c0 -func listPodVolumePluginDirectory(h utils.HostExec, pod *v1.Pod, node *v1.Node) (mounts []string, devices []string, err error) { +func listPodVolumePluginDirectory(h storageutils.HostExec, pod *v1.Pod, node *v1.Node) (mounts []string, devices []string, err error) { mountPath := filepath.Join("/var/lib/kubelet/pods/", string(pod.UID), "volumes") devicePath := filepath.Join("/var/lib/kubelet/pods/", string(pod.UID), "volumeDevices") @@ -484,7 +488,7 @@ func listPodVolumePluginDirectory(h utils.HostExec, pod *v1.Pod, node *v1.Node) return mounts, devices, nil } -func listPodDirectory(h utils.HostExec, path string, node *v1.Node) ([]string, error) { +func listPodDirectory(h storageutils.HostExec, path string, node *v1.Node) ([]string, error) { // Return no error if the directory does not exist (e.g. there are no block volumes used) _, err := h.IssueCommandWithResult("test ! 
-d "+path, node) if err == nil { diff --git a/test/e2e/storage/testsuites/volumes.go b/test/e2e/storage/testsuites/volumes.go index b3d3a27516a..e06bd7f561a 100644 --- a/test/e2e/storage/testsuites/volumes.go +++ b/test/e2e/storage/testsuites/volumes.go @@ -34,46 +34,24 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" - "k8s.io/kubernetes/test/e2e/storage/testpatterns" + storageframework "k8s.io/kubernetes/test/e2e/storage/framework" + storageutils "k8s.io/kubernetes/test/e2e/storage/utils" imageutils "k8s.io/kubernetes/test/utils/image" ) type volumesTestSuite struct { - tsInfo TestSuiteInfo + tsInfo storageframework.TestSuiteInfo } -var _ TestSuite = &volumesTestSuite{} +var _ storageframework.TestSuite = &volumesTestSuite{} -// InitVolumesTestSuite returns volumesTestSuite that implements TestSuite interface -func InitVolumesTestSuite() TestSuite { +// InitCustomVolumesTestSuite returns volumesTestSuite that implements TestSuite interface +// using custom test patterns +func InitCustomVolumesTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite { return &volumesTestSuite{ - tsInfo: TestSuiteInfo{ - Name: "volumes", - TestPatterns: []testpatterns.TestPattern{ - // Default fsType - testpatterns.DefaultFsInlineVolume, - testpatterns.DefaultFsPreprovisionedPV, - testpatterns.DefaultFsDynamicPV, - // ext3 - testpatterns.Ext3InlineVolume, - testpatterns.Ext3PreprovisionedPV, - testpatterns.Ext3DynamicPV, - // ext4 - testpatterns.Ext4InlineVolume, - testpatterns.Ext4PreprovisionedPV, - testpatterns.Ext4DynamicPV, - // xfs - testpatterns.XfsInlineVolume, - testpatterns.XfsPreprovisionedPV, - testpatterns.XfsDynamicPV, - // ntfs - testpatterns.NtfsInlineVolume, - testpatterns.NtfsPreprovisionedPV, - testpatterns.NtfsDynamicPV, - // block volumes - testpatterns.BlockVolModePreprovisionedPV, - testpatterns.BlockVolModeDynamicPV, - }, + tsInfo: storageframework.TestSuiteInfo{ + Name: "volumes", + TestPatterns: patterns, SupportedSizeRange: e2evolume.SizeRange{ Min: "1Mi", }, @@ -81,46 +59,76 @@ func InitVolumesTestSuite() TestSuite { } } -func (t *volumesTestSuite) GetTestSuiteInfo() TestSuiteInfo { +// InitVolumesTestSuite returns volumesTestSuite that implements TestSuite interface +// using testsuite default patterns +func InitVolumesTestSuite() storageframework.TestSuite { + patterns := []storageframework.TestPattern{ + // Default fsType + storageframework.DefaultFsInlineVolume, + storageframework.DefaultFsPreprovisionedPV, + storageframework.DefaultFsDynamicPV, + // ext3 + storageframework.Ext3InlineVolume, + storageframework.Ext3PreprovisionedPV, + storageframework.Ext3DynamicPV, + // ext4 + storageframework.Ext4InlineVolume, + storageframework.Ext4PreprovisionedPV, + storageframework.Ext4DynamicPV, + // xfs + storageframework.XfsInlineVolume, + storageframework.XfsPreprovisionedPV, + storageframework.XfsDynamicPV, + // ntfs + storageframework.NtfsInlineVolume, + storageframework.NtfsPreprovisionedPV, + storageframework.NtfsDynamicPV, + // block volumes + storageframework.BlockVolModePreprovisionedPV, + storageframework.BlockVolModeDynamicPV, + } + return InitCustomVolumesTestSuite(patterns) +} + +func (t *volumesTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo { return t.tsInfo } -func (t *volumesTestSuite) SkipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) { +func (t *volumesTestSuite) 
SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { + if pattern.VolMode == v1.PersistentVolumeBlock { + skipTestIfBlockNotSupported(driver) + } } -func skipExecTest(driver TestDriver) { +func skipExecTest(driver storageframework.TestDriver) { dInfo := driver.GetDriverInfo() - if !dInfo.Capabilities[CapExec] { + if !dInfo.Capabilities[storageframework.CapExec] { e2eskipper.Skipf("Driver %q does not support exec - skipping", dInfo.Name) } } -func skipTestIfBlockNotSupported(driver TestDriver) { +func skipTestIfBlockNotSupported(driver storageframework.TestDriver) { dInfo := driver.GetDriverInfo() - if !dInfo.Capabilities[CapBlock] { + if !dInfo.Capabilities[storageframework.CapBlock] { e2eskipper.Skipf("Driver %q does not provide raw block - skipping", dInfo.Name) } } -func (t *volumesTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) { +func (t *volumesTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { type local struct { - config *PerTestConfig + config *storageframework.PerTestConfig driverCleanup func() - resource *VolumeResource + resource *storageframework.VolumeResource migrationCheck *migrationOpCheck } var dInfo = driver.GetDriverInfo() var l local - // No preconditions to test. Normally they would be in a BeforeEach here. - - // This intentionally comes after checking the preconditions because it - // registers its own BeforeEach which creates the namespace. Beware that it - // also registers an AfterEach which renders f unusable. Any code using + // Beware that it also registers an AfterEach which renders f unusable. Any code using // f must run inside an It or Context callback. - f := framework.NewFrameworkWithCustomTimeouts("volume", getDriverTimeouts(driver)) + f := framework.NewFrameworkWithCustomTimeouts("volume", storageframework.GetDriverTimeouts(driver)) init := func() { l = local{} @@ -129,7 +137,7 @@ func (t *volumesTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T l.config, l.driverCleanup = driver.PrepareTest(f) l.migrationCheck = newMigrationOpCheck(f.ClientSet, dInfo.InTreePluginName) testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange - l.resource = CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange) + l.resource = storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange) if l.resource.VolSource == nil { e2eskipper.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name) } @@ -142,20 +150,16 @@ func (t *volumesTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T l.resource = nil } - errs = append(errs, tryFunc(l.driverCleanup)) + errs = append(errs, storageutils.TryFunc(l.driverCleanup)) l.driverCleanup = nil framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource") l.migrationCheck.validateMigrationVolumeOpCounts() } ginkgo.It("should store data", func() { - if pattern.VolMode == v1.PersistentVolumeBlock { - skipTestIfBlockNotSupported(driver) - } - init() defer func() { - e2evolume.TestServerCleanup(f, convertTestConfig(l.config)) + e2evolume.TestServerCleanup(f, storageframework.ConvertTestConfig(l.config)) cleanup() }() @@ -169,9 +173,9 @@ func (t *volumesTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T dInfo.Name, f.Namespace.Name), }, } - config := convertTestConfig(l.config) + config := storageframework.ConvertTestConfig(l.config) var fsGroup *int64 - if framework.NodeOSDistroIs("windows") && 
dInfo.Capabilities[CapFsGroup] { + if framework.NodeOSDistroIs("windows") && dInfo.Capabilities[storageframework.CapFsGroup] { fsGroupVal := int64(1234) fsGroup = &fsGroupVal } @@ -180,7 +184,7 @@ func (t *volumesTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T // and we don't have a reliable way to detect volumes are unmounted or // not before starting the second pod. e2evolume.InjectContent(f, config, fsGroup, pattern.FsType, tests) - if driver.GetDriverInfo().Capabilities[CapPersistence] { + if driver.GetDriverInfo().Capabilities[storageframework.CapPersistence] { e2evolume.TestVolumeClient(f, config, fsGroup, pattern.FsType, tests) } else { ginkgo.By("Skipping persistence check for non-persistent volume") @@ -203,7 +207,7 @@ func testScriptInPod( f *framework.Framework, volumeType string, source *v1.VolumeSource, - config *PerTestConfig) { + config *storageframework.PerTestConfig) { const ( volPath = "/vol1" diff --git a/test/e2e/storage/utils/BUILD b/test/e2e/storage/utils/BUILD index 075b6d7d188..983fa044f5e 100644 --- a/test/e2e/storage/utils/BUILD +++ b/test/e2e/storage/utils/BUILD @@ -1,6 +1,6 @@ package(default_visibility = ["//visibility:public"]) -load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = [ @@ -11,6 +11,8 @@ go_library( "framework.go", "host_exec.go", "local.go", + "pod.go", + "snapshot.go", "utils.go", ], importpath = "k8s.io/kubernetes/test/e2e/storage/utils", @@ -20,6 +22,7 @@ go_library( "//staging/src/k8s.io/api/rbac/v1:go_default_library", "//staging/src/k8s.io/api/storage/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", @@ -27,6 +30,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/storage/names:go_default_library", "//staging/src/k8s.io/client-go/dynamic:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", @@ -37,6 +41,8 @@ go_library( "//test/e2e/framework/pod:go_default_library", "//test/e2e/framework/ssh:go_default_library", "//test/e2e/framework/testfiles:go_default_library", + "//test/e2e/framework/volume:go_default_library", + "//test/e2e/storage/podlogs:go_default_library", "//test/utils/image:go_default_library", "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", "//vendor/github.com/aws/aws-sdk-go/service/ec2:go_default_library", @@ -44,7 +50,6 @@ go_library( "//vendor/github.com/onsi/gomega:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", "//vendor/k8s.io/klog/v2:go_default_library", - "//vendor/k8s.io/utils/exec:go_default_library", ], ) @@ -60,3 +65,10 @@ filegroup( srcs = [":package-srcs"], tags = ["automanaged"], ) + +go_test( + name = "go_default_test", + srcs = ["utils_test.go"], + embed = [":go_default_library"], + deps = ["//test/e2e/framework/volume:go_default_library"], +) diff --git a/test/e2e/storage/utils/pod.go
b/test/e2e/storage/utils/pod.go new file mode 100644 index 00000000000..fda7db07851 --- /dev/null +++ b/test/e2e/storage/utils/pod.go @@ -0,0 +1,182 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utils + +import ( + "context" + "fmt" + "regexp" + "strings" + "time" + + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" + e2enode "k8s.io/kubernetes/test/e2e/framework/node" + e2essh "k8s.io/kubernetes/test/e2e/framework/ssh" + "k8s.io/kubernetes/test/e2e/storage/podlogs" +) + +// StartPodLogs begins capturing log output and events from current +// and future pods running in the given driver namespace. That capture +// ends when the returned cleanup function is called. +// +// The output goes to log files (when using --report-dir, as in the +// CI) or the output stream (otherwise). +func StartPodLogs(f *framework.Framework, driverNamespace *v1.Namespace) func() { + ctx, cancel := context.WithCancel(context.Background()) + cs := f.ClientSet + + ns := driverNamespace.Name + + to := podlogs.LogOutput{ + StatusWriter: ginkgo.GinkgoWriter, + } + if framework.TestContext.ReportDir == "" { + to.LogWriter = ginkgo.GinkgoWriter + } else { + test := ginkgo.CurrentGinkgoTestDescription() + // Clean up each individual component text such that + // it contains only characters that are valid in a + // file name. + reg := regexp.MustCompile("[^a-zA-Z0-9_-]+") + var components []string + for _, component := range test.ComponentTexts { + components = append(components, reg.ReplaceAllString(component, "_")) + } + // We end the prefix with a slash to ensure that all logs + // end up in a directory named after the current test. + // + // Each component name maps to a directory. This + // avoids cluttering the root artifact directory and + // keeps each directory name smaller (the full test + // name at one point exceeded 256 characters, which was + // too much for some filesystems). + to.LogPathPrefix = framework.TestContext.ReportDir + "/" + + strings.Join(components, "/") + "/" + } + podlogs.CopyAllLogs(ctx, cs, ns, to) + + // pod events are something that the framework already collects itself + // after a failed test. Logging them live is only useful for interactive + // debugging, not when we collect reports. + if framework.TestContext.ReportDir == "" { + podlogs.WatchPods(ctx, cs, ns, ginkgo.GinkgoWriter) + } + + return cancel +}
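A quick usage sketch for the helper above (assumes a framework instance `f` and a driver namespace created elsewhere in the calling test):

```go
// Capture logs and events from the driver namespace until the test ends.
cleanupLogs := StartPodLogs(f, driverNamespace) // driverNamespace: assumed *v1.Namespace
defer cleanupLogs()
```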
+ +// KubeletCommand performs `start`, `restart`, or `stop` on the kubelet running on the node of the target pod and waits +// for the desired status. +// - First issues the command via `systemctl` +// - If `systemctl` returns stderr "command not found", issues the command via `service` +// - If `service` also returns stderr "command not found", the test is aborted. +// Allowed kubeletOps are `KStart`, `KStop`, and `KRestart` +func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) { + command := "" + systemctlPresent := false + kubeletPid := "" + + nodeIP, err := getHostAddress(c, pod) + framework.ExpectNoError(err) + nodeIP = nodeIP + ":22" + + framework.Logf("Checking if systemctl command is present") + sshResult, err := e2essh.SSH("systemctl --version", nodeIP, framework.TestContext.Provider) + framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName)) + if !strings.Contains(sshResult.Stderr, "command not found") { + command = fmt.Sprintf("systemctl %s kubelet", string(kOp)) + systemctlPresent = true + } else { + command = fmt.Sprintf("service kubelet %s", string(kOp)) + } + + sudoPresent := isSudoPresent(nodeIP, framework.TestContext.Provider) + if sudoPresent { + command = fmt.Sprintf("sudo %s", command) + } + + if kOp == KRestart { + kubeletPid = getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent) + } + + framework.Logf("Attempting `%s`", command) + sshResult, err = e2essh.SSH(command, nodeIP, framework.TestContext.Provider) + framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName)) + e2essh.LogResult(sshResult) + gomega.Expect(sshResult.Code).To(gomega.BeZero(), "Failed to [%s] kubelet:\n%#v", string(kOp), sshResult) + + if kOp == KStop { + if ok := e2enode.WaitForNodeToBeNotReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok { + framework.Failf("Node %s failed to enter NotReady state", pod.Spec.NodeName) + } + } + if kOp == KRestart { + // Wait for a minute to check if kubelet Pid is getting changed + isPidChanged := false + for start := time.Now(); time.Since(start) < 1*time.Minute; time.Sleep(2 * time.Second) { + kubeletPidAfterRestart := getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent) + if kubeletPid != kubeletPidAfterRestart { + isPidChanged = true + break + } + } + framework.ExpectEqual(isPidChanged, true, "Kubelet PID remained unchanged after restarting Kubelet") + framework.Logf("Noticed that kubelet PID is changed. Waiting for 30 Seconds for Kubelet to come back") + time.Sleep(30 * time.Second) + } + if kOp == KStart || kOp == KRestart { + // For kubelet start and restart operations, wait until Node becomes Ready + if ok := e2enode.WaitForNodeToBeReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok { + framework.Failf("Node %s failed to enter Ready state", pod.Spec.NodeName) + } + } +}
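A sketch of how a test drives KubeletCommand (`cs` and `pod` come from the calling test; callers outside this package would qualify the names with their storageutils import alias):

```go
// Restart the kubelet on the pod's node; KubeletCommand only returns once the
// node has transitioned back to Ready, so the test can proceed immediately.
KubeletCommand(KRestart, cs, pod)
```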
+ +// getHostAddress gets the node for a pod and returns the first +// address. Returns an error if the node the pod is on doesn't have an +// address. +func getHostAddress(client clientset.Interface, p *v1.Pod) (string, error) { + node, err := client.CoreV1().Nodes().Get(context.TODO(), p.Spec.NodeName, metav1.GetOptions{}) + if err != nil { + return "", err + } + // Try externalAddress first + for _, address := range node.Status.Addresses { + if address.Type == v1.NodeExternalIP { + if address.Address != "" { + return address.Address, nil + } + } + } + // If no externalAddress found, try internalAddress + for _, address := range node.Status.Addresses { + if address.Type == v1.NodeInternalIP { + if address.Address != "" { + return address.Address, nil + } + } + } + + // If not found, return error + return "", fmt.Errorf("No address for pod %v on node %v", + p.Name, p.Spec.NodeName) +} diff --git a/test/e2e/storage/utils/snapshot.go b/test/e2e/storage/utils/snapshot.go new file mode 100644 index 00000000000..a62ab06b645 --- /dev/null +++ b/test/e2e/storage/utils/snapshot.go @@ -0,0 +1,142 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utils + +import ( + "context" + "fmt" + "time" + + "github.com/onsi/ginkgo" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/storage/names" + "k8s.io/client-go/dynamic" + "k8s.io/kubernetes/test/e2e/framework" +) + +const ( + // SnapshotGroup is the snapshot CRD api group + SnapshotGroup = "snapshot.storage.k8s.io" + // SnapshotAPIVersion is the snapshot CRD api version + SnapshotAPIVersion = "snapshot.storage.k8s.io/v1" +) + +var ( + // SnapshotGVR is GroupVersionResource for volumesnapshots + SnapshotGVR = schema.GroupVersionResource{Group: SnapshotGroup, Version: "v1", Resource: "volumesnapshots"} + // SnapshotClassGVR is GroupVersionResource for volumesnapshotclasses + SnapshotClassGVR = schema.GroupVersionResource{Group: SnapshotGroup, Version: "v1", Resource: "volumesnapshotclasses"} + // SnapshotContentGVR is GroupVersionResource for volumesnapshotcontents + SnapshotContentGVR = schema.GroupVersionResource{Group: SnapshotGroup, Version: "v1", Resource: "volumesnapshotcontents"} +) + +// WaitForSnapshotReady waits for a VolumeSnapshot to be ready to use or until timeout occurs, whichever comes first. +func WaitForSnapshotReady(c dynamic.Interface, ns string, snapshotName string, poll, timeout time.Duration) error { + framework.Logf("Waiting up to %v for VolumeSnapshot %s to become ready", timeout, snapshotName) + + if successful := WaitUntil(poll, timeout, func() bool { + snapshot, err := c.Resource(SnapshotGVR).Namespace(ns).Get(context.TODO(), snapshotName, metav1.GetOptions{}) + if err != nil { + framework.Logf("Failed to get snapshot %q, retrying in %v.
Error: %v", snapshotName, poll, err) + return false + } + + status := snapshot.Object["status"] + if status == nil { + framework.Logf("VolumeSnapshot %s found but is not ready.", snapshotName) + return false + } + value := status.(map[string]interface{}) + if value["readyToUse"] == true { + framework.Logf("VolumeSnapshot %s found and is ready", snapshotName) + return true + } + + framework.Logf("VolumeSnapshot %s found but is not ready.", snapshotName) + return false + }); successful { + return nil + } + + return fmt.Errorf("VolumeSnapshot %s is not ready within %v", snapshotName, timeout) +} + +// GetSnapshotContentFromSnapshot returns the VolumeSnapshotContent object Bound to a +// given VolumeSnapshot +func GetSnapshotContentFromSnapshot(dc dynamic.Interface, snapshot *unstructured.Unstructured) *unstructured.Unstructured { + defer ginkgo.GinkgoRecover() + err := WaitForSnapshotReady(dc, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, framework.SnapshotCreateTimeout) + framework.ExpectNoError(err) + + vs, err := dc.Resource(SnapshotGVR).Namespace(snapshot.GetNamespace()).Get(context.TODO(), snapshot.GetName(), metav1.GetOptions{}) + + snapshotStatus := vs.Object["status"].(map[string]interface{}) + snapshotContentName := snapshotStatus["boundVolumeSnapshotContentName"].(string) + framework.Logf("received snapshotStatus %v", snapshotStatus) + framework.Logf("snapshotContentName %s", snapshotContentName) + framework.ExpectNoError(err) + + vscontent, err := dc.Resource(SnapshotContentGVR).Get(context.TODO(), snapshotContentName, metav1.GetOptions{}) + framework.ExpectNoError(err) + + return vscontent + +} + +// DeleteAndWaitSnapshot deletes a VolumeSnapshot and waits for it to be deleted or until timeout occurs, whichever comes first +func DeleteAndWaitSnapshot(dc dynamic.Interface, ns string, snapshotName string, poll, timeout time.Duration) error { + var err error + ginkgo.By("deleting the snapshot") + err = dc.Resource(SnapshotGVR).Namespace(ns).Delete(context.TODO(), snapshotName, metav1.DeleteOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + return err + } + + ginkgo.By("checking the Snapshot has been deleted") + err = WaitForNamespacedGVRDeletion(dc, SnapshotGVR, ns, snapshotName, poll, timeout) + + return err +} + +// GenerateSnapshotClassSpec constructs a new SnapshotClass instance spec +// with a unique name that is based on namespace + suffix. +func GenerateSnapshotClassSpec( + snapshotter string, + parameters map[string]string, + ns string, + suffix string, +) *unstructured.Unstructured { + snapshotClass := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "VolumeSnapshotClass", + "apiVersion": SnapshotAPIVersion, + "metadata": map[string]interface{}{ + // Name must be unique, so let's base it on namespace name and use GenerateName + // TODO(#96234): Remove unnecessary suffix. 
+ "name": names.SimpleNameGenerator.GenerateName(ns + "-" + suffix), + }, + "driver": snapshotter, + "parameters": parameters, + "deletionPolicy": "Delete", + }, + } + + return snapshotClass +} diff --git a/test/e2e/storage/utils/utils.go b/test/e2e/storage/utils/utils.go index 0149dd1352d..958f84b7ed9 100644 --- a/test/e2e/storage/utils/utils.go +++ b/test/e2e/storage/utils/utils.go @@ -21,6 +21,7 @@ import ( "crypto/sha256" "encoding/base64" "fmt" + "math" "math/rand" "path/filepath" "strings" @@ -32,6 +33,7 @@ import ( v1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" @@ -39,13 +41,11 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/dynamic" clientset "k8s.io/client-go/kubernetes" - clientexec "k8s.io/client-go/util/exec" "k8s.io/kubernetes/test/e2e/framework" - e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2essh "k8s.io/kubernetes/test/e2e/framework/ssh" + e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" imageutils "k8s.io/kubernetes/test/utils/image" - uexec "k8s.io/utils/exec" ) // KubeletOpt type definition @@ -59,7 +59,9 @@ const ( // KStop defines stop value KStop KubeletOpt = "stop" // KRestart defines restart value - KRestart KubeletOpt = "restart" + KRestart KubeletOpt = "restart" + minValidSize string = "1Ki" + maxValidSize string = "10Ei" ) const ( @@ -67,37 +69,10 @@ const ( podSecurityPolicyPrivilegedClusterRoleName = "e2e-test-privileged-psp" ) -// PodExec runs f.ExecCommandInContainerWithFullOutput to execute a shell cmd in target pod -func PodExec(f *framework.Framework, pod *v1.Pod, shExec string) (string, string, error) { - if framework.NodeOSDistroIs("windows") { - return f.ExecCommandInContainerWithFullOutput(pod.Name, pod.Spec.Containers[0].Name, "powershell", "/c", shExec) - } - return f.ExecCommandInContainerWithFullOutput(pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", shExec) - -} - -// VerifyExecInPodSucceed verifies shell cmd in target pod succeed -func VerifyExecInPodSucceed(f *framework.Framework, pod *v1.Pod, shExec string) { - stdout, stderr, err := PodExec(f, pod, shExec) - if err != nil { - - if exiterr, ok := err.(uexec.CodeExitError); ok { - exitCode := exiterr.ExitStatus() - framework.ExpectNoError(err, - "%q should succeed, but failed with exit code %d and error message %q\nstdout: %s\nstderr: %s", - shExec, exitCode, exiterr, stdout, stderr) - } else { - framework.ExpectNoError(err, - "%q should succeed, but failed with error message %q\nstdout: %s\nstderr: %s", - shExec, err, stdout, stderr) - } - } -} - // VerifyFSGroupInPod verifies that the passed in filePath contains the expectedFSGroup func VerifyFSGroupInPod(f *framework.Framework, filePath, expectedFSGroup string, pod *v1.Pod) { cmd := fmt.Sprintf("ls -l %s", filePath) - stdout, stderr, err := PodExec(f, pod, cmd) + stdout, stderr, err := e2evolume.PodExec(f, pod, cmd) framework.ExpectNoError(err) framework.Logf("pod %s/%s exec for cmd %s, stdout: %s, stderr: %s", pod.Namespace, pod.Name, cmd, stdout, stderr) fsGroupResult := strings.Fields(stdout)[3] @@ -105,131 +80,6 @@ func VerifyFSGroupInPod(f *framework.Framework, filePath, expectedFSGroup string "Expected fsGroup of %s, got %s", expectedFSGroup, fsGroupResult) } -// VerifyExecInPodFail verifies shell cmd in target pod fail 
with certain exit code -func VerifyExecInPodFail(f *framework.Framework, pod *v1.Pod, shExec string, exitCode int) { - stdout, stderr, err := PodExec(f, pod, shExec) - if err != nil { - if exiterr, ok := err.(clientexec.ExitError); ok { - actualExitCode := exiterr.ExitStatus() - framework.ExpectEqual(actualExitCode, exitCode, - "%q should fail with exit code %d, but failed with exit code %d and error message %q\nstdout: %s\nstderr: %s", - shExec, exitCode, actualExitCode, exiterr, stdout, stderr) - } else { - framework.ExpectNoError(err, - "%q should fail with exit code %d, but failed with error message %q\nstdout: %s\nstderr: %s", - shExec, exitCode, err, stdout, stderr) - } - } - framework.ExpectError(err, "%q should fail with exit code %d, but exit without error", shExec, exitCode) -} - -func isSudoPresent(nodeIP string, provider string) bool { - framework.Logf("Checking if sudo command is present") - sshResult, err := e2essh.SSH("sudo --version", nodeIP, provider) - framework.ExpectNoError(err, "SSH to %q errored.", nodeIP) - if !strings.Contains(sshResult.Stderr, "command not found") { - return true - } - return false -} - -// getHostAddress gets the node for a pod and returns the first -// address. Returns an error if the node the pod is on doesn't have an -// address. -func getHostAddress(client clientset.Interface, p *v1.Pod) (string, error) { - node, err := client.CoreV1().Nodes().Get(context.TODO(), p.Spec.NodeName, metav1.GetOptions{}) - if err != nil { - return "", err - } - // Try externalAddress first - for _, address := range node.Status.Addresses { - if address.Type == v1.NodeExternalIP { - if address.Address != "" { - return address.Address, nil - } - } - } - // If no externalAddress found, try internalAddress - for _, address := range node.Status.Addresses { - if address.Type == v1.NodeInternalIP { - if address.Address != "" { - return address.Address, nil - } - } - } - - // If not found, return error - return "", fmt.Errorf("No address for pod %v on node %v", - p.Name, p.Spec.NodeName) -} - -// KubeletCommand performs `start`, `restart`, or `stop` on the kubelet running on the node of the target pod and waits -// for the desired statues.. -// - First issues the command via `systemctl` -// - If `systemctl` returns stderr "command not found, issues the command via `service` -// - If `service` also returns stderr "command not found", the test is aborted. 
-// Allowed kubeletOps are `KStart`, `KStop`, and `KRestart` -func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) { - command := "" - systemctlPresent := false - kubeletPid := "" - - nodeIP, err := getHostAddress(c, pod) - framework.ExpectNoError(err) - nodeIP = nodeIP + ":22" - - framework.Logf("Checking if systemctl command is present") - sshResult, err := e2essh.SSH("systemctl --version", nodeIP, framework.TestContext.Provider) - framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName)) - if !strings.Contains(sshResult.Stderr, "command not found") { - command = fmt.Sprintf("systemctl %s kubelet", string(kOp)) - systemctlPresent = true - } else { - command = fmt.Sprintf("service kubelet %s", string(kOp)) - } - - sudoPresent := isSudoPresent(nodeIP, framework.TestContext.Provider) - if sudoPresent { - command = fmt.Sprintf("sudo %s", command) - } - - if kOp == KRestart { - kubeletPid = getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent) - } - - framework.Logf("Attempting `%s`", command) - sshResult, err = e2essh.SSH(command, nodeIP, framework.TestContext.Provider) - framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName)) - e2essh.LogResult(sshResult) - gomega.Expect(sshResult.Code).To(gomega.BeZero(), "Failed to [%s] kubelet:\n%#v", string(kOp), sshResult) - - if kOp == KStop { - if ok := e2enode.WaitForNodeToBeNotReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok { - framework.Failf("Node %s failed to enter NotReady state", pod.Spec.NodeName) - } - } - if kOp == KRestart { - // Wait for a minute to check if kubelet Pid is getting changed - isPidChanged := false - for start := time.Now(); time.Since(start) < 1*time.Minute; time.Sleep(2 * time.Second) { - kubeletPidAfterRestart := getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent) - if kubeletPid != kubeletPidAfterRestart { - isPidChanged = true - break - } - } - framework.ExpectEqual(isPidChanged, true, "Kubelet PID remained unchanged after restarting Kubelet") - framework.Logf("Noticed that kubelet PID is changed. 
Waiting for 30 Seconds for Kubelet to come back") - time.Sleep(30 * time.Second) - } - if kOp == KStart || kOp == KRestart { - // For kubelet start and restart operations, Wait until Node becomes Ready - if ok := e2enode.WaitForNodeToBeReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok { - framework.Failf("Node %s failed to enter Ready state", pod.Spec.NodeName) - } - } -} - // getKubeletMainPid return the Main PID of the Kubelet Process func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) string { command := "" @@ -614,46 +464,39 @@ func PrivilegedTestPSPClusterRoleBinding(client clientset.Interface, } } -// CheckVolumeModeOfPath check mode of volume -func CheckVolumeModeOfPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) { - if volMode == v1.PersistentVolumeBlock { - // Check if block exists - VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -b %s", path)) - - // Double check that it's not directory - VerifyExecInPodFail(f, pod, fmt.Sprintf("test -d %s", path), 1) - } else { - // Check if directory exists - VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -d %s", path)) - - // Double check that it's not block - VerifyExecInPodFail(f, pod, fmt.Sprintf("test -b %s", path), 1) +func isSudoPresent(nodeIP string, provider string) bool { + framework.Logf("Checking if sudo command is present") + sshResult, err := e2essh.SSH("sudo --version", nodeIP, provider) + framework.ExpectNoError(err, "SSH to %q errored.", nodeIP) + if !strings.Contains(sshResult.Stderr, "command not found") { + return true } + return false } // CheckReadWriteToPath checks that path can be read and written func CheckReadWriteToPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) { if volMode == v1.PersistentVolumeBlock { // random -> file1 - VerifyExecInPodSucceed(f, pod, "dd if=/dev/urandom of=/tmp/file1 bs=64 count=1") + e2evolume.VerifyExecInPodSucceed(f, pod, "dd if=/dev/urandom of=/tmp/file1 bs=64 count=1") // file1 -> dev (write to dev) - VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=/tmp/file1 of=%s bs=64 count=1", path)) + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=/tmp/file1 of=%s bs=64 count=1", path)) // dev -> file2 (read from dev) - VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s of=/tmp/file2 bs=64 count=1", path)) + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s of=/tmp/file2 bs=64 count=1", path)) // file1 == file2 (check contents) - VerifyExecInPodSucceed(f, pod, "diff /tmp/file1 /tmp/file2") + e2evolume.VerifyExecInPodSucceed(f, pod, "diff /tmp/file1 /tmp/file2") // Clean up temp files - VerifyExecInPodSucceed(f, pod, "rm -f /tmp/file1 /tmp/file2") + e2evolume.VerifyExecInPodSucceed(f, pod, "rm -f /tmp/file1 /tmp/file2") // Check that writing file to block volume fails - VerifyExecInPodFail(f, pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path), 1) + e2evolume.VerifyExecInPodFail(f, pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path), 1) } else { // text -> file1 (write to file) - VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path)) + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo 'Hello world.'
> %s/file1.txt", path)) // grep file1 (read from file and check contents) - VerifyExecInPodSucceed(f, pod, readFile("Hello word.", path)) + e2evolume.VerifyExecInPodSucceed(f, pod, readFile("Hello word.", path)) // Check that writing to directory as block volume fails - VerifyExecInPodFail(f, pod, fmt.Sprintf("dd if=/dev/urandom of=%s bs=64 count=1", path), 1) + e2evolume.VerifyExecInPodFail(f, pod, fmt.Sprintf("dd if=/dev/urandom of=%s bs=64 count=1", path), 1) } } @@ -699,8 +542,8 @@ func CheckReadFromPath(f *framework.Framework, pod *v1.Pod, volMode v1.Persisten sum := sha256.Sum256(genBinDataFromSeed(len, seed)) - VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s %s bs=%d count=1 | sha256sum", pathForVolMode, iflag, len)) - VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s %s bs=%d count=1 | sha256sum | grep -Fq %x", pathForVolMode, iflag, len, sum)) + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s %s bs=%d count=1 | sha256sum", pathForVolMode, iflag, len)) + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s %s bs=%d count=1 | sha256sum | grep -Fq %x", pathForVolMode, iflag, len, sum)) } // CheckWriteToPath that file can be properly written. @@ -724,8 +567,8 @@ func CheckWriteToPath(f *framework.Framework, pod *v1.Pod, volMode v1.Persistent encoded := base64.StdEncoding.EncodeToString(genBinDataFromSeed(len, seed)) - VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | sha256sum", encoded)) - VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | dd of=%s %s bs=%d count=1", encoded, pathForVolMode, oflag, len)) + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | sha256sum", encoded)) + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | dd of=%s %s bs=%d count=1", encoded, pathForVolMode, oflag, len)) } // findMountPoints returns all mount points on given node under specified directory. @@ -866,7 +709,7 @@ func WaitForGVRFinalizer(ctx context.Context, c dynamic.Interface, gvr schema.Gr // VerifyFilePathGidInPod verfies expected GID of the target filepath func VerifyFilePathGidInPod(f *framework.Framework, filePath, expectedGid string, pod *v1.Pod) { cmd := fmt.Sprintf("ls -l %s", filePath) - stdout, stderr, err := PodExec(f, pod, cmd) + stdout, stderr, err := e2evolume.PodExec(f, pod, cmd) framework.ExpectNoError(err) framework.Logf("pod %s/%s exec for cmd %s, stdout: %s, stderr: %s", pod.Namespace, pod.Name, cmd, stdout, stderr) ll := strings.Fields(stdout) @@ -877,7 +720,90 @@ func VerifyFilePathGidInPod(f *framework.Framework, filePath, expectedGid string // ChangeFilePathGidInPod changes the GID of the target filepath. 
 func ChangeFilePathGidInPod(f *framework.Framework, filePath, targetGid string, pod *v1.Pod) {
 	cmd := fmt.Sprintf("chgrp %s %s", targetGid, filePath)
-	_, _, err := PodExec(f, pod, cmd)
+	_, _, err := e2evolume.PodExec(f, pod, cmd)
 	framework.ExpectNoError(err)
 	VerifyFilePathGidInPod(f, filePath, targetGid, pod)
 }
+
+// DeleteStorageClass deletes the passed-in StorageClass and returns any error other than "Not Found"
+func DeleteStorageClass(cs clientset.Interface, className string) error {
+	err := cs.StorageV1().StorageClasses().Delete(context.TODO(), className, metav1.DeleteOptions{})
+	if err != nil && !apierrors.IsNotFound(err) {
+		return err
+	}
+	return nil
+}
+
+// CreateVolumeSource creates a volume source object
+func CreateVolumeSource(pvcName string, readOnly bool) *v1.VolumeSource {
+	return &v1.VolumeSource{
+		PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+			ClaimName: pvcName,
+			ReadOnly:  readOnly,
+		},
+	}
+}
+
+// TryFunc tries to execute the function and returns an error if there is any
+func TryFunc(f func()) error {
+	var err error
+	if f == nil {
+		return nil
+	}
+	defer func() {
+		if recoverError := recover(); recoverError != nil {
+			err = fmt.Errorf("%v", recoverError)
+		}
+	}()
+	f()
+	return err
+}
+
+// GetSizeRangesIntersection takes two instances of storage size ranges and determines the
+// intersection of the intervals (if it exists) and returns the minimum of the intersection
+// to be used as the claim size for the test.
+// If a value is not set, there is no minimum or maximum size limitation, so a default size is used for it.
+func GetSizeRangesIntersection(first e2evolume.SizeRange, second e2evolume.SizeRange) (string, error) {
+	var firstMin, firstMax, secondMin, secondMax resource.Quantity
+	var err error
+
+	// If a bound of the SizeRange is not set, assign a default minimum or maximum size
+	if len(first.Min) == 0 {
+		first.Min = minValidSize
+	}
+	if len(first.Max) == 0 {
+		first.Max = maxValidSize
+	}
+	if len(second.Min) == 0 {
+		second.Min = minValidSize
+	}
+	if len(second.Max) == 0 {
+		second.Max = maxValidSize
+	}
+
+	if firstMin, err = resource.ParseQuantity(first.Min); err != nil {
+		return "", err
+	}
+	if firstMax, err = resource.ParseQuantity(first.Max); err != nil {
+		return "", err
+	}
+	if secondMin, err = resource.ParseQuantity(second.Min); err != nil {
+		return "", err
+	}
+	if secondMax, err = resource.ParseQuantity(second.Max); err != nil {
+		return "", err
+	}
+
+	intersectionStart := math.Max(float64(firstMin.Value()), float64(secondMin.Value()))
+	intersectionEnd := math.Min(float64(firstMax.Value()), float64(secondMax.Value()))
+
+	// the minimum of the intersection shall be returned as the claim size
+	var intersectionMin resource.Quantity
+
+	if intersectionEnd-intersectionStart >= 0 { // have intersection
+		intersectionMin = *resource.NewQuantity(int64(intersectionStart), "BinarySI") // convert value to BinarySI format, e.g. 5Gi
+		// return the minimum of the intersection as the claim size
+		return intersectionMin.String(), nil
+	}
+	return "", fmt.Errorf("intersection of size ranges %+v, %+v is null", first, second)
+}
diff --git a/test/e2e/storage/testsuites/base_test.go b/test/e2e/storage/utils/utils_test.go
similarity index 97%
rename from test/e2e/storage/testsuites/base_test.go
rename to test/e2e/storage/utils/utils_test.go
index 1cb4eef32c0..f763f1c4d7f 100644
--- a/test/e2e/storage/testsuites/base_test.go
+++ b/test/e2e/storage/utils/utils_test.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package testsuites
+package utils
 
 import (
 	"testing"
@@ -41,7 +41,7 @@ import (
 // -----------------------------------------------------------------
 // |min=?,max=?| #13 | #14 | #15 | #16 |
 // |---------------------------------------------------------------|
-func Test_getSizeRangesIntersection(t *testing.T) {
+func Test_GetSizeRangesIntersection(t *testing.T) {
 	type args struct {
 		first  e2evolume.SizeRange
 		second e2evolume.SizeRange
@@ -463,13 +463,13 @@ func Test_getSizeRangesIntersection(t *testing.T) {
 		},
 	}
 	for _, tt := range tests {
-		got, err := getSizeRangesIntersection(tt.args.first, tt.args.second)
+		got, err := GetSizeRangesIntersection(tt.args.first, tt.args.second)
 		if (err != nil) != tt.wantErr {
-			t.Errorf("%q. getSizeRangesIntersection() error = %v, wantErr %v", tt.name, err, tt.wantErr)
+			t.Errorf("%q. GetSizeRangesIntersection() error = %v, wantErr %v", tt.name, err, tt.wantErr)
 			continue
 		}
 		if got != tt.want {
-			t.Errorf("%q. getSizeRangesIntersection() = %v, want %v", tt.name, got, tt.want)
+			t.Errorf("%q. GetSizeRangesIntersection() = %v, want %v", tt.name, got, tt.want)
 		}
 	}
 }
diff --git a/test/e2e/upgrades/storage/BUILD b/test/e2e/upgrades/storage/BUILD
index 5122664834e..543bc66faa9 100644
--- a/test/e2e/upgrades/storage/BUILD
+++ b/test/e2e/upgrades/storage/BUILD
@@ -21,6 +21,7 @@ go_library(
         "//test/e2e/framework/pod:go_default_library",
         "//test/e2e/framework/pv:go_default_library",
         "//test/e2e/framework/skipper:go_default_library",
+        "//test/e2e/framework/volume:go_default_library",
         "//test/e2e/storage/utils:go_default_library",
         "//test/e2e/upgrades:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
diff --git a/test/e2e/upgrades/storage/volume_mode.go b/test/e2e/upgrades/storage/volume_mode.go
index 1a89c4842cf..9ff187aefd9 100644
--- a/test/e2e/upgrades/storage/volume_mode.go
+++ b/test/e2e/upgrades/storage/volume_mode.go
@@ -27,7 +27,8 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
-	"k8s.io/kubernetes/test/e2e/storage/utils"
+	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
+	storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
 	"k8s.io/kubernetes/test/e2e/upgrades"
 
 	"github.com/onsi/ginkgo"
@@ -104,10 +105,10 @@ func (t *VolumeModeDowngradeTest) Setup(f *framework.Framework) {
 	framework.ExpectNoError(err)
 
 	ginkgo.By("Checking if PV exists as expected volume mode")
-	utils.CheckVolumeModeOfPath(f, t.pod, block, devicePath)
+	e2evolume.CheckVolumeModeOfPath(f, t.pod, block, devicePath)
 
 	ginkgo.By("Checking if read/write to PV works properly")
-	utils.CheckReadWriteToPath(f, t.pod, block, devicePath)
+	storageutils.CheckReadWriteToPath(f, t.pod, block, devicePath)
 }
 
 // Test waits for the downgrade to complete, and then verifies that a pod can no
@@ -117,7 +118,7 @@ func (t *VolumeModeDowngradeTest) Test(f *framework.Framework, done <-chan struc
 	<-done
 
 	ginkgo.By("Verifying that nothing exists at the device path in the pod")
-	utils.VerifyExecInPodFail(f, t.pod, fmt.Sprintf("test -e %s", devicePath), 1)
+	e2evolume.VerifyExecInPodFail(f, t.pod, fmt.Sprintf("test -e %s", devicePath), 1)
 }
 
 // Teardown cleans up any remaining resources.