Mirror of https://github.com/k3s-io/kubernetes.git
Synced 2025-07-21 10:51:29 +00:00

Add storage framework and address comments

This commit is contained in:
parent 988563f8f5
commit 356bea6c9f
@@ -88,8 +88,8 @@ go_library(
         "//test/e2e/framework/statefulset:go_default_library",
         "//test/e2e/framework/testfiles:go_default_library",
         "//test/e2e/framework/volume:go_default_library",
-        "//test/e2e/storage/api:go_default_library",
         "//test/e2e/storage/drivers:go_default_library",
+        "//test/e2e/storage/framework:go_default_library",
         "//test/e2e/storage/testsuites:go_default_library",
         "//test/e2e/storage/utils:go_default_library",
         "//test/utils/image:go_default_library",
@@ -115,9 +115,9 @@ filegroup(
     name = "all-srcs",
     srcs = [
         ":package-srcs",
-        "//test/e2e/storage/api:all-srcs",
         "//test/e2e/storage/drivers:all-srcs",
         "//test/e2e/storage/external:all-srcs",
+        "//test/e2e/storage/framework:all-srcs",
         "//test/e2e/storage/podlogs:all-srcs",
         "//test/e2e/storage/testsuites:all-srcs",
         "//test/e2e/storage/utils:all-srcs",
@ -51,8 +51,8 @@ import (
|
||||
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
|
||||
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
|
||||
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
|
||||
storageapi "k8s.io/kubernetes/test/e2e/storage/api"
|
||||
"k8s.io/kubernetes/test/e2e/storage/drivers"
|
||||
storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
|
||||
"k8s.io/kubernetes/test/e2e/storage/testsuites"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
@ -121,13 +121,13 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
|
||||
|
||||
type mockDriverSetup struct {
|
||||
cs clientset.Interface
|
||||
config *storageapi.PerTestConfig
|
||||
config *storageframework.PerTestConfig
|
||||
testCleanups []func()
|
||||
pods []*v1.Pod
|
||||
pvcs []*v1.PersistentVolumeClaim
|
||||
sc map[string]*storagev1.StorageClass
|
||||
vsc map[string]*unstructured.Unstructured
|
||||
driver storageapi.TestDriver
|
||||
driver storageframework.TestDriver
|
||||
provisioner string
|
||||
tp testParameters
|
||||
}
|
||||
@ -189,7 +189,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
|
||||
createPod := func(ephemeral bool) (class *storagev1.StorageClass, claim *v1.PersistentVolumeClaim, pod *v1.Pod) {
|
||||
ginkgo.By("Creating pod")
|
||||
var sc *storagev1.StorageClass
|
||||
if dDriver, ok := m.driver.(storageapi.DynamicPVTestDriver); ok {
|
||||
if dDriver, ok := m.driver.(storageframework.DynamicPVTestDriver); ok {
|
||||
sc = dDriver.GetDynamicProvisionStorageClass(m.config, "")
|
||||
}
|
||||
scTest := testsuites.StorageClassTest{
|
||||
@ -238,7 +238,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
|
||||
ginkgo.By("Creating pod with fsGroup")
|
||||
nodeSelection := m.config.ClientNodeSelection
|
||||
var sc *storagev1.StorageClass
|
||||
if dDriver, ok := m.driver.(storageapi.DynamicPVTestDriver); ok {
|
||||
if dDriver, ok := m.driver.(storageframework.DynamicPVTestDriver); ok {
|
||||
sc = dDriver.GetDynamicProvisionStorageClass(m.config, "")
|
||||
}
|
||||
scTest := testsuites.StorageClassTest{
|
||||
@ -1247,7 +1247,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
|
||||
enableSnapshot: true,
|
||||
javascriptHooks: scripts,
|
||||
})
|
||||
sDriver, ok := m.driver.(storageapi.SnapshottableTestDriver)
|
||||
sDriver, ok := m.driver.(storageframework.SnapshottableTestDriver)
|
||||
if !ok {
|
||||
e2eskipper.Skipf("mock driver %s does not support snapshots -- skipping", m.driver.GetDriverInfo().Name)
|
||||
|
||||
@ -1257,7 +1257,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
|
||||
defer cleanup()
|
||||
|
||||
var sc *storagev1.StorageClass
|
||||
if dDriver, ok := m.driver.(storageapi.DynamicPVTestDriver); ok {
|
||||
if dDriver, ok := m.driver.(storageframework.DynamicPVTestDriver); ok {
|
||||
sc = dDriver.GetDynamicProvisionStorageClass(m.config, "")
|
||||
}
|
||||
ginkgo.By("Creating storage class")
|
||||
@ -1272,7 +1272,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
|
||||
|
||||
ginkgo.By("Creating snapshot")
|
||||
// TODO: Test VolumeSnapshots with Retain policy
|
||||
snapshotClass, snapshot := storageapi.CreateSnapshot(sDriver, m.config, storageapi.DynamicSnapshotDelete, claim.Name, claim.Namespace, f.Timeouts)
|
||||
snapshotClass, snapshot := storageframework.CreateSnapshot(sDriver, m.config, storageframework.DynamicSnapshotDelete, claim.Name, claim.Namespace, f.Timeouts)
|
||||
framework.ExpectNoError(err, "failed to create snapshot")
|
||||
m.vsc[snapshotClass.GetName()] = snapshotClass
|
||||
volumeSnapshotName := snapshot.GetName()
|
||||
@ -1306,7 +1306,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
|
||||
}
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Get VolumeSnapshotContent bound to VolumeSnapshot %s", snapshot.GetName()))
|
||||
snapshotContent := storageapi.GetSnapshotContentFromSnapshot(m.config.Framework.DynamicClient, snapshot)
|
||||
snapshotContent := utils.GetSnapshotContentFromSnapshot(m.config.Framework.DynamicClient, snapshot)
|
||||
volumeSnapshotContentName := snapshotContent.GetName()
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Verify VolumeSnapshotContent %s contains finalizer %s", snapshot.GetName(), volumeSnapshotContentFinalizer))
|
||||
@ -1336,7 +1336,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Delete VolumeSnapshot")
|
||||
err = storageapi.DeleteAndWaitSnapshot(m.config.Framework.DynamicClient, f.Namespace.Name, volumeSnapshotName, framework.Poll, framework.SnapshotDeleteTimeout)
|
||||
err = utils.DeleteAndWaitSnapshot(m.config.Framework.DynamicClient, f.Namespace.Name, volumeSnapshotName, framework.Poll, framework.SnapshotDeleteTimeout)
|
||||
framework.ExpectNoError(err, fmt.Sprintf("failed to delete VolumeSnapshot %s", volumeSnapshotName))
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Wait for VolumeSnapshotContent %s to be deleted", volumeSnapshotContentName))
|
||||
|
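The three hunks above make the same mechanical change, so the idiom is worth spelling out once. A minimal sketch of the assertion pattern after the rename, using only identifiers visible in this diff (the standalone function name is hypothetical):

package storage

import (
    storagev1 "k8s.io/api/storage/v1"
    storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
)

// dynamicClassFor returns a StorageClass only when the driver supports dynamic
// provisioning; otherwise it leaves sc nil, which is what the mock tests rely on.
func dynamicClassFor(driver storageframework.TestDriver, config *storageframework.PerTestConfig) *storagev1.StorageClass {
    var sc *storagev1.StorageClass
    if dDriver, ok := driver.(storageframework.DynamicPVTestDriver); ok {
        sc = dDriver.GetDynamicProvisionStorageClass(config, "")
    }
    return sc
}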
@@ -17,8 +17,8 @@ limitations under the License.
 package storage

 import (
-    "k8s.io/kubernetes/test/e2e/storage/api"
     "k8s.io/kubernetes/test/e2e/storage/drivers"
+    storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
     "k8s.io/kubernetes/test/e2e/storage/testsuites"
     "k8s.io/kubernetes/test/e2e/storage/utils"

@@ -26,7 +26,7 @@ import (
 )

 // List of testDrivers to be executed in below loop
-var csiTestDrivers = []func() api.TestDriver{
+var csiTestDrivers = []func() storageframework.TestDriver{
     drivers.InitHostPathCSIDriver,
     drivers.InitGcePDCSIDriver,
     // Don't run tests with mock driver (drivers.InitMockCSIDriver), it does not provide persistent storage.
@@ -37,8 +37,8 @@ var _ = utils.SIGDescribe("CSI Volumes", func() {
     for _, initDriver := range csiTestDrivers {
         curDriver := initDriver()

-        ginkgo.Context(api.GetDriverNameWithFeatureTags(curDriver), func() {
-            api.DefineTestSuites(curDriver, testsuites.CSISuites)
+        ginkgo.Context(storageframework.GetDriverNameWithFeatureTags(curDriver), func() {
+            storageframework.DefineTestSuites(curDriver, testsuites.CSISuites)
         })
     }
 })
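For readers who have not followed these helpers before: DefineTestSuites is what turns the driver list above into actual ginkgo specs. A rough sketch of what it does, assuming only the TestSuite interface visible in this diff (GetTestSuiteInfo, SkipUnsupportedTests, DefineTests); this is an illustration, not the literal implementation:

package storage

import (
    "github.com/onsi/ginkgo"

    storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
)

// defineTestSuitesSketch registers every suite/pattern combination for one driver.
func defineTestSuitesSketch(driver storageframework.TestDriver, tsInits []func() storageframework.TestSuite) {
    for _, testSuiteInit := range tsInits {
        suite := testSuiteInit()
        info := suite.GetTestSuiteInfo()
        for _, pattern := range info.TestPatterns {
            p := pattern
            ginkgo.Context(info.Name+" "+p.Name, func() {
                ginkgo.BeforeEach(func() {
                    // Each suite may skip unsupported driver/pattern combinations at run time.
                    suite.SkipUnsupportedTests(driver, p)
                })
                // DefineTests registers the actual ginkgo.It blocks for this combination.
                suite.DefineTests(driver, p)
            })
        }
    }
}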
@@ -29,7 +29,7 @@ go_library(
         "//test/e2e/framework/pv:go_default_library",
         "//test/e2e/framework/skipper:go_default_library",
         "//test/e2e/framework/volume:go_default_library",
-        "//test/e2e/storage/api:go_default_library",
+        "//test/e2e/storage/framework:go_default_library",
         "//test/e2e/storage/utils:go_default_library",
         "//test/e2e/storage/vsphere:go_default_library",
         "//test/utils/image:go_default_library",
@ -57,7 +57,7 @@ import (
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
|
||||
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
|
||||
storageapi "k8s.io/kubernetes/test/e2e/storage/api"
|
||||
storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
@ -70,18 +70,18 @@ const (
|
||||
|
||||
// hostpathCSI
|
||||
type hostpathCSIDriver struct {
|
||||
driverInfo storageapi.DriverInfo
|
||||
driverInfo storageframework.DriverInfo
|
||||
manifests []string
|
||||
cleanupHandle framework.CleanupActionHandle
|
||||
volumeAttributes []map[string]string
|
||||
}
|
||||
|
||||
func initHostPathCSIDriver(name string, capabilities map[storageapi.Capability]bool, volumeAttributes []map[string]string, manifests ...string) storageapi.TestDriver {
|
||||
func initHostPathCSIDriver(name string, capabilities map[storageframework.Capability]bool, volumeAttributes []map[string]string, manifests ...string) storageframework.TestDriver {
|
||||
return &hostpathCSIDriver{
|
||||
driverInfo: storageapi.DriverInfo{
|
||||
driverInfo: storageframework.DriverInfo{
|
||||
Name: name,
|
||||
FeatureTag: "",
|
||||
MaxFileSize: storageapi.FileSizeMedium,
|
||||
MaxFileSize: storageframework.FileSizeMedium,
|
||||
SupportedFsType: sets.NewString(
|
||||
"", // Default fsType
|
||||
),
|
||||
@ -89,11 +89,11 @@ func initHostPathCSIDriver(name string, capabilities map[storageapi.Capability]b
|
||||
Min: "1Mi",
|
||||
},
|
||||
Capabilities: capabilities,
|
||||
StressTestOptions: &storageapi.StressTestOptions{
|
||||
StressTestOptions: &storageframework.StressTestOptions{
|
||||
NumPods: 10,
|
||||
NumRestarts: 10,
|
||||
},
|
||||
VolumeSnapshotStressTestOptions: &storageapi.VolumeSnapshotStressTestOptions{
|
||||
VolumeSnapshotStressTestOptions: &storageframework.VolumeSnapshotStressTestOptions{
|
||||
NumPods: 10,
|
||||
NumSnapshots: 10,
|
||||
},
|
||||
@ -103,22 +103,22 @@ func initHostPathCSIDriver(name string, capabilities map[storageapi.Capability]b
|
||||
}
|
||||
}
|
||||
|
||||
var _ storageapi.TestDriver = &hostpathCSIDriver{}
|
||||
var _ storageapi.DynamicPVTestDriver = &hostpathCSIDriver{}
|
||||
var _ storageapi.SnapshottableTestDriver = &hostpathCSIDriver{}
|
||||
var _ storageapi.EphemeralTestDriver = &hostpathCSIDriver{}
|
||||
var _ storageframework.TestDriver = &hostpathCSIDriver{}
|
||||
var _ storageframework.DynamicPVTestDriver = &hostpathCSIDriver{}
|
||||
var _ storageframework.SnapshottableTestDriver = &hostpathCSIDriver{}
|
||||
var _ storageframework.EphemeralTestDriver = &hostpathCSIDriver{}
|
||||
|
||||
// InitHostPathCSIDriver returns hostpathCSIDriver that implements TestDriver interface
|
||||
func InitHostPathCSIDriver() storageapi.TestDriver {
|
||||
capabilities := map[storageapi.Capability]bool{
|
||||
storageapi.CapPersistence: true,
|
||||
storageapi.CapSnapshotDataSource: true,
|
||||
storageapi.CapMultiPODs: true,
|
||||
storageapi.CapBlock: true,
|
||||
storageapi.CapPVCDataSource: true,
|
||||
storageapi.CapControllerExpansion: true,
|
||||
storageapi.CapSingleNodeVolume: true,
|
||||
storageapi.CapVolumeLimits: true,
|
||||
func InitHostPathCSIDriver() storageframework.TestDriver {
|
||||
capabilities := map[storageframework.Capability]bool{
|
||||
storageframework.CapPersistence: true,
|
||||
storageframework.CapSnapshotDataSource: true,
|
||||
storageframework.CapMultiPODs: true,
|
||||
storageframework.CapBlock: true,
|
||||
storageframework.CapPVCDataSource: true,
|
||||
storageframework.CapControllerExpansion: true,
|
||||
storageframework.CapSingleNodeVolume: true,
|
||||
storageframework.CapVolumeLimits: true,
|
||||
}
|
||||
return initHostPathCSIDriver("csi-hostpath",
|
||||
capabilities,
|
||||
@ -140,43 +140,43 @@ func InitHostPathCSIDriver() storageapi.TestDriver {
|
||||
)
|
||||
}
|
||||
|
||||
func (h *hostpathCSIDriver) GetDriverInfo() *storageapi.DriverInfo {
|
||||
func (h *hostpathCSIDriver) GetDriverInfo() *storageframework.DriverInfo {
|
||||
return &h.driverInfo
|
||||
}
|
||||
|
||||
func (h *hostpathCSIDriver) SkipUnsupportedTest(pattern storageapi.TestPattern) {
|
||||
if pattern.VolType == storageapi.CSIInlineVolume && len(h.volumeAttributes) == 0 {
|
||||
func (h *hostpathCSIDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
|
||||
if pattern.VolType == storageframework.CSIInlineVolume && len(h.volumeAttributes) == 0 {
|
||||
e2eskipper.Skipf("%s has no volume attributes defined, doesn't support ephemeral inline volumes", h.driverInfo.Name)
|
||||
}
|
||||
}
|
||||
|
||||
func (h *hostpathCSIDriver) GetDynamicProvisionStorageClass(config *storageapi.PerTestConfig, fsType string) *storagev1.StorageClass {
|
||||
func (h *hostpathCSIDriver) GetDynamicProvisionStorageClass(config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass {
|
||||
provisioner := config.GetUniqueDriverName()
|
||||
parameters := map[string]string{}
|
||||
ns := config.Framework.Namespace.Name
|
||||
suffix := fmt.Sprintf("%s-sc", provisioner)
|
||||
|
||||
return storageapi.GetStorageClass(provisioner, parameters, nil, ns, suffix)
|
||||
return storageframework.GetStorageClass(provisioner, parameters, nil, ns, suffix)
|
||||
}
|
||||
|
||||
func (h *hostpathCSIDriver) GetVolume(config *storageapi.PerTestConfig, volumeNumber int) (map[string]string, bool, bool) {
|
||||
func (h *hostpathCSIDriver) GetVolume(config *storageframework.PerTestConfig, volumeNumber int) (map[string]string, bool, bool) {
|
||||
return h.volumeAttributes[volumeNumber%len(h.volumeAttributes)], false /* not shared */, false /* read-write */
|
||||
}
|
||||
|
||||
func (h *hostpathCSIDriver) GetCSIDriverName(config *storageapi.PerTestConfig) string {
|
||||
func (h *hostpathCSIDriver) GetCSIDriverName(config *storageframework.PerTestConfig) string {
|
||||
return config.GetUniqueDriverName()
|
||||
}
|
||||
|
||||
func (h *hostpathCSIDriver) GetSnapshotClass(config *storageapi.PerTestConfig) *unstructured.Unstructured {
|
||||
func (h *hostpathCSIDriver) GetSnapshotClass(config *storageframework.PerTestConfig) *unstructured.Unstructured {
|
||||
snapshotter := config.GetUniqueDriverName()
|
||||
parameters := map[string]string{}
|
||||
ns := config.Framework.Namespace.Name
|
||||
suffix := fmt.Sprintf("%s-vsc", snapshotter)
|
||||
|
||||
return storageapi.GetSnapshotClass(snapshotter, parameters, ns, suffix)
|
||||
return utils.GenerateSnapshotClassSpec(snapshotter, parameters, ns, suffix)
|
||||
}
|
||||
|
||||
func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) (*storageapi.PerTestConfig, func()) {
|
||||
func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) {
|
||||
// Create secondary namespace which will be used for creating driver
|
||||
driverNamespace := utils.CreateDriverNamespace(f)
|
||||
ns2 := driverNamespace.Name
|
||||
@ -189,7 +189,7 @@ func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) (*storageapi.Per
|
||||
// The hostpath CSI driver only works when everything runs on the same node.
|
||||
node, err := e2enode.GetRandomReadySchedulableNode(cs)
|
||||
framework.ExpectNoError(err)
|
||||
config := &storageapi.PerTestConfig{
|
||||
config := &storageframework.PerTestConfig{
|
||||
Driver: h,
|
||||
Prefix: "hostpath",
|
||||
Framework: f,
|
||||
@ -242,7 +242,7 @@ func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) (*storageapi.Per
|
||||
|
||||
// mockCSI
|
||||
type mockCSIDriver struct {
|
||||
driverInfo storageapi.DriverInfo
|
||||
driverInfo storageframework.DriverInfo
|
||||
manifests []string
|
||||
podInfo *bool
|
||||
storageCapacity *bool
|
||||
@ -274,12 +274,12 @@ type CSIMockDriverOpts struct {
|
||||
FSGroupPolicy *storagev1.FSGroupPolicy
|
||||
}
|
||||
|
||||
var _ storageapi.TestDriver = &mockCSIDriver{}
|
||||
var _ storageapi.DynamicPVTestDriver = &mockCSIDriver{}
|
||||
var _ storageapi.SnapshottableTestDriver = &mockCSIDriver{}
|
||||
var _ storageframework.TestDriver = &mockCSIDriver{}
|
||||
var _ storageframework.DynamicPVTestDriver = &mockCSIDriver{}
|
||||
var _ storageframework.SnapshottableTestDriver = &mockCSIDriver{}
|
||||
|
||||
// InitMockCSIDriver returns a mockCSIDriver that implements TestDriver interface
|
||||
func InitMockCSIDriver(driverOpts CSIMockDriverOpts) storageapi.TestDriver {
|
||||
func InitMockCSIDriver(driverOpts CSIMockDriverOpts) storageframework.TestDriver {
|
||||
driverManifests := []string{
|
||||
"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
|
||||
"test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml",
|
||||
@ -307,18 +307,18 @@ func InitMockCSIDriver(driverOpts CSIMockDriverOpts) storageapi.TestDriver {
|
||||
}
|
||||
|
||||
return &mockCSIDriver{
|
||||
driverInfo: storageapi.DriverInfo{
|
||||
driverInfo: storageframework.DriverInfo{
|
||||
Name: "csi-mock",
|
||||
FeatureTag: "",
|
||||
MaxFileSize: storageapi.FileSizeMedium,
|
||||
MaxFileSize: storageframework.FileSizeMedium,
|
||||
SupportedFsType: sets.NewString(
|
||||
"", // Default fsType
|
||||
),
|
||||
Capabilities: map[storageapi.Capability]bool{
|
||||
storageapi.CapPersistence: false,
|
||||
storageapi.CapFsGroup: false,
|
||||
storageapi.CapExec: false,
|
||||
storageapi.CapVolumeLimits: true,
|
||||
Capabilities: map[storageframework.Capability]bool{
|
||||
storageframework.CapPersistence: false,
|
||||
storageframework.CapFsGroup: false,
|
||||
storageframework.CapExec: false,
|
||||
storageframework.CapVolumeLimits: true,
|
||||
},
|
||||
},
|
||||
manifests: driverManifests,
|
||||
@ -335,32 +335,32 @@ func InitMockCSIDriver(driverOpts CSIMockDriverOpts) storageapi.TestDriver {
|
||||
}
|
||||
}
|
||||
|
||||
func (m *mockCSIDriver) GetDriverInfo() *storageapi.DriverInfo {
|
||||
func (m *mockCSIDriver) GetDriverInfo() *storageframework.DriverInfo {
|
||||
return &m.driverInfo
|
||||
}
|
||||
|
||||
func (m *mockCSIDriver) SkipUnsupportedTest(pattern storageapi.TestPattern) {
|
||||
func (m *mockCSIDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
|
||||
}
|
||||
|
||||
func (m *mockCSIDriver) GetDynamicProvisionStorageClass(config *storageapi.PerTestConfig, fsType string) *storagev1.StorageClass {
|
||||
func (m *mockCSIDriver) GetDynamicProvisionStorageClass(config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass {
|
||||
provisioner := config.GetUniqueDriverName()
|
||||
parameters := map[string]string{}
|
||||
ns := config.Framework.Namespace.Name
|
||||
suffix := fmt.Sprintf("%s-sc", provisioner)
|
||||
|
||||
return storageapi.GetStorageClass(provisioner, parameters, nil, ns, suffix)
|
||||
return storageframework.GetStorageClass(provisioner, parameters, nil, ns, suffix)
|
||||
}
|
||||
|
||||
func (m *mockCSIDriver) GetSnapshotClass(config *storageapi.PerTestConfig) *unstructured.Unstructured {
|
||||
func (m *mockCSIDriver) GetSnapshotClass(config *storageframework.PerTestConfig) *unstructured.Unstructured {
|
||||
parameters := map[string]string{}
|
||||
snapshotter := m.driverInfo.Name + "-" + config.Framework.UniqueName
|
||||
ns := config.Framework.Namespace.Name
|
||||
suffix := fmt.Sprintf("%s-vsc", snapshotter)
|
||||
|
||||
return storageapi.GetSnapshotClass(snapshotter, parameters, ns, suffix)
|
||||
return utils.GenerateSnapshotClassSpec(snapshotter, parameters, ns, suffix)
|
||||
}
|
||||
|
||||
func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*storageapi.PerTestConfig, func()) {
|
||||
func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) {
|
||||
// Create secondary namespace which will be used for creating driver
|
||||
driverNamespace := utils.CreateDriverNamespace(f)
|
||||
ns2 := driverNamespace.Name
|
||||
@ -373,7 +373,7 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*storageapi.PerTest
|
||||
// pods should be scheduled on the node
|
||||
node, err := e2enode.GetRandomReadySchedulableNode(cs)
|
||||
framework.ExpectNoError(err)
|
||||
config := &storageapi.PerTestConfig{
|
||||
config := &storageframework.PerTestConfig{
|
||||
Driver: m,
|
||||
Prefix: "mock",
|
||||
Framework: f,
|
||||
@ -480,21 +480,21 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*storageapi.PerTest
|
||||
|
||||
// gce-pd
|
||||
type gcePDCSIDriver struct {
|
||||
driverInfo storageapi.DriverInfo
|
||||
driverInfo storageframework.DriverInfo
|
||||
cleanupHandle framework.CleanupActionHandle
|
||||
}
|
||||
|
||||
var _ storageapi.TestDriver = &gcePDCSIDriver{}
|
||||
var _ storageapi.DynamicPVTestDriver = &gcePDCSIDriver{}
|
||||
var _ storageapi.SnapshottableTestDriver = &gcePDCSIDriver{}
|
||||
var _ storageframework.TestDriver = &gcePDCSIDriver{}
|
||||
var _ storageframework.DynamicPVTestDriver = &gcePDCSIDriver{}
|
||||
var _ storageframework.SnapshottableTestDriver = &gcePDCSIDriver{}
|
||||
|
||||
// InitGcePDCSIDriver returns gcePDCSIDriver that implements TestDriver interface
|
||||
func InitGcePDCSIDriver() storageapi.TestDriver {
|
||||
func InitGcePDCSIDriver() storageframework.TestDriver {
|
||||
return &gcePDCSIDriver{
|
||||
driverInfo: storageapi.DriverInfo{
|
||||
driverInfo: storageframework.DriverInfo{
|
||||
Name: GCEPDCSIDriverName,
|
||||
FeatureTag: "[Serial]",
|
||||
MaxFileSize: storageapi.FileSizeMedium,
|
||||
MaxFileSize: storageframework.FileSizeMedium,
|
||||
SupportedSizeRange: e2evolume.SizeRange{
|
||||
Min: "5Gi",
|
||||
},
|
||||
@ -506,27 +506,27 @@ func InitGcePDCSIDriver() storageapi.TestDriver {
|
||||
"xfs",
|
||||
),
|
||||
SupportedMountOption: sets.NewString("debug", "nouid32"),
|
||||
Capabilities: map[storageapi.Capability]bool{
|
||||
storageapi.CapPersistence: true,
|
||||
storageapi.CapBlock: true,
|
||||
storageapi.CapFsGroup: true,
|
||||
storageapi.CapExec: true,
|
||||
storageapi.CapMultiPODs: true,
|
||||
Capabilities: map[storageframework.Capability]bool{
|
||||
storageframework.CapPersistence: true,
|
||||
storageframework.CapBlock: true,
|
||||
storageframework.CapFsGroup: true,
|
||||
storageframework.CapExec: true,
|
||||
storageframework.CapMultiPODs: true,
|
||||
// GCE supports volume limits, but the test creates large
|
||||
// number of volumes and times out test suites.
|
||||
storageapi.CapVolumeLimits: false,
|
||||
storageapi.CapTopology: true,
|
||||
storageapi.CapControllerExpansion: true,
|
||||
storageapi.CapNodeExpansion: true,
|
||||
storageapi.CapSnapshotDataSource: true,
|
||||
storageframework.CapVolumeLimits: false,
|
||||
storageframework.CapTopology: true,
|
||||
storageframework.CapControllerExpansion: true,
|
||||
storageframework.CapNodeExpansion: true,
|
||||
storageframework.CapSnapshotDataSource: true,
|
||||
},
|
||||
RequiredAccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
|
||||
TopologyKeys: []string{GCEPDCSIZoneTopologyKey},
|
||||
StressTestOptions: &storageapi.StressTestOptions{
|
||||
StressTestOptions: &storageframework.StressTestOptions{
|
||||
NumPods: 10,
|
||||
NumRestarts: 10,
|
||||
},
|
||||
VolumeSnapshotStressTestOptions: &storageapi.VolumeSnapshotStressTestOptions{
|
||||
VolumeSnapshotStressTestOptions: &storageframework.VolumeSnapshotStressTestOptions{
|
||||
// GCE only allows for one snapshot per volume to be created at a time,
|
||||
// which can cause test timeouts. We reduce the likelihood of test timeouts
|
||||
// by increasing the number of pods (and volumes) and reducing the number
|
||||
@ -538,11 +538,11 @@ func InitGcePDCSIDriver() storageapi.TestDriver {
|
||||
}
|
||||
}
|
||||
|
||||
func (g *gcePDCSIDriver) GetDriverInfo() *storageapi.DriverInfo {
|
||||
func (g *gcePDCSIDriver) GetDriverInfo() *storageframework.DriverInfo {
|
||||
return &g.driverInfo
|
||||
}
|
||||
|
||||
func (g *gcePDCSIDriver) SkipUnsupportedTest(pattern storageapi.TestPattern) {
|
||||
func (g *gcePDCSIDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
|
||||
e2eskipper.SkipUnlessProviderIs("gce", "gke")
|
||||
if pattern.FsType == "xfs" {
|
||||
e2eskipper.SkipUnlessNodeOSDistroIs("ubuntu", "custom")
|
||||
@ -552,7 +552,7 @@ func (g *gcePDCSIDriver) SkipUnsupportedTest(pattern storageapi.TestPattern) {
|
||||
}
|
||||
}
|
||||
|
||||
func (g *gcePDCSIDriver) GetDynamicProvisionStorageClass(config *storageapi.PerTestConfig, fsType string) *storagev1.StorageClass {
|
||||
func (g *gcePDCSIDriver) GetDynamicProvisionStorageClass(config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass {
|
||||
ns := config.Framework.Namespace.Name
|
||||
provisioner := g.driverInfo.Name
|
||||
suffix := fmt.Sprintf("%s-sc", g.driverInfo.Name)
|
||||
@ -563,19 +563,19 @@ func (g *gcePDCSIDriver) GetDynamicProvisionStorageClass(config *storageapi.PerT
|
||||
}
|
||||
delayedBinding := storagev1.VolumeBindingWaitForFirstConsumer
|
||||
|
||||
return storageapi.GetStorageClass(provisioner, parameters, &delayedBinding, ns, suffix)
|
||||
return storageframework.GetStorageClass(provisioner, parameters, &delayedBinding, ns, suffix)
|
||||
}
|
||||
|
||||
func (g *gcePDCSIDriver) GetSnapshotClass(config *storageapi.PerTestConfig) *unstructured.Unstructured {
|
||||
func (g *gcePDCSIDriver) GetSnapshotClass(config *storageframework.PerTestConfig) *unstructured.Unstructured {
|
||||
parameters := map[string]string{}
|
||||
snapshotter := g.driverInfo.Name
|
||||
ns := config.Framework.Namespace.Name
|
||||
suffix := fmt.Sprintf("%s-vsc", snapshotter)
|
||||
|
||||
return storageapi.GetSnapshotClass(snapshotter, parameters, ns, suffix)
|
||||
return utils.GenerateSnapshotClassSpec(snapshotter, parameters, ns, suffix)
|
||||
}
|
||||
|
||||
func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*storageapi.PerTestConfig, func()) {
|
||||
func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) {
|
||||
ginkgo.By("deploying csi gce-pd driver")
|
||||
// Create secondary namespace which will be used for creating driver
|
||||
driverNamespace := utils.CreateDriverNamespace(f)
|
||||
@ -591,7 +591,7 @@ func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*storageapi.PerTes
|
||||
// These are the options which would have to be used:
|
||||
// o := utils.PatchCSIOptions{
|
||||
// OldDriverName: g.driverInfo.Name,
|
||||
// NewDriverName: storageapi.GetUniqueDriverName(g),
|
||||
// NewDriverName: storageframework.GetUniqueDriverName(g),
|
||||
// DriverContainerName: "gce-driver",
|
||||
// ProvisionerContainerName: "csi-external-provisioner",
|
||||
// }
|
||||
@ -637,7 +637,7 @@ func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*storageapi.PerTes
|
||||
}
|
||||
g.cleanupHandle = framework.AddCleanupAction(cleanupFunc)
|
||||
|
||||
return &storageapi.PerTestConfig{
|
||||
return &storageframework.PerTestConfig{
|
||||
Driver: g,
|
||||
Prefix: "gcepd",
|
||||
Framework: f,
|
||||
|
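All three drivers in this file build their dynamic-provisioning StorageClass the same way; a condensed sketch of that shared pattern, using only calls shown in the hunks above (the standalone function name is hypothetical):

package drivers

import (
    "fmt"

    storagev1 "k8s.io/api/storage/v1"
    storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
)

// exampleStorageClass mirrors the hostpath, mock, and gce-pd drivers: derive the
// provisioner from the per-test config and delegate naming to the framework helper.
func exampleStorageClass(config *storageframework.PerTestConfig) *storagev1.StorageClass {
    provisioner := config.GetUniqueDriverName()
    parameters := map[string]string{}
    ns := config.Framework.Namespace.Name
    suffix := fmt.Sprintf("%s-sc", provisioner)

    // GetStorageClass keeps its old behavior; only the package name changed.
    return storageframework.GetStorageClass(provisioner, parameters, nil, ns, suffix)
}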
(File diff suppressed because it is too large.)

test/e2e/storage/external/BUILD (vendored, 4 changes)
@ -19,7 +19,7 @@ go_library(
|
||||
"//test/e2e/framework/pod:go_default_library",
|
||||
"//test/e2e/framework/skipper:go_default_library",
|
||||
"//test/e2e/framework/volume:go_default_library",
|
||||
"//test/e2e/storage/api:go_default_library",
|
||||
"//test/e2e/storage/framework:go_default_library",
|
||||
"//test/e2e/storage/testsuites:go_default_library",
|
||||
"//test/e2e/storage/utils:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo:go_default_library",
|
||||
@ -36,7 +36,7 @@ go_test(
|
||||
deps = [
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//test/e2e/framework/volume:go_default_library",
|
||||
"//test/e2e/storage/api:go_default_library",
|
||||
"//test/e2e/storage/framework:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/assert:go_default_library",
|
||||
],
|
||||
)
|
||||
|
test/e2e/storage/external/external.go (vendored, 44 changes)
@ -39,7 +39,7 @@ import (
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
|
||||
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
|
||||
storageapi "k8s.io/kubernetes/test/e2e/storage/api"
|
||||
storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
|
||||
"k8s.io/kubernetes/test/e2e/storage/testsuites"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
|
||||
@ -55,7 +55,7 @@ type driverDefinition struct {
|
||||
// for details. The only field with a non-zero default is the list of
|
||||
// supported file systems (SupportedFsType): it is set so that tests using
|
||||
// the default file system are enabled.
|
||||
DriverInfo storageapi.DriverInfo
|
||||
DriverInfo storageframework.DriverInfo
|
||||
|
||||
// StorageClass must be set to enable dynamic provisioning tests.
|
||||
// The default is to not run those tests.
|
||||
@ -171,9 +171,9 @@ func AddDriverDefinition(filename string) error {
|
||||
return errors.Errorf("%q: DriverInfo.Name not set", filename)
|
||||
}
|
||||
|
||||
description := "External Storage " + storageapi.GetDriverNameWithFeatureTags(driver)
|
||||
description := "External Storage " + storageframework.GetDriverNameWithFeatureTags(driver)
|
||||
ginkgo.Describe(description, func() {
|
||||
storageapi.DefineTestSuites(driver, testsuites.CSISuites)
|
||||
storageframework.DefineTestSuites(driver, testsuites.CSISuites)
|
||||
})
|
||||
|
||||
return nil
|
||||
@ -189,7 +189,7 @@ func loadDriverDefinition(filename string) (*driverDefinition, error) {
|
||||
}
|
||||
// Some reasonable defaults follow.
|
||||
driver := &driverDefinition{
|
||||
DriverInfo: storageapi.DriverInfo{
|
||||
DriverInfo: storageframework.DriverInfo{
|
||||
SupportedFsType: sets.NewString(
|
||||
"", // Default fsType
|
||||
),
|
||||
@ -206,20 +206,20 @@ func loadDriverDefinition(filename string) (*driverDefinition, error) {
|
||||
return driver, nil
|
||||
}
|
||||
|
||||
var _ storageapi.TestDriver = &driverDefinition{}
|
||||
var _ storageframework.TestDriver = &driverDefinition{}
|
||||
|
||||
// We have to implement the interface because dynamic PV may or may
|
||||
// not be supported. driverDefinition.SkipUnsupportedTest checks that
|
||||
// based on the actual driver definition.
|
||||
var _ storageapi.DynamicPVTestDriver = &driverDefinition{}
|
||||
var _ storageframework.DynamicPVTestDriver = &driverDefinition{}
|
||||
|
||||
// Same for snapshotting.
|
||||
var _ storageapi.SnapshottableTestDriver = &driverDefinition{}
|
||||
var _ storageframework.SnapshottableTestDriver = &driverDefinition{}
|
||||
|
||||
// And for ephemeral volumes.
|
||||
var _ storageapi.EphemeralTestDriver = &driverDefinition{}
|
||||
var _ storageframework.EphemeralTestDriver = &driverDefinition{}
|
||||
|
||||
var _ storageapi.CustomTimeoutsTestDriver = &driverDefinition{}
|
||||
var _ storageframework.CustomTimeoutsTestDriver = &driverDefinition{}
|
||||
|
||||
// runtime.DecodeInto needs a runtime.Object but doesn't do any
|
||||
// deserialization of it and therefore none of the methods below need
|
||||
@ -234,21 +234,21 @@ func (d *driverDefinition) GetObjectKind() schema.ObjectKind {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *driverDefinition) GetDriverInfo() *storageapi.DriverInfo {
|
||||
func (d *driverDefinition) GetDriverInfo() *storageframework.DriverInfo {
|
||||
return &d.DriverInfo
|
||||
}
|
||||
|
||||
func (d *driverDefinition) SkipUnsupportedTest(pattern storageapi.TestPattern) {
|
||||
func (d *driverDefinition) SkipUnsupportedTest(pattern storageframework.TestPattern) {
|
||||
supported := false
|
||||
// TODO (?): add support for more volume types
|
||||
switch pattern.VolType {
|
||||
case "":
|
||||
supported = true
|
||||
case storageapi.DynamicPV:
|
||||
case storageframework.DynamicPV:
|
||||
if d.StorageClass.FromName || d.StorageClass.FromFile != "" || d.StorageClass.FromExistingClassName != "" {
|
||||
supported = true
|
||||
}
|
||||
case storageapi.CSIInlineVolume:
|
||||
case storageframework.CSIInlineVolume:
|
||||
supported = len(d.InlineVolumes) != 0
|
||||
}
|
||||
if !supported {
|
||||
@ -257,7 +257,7 @@ func (d *driverDefinition) SkipUnsupportedTest(pattern storageapi.TestPattern) {
|
||||
|
||||
}
|
||||
|
||||
func (d *driverDefinition) GetDynamicProvisionStorageClass(e2econfig *storageapi.PerTestConfig, fsType string) *storagev1.StorageClass {
|
||||
func (d *driverDefinition) GetDynamicProvisionStorageClass(e2econfig *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass {
|
||||
var (
|
||||
sc *storagev1.StorageClass
|
||||
err error
|
||||
@ -295,7 +295,7 @@ func (d *driverDefinition) GetDynamicProvisionStorageClass(e2econfig *storageapi
|
||||
// reconsidered if we eventually need to move in-tree storage tests out.
|
||||
sc.Parameters["csi.storage.k8s.io/fstype"] = fsType
|
||||
}
|
||||
return storageapi.CopyStorageClass(sc, f.Namespace.Name, "e2e-sc")
|
||||
return storageframework.CopyStorageClass(sc, f.Namespace.Name, "e2e-sc")
|
||||
}
|
||||
|
||||
func (d *driverDefinition) GetTimeouts() *framework.TimeoutContext {
|
||||
@ -348,7 +348,7 @@ func loadSnapshotClass(filename string) (*unstructured.Unstructured, error) {
|
||||
return snapshotClass, nil
|
||||
}
|
||||
|
||||
func (d *driverDefinition) GetSnapshotClass(e2econfig *storageapi.PerTestConfig) *unstructured.Unstructured {
|
||||
func (d *driverDefinition) GetSnapshotClass(e2econfig *storageframework.PerTestConfig) *unstructured.Unstructured {
|
||||
if !d.SnapshotClass.FromName && d.SnapshotClass.FromFile == "" && d.SnapshotClass.FromExistingClassName == "" {
|
||||
e2eskipper.Skipf("Driver %q does not support snapshotting - skipping", d.DriverInfo.Name)
|
||||
}
|
||||
@ -390,10 +390,10 @@ func (d *driverDefinition) GetSnapshotClass(e2econfig *storageapi.PerTestConfig)
|
||||
}
|
||||
}
|
||||
|
||||
return storageapi.GetSnapshotClass(snapshotter, parameters, ns, suffix)
|
||||
return utils.GenerateSnapshotClassSpec(snapshotter, parameters, ns, suffix)
|
||||
}
|
||||
|
||||
func (d *driverDefinition) GetVolume(e2econfig *storageapi.PerTestConfig, volumeNumber int) (map[string]string, bool, bool) {
|
||||
func (d *driverDefinition) GetVolume(e2econfig *storageframework.PerTestConfig, volumeNumber int) (map[string]string, bool, bool) {
|
||||
if len(d.InlineVolumes) == 0 {
|
||||
e2eskipper.Skipf("%s does not have any InlineVolumeAttributes defined", d.DriverInfo.Name)
|
||||
}
|
||||
@ -401,12 +401,12 @@ func (d *driverDefinition) GetVolume(e2econfig *storageapi.PerTestConfig, volume
|
||||
return e2evolume.Attributes, e2evolume.Shared, e2evolume.ReadOnly
|
||||
}
|
||||
|
||||
func (d *driverDefinition) GetCSIDriverName(e2econfig *storageapi.PerTestConfig) string {
|
||||
func (d *driverDefinition) GetCSIDriverName(e2econfig *storageframework.PerTestConfig) string {
|
||||
return d.DriverInfo.Name
|
||||
}
|
||||
|
||||
func (d *driverDefinition) PrepareTest(f *framework.Framework) (*storageapi.PerTestConfig, func()) {
|
||||
e2econfig := &storageapi.PerTestConfig{
|
||||
func (d *driverDefinition) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) {
|
||||
e2econfig := &storageframework.PerTestConfig{
|
||||
Driver: d,
|
||||
Prefix: "external",
|
||||
Framework: f,
|
||||
|
test/e2e/storage/external/external_test.go (vendored, 4 changes)
@ -23,12 +23,12 @@ import (
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
|
||||
storageapi "k8s.io/kubernetes/test/e2e/storage/api"
|
||||
storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
|
||||
)
|
||||
|
||||
func TestDriverParameter(t *testing.T) {
|
||||
expected := &driverDefinition{
|
||||
DriverInfo: storageapi.DriverInfo{
|
||||
DriverInfo: storageframework.DriverInfo{
|
||||
Name: "foo.example.com",
|
||||
SupportedFsType: sets.NewString(
|
||||
"", // Default fsType
|
||||
|
@@ -3,15 +3,15 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
 go_library(
     name = "go_default_library",
     srcs = [
-        "driveroperations.go",
-        "snapshotresource.go",
+        "driver_operations.go",
+        "snapshot_resource.go",
         "testconfig.go",
         "testdriver.go",
         "testpattern.go",
         "testsuite.go",
-        "volumeresource.go",
+        "volume_resource.go",
     ],
-    importpath = "k8s.io/kubernetes/test/e2e/storage/api",
+    importpath = "k8s.io/kubernetes/test/e2e/storage/framework",
     visibility = ["//visibility:public"],
     deps = [
         "//staging/src/k8s.io/api/core/v1:go_default_library",
@@ -25,7 +25,6 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/storage/names:go_default_library",
-        "//staging/src/k8s.io/client-go/dynamic:go_default_library",
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
         "//test/e2e/framework/pv:go_default_library",
@@ -14,17 +14,15 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

-package api
+package framework

 import (
     "fmt"

     storagev1 "k8s.io/api/storage/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
     "k8s.io/apiserver/pkg/storage/names"
     "k8s.io/kubernetes/test/e2e/framework"
-    "k8s.io/kubernetes/test/e2e/storage/utils"
 )

 // GetDriverNameWithFeatureTags returns driver name with feature tags
@@ -89,29 +87,3 @@ func GetStorageClass(
         VolumeBindingMode: bindingMode,
     }
 }
-
-// GetSnapshotClass constructs a new SnapshotClass instance
-// with a unique name that is based on namespace + suffix.
-func GetSnapshotClass(
-    snapshotter string,
-    parameters map[string]string,
-    ns string,
-    suffix string,
-) *unstructured.Unstructured {
-    snapshotClass := &unstructured.Unstructured{
-        Object: map[string]interface{}{
-            "kind":       "VolumeSnapshotClass",
-            "apiVersion": utils.SnapshotAPIVersion,
-            "metadata": map[string]interface{}{
-                // Name must be unique, so let's base it on namespace name and use GenerateName
-                // TODO(#96234): Remove unnecessary suffix.
-                "name": names.SimpleNameGenerator.GenerateName(ns + "-" + suffix),
-            },
-            "driver":         snapshotter,
-            "parameters":     parameters,
-            "deletionPolicy": "Delete",
-        },
-    }
-
-    return snapshotClass
-}
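GetSnapshotClass is deleted here because it moves to the utils package as GenerateSnapshotClassSpec; every caller in this diff switches to the new name with the same arguments. A sketch of a caller after the move, assuming the signature implied by those call sites (the wrapper function is hypothetical):

package drivers

import (
    "fmt"

    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
    "k8s.io/kubernetes/test/e2e/storage/utils"
)

// exampleSnapshotClass builds the VolumeSnapshotClass spec the way the drivers above do:
// unique driver name from the per-test config, empty parameters, "<name>-vsc" suffix.
func exampleSnapshotClass(config *storageframework.PerTestConfig) *unstructured.Unstructured {
    snapshotter := config.GetUniqueDriverName()
    parameters := map[string]string{}
    ns := config.Framework.Namespace.Name
    suffix := fmt.Sprintf("%s-vsc", snapshotter)

    // Presumably the same spec shape GetSnapshotClass produced (an unstructured
    // VolumeSnapshotClass with deletionPolicy Delete and a generated name).
    return utils.GenerateSnapshotClassSpec(snapshotter, parameters, ns, suffix)
}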
@@ -14,12 +14,11 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

-package api
+package framework

 import (
     "context"
     "fmt"
     "time"

     "github.com/onsi/ginkgo"

@@ -29,7 +28,6 @@ import (
     "k8s.io/apimachinery/pkg/types"
     utilerrors "k8s.io/apimachinery/pkg/util/errors"
     "k8s.io/apimachinery/pkg/util/uuid"
-    "k8s.io/client-go/dynamic"
     "k8s.io/kubernetes/test/e2e/framework"
     "k8s.io/kubernetes/test/e2e/storage/utils"
 )
@@ -79,28 +77,6 @@ func CreateSnapshot(sDriver SnapshottableTestDriver, config *PerTestConfig, patt
     return sclass, snapshot
 }

-// GetSnapshotContentFromSnapshot returns the VolumeSnapshotContent object Bound to a
-// given VolumeSnapshot
-func GetSnapshotContentFromSnapshot(dc dynamic.Interface, snapshot *unstructured.Unstructured) *unstructured.Unstructured {
-    defer ginkgo.GinkgoRecover()
-    err := utils.WaitForSnapshotReady(dc, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, framework.SnapshotCreateTimeout)
-    framework.ExpectNoError(err)
-
-    vs, err := dc.Resource(utils.SnapshotGVR).Namespace(snapshot.GetNamespace()).Get(context.TODO(), snapshot.GetName(), metav1.GetOptions{})
-
-    snapshotStatus := vs.Object["status"].(map[string]interface{})
-    snapshotContentName := snapshotStatus["boundVolumeSnapshotContentName"].(string)
-    framework.Logf("received snapshotStatus %v", snapshotStatus)
-    framework.Logf("snapshotContentName %s", snapshotContentName)
-    framework.ExpectNoError(err)
-
-    vscontent, err := dc.Resource(utils.SnapshotContentGVR).Get(context.TODO(), snapshotContentName, metav1.GetOptions{})
-    framework.ExpectNoError(err)
-
-    return vscontent
-
-}
-
 // CreateSnapshotResource creates a snapshot resource for the current test. It knows how to deal with
 // different test pattern snapshot provisioning and deletion policy
 func CreateSnapshotResource(sDriver SnapshottableTestDriver, config *PerTestConfig, pattern TestPattern, pvcName string, pvcNamespace string, timeouts *framework.TimeoutContext) *SnapshotResource {
@@ -113,7 +89,7 @@ func CreateSnapshotResource(sDriver SnapshottableTestDriver, config *PerTestConf
     dc := r.Config.Framework.DynamicClient

-    r.Vscontent = GetSnapshotContentFromSnapshot(dc, r.Vs)
+    r.Vscontent = utils.GetSnapshotContentFromSnapshot(dc, r.Vs)

     if pattern.SnapshotType == PreprovisionedCreatedSnapshot {
         // prepare a pre-provisioned VolumeSnapshotContent with certain data
@@ -284,21 +260,6 @@ func (sr *SnapshotResource) CleanupResource(timeouts *framework.TimeoutContext)
     return utilerrors.NewAggregate(cleanupErrs)
 }

-// DeleteAndWaitSnapshot deletes a VolumeSnapshot and waits for it to be deleted or until timeout occurs, whichever comes first
-func DeleteAndWaitSnapshot(dc dynamic.Interface, ns string, snapshotName string, poll, timeout time.Duration) error {
-    var err error
-    ginkgo.By("deleting the snapshot")
-    err = dc.Resource(utils.SnapshotGVR).Namespace(ns).Delete(context.TODO(), snapshotName, metav1.DeleteOptions{})
-    if err != nil && !apierrors.IsNotFound(err) {
-        return err
-    }
-
-    ginkgo.By("checking the Snapshot has been deleted")
-    err = utils.WaitForNamespacedGVRDeletion(dc, utils.SnapshotGVR, ns, snapshotName, poll, timeout)
-
-    return err
-}
-
 func getSnapshot(claimName string, ns, snapshotClassName string) *unstructured.Unstructured {
     snapshot := &unstructured.Unstructured{
         Object: map[string]interface{}{
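GetSnapshotContentFromSnapshot and DeleteAndWaitSnapshot leave this file for the utils package, which is also why the client-go/dynamic import above disappears. A sketch of the call sites after the move, matching the csi_mock_volume.go hunks earlier in this diff (the wrapper function is hypothetical):

package storage

import (
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/client-go/dynamic"
    "k8s.io/kubernetes/test/e2e/framework"
    "k8s.io/kubernetes/test/e2e/storage/utils"
)

// exampleSnapshotTeardown mirrors the mock-volume test flow with the relocated helpers.
func exampleSnapshotTeardown(dc dynamic.Interface, ns string, snapshot *unstructured.Unstructured) error {
    // Looking up the bound VolumeSnapshotContent now goes through utils.
    content := utils.GetSnapshotContentFromSnapshot(dc, snapshot)
    framework.Logf("VolumeSnapshot %s is bound to VolumeSnapshotContent %s", snapshot.GetName(), content.GetName())

    // Deleting the snapshot and waiting for it to disappear also moved to utils;
    // the arguments are unchanged from the old framework-package helper.
    return utils.DeleteAndWaitSnapshot(dc, ns, snapshot.GetName(), framework.Poll, framework.SnapshotDeleteTimeout)
}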
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

-package api
+package framework

 import (
     v1 "k8s.io/api/core/v1"

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

-package api
+package framework

 import (
     v1 "k8s.io/api/core/v1"

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

-package api
+package framework

 import (
     v1 "k8s.io/api/core/v1"

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

-package api
+package framework

 import (
     "fmt"

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

-package api
+package framework

 import (
     "context"
@@ -18,14 +18,14 @@ package storage

 import (
     "github.com/onsi/ginkgo"
-    "k8s.io/kubernetes/test/e2e/storage/api"
     "k8s.io/kubernetes/test/e2e/storage/drivers"
+    storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
     "k8s.io/kubernetes/test/e2e/storage/testsuites"
     "k8s.io/kubernetes/test/e2e/storage/utils"
 )

 // List of testDrivers to be executed in below loop
-var testDrivers = []func() api.TestDriver{
+var testDrivers = []func() storageframework.TestDriver{
     drivers.InitNFSDriver,
     drivers.InitGlusterFSDriver,
     drivers.InitISCSIDriver,
@@ -55,8 +55,8 @@ var _ = utils.SIGDescribe("In-tree Volumes", func() {
     for _, initDriver := range testDrivers {
         curDriver := initDriver()

-        ginkgo.Context(api.GetDriverNameWithFeatureTags(curDriver), func() {
-            api.DefineTestSuites(curDriver, testsuites.BaseSuites)
+        ginkgo.Context(storageframework.GetDriverNameWithFeatureTags(curDriver), func() {
+            storageframework.DefineTestSuites(curDriver, testsuites.BaseSuites)
         })
     }
 })
@@ -51,7 +51,7 @@ go_library(
         "//test/e2e/framework/pv:go_default_library",
         "//test/e2e/framework/skipper:go_default_library",
         "//test/e2e/framework/volume:go_default_library",
-        "//test/e2e/storage/api:go_default_library",
+        "//test/e2e/storage/framework:go_default_library",
         "//test/e2e/storage/utils:go_default_library",
         "//test/utils/image:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
@ -29,7 +29,7 @@ import (
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
|
||||
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
|
||||
storageapi "k8s.io/kubernetes/test/e2e/storage/api"
|
||||
storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
|
||||
)
|
||||
|
||||
var migratedPlugins *string
|
||||
@ -52,7 +52,7 @@ type migrationOpCheck struct {
|
||||
}
|
||||
|
||||
// BaseSuites is a list of storage test suites that work for in-tree and CSI drivers
|
||||
var BaseSuites = []func() storageapi.TestSuite{
|
||||
var BaseSuites = []func() storageframework.TestSuite{
|
||||
InitVolumesTestSuite,
|
||||
InitVolumeIOTestSuite,
|
||||
InitVolumeModeTestSuite,
|
||||
@ -225,8 +225,8 @@ func (moc *migrationOpCheck) validateMigrationVolumeOpCounts() {
|
||||
}
|
||||
|
||||
// Skip skipVolTypes patterns if the driver supports dynamic provisioning
|
||||
func skipVolTypePatterns(pattern storageapi.TestPattern, driver storageapi.TestDriver, skipVolTypes map[storageapi.TestVolType]bool) {
|
||||
_, supportsProvisioning := driver.(storageapi.DynamicPVTestDriver)
|
||||
func skipVolTypePatterns(pattern storageframework.TestPattern, driver storageframework.TestDriver, skipVolTypes map[storageframework.TestVolType]bool) {
|
||||
_, supportsProvisioning := driver.(storageframework.DynamicPVTestDriver)
|
||||
if supportsProvisioning && skipVolTypes[pattern.VolType] {
|
||||
e2eskipper.Skipf("Driver supports dynamic provisioning, skipping %s pattern", pattern.VolType)
|
||||
}
|
||||
|
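For context on skipVolTypePatterns at the end of this hunk: it is the shared guard the individual suites call from SkipUnsupportedTests, dropping a pattern when the driver can already cover it through dynamic provisioning. A self-contained sketch of a typical caller, modeled on the disruptive suite later in this diff (the suite type here is a hypothetical stand-in):

package testsuites

import (
    storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
)

// exampleTestSuite stands in for a real suite; real suites hold their TestSuiteInfo
// the same way (see the disruptive suite below).
type exampleTestSuite struct {
    tsInfo storageframework.TestSuiteInfo
}

// SkipUnsupportedTests drops pre-provisioned-PV patterns for drivers that can
// provision dynamically, since the dynamic patterns already exercise them.
func (s *exampleTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
    skipVolTypePatterns(pattern, driver, storageframework.NewVolTypeMap(storageframework.PreprovisionedPV))
}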
@ -26,20 +26,20 @@ import (
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
|
||||
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
|
||||
storageapi "k8s.io/kubernetes/test/e2e/storage/api"
|
||||
storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
type disruptiveTestSuite struct {
|
||||
tsInfo storageapi.TestSuiteInfo
|
||||
tsInfo storageframework.TestSuiteInfo
|
||||
}
|
||||
|
||||
// InitCustomDisruptiveTestSuite returns subPathTestSuite that implements TestSuite interface
|
||||
// using custom test patterns
|
||||
func InitCustomDisruptiveTestSuite(patterns []storageapi.TestPattern) storageapi.TestSuite {
|
||||
func InitCustomDisruptiveTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite {
|
||||
return &disruptiveTestSuite{
|
||||
tsInfo: storageapi.TestSuiteInfo{
|
||||
tsInfo: storageframework.TestSuiteInfo{
|
||||
Name: "disruptive",
|
||||
FeatureTag: "[Disruptive][LinuxOnly]",
|
||||
TestPatterns: patterns,
|
||||
@ -49,43 +49,43 @@ func InitCustomDisruptiveTestSuite(patterns []storageapi.TestPattern) storageapi
|
||||
|
||||
// InitDisruptiveTestSuite returns subPathTestSuite that implements TestSuite interface
|
||||
// using test suite default patterns
|
||||
func InitDisruptiveTestSuite() storageapi.TestSuite {
|
||||
testPatterns := []storageapi.TestPattern{
|
||||
func InitDisruptiveTestSuite() storageframework.TestSuite {
|
||||
testPatterns := []storageframework.TestPattern{
|
||||
// FSVolMode is already covered in subpath testsuite
|
||||
storageapi.DefaultFsInlineVolume,
|
||||
storageapi.FsVolModePreprovisionedPV,
|
||||
storageapi.FsVolModeDynamicPV,
|
||||
storageapi.BlockVolModePreprovisionedPV,
|
||||
storageapi.BlockVolModeDynamicPV,
|
||||
storageframework.DefaultFsInlineVolume,
|
||||
storageframework.FsVolModePreprovisionedPV,
|
||||
storageframework.FsVolModeDynamicPV,
|
||||
storageframework.BlockVolModePreprovisionedPV,
|
||||
storageframework.BlockVolModeDynamicPV,
|
||||
}
|
||||
return InitCustomDisruptiveTestSuite(testPatterns)
|
||||
}
|
||||
|
||||
func (s *disruptiveTestSuite) GetTestSuiteInfo() storageapi.TestSuiteInfo {
|
||||
func (s *disruptiveTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo {
|
||||
return s.tsInfo
|
||||
}
|
||||
|
||||
func (s *disruptiveTestSuite) SkipUnsupportedTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
|
||||
skipVolTypePatterns(pattern, driver, storageapi.NewVolTypeMap(storageapi.PreprovisionedPV))
|
||||
func (s *disruptiveTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
|
||||
skipVolTypePatterns(pattern, driver, storageframework.NewVolTypeMap(storageframework.PreprovisionedPV))
|
||||
}
|
||||
|
||||
func (s *disruptiveTestSuite) DefineTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
|
||||
func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
|
||||
type local struct {
|
||||
config *storageapi.PerTestConfig
|
||||
config *storageframework.PerTestConfig
|
||||
driverCleanup func()
|
||||
|
||||
cs clientset.Interface
|
||||
ns *v1.Namespace
|
||||
|
||||
// VolumeResource contains pv, pvc, sc, etc., owns cleaning that up
|
||||
resource *storageapi.VolumeResource
|
||||
resource *storageframework.VolumeResource
|
||||
pod *v1.Pod
|
||||
}
|
||||
var l local
|
||||
|
||||
// Beware that it also registers an AfterEach which renders f unusable. Any code using
|
||||
// f must run inside an It or Context callback.
|
||||
f := framework.NewFrameworkWithCustomTimeouts("disruptive", storageapi.GetDriverTimeouts(driver))
|
||||
f := framework.NewFrameworkWithCustomTimeouts("disruptive", storageframework.GetDriverTimeouts(driver))
|
||||
|
||||
init := func() {
|
||||
l = local{}
|
||||
@ -96,7 +96,7 @@ func (s *disruptiveTestSuite) DefineTests(driver storageapi.TestDriver, pattern
|
||||
l.config, l.driverCleanup = driver.PrepareTest(f)
|
||||
|
||||
testVolumeSizeRange := s.GetTestSuiteInfo().SupportedSizeRange
|
||||
l.resource = storageapi.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
|
||||
l.resource = storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
|
||||
}
|
||||
|
||||
cleanup := func() {
|
||||
@ -154,7 +154,7 @@ func (s *disruptiveTestSuite) DefineTests(driver storageapi.TestDriver, pattern
|
||||
var err error
|
||||
var pvcs []*v1.PersistentVolumeClaim
|
||||
var inlineSources []*v1.VolumeSource
|
||||
if pattern.VolType == storageapi.InlineVolume {
|
||||
if pattern.VolType == storageframework.InlineVolume {
|
||||
inlineSources = append(inlineSources, l.resource.VolSource)
|
||||
} else {
|
||||
pvcs = append(pvcs, l.resource.Pvc)
|
||||
|
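The init closure above follows the same sequence every suite in this diff uses: deploy the driver, then build the volume resource for the pattern under test. A condensed sketch of that sequence, with a hypothetical helper name and only calls visible in the diff:

package testsuites

import (
    "k8s.io/kubernetes/test/e2e/framework"
    e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
    storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
)

// setupVolumeResource condenses the init() closure: PrepareTest deploys the driver and
// yields the per-test config plus a cleanup func, then CreateVolumeResource provisions
// the SC/PV/PVC combination for the pattern under test.
func setupVolumeResource(f *framework.Framework, driver storageframework.TestDriver,
    pattern storageframework.TestPattern, sizeRange e2evolume.SizeRange) (*storageframework.VolumeResource, func()) {
    config, driverCleanup := driver.PrepareTest(f)
    resource := storageframework.CreateVolumeResource(driver, config, pattern, sizeRange)
    return resource, driverCleanup
}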
@ -34,19 +34,19 @@ import (
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
|
||||
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
|
||||
storageapi "k8s.io/kubernetes/test/e2e/storage/api"
|
||||
storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
|
||||
storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
type ephemeralTestSuite struct {
|
||||
tsInfo storageapi.TestSuiteInfo
|
||||
tsInfo storageframework.TestSuiteInfo
|
||||
}
|
||||
|
||||
// InitCustomEphemeralTestSuite returns ephemeralTestSuite that implements TestSuite interface
|
||||
// using custom test patterns
|
||||
func InitCustomEphemeralTestSuite(patterns []storageapi.TestPattern) storageapi.TestSuite {
|
||||
func InitCustomEphemeralTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite {
|
||||
return &ephemeralTestSuite{
|
||||
tsInfo: storageapi.TestSuiteInfo{
|
||||
tsInfo: storageframework.TestSuiteInfo{
|
||||
Name: "ephemeral",
|
||||
TestPatterns: patterns,
|
||||
},
|
||||
@ -55,17 +55,17 @@ func InitCustomEphemeralTestSuite(patterns []storageapi.TestPattern) storageapi.

// InitEphemeralTestSuite returns ephemeralTestSuite that implements TestSuite interface
// using test suite default patterns
func InitEphemeralTestSuite() storageapi.TestSuite {
genericLateBinding := storageapi.DefaultFsGenericEphemeralVolume
func InitEphemeralTestSuite() storageframework.TestSuite {
genericLateBinding := storageframework.DefaultFsGenericEphemeralVolume
genericLateBinding.Name += " (late-binding)"
genericLateBinding.BindingMode = storagev1.VolumeBindingWaitForFirstConsumer

genericImmediateBinding := storageapi.DefaultFsGenericEphemeralVolume
genericImmediateBinding := storageframework.DefaultFsGenericEphemeralVolume
genericImmediateBinding.Name += " (immediate-binding)"
genericImmediateBinding.BindingMode = storagev1.VolumeBindingImmediate

patterns := []storageapi.TestPattern{
storageapi.DefaultFsCSIEphemeralVolume,
patterns := []storageframework.TestPattern{
storageframework.DefaultFsCSIEphemeralVolume,
genericLateBinding,
genericImmediateBinding,
}
@ -73,35 +73,35 @@ func InitEphemeralTestSuite() storageapi.TestSuite {
return InitCustomEphemeralTestSuite(patterns)
}

func (p *ephemeralTestSuite) GetTestSuiteInfo() storageapi.TestSuiteInfo {
func (p *ephemeralTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo {
return p.tsInfo
}

func (p *ephemeralTestSuite) SkipUnsupportedTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
func (p *ephemeralTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
}

func (p *ephemeralTestSuite) DefineTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
type local struct {
config *storageapi.PerTestConfig
config *storageframework.PerTestConfig
driverCleanup func()

testCase *EphemeralTest
resource *storageapi.VolumeResource
resource *storageframework.VolumeResource
}
var (
eDriver storageapi.EphemeralTestDriver
eDriver storageframework.EphemeralTestDriver
l local
)

// Beware that it also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewFrameworkWithCustomTimeouts("ephemeral", storageapi.GetDriverTimeouts(driver))
f := framework.NewFrameworkWithCustomTimeouts("ephemeral", storageframework.GetDriverTimeouts(driver))

init := func() {
if pattern.VolType == storageapi.CSIInlineVolume {
eDriver, _ = driver.(storageapi.EphemeralTestDriver)
if pattern.VolType == storageframework.CSIInlineVolume {
eDriver, _ = driver.(storageframework.EphemeralTestDriver)
}
if pattern.VolType == storageapi.GenericEphemeralVolume {
if pattern.VolType == storageframework.GenericEphemeralVolume {
enabled, err := GenericEphemeralVolumesEnabled(f.ClientSet, f.Timeouts, f.Namespace.Name)
framework.ExpectNoError(err, "check GenericEphemeralVolume feature")
if !enabled {
@ -113,10 +113,10 @@ func (p *ephemeralTestSuite) DefineTests(driver storageapi.TestDriver, pattern s

// Now do the more expensive test initialization.
l.config, l.driverCleanup = driver.PrepareTest(f)
l.resource = storageapi.CreateVolumeResource(driver, l.config, pattern, e2evolume.SizeRange{})
l.resource = storageframework.CreateVolumeResource(driver, l.config, pattern, e2evolume.SizeRange{})

switch pattern.VolType {
case storageapi.CSIInlineVolume:
case storageframework.CSIInlineVolume:
l.testCase = &EphemeralTest{
Client: l.config.Framework.ClientSet,
Timeouts: f.Timeouts,
@ -127,7 +127,7 @@ func (p *ephemeralTestSuite) DefineTests(driver storageapi.TestDriver, pattern s
return eDriver.GetVolume(l.config, volumeNumber)
},
}
case storageapi.GenericEphemeralVolume:
case storageframework.GenericEphemeralVolume:
l.testCase = &EphemeralTest{
Client: l.config.Framework.ClientSet,
Timeouts: f.Timeouts,
@ -210,7 +210,7 @@ func (p *ephemeralTestSuite) DefineTests(driver storageapi.TestDriver, pattern s

ginkgo.It("should support multiple inline ephemeral volumes", func() {
if pattern.BindingMode == storagev1.VolumeBindingImmediate &&
pattern.VolType == storageapi.GenericEphemeralVolume {
pattern.VolType == storageframework.GenericEphemeralVolume {
e2eskipper.Skipf("Multiple generic ephemeral volumes with immediate binding may cause pod startup failures when the volumes get created in separate topology segments.")
}

@ -27,7 +27,7 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
storageapi "k8s.io/kubernetes/test/e2e/storage/api"
storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
utilpointer "k8s.io/utils/pointer"
)
@ -42,15 +42,15 @@ const (
)

type fsGroupChangePolicyTestSuite struct {
tsInfo storageapi.TestSuiteInfo
tsInfo storageframework.TestSuiteInfo
}

var _ storageapi.TestSuite = &fsGroupChangePolicyTestSuite{}
var _ storageframework.TestSuite = &fsGroupChangePolicyTestSuite{}

// InitCustomFsGroupChangePolicyTestSuite returns fsGroupChangePolicyTestSuite that implements TestSuite interface
func InitCustomFsGroupChangePolicyTestSuite(patterns []storageapi.TestPattern) storageapi.TestSuite {
func InitCustomFsGroupChangePolicyTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite {
return &fsGroupChangePolicyTestSuite{
tsInfo: storageapi.TestSuiteInfo{
tsInfo: storageframework.TestSuiteInfo{
Name: "fsgroupchangepolicy",
TestPatterns: patterns,
SupportedSizeRange: e2evolume.SizeRange{
@ -61,21 +61,21 @@ func InitCustomFsGroupChangePolicyTestSuite(patterns []storageapi.TestPattern) s
}

// InitFsGroupChangePolicyTestSuite returns fsGroupChangePolicyTestSuite that implements TestSuite interface
func InitFsGroupChangePolicyTestSuite() storageapi.TestSuite {
patterns := []storageapi.TestPattern{
storageapi.DefaultFsDynamicPV,
func InitFsGroupChangePolicyTestSuite() storageframework.TestSuite {
patterns := []storageframework.TestPattern{
storageframework.DefaultFsDynamicPV,
}
return InitCustomFsGroupChangePolicyTestSuite(patterns)
}

func (s *fsGroupChangePolicyTestSuite) GetTestSuiteInfo() storageapi.TestSuiteInfo {
func (s *fsGroupChangePolicyTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo {
return s.tsInfo
}

func (s *fsGroupChangePolicyTestSuite) SkipUnsupportedTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
skipVolTypePatterns(pattern, driver, storageapi.NewVolTypeMap(storageapi.CSIInlineVolume, storageapi.GenericEphemeralVolume))
func (s *fsGroupChangePolicyTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
skipVolTypePatterns(pattern, driver, storageframework.NewVolTypeMap(storageframework.CSIInlineVolume, storageframework.GenericEphemeralVolume))
dInfo := driver.GetDriverInfo()
if !dInfo.Capabilities[storageapi.CapFsGroup] {
if !dInfo.Capabilities[storageframework.CapFsGroup] {
e2eskipper.Skipf("Driver %q does not support FsGroup - skipping", dInfo.Name)
}

@ -83,28 +83,28 @@ func (s *fsGroupChangePolicyTestSuite) SkipUnsupportedTests(driver storageapi.Te
e2eskipper.Skipf("Test does not support non-filesystem volume mode - skipping")
}

if pattern.VolType != storageapi.DynamicPV {
if pattern.VolType != storageframework.DynamicPV {
e2eskipper.Skipf("Suite %q does not support %v", s.tsInfo.Name, pattern.VolType)
}

_, ok := driver.(storageapi.DynamicPVTestDriver)
_, ok := driver.(storageframework.DynamicPVTestDriver)
if !ok {
e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType)
}
}

func (s *fsGroupChangePolicyTestSuite) DefineTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
func (s *fsGroupChangePolicyTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
type local struct {
config *storageapi.PerTestConfig
config *storageframework.PerTestConfig
driverCleanup func()
driver storageapi.TestDriver
resource *storageapi.VolumeResource
driver storageframework.TestDriver
resource *storageframework.VolumeResource
}
var l local

// Beware that it also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewFrameworkWithCustomTimeouts("fsgroupchangepolicy", storageapi.GetDriverTimeouts(driver))
f := framework.NewFrameworkWithCustomTimeouts("fsgroupchangepolicy", storageframework.GetDriverTimeouts(driver))

init := func() {
e2eskipper.SkipIfNodeOSDistroIs("windows")
@ -112,7 +112,7 @@ func (s *fsGroupChangePolicyTestSuite) DefineTests(driver storageapi.TestDriver,
l.driver = driver
l.config, l.driverCleanup = driver.PrepareTest(f)
testVolumeSizeRange := s.GetTestSuiteInfo().SupportedSizeRange
l.resource = storageapi.CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange)
l.resource = storageframework.CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange)
}

cleanup := func() {

@ -32,23 +32,23 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
storageapi "k8s.io/kubernetes/test/e2e/storage/api"
storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)

type multiVolumeTestSuite struct {
tsInfo storageapi.TestSuiteInfo
tsInfo storageframework.TestSuiteInfo
}

var _ storageapi.TestSuite = &multiVolumeTestSuite{}
var _ storageframework.TestSuite = &multiVolumeTestSuite{}

// InitCustomMultiVolumeTestSuite returns multiVolumeTestSuite that implements TestSuite interface
// using custom test patterns
func InitCustomMultiVolumeTestSuite(patterns []storageapi.TestPattern) storageapi.TestSuite {
func InitCustomMultiVolumeTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite {
return &multiVolumeTestSuite{
tsInfo: storageapi.TestSuiteInfo{
tsInfo: storageframework.TestSuiteInfo{
Name: "multiVolume [Slow]",
TestPatterns: patterns,
SupportedSizeRange: e2evolume.SizeRange{
@ -60,37 +60,37 @@ func InitCustomMultiVolumeTestSuite(patterns []storageapi.TestPattern) storageap

// InitMultiVolumeTestSuite returns multiVolumeTestSuite that implements TestSuite interface
// using test suite default patterns
func InitMultiVolumeTestSuite() storageapi.TestSuite {
patterns := []storageapi.TestPattern{
storageapi.FsVolModePreprovisionedPV,
storageapi.FsVolModeDynamicPV,
storageapi.BlockVolModePreprovisionedPV,
storageapi.BlockVolModeDynamicPV,
func InitMultiVolumeTestSuite() storageframework.TestSuite {
patterns := []storageframework.TestPattern{
storageframework.FsVolModePreprovisionedPV,
storageframework.FsVolModeDynamicPV,
storageframework.BlockVolModePreprovisionedPV,
storageframework.BlockVolModeDynamicPV,
}
return InitCustomMultiVolumeTestSuite(patterns)
}

func (t *multiVolumeTestSuite) GetTestSuiteInfo() storageapi.TestSuiteInfo {
func (t *multiVolumeTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo {
return t.tsInfo
}

func (t *multiVolumeTestSuite) SkipUnsupportedTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
func (t *multiVolumeTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
dInfo := driver.GetDriverInfo()
skipVolTypePatterns(pattern, driver, storageapi.NewVolTypeMap(storageapi.PreprovisionedPV))
if pattern.VolMode == v1.PersistentVolumeBlock && !dInfo.Capabilities[storageapi.CapBlock] {
skipVolTypePatterns(pattern, driver, storageframework.NewVolTypeMap(storageframework.PreprovisionedPV))
if pattern.VolMode == v1.PersistentVolumeBlock && !dInfo.Capabilities[storageframework.CapBlock] {
e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolMode)
}
}

func (t *multiVolumeTestSuite) DefineTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
type local struct {
config *storageapi.PerTestConfig
config *storageframework.PerTestConfig
driverCleanup func()

cs clientset.Interface
ns *v1.Namespace
driver storageapi.TestDriver
resources []*storageapi.VolumeResource
driver storageframework.TestDriver
resources []*storageframework.VolumeResource

migrationCheck *migrationOpCheck
}
@ -101,7 +101,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageapi.TestDriver, pattern

// Beware that it also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewFrameworkWithCustomTimeouts("multivolume", storageapi.GetDriverTimeouts(driver))
f := framework.NewFrameworkWithCustomTimeouts("multivolume", storageframework.GetDriverTimeouts(driver))

init := func() {
l = local{}
@ -135,7 +135,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageapi.TestDriver, pattern
// Currently, multiple volumes are not generally available for pre-provisoined volume,
// because containerized storage servers, such as iSCSI and rbd, are just returning
// a static volume inside container, not actually creating a new volume per request.
if pattern.VolType == storageapi.PreprovisionedPV {
if pattern.VolType == storageframework.PreprovisionedPV {
e2eskipper.Skipf("This test doesn't work with pre-provisioned volume -- skipping")
}

@ -147,7 +147,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageapi.TestDriver, pattern

for i := 0; i < numVols; i++ {
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
resource := storageapi.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
resource := storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
l.resources = append(l.resources, resource)
pvcs = append(pvcs, resource.Pvc)
}
@ -165,7 +165,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageapi.TestDriver, pattern
// Currently, multiple volumes are not generally available for pre-provisoined volume,
// because containerized storage servers, such as iSCSI and rbd, are just returning
// a static volume inside container, not actually creating a new volume per request.
if pattern.VolType == storageapi.PreprovisionedPV {
if pattern.VolType == storageframework.PreprovisionedPV {
e2eskipper.Skipf("This test doesn't work with pre-provisioned volume -- skipping")
}

@ -173,8 +173,8 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageapi.TestDriver, pattern
defer cleanup()

// Check different-node test requirement
if l.driver.GetDriverInfo().Capabilities[storageapi.CapSingleNodeVolume] {
e2eskipper.Skipf("Driver %s only supports %v -- skipping", l.driver.GetDriverInfo().Name, storageapi.CapSingleNodeVolume)
if l.driver.GetDriverInfo().Capabilities[storageframework.CapSingleNodeVolume] {
e2eskipper.Skipf("Driver %s only supports %v -- skipping", l.driver.GetDriverInfo().Name, storageframework.CapSingleNodeVolume)
}
nodes, err := e2enode.GetReadySchedulableNodes(l.cs)
framework.ExpectNoError(err)
@ -197,7 +197,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageapi.TestDriver, pattern

for i := 0; i < numVols; i++ {
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
resource := storageapi.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
resource := storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
l.resources = append(l.resources, resource)
pvcs = append(pvcs, resource.Pvc)
}
@ -219,7 +219,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageapi.TestDriver, pattern
// Currently, multiple volumes are not generally available for pre-provisoined volume,
// because containerized storage servers, such as iSCSI and rbd, are just returning
// a static volume inside container, not actually creating a new volume per request.
if pattern.VolType == storageapi.PreprovisionedPV {
if pattern.VolType == storageframework.PreprovisionedPV {
e2eskipper.Skipf("This test doesn't work with pre-provisioned volume -- skipping")
}

@ -236,7 +236,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageapi.TestDriver, pattern
curPattern.VolMode = v1.PersistentVolumeFilesystem
}
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
resource := storageapi.CreateVolumeResource(driver, l.config, curPattern, testVolumeSizeRange)
resource := storageframework.CreateVolumeResource(driver, l.config, curPattern, testVolumeSizeRange)
l.resources = append(l.resources, resource)
pvcs = append(pvcs, resource.Pvc)
}
@ -258,7 +258,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageapi.TestDriver, pattern
// Currently, multiple volumes are not generally available for pre-provisoined volume,
// because containerized storage servers, such as iSCSI and rbd, are just returning
// a static volume inside container, not actually creating a new volume per request.
if pattern.VolType == storageapi.PreprovisionedPV {
if pattern.VolType == storageframework.PreprovisionedPV {
e2eskipper.Skipf("This test doesn't work with pre-provisioned volume -- skipping")
}

@ -266,8 +266,8 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageapi.TestDriver, pattern
defer cleanup()

// Check different-node test requirement
if l.driver.GetDriverInfo().Capabilities[storageapi.CapSingleNodeVolume] {
e2eskipper.Skipf("Driver %s only supports %v -- skipping", l.driver.GetDriverInfo().Name, storageapi.CapSingleNodeVolume)
if l.driver.GetDriverInfo().Capabilities[storageframework.CapSingleNodeVolume] {
e2eskipper.Skipf("Driver %s only supports %v -- skipping", l.driver.GetDriverInfo().Name, storageframework.CapSingleNodeVolume)
}
nodes, err := e2enode.GetReadySchedulableNodes(l.cs)
framework.ExpectNoError(err)
@ -295,7 +295,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageapi.TestDriver, pattern
curPattern.VolMode = v1.PersistentVolumeFilesystem
}
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
resource := storageapi.CreateVolumeResource(driver, l.config, curPattern, testVolumeSizeRange)
resource := storageframework.CreateVolumeResource(driver, l.config, curPattern, testVolumeSizeRange)
l.resources = append(l.resources, resource)
pvcs = append(pvcs, resource.Pvc)
}
@ -315,13 +315,13 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageapi.TestDriver, pattern

numPods := 2

if !l.driver.GetDriverInfo().Capabilities[storageapi.CapMultiPODs] {
if !l.driver.GetDriverInfo().Capabilities[storageframework.CapMultiPODs] {
e2eskipper.Skipf("Driver %q does not support multiple concurrent pods - skipping", dInfo.Name)
}

// Create volume
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
resource := storageapi.CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange)
resource := storageframework.CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange)
l.resources = append(l.resources, resource)

// Test access to the volume from pods on different node
@ -340,13 +340,13 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageapi.TestDriver, pattern

numPods := 2

if !l.driver.GetDriverInfo().Capabilities[storageapi.CapMultiPODs] {
if !l.driver.GetDriverInfo().Capabilities[storageframework.CapMultiPODs] {
e2eskipper.Skipf("Driver %q does not support multiple concurrent pods - skipping", dInfo.Name)
}

// Create volume
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
resource := storageapi.CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange)
resource := storageframework.CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange)
l.resources = append(l.resources, resource)

// Initialize the volume with a filesystem - it's going to be mounted as read-only below.
@ -368,8 +368,8 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageapi.TestDriver, pattern

numPods := 2

if !l.driver.GetDriverInfo().Capabilities[storageapi.CapRWX] {
e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", l.driver.GetDriverInfo().Name, storageapi.CapRWX)
if !l.driver.GetDriverInfo().Capabilities[storageframework.CapRWX] {
e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", l.driver.GetDriverInfo().Name, storageframework.CapRWX)
}

// Check different-node test requirement
@ -391,7 +391,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageapi.TestDriver, pattern

// Create volume
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
resource := storageapi.CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange)
resource := storageframework.CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange)
l.resources = append(l.resources, resource)

// Test access to the volume from pods on different node

@ -38,7 +38,7 @@ import (
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
storageapi "k8s.io/kubernetes/test/e2e/storage/api"
storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
)

@ -63,14 +63,14 @@ type StorageClassTest struct {
}

type provisioningTestSuite struct {
tsInfo storageapi.TestSuiteInfo
tsInfo storageframework.TestSuiteInfo
}

// InitCustomProvisioningTestSuite returns provisioningTestSuite that implements TestSuite interface
// using custom test patterns
func InitCustomProvisioningTestSuite(patterns []storageapi.TestPattern) storageapi.TestSuite {
func InitCustomProvisioningTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite {
return &provisioningTestSuite{
tsInfo: storageapi.TestSuiteInfo{
tsInfo: storageframework.TestSuiteInfo{
Name: "provisioning",
TestPatterns: patterns,
SupportedSizeRange: e2evolume.SizeRange{
@ -82,33 +82,33 @@ func InitCustomProvisioningTestSuite(patterns []storageapi.TestPattern) storagea
// InitProvisioningTestSuite returns provisioningTestSuite that implements TestSuite interface
// using test suite default patterns
func InitProvisioningTestSuite() storageapi.TestSuite {
patterns := []storageapi.TestPattern{
storageapi.DefaultFsDynamicPV,
storageapi.BlockVolModeDynamicPV,
storageapi.NtfsDynamicPV,
func InitProvisioningTestSuite() storageframework.TestSuite {
patterns := []storageframework.TestPattern{
storageframework.DefaultFsDynamicPV,
storageframework.BlockVolModeDynamicPV,
storageframework.NtfsDynamicPV,
}
return InitCustomProvisioningTestSuite(patterns)
}

func (p *provisioningTestSuite) GetTestSuiteInfo() storageapi.TestSuiteInfo {
func (p *provisioningTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo {
return p.tsInfo
}

func (p *provisioningTestSuite) SkipUnsupportedTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
func (p *provisioningTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
// Check preconditions.
if pattern.VolType != storageapi.DynamicPV {
if pattern.VolType != storageframework.DynamicPV {
e2eskipper.Skipf("Suite %q does not support %v", p.tsInfo.Name, pattern.VolType)
}
dInfo := driver.GetDriverInfo()
if pattern.VolMode == v1.PersistentVolumeBlock && !dInfo.Capabilities[storageapi.CapBlock] {
if pattern.VolMode == v1.PersistentVolumeBlock && !dInfo.Capabilities[storageframework.CapBlock] {
e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolMode)
}
}

func (p *provisioningTestSuite) DefineTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
type local struct {
config *storageapi.PerTestConfig
config *storageframework.PerTestConfig
driverCleanup func()

testCase *StorageClassTest
@ -121,17 +121,17 @@ func (p *provisioningTestSuite) DefineTests(driver storageapi.TestDriver, patter
}
var (
dInfo = driver.GetDriverInfo()
dDriver storageapi.DynamicPVTestDriver
dDriver storageframework.DynamicPVTestDriver
l local
)

// Beware that it also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewFrameworkWithCustomTimeouts("provisioning", storageapi.GetDriverTimeouts(driver))
f := framework.NewFrameworkWithCustomTimeouts("provisioning", storageframework.GetDriverTimeouts(driver))

init := func() {
l = local{}
dDriver, _ = driver.(storageapi.DynamicPVTestDriver)
dDriver, _ = driver.(storageframework.DynamicPVTestDriver)
// Now do the more expensive test initialization.
l.config, l.driverCleanup = driver.PrepareTest(f)
l.migrationCheck = newMigrationOpCheck(f.ClientSet, dInfo.InTreePluginName)
@ -195,14 +195,14 @@ func (p *provisioningTestSuite) DefineTests(driver storageapi.TestDriver, patter
})

ginkgo.It("should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]", func() {
if !dInfo.Capabilities[storageapi.CapSnapshotDataSource] {
if !dInfo.Capabilities[storageframework.CapSnapshotDataSource] {
e2eskipper.Skipf("Driver %q does not support populate data from snapshot - skipping", dInfo.Name)
}
if !dInfo.SupportedFsType.Has(pattern.FsType) {
e2eskipper.Skipf("Driver %q does not support %q fs type - skipping", dInfo.Name, pattern.FsType)
}

sDriver, ok := driver.(storageapi.SnapshottableTestDriver)
sDriver, ok := driver.(storageframework.SnapshottableTestDriver)
if !ok {
framework.Failf("Driver %q has CapSnapshotDataSource but does not implement SnapshottableTestDriver", dInfo.Name)
}
@ -211,7 +211,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageapi.TestDriver, patter
defer cleanup()

dc := l.config.Framework.DynamicClient
testConfig := storageapi.ConvertTestConfig(l.config)
testConfig := storageframework.ConvertTestConfig(l.config)
expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name)
dataSource, cleanupFunc := prepareSnapshotDataSourceForProvisioning(f, testConfig, l.config, pattern, l.cs, dc, l.pvc, l.sc, sDriver, pattern.VolMode, expectedContent)
defer cleanupFunc()
@ -233,13 +233,13 @@ func (p *provisioningTestSuite) DefineTests(driver storageapi.TestDriver, patter
})

ginkgo.It("should provision storage with pvc data source", func() {
if !dInfo.Capabilities[storageapi.CapPVCDataSource] {
if !dInfo.Capabilities[storageframework.CapPVCDataSource] {
e2eskipper.Skipf("Driver %q does not support cloning - skipping", dInfo.Name)
}
init()
defer cleanup()

testConfig := storageapi.ConvertTestConfig(l.config)
testConfig := storageframework.ConvertTestConfig(l.config)
expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name)
dataSource, dataSourceCleanup := preparePVCDataSourceForProvisioning(f, testConfig, l.cs, l.sourcePVC, l.sc, pattern.VolMode, expectedContent)
defer dataSourceCleanup()
@ -262,17 +262,17 @@ func (p *provisioningTestSuite) DefineTests(driver storageapi.TestDriver, patter

ginkgo.It("should provision storage with pvc data source in parallel [Slow]", func() {
// Test cloning a single volume multiple times.
if !dInfo.Capabilities[storageapi.CapPVCDataSource] {
if !dInfo.Capabilities[storageframework.CapPVCDataSource] {
e2eskipper.Skipf("Driver %q does not support cloning - skipping", dInfo.Name)
}
if pattern.VolMode == v1.PersistentVolumeBlock && !dInfo.Capabilities[storageapi.CapBlock] {
if pattern.VolMode == v1.PersistentVolumeBlock && !dInfo.Capabilities[storageframework.CapBlock] {
e2eskipper.Skipf("Driver %q does not support block volumes - skipping", dInfo.Name)
}

init()
defer cleanup()

testConfig := storageapi.ConvertTestConfig(l.config)
testConfig := storageframework.ConvertTestConfig(l.config)
expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name)
dataSource, dataSourceCleanup := preparePVCDataSourceForProvisioning(f, testConfig, l.cs, l.sourcePVC, l.sc, pattern.VolMode, expectedContent)
defer dataSourceCleanup()
@ -785,13 +785,13 @@ func verifyPVCsPending(client clientset.Interface, pvcs []*v1.PersistentVolumeCl
func prepareSnapshotDataSourceForProvisioning(
f *framework.Framework,
config e2evolume.TestConfig,
perTestConfig *storageapi.PerTestConfig,
pattern storageapi.TestPattern,
perTestConfig *storageframework.PerTestConfig,
pattern storageframework.TestPattern,
client clientset.Interface,
dynamicClient dynamic.Interface,
initClaim *v1.PersistentVolumeClaim,
class *storagev1.StorageClass,
sDriver storageapi.SnapshottableTestDriver,
sDriver storageframework.SnapshottableTestDriver,
mode v1.PersistentVolumeMode,
injectContent string,
) (*v1.TypedLocalObjectReference, func()) {
@ -817,7 +817,7 @@ func prepareSnapshotDataSourceForProvisioning(
}
e2evolume.InjectContent(f, config, nil, "", tests)

snapshotResource := storageapi.CreateSnapshotResource(sDriver, perTestConfig, pattern, updatedClaim.GetName(), updatedClaim.GetNamespace(), f.Timeouts)
snapshotResource := storageframework.CreateSnapshotResource(sDriver, perTestConfig, pattern, updatedClaim.GetName(), updatedClaim.GetNamespace(), f.Timeouts)

group := "snapshot.storage.k8s.io"
dataSourceRef := &v1.TypedLocalObjectReference{

@ -35,7 +35,7 @@ import (
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
storageapi "k8s.io/kubernetes/test/e2e/storage/api"
storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
)
@ -44,19 +44,19 @@ import (
const datapath = "/mnt/test/data"

type snapshottableTestSuite struct {
tsInfo storageapi.TestSuiteInfo
tsInfo storageframework.TestSuiteInfo
}

var (
sDriver storageapi.SnapshottableTestDriver
dDriver storageapi.DynamicPVTestDriver
sDriver storageframework.SnapshottableTestDriver
dDriver storageframework.DynamicPVTestDriver
)

// InitCustomSnapshottableTestSuite returns snapshottableTestSuite that implements TestSuite interface
// using custom test patterns
func InitCustomSnapshottableTestSuite(patterns []storageapi.TestPattern) storageapi.TestSuite {
func InitCustomSnapshottableTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite {
return &snapshottableTestSuite{
tsInfo: storageapi.TestSuiteInfo{
tsInfo: storageframework.TestSuiteInfo{
Name: "snapshottable",
TestPatterns: patterns,
SupportedSizeRange: e2evolume.SizeRange{
@ -69,35 +69,35 @@ func InitCustomSnapshottableTestSuite(patterns []storageapi.TestPattern) storage

// InitSnapshottableTestSuite returns snapshottableTestSuite that implements TestSuite interface
// using testsuite default patterns
func InitSnapshottableTestSuite() storageapi.TestSuite {
patterns := []storageapi.TestPattern{
storageapi.DynamicSnapshotDelete,
storageapi.DynamicSnapshotRetain,
storageapi.PreprovisionedSnapshotDelete,
storageapi.PreprovisionedSnapshotRetain,
func InitSnapshottableTestSuite() storageframework.TestSuite {
patterns := []storageframework.TestPattern{
storageframework.DynamicSnapshotDelete,
storageframework.DynamicSnapshotRetain,
storageframework.PreprovisionedSnapshotDelete,
storageframework.PreprovisionedSnapshotRetain,
}
return InitCustomSnapshottableTestSuite(patterns)
}

func (s *snapshottableTestSuite) GetTestSuiteInfo() storageapi.TestSuiteInfo {
func (s *snapshottableTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo {
return s.tsInfo
}

func (s *snapshottableTestSuite) SkipUnsupportedTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
func (s *snapshottableTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
// Check preconditions.
dInfo := driver.GetDriverInfo()
ok := false
_, ok = driver.(storageapi.SnapshottableTestDriver)
if !dInfo.Capabilities[storageapi.CapSnapshotDataSource] || !ok {
_, ok = driver.(storageframework.SnapshottableTestDriver)
if !dInfo.Capabilities[storageframework.CapSnapshotDataSource] || !ok {
e2eskipper.Skipf("Driver %q does not support snapshots - skipping", dInfo.Name)
}
_, ok = driver.(storageapi.DynamicPVTestDriver)
_, ok = driver.(storageframework.DynamicPVTestDriver)
if !ok {
e2eskipper.Skipf("Driver %q does not support dynamic provisioning - skipping", driver.GetDriverInfo().Name)
}
}

func (s *snapshottableTestSuite) DefineTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {

// Beware that it also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
@ -106,7 +106,7 @@ func (s *snapshottableTestSuite) DefineTests(driver storageapi.TestDriver, patte
ginkgo.Describe("volume snapshot controller", func() {
var (
err error
config *storageapi.PerTestConfig
config *storageframework.PerTestConfig
driverCleanup func()
cleanupSteps []func()

@ -118,8 +118,8 @@ func (s *snapshottableTestSuite) DefineTests(driver storageapi.TestDriver, patte
originalMntTestData string
)
init := func() {
sDriver, _ = driver.(storageapi.SnapshottableTestDriver)
dDriver, _ = driver.(storageapi.DynamicPVTestDriver)
sDriver, _ = driver.(storageframework.SnapshottableTestDriver)
dDriver, _ = driver.(storageframework.DynamicPVTestDriver)
cleanupSteps = make([]func(), 0)
// init snap class, create a source PV, PVC, Pod
cs = f.ClientSet
@ -129,11 +129,11 @@ func (s *snapshottableTestSuite) DefineTests(driver storageapi.TestDriver, patte
config, driverCleanup = driver.PrepareTest(f)
cleanupSteps = append(cleanupSteps, driverCleanup)

var volumeResource *storageapi.VolumeResource
var volumeResource *storageframework.VolumeResource
cleanupSteps = append(cleanupSteps, func() {
framework.ExpectNoError(volumeResource.CleanupResource())
})
volumeResource = storageapi.CreateVolumeResource(dDriver, config, pattern, s.GetTestSuiteInfo().SupportedSizeRange)
volumeResource = storageframework.CreateVolumeResource(dDriver, config, pattern, s.GetTestSuiteInfo().SupportedSizeRange)

pvc = volumeResource.Pvc
sc = volumeResource.Sc
@ -188,11 +188,11 @@ func (s *snapshottableTestSuite) DefineTests(driver storageapi.TestDriver, patte
)

ginkgo.BeforeEach(func() {
var sr *storageapi.SnapshotResource
var sr *storageframework.SnapshotResource
cleanupSteps = append(cleanupSteps, func() {
framework.ExpectNoError(sr.CleanupResource(f.Timeouts))
})
sr = storageapi.CreateSnapshotResource(sDriver, config, pattern, pvc.GetName(), pvc.GetNamespace(), f.Timeouts)
sr = storageframework.CreateSnapshotResource(sDriver, config, pattern, pvc.GetName(), pvc.GetNamespace(), f.Timeouts)
vs = sr.Vs
vscontent = sr.Vscontent
vsc = sr.Vsclass
@ -215,7 +215,7 @@ func (s *snapshottableTestSuite) DefineTests(driver storageapi.TestDriver, patte
// Check SnapshotContent properties
ginkgo.By("checking the SnapshotContent")
// PreprovisionedCreatedSnapshot do not need to set volume snapshot class name
if pattern.SnapshotType != storageapi.PreprovisionedCreatedSnapshot {
if pattern.SnapshotType != storageframework.PreprovisionedCreatedSnapshot {
framework.ExpectEqual(snapshotContentSpec["volumeSnapshotClassName"], vsc.GetName())
}
framework.ExpectEqual(volumeSnapshotRef["name"], vs.GetName())
@ -268,15 +268,15 @@ func (s *snapshottableTestSuite) DefineTests(driver storageapi.TestDriver, patte
framework.ExpectNoError(err)

ginkgo.By("should delete the VolumeSnapshotContent according to its deletion policy")
err = storageapi.DeleteAndWaitSnapshot(dc, vs.GetNamespace(), vs.GetName(), framework.Poll, f.Timeouts.SnapshotDelete)
err = storageutils.DeleteAndWaitSnapshot(dc, vs.GetNamespace(), vs.GetName(), framework.Poll, f.Timeouts.SnapshotDelete)
framework.ExpectNoError(err)

switch pattern.SnapshotDeletionPolicy {
case storageapi.DeleteSnapshot:
case storageframework.DeleteSnapshot:
ginkgo.By("checking the SnapshotContent has been deleted")
err = utils.WaitForGVRDeletion(dc, storageutils.SnapshotContentGVR, vscontent.GetName(), framework.Poll, f.Timeouts.SnapshotDelete)
framework.ExpectNoError(err)
case storageapi.RetainSnapshot:
case storageframework.RetainSnapshot:
ginkgo.By("checking the SnapshotContent has not been deleted")
err = utils.WaitForGVRDeletion(dc, storageutils.SnapshotContentGVR, vscontent.GetName(), 1*time.Second /* poll */, 30*time.Second /* timeout */)
framework.ExpectError(err)

@ -33,22 +33,22 @@ import (
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
storageapi "k8s.io/kubernetes/test/e2e/storage/api"
storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
)

type snapshottableStressTestSuite struct {
tsInfo storageapi.TestSuiteInfo
tsInfo storageframework.TestSuiteInfo
}

type snapshottableStressTest struct {
config *storageapi.PerTestConfig
testOptions storageapi.VolumeSnapshotStressTestOptions
config *storageframework.PerTestConfig
testOptions storageframework.VolumeSnapshotStressTestOptions
driverCleanup func()

pods []*v1.Pod
volumes []*storageapi.VolumeResource
snapshots []*storageapi.SnapshotResource
volumes []*storageframework.VolumeResource
snapshots []*storageframework.SnapshotResource
// Because we are appending snapshot resources in parallel goroutines.
snapshotsMutex sync.Mutex

@ -60,9 +60,9 @@ type snapshottableStressTest struct {

// InitCustomSnapshottableStressTestSuite returns snapshottableStressTestSuite that implements TestSuite interface
// using custom test patterns
func InitCustomSnapshottableStressTestSuite(patterns []storageapi.TestPattern) storageapi.TestSuite {
func InitCustomSnapshottableStressTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite {
return &snapshottableStressTestSuite{
tsInfo: storageapi.TestSuiteInfo{
tsInfo: storageframework.TestSuiteInfo{
Name: "snapshottable-stress",
TestPatterns: patterns,
SupportedSizeRange: e2evolume.SizeRange{
@ -75,19 +75,19 @@ func InitCustomSnapshottableStressTestSuite(patterns []storageapi.TestPattern) s

// InitSnapshottableStressTestSuite returns snapshottableStressTestSuite that implements TestSuite interface
// using testsuite default patterns
func InitSnapshottableStressTestSuite() storageapi.TestSuite {
patterns := []storageapi.TestPattern{
storageapi.DynamicSnapshotDelete,
storageapi.DynamicSnapshotRetain,
func InitSnapshottableStressTestSuite() storageframework.TestSuite {
patterns := []storageframework.TestPattern{
storageframework.DynamicSnapshotDelete,
storageframework.DynamicSnapshotRetain,
}
return InitCustomSnapshottableStressTestSuite(patterns)
}

func (t *snapshottableStressTestSuite) GetTestSuiteInfo() storageapi.TestSuiteInfo {
func (t *snapshottableStressTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo {
return t.tsInfo
}

func (t *snapshottableStressTestSuite) SkipUnsupportedTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
func (t *snapshottableStressTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
driverInfo := driver.GetDriverInfo()
var ok bool
if driverInfo.VolumeSnapshotStressTestOptions == nil {
@ -99,21 +99,21 @@ func (t *snapshottableStressTestSuite) SkipUnsupportedTests(driver storageapi.Te
if driverInfo.VolumeSnapshotStressTestOptions.NumSnapshots <= 0 {
framework.Failf("NumSnapshots in snapshot stress test options must be a positive integer, received: %d", driverInfo.VolumeSnapshotStressTestOptions.NumSnapshots)
}
_, ok = driver.(storageapi.SnapshottableTestDriver)
if !driverInfo.Capabilities[storageapi.CapSnapshotDataSource] || !ok {
_, ok = driver.(storageframework.SnapshottableTestDriver)
if !driverInfo.Capabilities[storageframework.CapSnapshotDataSource] || !ok {
e2eskipper.Skipf("Driver %q doesn't implement SnapshottableTestDriver - skipping", driverInfo.Name)
}

_, ok = driver.(storageapi.DynamicPVTestDriver)
_, ok = driver.(storageframework.DynamicPVTestDriver)
if !ok {
e2eskipper.Skipf("Driver %s doesn't implement DynamicPVTestDriver -- skipping", driverInfo.Name)
}
}

func (t *snapshottableStressTestSuite) DefineTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
func (t *snapshottableStressTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
var (
driverInfo *storageapi.DriverInfo
snapshottableDriver storageapi.SnapshottableTestDriver
driverInfo *storageframework.DriverInfo
snapshottableDriver storageframework.SnapshottableTestDriver
cs clientset.Interface
stressTest *snapshottableStressTest
)
@ -124,7 +124,7 @@ func (t *snapshottableStressTestSuite) DefineTests(driver storageapi.TestDriver,

init := func() {
driverInfo = driver.GetDriverInfo()
snapshottableDriver, _ = driver.(storageapi.SnapshottableTestDriver)
snapshottableDriver, _ = driver.(storageframework.SnapshottableTestDriver)
cs = f.ClientSet
config, driverCleanup := driver.PrepareTest(f)
ctx, cancel := context.WithCancel(context.Background())
@ -132,8 +132,8 @@ func (t *snapshottableStressTestSuite) DefineTests(driver storageapi.TestDriver,
stressTest = &snapshottableStressTest{
config: config,
driverCleanup: driverCleanup,
volumes: []*storageapi.VolumeResource{},
snapshots: []*storageapi.SnapshotResource{},
volumes: []*storageframework.VolumeResource{},
snapshots: []*storageframework.SnapshotResource{},
pods: []*v1.Pod{},
testOptions: *driverInfo.VolumeSnapshotStressTestOptions,
ctx: ctx,
@ -145,7 +145,7 @@ func (t *snapshottableStressTestSuite) DefineTests(driver storageapi.TestDriver,
for i := 0; i < stressTest.testOptions.NumPods; i++ {
framework.Logf("Creating resources for pod %d/%d", i, stressTest.testOptions.NumPods-1)

volume := storageapi.CreateVolumeResource(driver, stressTest.config, pattern, t.GetTestSuiteInfo().SupportedSizeRange)
volume := storageframework.CreateVolumeResource(driver, stressTest.config, pattern, t.GetTestSuiteInfo().SupportedSizeRange)
stressTest.volumes = append(stressTest.volumes, volume)

podConfig := e2epod.Config{
@ -197,7 +197,7 @@ func (t *snapshottableStressTestSuite) DefineTests(driver storageapi.TestDriver,
for i, snapshot := range stressTest.snapshots {
wg.Add(1)

go func(i int, snapshot *storageapi.SnapshotResource) {
go func(i int, snapshot *storageframework.SnapshotResource) {
defer ginkgo.GinkgoRecover()
defer wg.Done()

@ -229,7 +229,7 @@ func (t *snapshottableStressTestSuite) DefineTests(driver storageapi.TestDriver,
for i, volume := range stressTest.volumes {
wg.Add(1)

go func(i int, volume *storageapi.VolumeResource) {
go func(i int, volume *storageframework.VolumeResource) {
defer ginkgo.GinkgoRecover()
defer wg.Done()

@ -275,7 +275,7 @@ func (t *snapshottableStressTestSuite) DefineTests(driver storageapi.TestDriver,
return
default:
framework.Logf("Pod-%d [%s], Iteration %d/%d", podIndex, pod.Name, snapshotIndex, stressTest.testOptions.NumSnapshots-1)
snapshot := storageapi.CreateSnapshotResource(snapshottableDriver, stressTest.config, pattern, volume.Pvc.GetName(), volume.Pvc.GetNamespace(), f.Timeouts)
snapshot := storageframework.CreateSnapshotResource(snapshottableDriver, stressTest.config, pattern, volume.Pvc.GetName(), volume.Pvc.GetNamespace(), f.Timeouts)
stressTest.snapshotsMutex.Lock()
defer stressTest.snapshotsMutex.Unlock()
stressTest.snapshots = append(stressTest.snapshots, snapshot)

@ -39,7 +39,7 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
storageapi "k8s.io/kubernetes/test/e2e/storage/api"
storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -56,14 +56,14 @@ var (
)

type subPathTestSuite struct {
tsInfo storageapi.TestSuiteInfo
tsInfo storageframework.TestSuiteInfo
}

// InitCustomSubPathTestSuite returns subPathTestSuite that implements TestSuite interface
// using custom test patterns
func InitCustomSubPathTestSuite(patterns []storageapi.TestPattern) storageapi.TestSuite {
func InitCustomSubPathTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite {
return &subPathTestSuite{
tsInfo: storageapi.TestSuiteInfo{
tsInfo: storageframework.TestSuiteInfo{
Name: "subPath",
TestPatterns: patterns,
SupportedSizeRange: e2evolume.SizeRange{
@ -75,33 +75,33 @@ func InitCustomSubPathTestSuite(patterns []storageapi.TestPattern) storageapi.Te

// InitSubPathTestSuite returns subPathTestSuite that implements TestSuite interface
// using testsuite default patterns
func InitSubPathTestSuite() storageapi.TestSuite {
patterns := []storageapi.TestPattern{
storageapi.DefaultFsInlineVolume,
storageapi.DefaultFsPreprovisionedPV,
storageapi.DefaultFsDynamicPV,
storageapi.NtfsDynamicPV,
func InitSubPathTestSuite() storageframework.TestSuite {
patterns := []storageframework.TestPattern{
storageframework.DefaultFsInlineVolume,
storageframework.DefaultFsPreprovisionedPV,
storageframework.DefaultFsDynamicPV,
storageframework.NtfsDynamicPV,
}
return InitCustomSubPathTestSuite(patterns)
}

func (s *subPathTestSuite) GetTestSuiteInfo() storageapi.TestSuiteInfo {
func (s *subPathTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo {
return s.tsInfo
}

func (s *subPathTestSuite) SkipUnsupportedTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
skipVolTypePatterns(pattern, driver, storageapi.NewVolTypeMap(
storageapi.PreprovisionedPV,
storageapi.InlineVolume))
func (s *subPathTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
skipVolTypePatterns(pattern, driver, storageframework.NewVolTypeMap(
storageframework.PreprovisionedPV,
storageframework.InlineVolume))
}

func (s *subPathTestSuite) DefineTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
type local struct {
config *storageapi.PerTestConfig
config *storageframework.PerTestConfig
driverCleanup func()

hostExec utils.HostExec
resource *storageapi.VolumeResource
resource *storageframework.VolumeResource
roVolSource *v1.VolumeSource
pod *v1.Pod
formatPod *v1.Pod
@ -115,7 +115,7 @@ func (s *subPathTestSuite) DefineTests(driver storageapi.TestDriver, pattern sto

// Beware that it also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewFrameworkWithCustomTimeouts("provisioning", storageapi.GetDriverTimeouts(driver))
f := framework.NewFrameworkWithCustomTimeouts("provisioning", storageframework.GetDriverTimeouts(driver))

init := func() {
l = local{}
@ -124,24 +124,24 @@ func (s *subPathTestSuite) DefineTests(driver storageapi.TestDriver, pattern sto
l.config, l.driverCleanup = driver.PrepareTest(f)
l.migrationCheck = newMigrationOpCheck(f.ClientSet, driver.GetDriverInfo().InTreePluginName)
testVolumeSizeRange := s.GetTestSuiteInfo().SupportedSizeRange
l.resource = storageapi.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
l.resource = storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
l.hostExec = utils.NewHostExec(f)

// Setup subPath test dependent resource
volType := pattern.VolType
switch volType {
case storageapi.InlineVolume:
if iDriver, ok := driver.(storageapi.InlineVolumeTestDriver); ok {
case storageframework.InlineVolume:
if iDriver, ok := driver.(storageframework.InlineVolumeTestDriver); ok {
l.roVolSource = iDriver.GetVolumeSource(true, pattern.FsType, l.resource.Volume)
}
case storageapi.PreprovisionedPV:
case storageframework.PreprovisionedPV:
l.roVolSource = &v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: l.resource.Pvc.Name,
ReadOnly: true,
},
}
case storageapi.DynamicPV:
case storageframework.DynamicPV:
l.roVolSource = &v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: l.resource.Pvc.Name,

@ -35,21 +35,21 @@ import (
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
storageapi "k8s.io/kubernetes/test/e2e/storage/api"
storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
)

type topologyTestSuite struct {
tsInfo storageapi.TestSuiteInfo
tsInfo storageframework.TestSuiteInfo
}

type topologyTest struct {
config *storageapi.PerTestConfig
config *storageframework.PerTestConfig
driverCleanup func()

migrationCheck *migrationOpCheck

resource storageapi.VolumeResource
resource storageframework.VolumeResource
pod *v1.Pod
allTopologies []topology
}
@ -58,9 +58,9 @@ type topology map[string]string

// InitCustomTopologyTestSuite returns topologyTestSuite that implements TestSuite interface
// using custom test patterns
func InitCustomTopologyTestSuite(patterns []storageapi.TestPattern) storageapi.TestSuite {
func InitCustomTopologyTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite {
return &topologyTestSuite{
tsInfo: storageapi.TestSuiteInfo{
tsInfo: storageframework.TestSuiteInfo{
Name: "topology",
TestPatterns: patterns,
},
@ -69,51 +69,51 @@ func InitCustomTopologyTestSuite(patterns []storageapi.TestPattern) storageapi.T

// InitTopologyTestSuite returns topologyTestSuite that implements TestSuite interface
// using testsuite default patterns
func InitTopologyTestSuite() storageapi.TestSuite {
patterns := []storageapi.TestPattern{
storageapi.TopologyImmediate,
storageapi.TopologyDelayed,
func InitTopologyTestSuite() storageframework.TestSuite {
patterns := []storageframework.TestPattern{
storageframework.TopologyImmediate,
storageframework.TopologyDelayed,
}
return InitCustomTopologyTestSuite(patterns)
}

func (t *topologyTestSuite) GetTestSuiteInfo() storageapi.TestSuiteInfo {
func (t *topologyTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo {
return t.tsInfo
}

func (t *topologyTestSuite) SkipUnsupportedTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
func (t *topologyTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
dInfo := driver.GetDriverInfo()
var ok bool
_, ok = driver.(storageapi.DynamicPVTestDriver)
_, ok = driver.(storageframework.DynamicPVTestDriver)
if !ok {
e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType)
}

if !dInfo.Capabilities[storageapi.CapTopology] {
if !dInfo.Capabilities[storageframework.CapTopology] {
e2eskipper.Skipf("Driver %q does not support topology - skipping", dInfo.Name)
}
}

func (t *topologyTestSuite) DefineTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
func (t *topologyTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
var (
dInfo = driver.GetDriverInfo()
dDriver storageapi.DynamicPVTestDriver
dDriver storageframework.DynamicPVTestDriver
cs clientset.Interface
err error
)

// Beware that it also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewFrameworkWithCustomTimeouts("topology", storageapi.GetDriverTimeouts(driver))
f := framework.NewFrameworkWithCustomTimeouts("topology", storageframework.GetDriverTimeouts(driver))

init := func() topologyTest {
dDriver, _ = driver.(storageapi.DynamicPVTestDriver)
dDriver, _ = driver.(storageframework.DynamicPVTestDriver)
l := topologyTest{}

// Now do the more expensive test initialization.
l.config, l.driverCleanup = driver.PrepareTest(f)

l.resource = storageapi.VolumeResource{
l.resource = storageframework.VolumeResource{
Config: l.config,
Pattern: pattern,
}

@ -34,7 +34,7 @@ import (
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
	storageapi "k8s.io/kubernetes/test/e2e/storage/api"
	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
	storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
)

@ -53,14 +53,14 @@ const (
)

type volumeExpandTestSuite struct {
	tsInfo storageapi.TestSuiteInfo
	tsInfo storageframework.TestSuiteInfo
}

// InitCustomVolumeExpandTestSuite returns volumeExpandTestSuite that implements TestSuite interface
// using custom test patterns
func InitCustomVolumeExpandTestSuite(patterns []storageapi.TestPattern) storageapi.TestSuite {
func InitCustomVolumeExpandTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite {
	return &volumeExpandTestSuite{
		tsInfo: storageapi.TestSuiteInfo{
		tsInfo: storageframework.TestSuiteInfo{
			Name:         "volume-expand",
			TestPatterns: patterns,
			SupportedSizeRange: e2evolume.SizeRange{
@ -72,39 +72,39 @@ func InitCustomVolumeExpandTestSuite(patterns []storageapi.TestPattern) storagea

// InitVolumeExpandTestSuite returns volumeExpandTestSuite that implements TestSuite interface
// using testsuite default patterns
func InitVolumeExpandTestSuite() storageapi.TestSuite {
	patterns := []storageapi.TestPattern{
		storageapi.DefaultFsDynamicPV,
		storageapi.BlockVolModeDynamicPV,
		storageapi.DefaultFsDynamicPVAllowExpansion,
		storageapi.BlockVolModeDynamicPVAllowExpansion,
		storageapi.NtfsDynamicPV,
		storageapi.NtfsDynamicPVAllowExpansion,
func InitVolumeExpandTestSuite() storageframework.TestSuite {
	patterns := []storageframework.TestPattern{
		storageframework.DefaultFsDynamicPV,
		storageframework.BlockVolModeDynamicPV,
		storageframework.DefaultFsDynamicPVAllowExpansion,
		storageframework.BlockVolModeDynamicPVAllowExpansion,
		storageframework.NtfsDynamicPV,
		storageframework.NtfsDynamicPVAllowExpansion,
	}
	return InitCustomVolumeExpandTestSuite(patterns)
}

func (v *volumeExpandTestSuite) GetTestSuiteInfo() storageapi.TestSuiteInfo {
func (v *volumeExpandTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo {
	return v.tsInfo
}

func (v *volumeExpandTestSuite) SkipUnsupportedTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
func (v *volumeExpandTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
	// Check preconditions.
	if !driver.GetDriverInfo().Capabilities[storageapi.CapControllerExpansion] {
	if !driver.GetDriverInfo().Capabilities[storageframework.CapControllerExpansion] {
		e2eskipper.Skipf("Driver %q does not support volume expansion - skipping", driver.GetDriverInfo().Name)
	}
	// Check preconditions.
	if !driver.GetDriverInfo().Capabilities[storageapi.CapBlock] && pattern.VolMode == v1.PersistentVolumeBlock {
	if !driver.GetDriverInfo().Capabilities[storageframework.CapBlock] && pattern.VolMode == v1.PersistentVolumeBlock {
		e2eskipper.Skipf("Driver %q does not support block volume mode - skipping", driver.GetDriverInfo().Name)
	}
}

func (v *volumeExpandTestSuite) DefineTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
	type local struct {
		config        *storageapi.PerTestConfig
		config        *storageframework.PerTestConfig
		driverCleanup func()

		resource *storageapi.VolumeResource
		resource *storageframework.VolumeResource
		pod      *v1.Pod
		pod2     *v1.Pod

@ -114,7 +114,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageapi.TestDriver, patter

	// Beware that it also registers an AfterEach which renders f unusable. Any code using
	// f must run inside an It or Context callback.
	f := framework.NewFrameworkWithCustomTimeouts("volume-expand", storageapi.GetDriverTimeouts(driver))
	f := framework.NewFrameworkWithCustomTimeouts("volume-expand", storageframework.GetDriverTimeouts(driver))

	init := func() {
		l = local{}
@ -123,7 +123,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageapi.TestDriver, patter
		l.config, l.driverCleanup = driver.PrepareTest(f)
		l.migrationCheck = newMigrationOpCheck(f.ClientSet, driver.GetDriverInfo().InTreePluginName)
		testVolumeSizeRange := v.GetTestSuiteInfo().SupportedSizeRange
		l.resource = storageapi.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
		l.resource = storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
	}

	cleanup := func() {
@ -40,7 +40,7 @@ import (
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
	storageapi "k8s.io/kubernetes/test/e2e/storage/api"
	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
	storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
)

@ -48,22 +48,22 @@ import (
// Test files are generated in testVolumeIO()
// If test file generation algorithm changes, these must be recomputed.
var md5hashes = map[int64]string{
	storageapi.FileSizeSmall:  "5c34c2813223a7ca05a3c2f38c0d1710",
	storageapi.FileSizeMedium: "f2fa202b1ffeedda5f3a58bd1ae81104",
	storageapi.FileSizeLarge:  "8d763edc71bd16217664793b5a15e403",
	storageframework.FileSizeSmall:  "5c34c2813223a7ca05a3c2f38c0d1710",
	storageframework.FileSizeMedium: "f2fa202b1ffeedda5f3a58bd1ae81104",
	storageframework.FileSizeLarge:  "8d763edc71bd16217664793b5a15e403",
}

const mountPath = "/opt"

type volumeIOTestSuite struct {
	tsInfo storageapi.TestSuiteInfo
	tsInfo storageframework.TestSuiteInfo
}

// InitCustomVolumeIOTestSuite returns volumeIOTestSuite that implements TestSuite interface
// using custom test patterns
func InitCustomVolumeIOTestSuite(patterns []storageapi.TestPattern) storageapi.TestSuite {
func InitCustomVolumeIOTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite {
	return &volumeIOTestSuite{
		tsInfo: storageapi.TestSuiteInfo{
		tsInfo: storageframework.TestSuiteInfo{
			Name:         "volumeIO",
			TestPatterns: patterns,
			SupportedSizeRange: e2evolume.SizeRange{
@ -75,31 +75,31 @@ func InitCustomVolumeIOTestSuite(patterns []storageapi.TestPattern) storageapi.T

// InitVolumeIOTestSuite returns volumeIOTestSuite that implements TestSuite interface
// using testsuite default patterns
func InitVolumeIOTestSuite() storageapi.TestSuite {
	patterns := []storageapi.TestPattern{
		storageapi.DefaultFsInlineVolume,
		storageapi.DefaultFsPreprovisionedPV,
		storageapi.DefaultFsDynamicPV,
func InitVolumeIOTestSuite() storageframework.TestSuite {
	patterns := []storageframework.TestPattern{
		storageframework.DefaultFsInlineVolume,
		storageframework.DefaultFsPreprovisionedPV,
		storageframework.DefaultFsDynamicPV,
	}
	return InitCustomVolumeIOTestSuite(patterns)
}

func (t *volumeIOTestSuite) GetTestSuiteInfo() storageapi.TestSuiteInfo {
func (t *volumeIOTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo {
	return t.tsInfo
}

func (t *volumeIOTestSuite) SkipUnsupportedTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
	skipVolTypePatterns(pattern, driver, storageapi.NewVolTypeMap(
		storageapi.PreprovisionedPV,
		storageapi.InlineVolume))
func (t *volumeIOTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
	skipVolTypePatterns(pattern, driver, storageframework.NewVolTypeMap(
		storageframework.PreprovisionedPV,
		storageframework.InlineVolume))
}

func (t *volumeIOTestSuite) DefineTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
func (t *volumeIOTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
	type local struct {
		config        *storageapi.PerTestConfig
		config        *storageframework.PerTestConfig
		driverCleanup func()

		resource *storageapi.VolumeResource
		resource *storageframework.VolumeResource

		migrationCheck *migrationOpCheck
	}
@ -110,7 +110,7 @@ func (t *volumeIOTestSuite) DefineTests(driver storageapi.TestDriver, pattern st

	// Beware that it also registers an AfterEach which renders f unusable. Any code using
	// f must run inside an It or Context callback.
	f := framework.NewFrameworkWithCustomTimeouts("volumeio", storageapi.GetDriverTimeouts(driver))
	f := framework.NewFrameworkWithCustomTimeouts("volumeio", storageframework.GetDriverTimeouts(driver))

	init := func() {
		l = local{}
@ -120,7 +120,7 @@ func (t *volumeIOTestSuite) DefineTests(driver storageapi.TestDriver, pattern st
		l.migrationCheck = newMigrationOpCheck(f.ClientSet, dInfo.InTreePluginName)

		testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
		l.resource = storageapi.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
		l.resource = storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
		if l.resource.VolSource == nil {
			e2eskipper.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name)
		}
@ -151,23 +151,23 @@ func (t *volumeIOTestSuite) DefineTests(driver storageapi.TestDriver, pattern st
		fileSizes := createFileSizes(dInfo.MaxFileSize)
		testFile := fmt.Sprintf("%s_io_test_%s", dInfo.Name, f.Namespace.Name)
		var fsGroup *int64
		if !framework.NodeOSDistroIs("windows") && dInfo.Capabilities[storageapi.CapFsGroup] {
		if !framework.NodeOSDistroIs("windows") && dInfo.Capabilities[storageframework.CapFsGroup] {
			fsGroupVal := int64(1234)
			fsGroup = &fsGroupVal
		}
		podSec := v1.PodSecurityContext{
			FSGroup: fsGroup,
		}
		err := testVolumeIO(f, cs, storageapi.ConvertTestConfig(l.config), *l.resource.VolSource, &podSec, testFile, fileSizes)
		err := testVolumeIO(f, cs, storageframework.ConvertTestConfig(l.config), *l.resource.VolSource, &podSec, testFile, fileSizes)
		framework.ExpectNoError(err)
	})
}

func createFileSizes(maxFileSize int64) []int64 {
	allFileSizes := []int64{
		storageapi.FileSizeSmall,
		storageapi.FileSizeMedium,
		storageapi.FileSizeLarge,
		storageframework.FileSizeSmall,
		storageframework.FileSizeMedium,
		storageframework.FileSizeLarge,
	}
	fileSizes := []int64{}

@ -249,8 +249,8 @@ func makePodSpec(config e2evolume.TestConfig, initCmd string, volsrc v1.VolumeSo
// Write `fsize` bytes to `fpath` in the pod, using dd and the `ddInput` file.
func writeToFile(f *framework.Framework, pod *v1.Pod, fpath, ddInput string, fsize int64) error {
	ginkgo.By(fmt.Sprintf("writing %d bytes to test file %s", fsize, fpath))
	loopCnt := fsize / storageapi.MinFileSize
	writeCmd := fmt.Sprintf("i=0; while [ $i -lt %d ]; do dd if=%s bs=%d >>%s 2>/dev/null; let i+=1; done", loopCnt, ddInput, storageapi.MinFileSize, fpath)
	loopCnt := fsize / storageframework.MinFileSize
	writeCmd := fmt.Sprintf("i=0; while [ $i -lt %d ]; do dd if=%s bs=%d >>%s 2>/dev/null; let i+=1; done", loopCnt, ddInput, storageframework.MinFileSize, fpath)
	stdout, stderr, err := e2evolume.PodExec(f, pod, writeCmd)
	if err != nil {
		return fmt.Errorf("error writing to volume using %q: %s\nstdout: %s\nstderr: %s", writeCmd, err, stdout, stderr)
@ -311,7 +311,7 @@ func deleteFile(f *framework.Framework, pod *v1.Pod, fpath string) {
func testVolumeIO(f *framework.Framework, cs clientset.Interface, config e2evolume.TestConfig, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext, file string, fsizes []int64) (err error) {
	ddInput := filepath.Join(mountPath, fmt.Sprintf("%s-%s-dd_if", config.Prefix, config.Namespace))
	writeBlk := strings.Repeat("abcdefghijklmnopqrstuvwxyz123456", 32) // 1KiB value
	loopCnt := storageapi.MinFileSize / int64(len(writeBlk))
	loopCnt := storageframework.MinFileSize / int64(len(writeBlk))
	// initContainer cmd to create and fill dd's input file. The initContainer is used to create
	// the `dd` input file which is currently 1MiB. Rather than store a 1MiB go value, a loop is
	// used to create a 1MiB file in the target directory.
@ -348,8 +348,8 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config e2evolu
	// create files of the passed-in file sizes and verify test file size and content
	for _, fsize := range fsizes {
		// file sizes must be a multiple of `MinFileSize`
		if math.Mod(float64(fsize), float64(storageapi.MinFileSize)) != 0 {
			fsize = fsize/storageapi.MinFileSize + storageapi.MinFileSize
		if math.Mod(float64(fsize), float64(storageframework.MinFileSize)) != 0 {
			fsize = fsize/storageframework.MinFileSize + storageframework.MinFileSize
		}
		fpath := filepath.Join(mountPath, fmt.Sprintf("%s-%d", file, fsize))
		defer func() {
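writeToFile above turns the requested file size into a dd loop. A small runnable sketch of that arithmetic, assuming a 1 MiB MinFileSize (the constant's value is not shown in this diff) and hypothetical paths under the /opt mount:

// Sketch only: how writeToFile derives its shell command.
package main

import "fmt"

func main() {
	const minFileSize = int64(1024 * 1024) // assumed 1 MiB, for illustration
	fsize := int64(4 * 1024 * 1024)        // hypothetical 4 MiB test file
	loopCnt := fsize / minFileSize         // 4 dd invocations, one MinFileSize block each
	fmt.Printf("i=0; while [ $i -lt %d ]; do dd if=%s bs=%d >>%s 2>/dev/null; let i+=1; done\n",
		loopCnt, "/opt/example-dd_if", minFileSize, "/opt/example_io_test-4194304")
}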
@ -32,37 +32,37 @@ import (
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	storageapi "k8s.io/kubernetes/test/e2e/storage/api"
	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
	storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
)

type volumeStressTestSuite struct {
	tsInfo storageapi.TestSuiteInfo
	tsInfo storageframework.TestSuiteInfo
}

type volumeStressTest struct {
	config        *storageapi.PerTestConfig
	config        *storageframework.PerTestConfig
	driverCleanup func()

	migrationCheck *migrationOpCheck

	resources []*storageapi.VolumeResource
	resources []*storageframework.VolumeResource
	pods      []*v1.Pod
	// stop and wait for any async routines
	wg     sync.WaitGroup
	ctx    context.Context
	cancel context.CancelFunc

	testOptions storageapi.StressTestOptions
	testOptions storageframework.StressTestOptions
}

var _ storageapi.TestSuite = &volumeStressTestSuite{}
var _ storageframework.TestSuite = &volumeStressTestSuite{}

// InitCustomVolumeStressTestSuite returns volumeStressTestSuite that implements TestSuite interface
// using custom test patterns
func InitCustomVolumeStressTestSuite(patterns []storageapi.TestPattern) storageapi.TestSuite {
func InitCustomVolumeStressTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite {
	return &volumeStressTestSuite{
		tsInfo: storageapi.TestSuiteInfo{
		tsInfo: storageframework.TestSuiteInfo{
			Name:         "volume-stress",
			TestPatterns: patterns,
		},
@ -71,19 +71,19 @@ func InitCustomVolumeStressTestSuite(patterns []storageapi.TestPattern) storagea

// InitVolumeStressTestSuite returns volumeStressTestSuite that implements TestSuite interface
// using testsuite default patterns
func InitVolumeStressTestSuite() storageapi.TestSuite {
	patterns := []storageapi.TestPattern{
		storageapi.DefaultFsDynamicPV,
		storageapi.BlockVolModeDynamicPV,
func InitVolumeStressTestSuite() storageframework.TestSuite {
	patterns := []storageframework.TestPattern{
		storageframework.DefaultFsDynamicPV,
		storageframework.BlockVolModeDynamicPV,
	}
	return InitCustomVolumeStressTestSuite(patterns)
}

func (t *volumeStressTestSuite) GetTestSuiteInfo() storageapi.TestSuiteInfo {
func (t *volumeStressTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo {
	return t.tsInfo
}

func (t *volumeStressTestSuite) SkipUnsupportedTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
func (t *volumeStressTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
	dInfo := driver.GetDriverInfo()
	if dInfo.StressTestOptions == nil {
		e2eskipper.Skipf("Driver %s doesn't specify stress test options -- skipping", dInfo.Name)
@ -95,15 +95,15 @@ func (t *volumeStressTestSuite) SkipUnsupportedTests(driver storageapi.TestDrive
		framework.Failf("NumRestarts in stress test options must be a positive integer, received: %d", dInfo.StressTestOptions.NumRestarts)
	}

	if _, ok := driver.(storageapi.DynamicPVTestDriver); !ok {
	if _, ok := driver.(storageframework.DynamicPVTestDriver); !ok {
		e2eskipper.Skipf("Driver %s doesn't implement DynamicPVTestDriver -- skipping", dInfo.Name)
	}
	if !driver.GetDriverInfo().Capabilities[storageapi.CapBlock] && pattern.VolMode == v1.PersistentVolumeBlock {
	if !driver.GetDriverInfo().Capabilities[storageframework.CapBlock] && pattern.VolMode == v1.PersistentVolumeBlock {
		e2eskipper.Skipf("Driver %q does not support block volume mode - skipping", dInfo.Name)
	}
}

func (t *volumeStressTestSuite) DefineTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
func (t *volumeStressTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
	var (
		dInfo = driver.GetDriverInfo()
		cs    clientset.Interface
@ -112,7 +112,7 @@ func (t *volumeStressTestSuite) DefineTests(driver storageapi.TestDriver, patter

	// Beware that it also registers an AfterEach which renders f unusable. Any code using
	// f must run inside an It or Context callback.
	f := framework.NewFrameworkWithCustomTimeouts("stress", storageapi.GetDriverTimeouts(driver))
	f := framework.NewFrameworkWithCustomTimeouts("stress", storageframework.GetDriverTimeouts(driver))

	init := func() {
		cs = f.ClientSet
@ -121,7 +121,7 @@ func (t *volumeStressTestSuite) DefineTests(driver storageapi.TestDriver, patter
		// Now do the more expensive test initialization.
		l.config, l.driverCleanup = driver.PrepareTest(f)
		l.migrationCheck = newMigrationOpCheck(f.ClientSet, dInfo.InTreePluginName)
		l.resources = []*storageapi.VolumeResource{}
		l.resources = []*storageframework.VolumeResource{}
		l.pods = []*v1.Pod{}
		l.testOptions = *dInfo.StressTestOptions
		l.ctx, l.cancel = context.WithCancel(context.Background())
@ -130,7 +130,7 @@ func (t *volumeStressTestSuite) DefineTests(driver storageapi.TestDriver, patter
	createPodsAndVolumes := func() {
		for i := 0; i < l.testOptions.NumPods; i++ {
			framework.Logf("Creating resources for pod %v/%v", i, l.testOptions.NumPods-1)
			r := storageapi.CreateVolumeResource(driver, l.config, pattern, t.GetTestSuiteInfo().SupportedSizeRange)
			r := storageframework.CreateVolumeResource(driver, l.config, pattern, t.GetTestSuiteInfo().SupportedSizeRange)
			l.resources = append(l.resources, r)
			podConfig := e2epod.Config{
				NS: f.Namespace.Name,
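The stress suite only runs for drivers whose DriverInfo carries StressTestOptions. A hedged sketch of such a declaration (the driver name and counts are illustrative, not taken from this commit):

// Sketch only: a driver opting in to the volume-stress suite.
package drivers

import storageframework "k8s.io/kubernetes/test/e2e/storage/framework"

var exampleDriverInfo = storageframework.DriverInfo{
	Name: "example.csi.driver.io", // hypothetical driver name
	StressTestOptions: &storageframework.StressTestOptions{
		NumPods:     10, // must be positive, checked in SkipUnsupportedTests
		NumRestarts: 10, // must be positive as well, otherwise framework.Failf is called
	},
}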
@ -38,12 +38,12 @@ import (
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
	storageapi "k8s.io/kubernetes/test/e2e/storage/api"
	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
	storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
)

type volumeLimitsTestSuite struct {
	tsInfo storageapi.TestSuiteInfo
	tsInfo storageframework.TestSuiteInfo
}

const (
@ -56,13 +56,13 @@ const (
	csiNodeInfoTimeout = 1 * time.Minute
)

var _ storageapi.TestSuite = &volumeLimitsTestSuite{}
var _ storageframework.TestSuite = &volumeLimitsTestSuite{}

// InitCustomVolumeLimitsTestSuite returns volumeLimitsTestSuite that implements TestSuite interface
// using custom test patterns
func InitCustomVolumeLimitsTestSuite(patterns []storageapi.TestPattern) storageapi.TestSuite {
func InitCustomVolumeLimitsTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite {
	return &volumeLimitsTestSuite{
		tsInfo: storageapi.TestSuiteInfo{
		tsInfo: storageframework.TestSuiteInfo{
			Name:         "volumeLimits",
			TestPatterns: patterns,
		},
@ -71,29 +71,29 @@ func InitCustomVolumeLimitsTestSuite(patterns []storageapi.TestPattern) storagea

// InitVolumeLimitsTestSuite returns volumeLimitsTestSuite that implements TestSuite interface
// using testsuite default patterns
func InitVolumeLimitsTestSuite() storageapi.TestSuite {
	patterns := []storageapi.TestPattern{
		storageapi.FsVolModeDynamicPV,
func InitVolumeLimitsTestSuite() storageframework.TestSuite {
	patterns := []storageframework.TestPattern{
		storageframework.FsVolModeDynamicPV,
	}
	return InitCustomVolumeLimitsTestSuite(patterns)
}

func (t *volumeLimitsTestSuite) GetTestSuiteInfo() storageapi.TestSuiteInfo {
func (t *volumeLimitsTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo {
	return t.tsInfo
}

func (t *volumeLimitsTestSuite) SkipUnsupportedTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
func (t *volumeLimitsTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
}

func (t *volumeLimitsTestSuite) DefineTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
	type local struct {
		config      *storageapi.PerTestConfig
		config      *storageframework.PerTestConfig
		testCleanup func()

		cs clientset.Interface
		ns *v1.Namespace
		// VolumeResource contains pv, pvc, sc, etc. of the first pod created
		resource *storageapi.VolumeResource
		resource *storageframework.VolumeResource

		// All created PVCs, incl. the one in resource
		pvcs []*v1.PersistentVolumeClaim
@ -110,7 +110,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageapi.TestDriver, patter

	// Beware that it also registers an AfterEach which renders f unusable. Any code using
	// f must run inside an It or Context callback.
	f := framework.NewFrameworkWithCustomTimeouts("volumelimits", storageapi.GetDriverTimeouts(driver))
	f := framework.NewFrameworkWithCustomTimeouts("volumelimits", storageframework.GetDriverTimeouts(driver))

	// This checks that CSIMaxVolumeLimitChecker works as expected.
	// A randomly chosen node should be able to handle as many CSI volumes as
@ -122,11 +122,11 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageapi.TestDriver, patter
	// BEWARE: the test may create lot of volumes and it's really slow.
	ginkgo.It("should support volume limits [Serial]", func() {
		driverInfo := driver.GetDriverInfo()
		if !driverInfo.Capabilities[storageapi.CapVolumeLimits] {
		if !driverInfo.Capabilities[storageframework.CapVolumeLimits] {
			ginkgo.Skip(fmt.Sprintf("driver %s does not support volume limits", driverInfo.Name))
		}
		var dDriver storageapi.DynamicPVTestDriver
		if dDriver = driver.(storageapi.DynamicPVTestDriver); dDriver == nil {
		var dDriver storageframework.DynamicPVTestDriver
		if dDriver = driver.(storageframework.DynamicPVTestDriver); dDriver == nil {
			framework.Failf("Test driver does not provide dynamically created volumes")
		}

@ -158,7 +158,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageapi.TestDriver, patter
		claimSize, err := storageutils.GetSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange)
		framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, dDriver)

		l.resource = storageapi.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
		l.resource = storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
		defer func() {
			err := l.resource.CleanupResource()
			framework.ExpectNoError(err, "while cleaning up resource")
@ -316,14 +316,14 @@ func waitForAllPVCsBound(cs clientset.Interface, timeout time.Duration, pvcs []*
	return pvNames, nil
}

func getNodeLimits(cs clientset.Interface, config *storageapi.PerTestConfig, nodeName string, driverInfo *storageapi.DriverInfo) (int, error) {
func getNodeLimits(cs clientset.Interface, config *storageframework.PerTestConfig, nodeName string, driverInfo *storageframework.DriverInfo) (int, error) {
	if len(driverInfo.InTreePluginName) == 0 {
		return getCSINodeLimits(cs, config, nodeName, driverInfo)
	}
	return getInTreeNodeLimits(cs, nodeName, driverInfo)
}

func getInTreeNodeLimits(cs clientset.Interface, nodeName string, driverInfo *storageapi.DriverInfo) (int, error) {
func getInTreeNodeLimits(cs clientset.Interface, nodeName string, driverInfo *storageframework.DriverInfo) (int, error) {
	node, err := cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
	if err != nil {
		return 0, err
@ -350,7 +350,7 @@ func getInTreeNodeLimits(cs clientset.Interface, nodeName string, driverInfo *st
	return int(limit.Value()), nil
}

func getCSINodeLimits(cs clientset.Interface, config *storageapi.PerTestConfig, nodeName string, driverInfo *storageapi.DriverInfo) (int, error) {
func getCSINodeLimits(cs clientset.Interface, config *storageframework.PerTestConfig, nodeName string, driverInfo *storageframework.DriverInfo) (int, error) {
	// Retry with a timeout, the driver might just have been installed and kubelet takes a while to publish everything.
	var limit int
	err := wait.PollImmediate(2*time.Second, csiNodeInfoTimeout, func() (bool, error) {
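The poll body of getCSINodeLimits is cut off in this excerpt. A plausible shape for it, reading the driver's allocatable count from the CSINode object (an assumption based on the surrounding signature, not code shown in this diff; the way the driver name is matched is also assumed):

// Sketch only: roughly what each poll iteration could do.
csiNode, err := cs.StorageV1().CSINodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
if err != nil {
	framework.Logf("%s", err)
	return false, nil // CSINode may not be published yet, keep polling
}
for _, d := range csiNode.Spec.Drivers {
	if d.Name != driverInfo.Name || d.Allocatable == nil || d.Allocatable.Count == nil {
		continue
	}
	limit = int(*d.Allocatable.Count)
	return true, nil
}
return false, nil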
@ -39,7 +39,7 @@ import (
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
	storageapi "k8s.io/kubernetes/test/e2e/storage/api"
	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
	storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
)

@ -49,16 +49,16 @@ const (
)

type volumeModeTestSuite struct {
	tsInfo storageapi.TestSuiteInfo
	tsInfo storageframework.TestSuiteInfo
}

var _ storageapi.TestSuite = &volumeModeTestSuite{}
var _ storageframework.TestSuite = &volumeModeTestSuite{}

// InitCustomVolumeModeTestSuite returns volumeModeTestSuite that implements TestSuite interface
// using custom test patterns
func InitCustomVolumeModeTestSuite(patterns []storageapi.TestPattern) storageapi.TestSuite {
func InitCustomVolumeModeTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite {
	return &volumeModeTestSuite{
		tsInfo: storageapi.TestSuiteInfo{
		tsInfo: storageframework.TestSuiteInfo{
			Name:         "volumeMode",
			TestPatterns: patterns,
			SupportedSizeRange: e2evolume.SizeRange{
@ -70,32 +70,32 @@ func InitCustomVolumeModeTestSuite(patterns []storageapi.TestPattern) storageapi

// InitVolumeModeTestSuite returns volumeModeTestSuite that implements TestSuite interface
// using testsuite default patterns
func InitVolumeModeTestSuite() storageapi.TestSuite {
	patterns := []storageapi.TestPattern{
		storageapi.FsVolModePreprovisionedPV,
		storageapi.FsVolModeDynamicPV,
		storageapi.BlockVolModePreprovisionedPV,
		storageapi.BlockVolModeDynamicPV,
func InitVolumeModeTestSuite() storageframework.TestSuite {
	patterns := []storageframework.TestPattern{
		storageframework.FsVolModePreprovisionedPV,
		storageframework.FsVolModeDynamicPV,
		storageframework.BlockVolModePreprovisionedPV,
		storageframework.BlockVolModeDynamicPV,
	}
	return InitCustomVolumeModeTestSuite(patterns)
}

func (t *volumeModeTestSuite) GetTestSuiteInfo() storageapi.TestSuiteInfo {
func (t *volumeModeTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo {
	return t.tsInfo
}

func (t *volumeModeTestSuite) SkipUnsupportedTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
func (t *volumeModeTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
}

func (t *volumeModeTestSuite) DefineTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
	type local struct {
		config        *storageapi.PerTestConfig
		config        *storageframework.PerTestConfig
		driverCleanup func()

		cs clientset.Interface
		ns *v1.Namespace
		// VolumeResource contains pv, pvc, sc, etc., owns cleaning that up
		storageapi.VolumeResource
		storageframework.VolumeResource

		migrationCheck *migrationOpCheck
	}
@ -106,7 +106,7 @@ func (t *volumeModeTestSuite) DefineTests(driver storageapi.TestDriver, pattern

	// Beware that it also registers an AfterEach which renders f unusable. Any code using
	// f must run inside an It or Context callback.
	f := framework.NewFrameworkWithCustomTimeouts("volumemode", storageapi.GetDriverTimeouts(driver))
	f := framework.NewFrameworkWithCustomTimeouts("volumemode", storageframework.GetDriverTimeouts(driver))

	init := func() {
		l = local{}
@ -131,22 +131,22 @@ func (t *volumeModeTestSuite) DefineTests(driver storageapi.TestDriver, pattern
			volumeNodeAffinity *v1.VolumeNodeAffinity
		)

		l.VolumeResource = storageapi.VolumeResource{
		l.VolumeResource = storageframework.VolumeResource{
			Config:  l.config,
			Pattern: pattern,
		}

		// Create volume for pre-provisioned volume tests
		l.Volume = storageapi.CreateVolume(driver, l.config, pattern.VolType)
		l.Volume = storageframework.CreateVolume(driver, l.config, pattern.VolType)

		switch pattern.VolType {
		case storageapi.PreprovisionedPV:
		case storageframework.PreprovisionedPV:
			if pattern.VolMode == v1.PersistentVolumeBlock {
				scName = fmt.Sprintf("%s-%s-sc-for-block", l.ns.Name, dInfo.Name)
			} else if pattern.VolMode == v1.PersistentVolumeFilesystem {
				scName = fmt.Sprintf("%s-%s-sc-for-file", l.ns.Name, dInfo.Name)
			}
			if pDriver, ok := driver.(storageapi.PreprovisionedPVTestDriver); ok {
			if pDriver, ok := driver.(storageframework.PreprovisionedPVTestDriver); ok {
				pvSource, volumeNodeAffinity = pDriver.GetPersistentVolumeSource(false, fsType, l.Volume)
				if pvSource == nil {
					e2eskipper.Skipf("Driver %q does not define PersistentVolumeSource - skipping", dInfo.Name)
@ -157,8 +157,8 @@ func (t *volumeModeTestSuite) DefineTests(driver storageapi.TestDriver, pattern
				l.Pv = e2epv.MakePersistentVolume(pvConfig)
				l.Pvc = e2epv.MakePersistentVolumeClaim(pvcConfig, l.ns.Name)
			}
		case storageapi.DynamicPV:
			if dDriver, ok := driver.(storageapi.DynamicPVTestDriver); ok {
		case storageframework.DynamicPV:
			if dDriver, ok := driver.(storageframework.DynamicPVTestDriver); ok {
				l.Sc = dDriver.GetDynamicProvisionStorageClass(l.config, fsType)
				if l.Sc == nil {
					e2eskipper.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name)
@ -190,9 +190,9 @@ func (t *volumeModeTestSuite) DefineTests(driver storageapi.TestDriver, pattern
	}

	// We register different tests depending on the drive
	isBlockSupported := dInfo.Capabilities[storageapi.CapBlock]
	isBlockSupported := dInfo.Capabilities[storageframework.CapBlock]
	switch pattern.VolType {
	case storageapi.PreprovisionedPV:
	case storageframework.PreprovisionedPV:
		if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported {
			ginkgo.It("should fail to create pod by failing to mount volume [Slow]", func() {
				manualInit()
@ -253,7 +253,7 @@ func (t *volumeModeTestSuite) DefineTests(driver storageapi.TestDriver, pattern
			})
		}

	case storageapi.DynamicPV:
	case storageframework.DynamicPV:
		if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported {
			ginkgo.It("should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly]", func() {
				manualInit()
@ -297,7 +297,7 @@ func (t *volumeModeTestSuite) DefineTests(driver storageapi.TestDriver, pattern
			skipTestIfBlockNotSupported(driver)
			init()
			testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
			l.VolumeResource = *storageapi.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
			l.VolumeResource = *storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
			defer cleanup()

			ginkgo.By("Creating pod")
@ -354,7 +354,7 @@ func (t *volumeModeTestSuite) DefineTests(driver storageapi.TestDriver, pattern
			}
			init()
			testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
			l.VolumeResource = *storageapi.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
			l.VolumeResource = *storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
			defer cleanup()

			ginkgo.By("Creating pod")
@ -34,22 +34,22 @@ import (
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
	storageapi "k8s.io/kubernetes/test/e2e/storage/api"
	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
	storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
	imageutils "k8s.io/kubernetes/test/utils/image"
)

type volumesTestSuite struct {
	tsInfo storageapi.TestSuiteInfo
	tsInfo storageframework.TestSuiteInfo
}

var _ storageapi.TestSuite = &volumesTestSuite{}
var _ storageframework.TestSuite = &volumesTestSuite{}

// InitCustomVolumesTestSuite returns volumesTestSuite that implements TestSuite interface
// using custom test patterns
func InitCustomVolumesTestSuite(patterns []storageapi.TestPattern) storageapi.TestSuite {
func InitCustomVolumesTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite {
	return &volumesTestSuite{
		tsInfo: storageapi.TestSuiteInfo{
		tsInfo: storageframework.TestSuiteInfo{
			Name:         "volumes",
			TestPatterns: patterns,
			SupportedSizeRange: e2evolume.SizeRange{
@ -61,65 +61,65 @@ func InitCustomVolumesTestSuite(patterns []storageapi.TestPattern) storageapi.Te

// InitVolumesTestSuite returns volumesTestSuite that implements TestSuite interface
// using testsuite default patterns
func InitVolumesTestSuite() storageapi.TestSuite {
	patterns := []storageapi.TestPattern{
func InitVolumesTestSuite() storageframework.TestSuite {
	patterns := []storageframework.TestPattern{
		// Default fsType
		storageapi.DefaultFsInlineVolume,
		storageapi.DefaultFsPreprovisionedPV,
		storageapi.DefaultFsDynamicPV,
		storageframework.DefaultFsInlineVolume,
		storageframework.DefaultFsPreprovisionedPV,
		storageframework.DefaultFsDynamicPV,
		// ext3
		storageapi.Ext3InlineVolume,
		storageapi.Ext3PreprovisionedPV,
		storageapi.Ext3DynamicPV,
		storageframework.Ext3InlineVolume,
		storageframework.Ext3PreprovisionedPV,
		storageframework.Ext3DynamicPV,
		// ext4
		storageapi.Ext4InlineVolume,
		storageapi.Ext4PreprovisionedPV,
		storageapi.Ext4DynamicPV,
		storageframework.Ext4InlineVolume,
		storageframework.Ext4PreprovisionedPV,
		storageframework.Ext4DynamicPV,
		// xfs
		storageapi.XfsInlineVolume,
		storageapi.XfsPreprovisionedPV,
		storageapi.XfsDynamicPV,
		storageframework.XfsInlineVolume,
		storageframework.XfsPreprovisionedPV,
		storageframework.XfsDynamicPV,
		// ntfs
		storageapi.NtfsInlineVolume,
		storageapi.NtfsPreprovisionedPV,
		storageapi.NtfsDynamicPV,
		storageframework.NtfsInlineVolume,
		storageframework.NtfsPreprovisionedPV,
		storageframework.NtfsDynamicPV,
		// block volumes
		storageapi.BlockVolModePreprovisionedPV,
		storageapi.BlockVolModeDynamicPV,
		storageframework.BlockVolModePreprovisionedPV,
		storageframework.BlockVolModeDynamicPV,
	}
	return InitCustomVolumesTestSuite(patterns)
}

func (t *volumesTestSuite) GetTestSuiteInfo() storageapi.TestSuiteInfo {
func (t *volumesTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo {
	return t.tsInfo
}

func (t *volumesTestSuite) SkipUnsupportedTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
func (t *volumesTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
	if pattern.VolMode == v1.PersistentVolumeBlock {
		skipTestIfBlockNotSupported(driver)
	}
}

func skipExecTest(driver storageapi.TestDriver) {
func skipExecTest(driver storageframework.TestDriver) {
	dInfo := driver.GetDriverInfo()
	if !dInfo.Capabilities[storageapi.CapExec] {
	if !dInfo.Capabilities[storageframework.CapExec] {
		e2eskipper.Skipf("Driver %q does not support exec - skipping", dInfo.Name)
	}
}

func skipTestIfBlockNotSupported(driver storageapi.TestDriver) {
func skipTestIfBlockNotSupported(driver storageframework.TestDriver) {
	dInfo := driver.GetDriverInfo()
	if !dInfo.Capabilities[storageapi.CapBlock] {
	if !dInfo.Capabilities[storageframework.CapBlock] {
		e2eskipper.Skipf("Driver %q does not provide raw block - skipping", dInfo.Name)
	}
}

func (t *volumesTestSuite) DefineTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
func (t *volumesTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
	type local struct {
		config        *storageapi.PerTestConfig
		config        *storageframework.PerTestConfig
		driverCleanup func()

		resource *storageapi.VolumeResource
		resource *storageframework.VolumeResource

		migrationCheck *migrationOpCheck
	}
@ -128,7 +128,7 @@ func (t *volumesTestSuite) DefineTests(driver storageapi.TestDriver, pattern sto

	// Beware that it also registers an AfterEach which renders f unusable. Any code using
	// f must run inside an It or Context callback.
	f := framework.NewFrameworkWithCustomTimeouts("volume", storageapi.GetDriverTimeouts(driver))
	f := framework.NewFrameworkWithCustomTimeouts("volume", storageframework.GetDriverTimeouts(driver))

	init := func() {
		l = local{}
@ -137,7 +137,7 @@ func (t *volumesTestSuite) DefineTests(driver storageapi.TestDriver, pattern sto
		l.config, l.driverCleanup = driver.PrepareTest(f)
		l.migrationCheck = newMigrationOpCheck(f.ClientSet, dInfo.InTreePluginName)
		testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
		l.resource = storageapi.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
		l.resource = storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
		if l.resource.VolSource == nil {
			e2eskipper.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name)
		}
@ -159,7 +159,7 @@ func (t *volumesTestSuite) DefineTests(driver storageapi.TestDriver, pattern sto
	ginkgo.It("should store data", func() {
		init()
		defer func() {
			e2evolume.TestServerCleanup(f, storageapi.ConvertTestConfig(l.config))
			e2evolume.TestServerCleanup(f, storageframework.ConvertTestConfig(l.config))
			cleanup()
		}()

@ -173,9 +173,9 @@ func (t *volumesTestSuite) DefineTests(driver storageapi.TestDriver, pattern sto
				dInfo.Name, f.Namespace.Name),
			},
		}
		config := storageapi.ConvertTestConfig(l.config)
		config := storageframework.ConvertTestConfig(l.config)
		var fsGroup *int64
		if framework.NodeOSDistroIs("windows") && dInfo.Capabilities[storageapi.CapFsGroup] {
		if framework.NodeOSDistroIs("windows") && dInfo.Capabilities[storageframework.CapFsGroup] {
			fsGroupVal := int64(1234)
			fsGroup = &fsGroupVal
		}
@ -184,7 +184,7 @@ func (t *volumesTestSuite) DefineTests(driver storageapi.TestDriver, pattern sto
		// and we don't have reliable way to detect volumes are unmounted or
		// not before starting the second pod.
		e2evolume.InjectContent(f, config, fsGroup, pattern.FsType, tests)
		if driver.GetDriverInfo().Capabilities[storageapi.CapPersistence] {
		if driver.GetDriverInfo().Capabilities[storageframework.CapPersistence] {
			e2evolume.TestVolumeClient(f, config, fsGroup, pattern.FsType, tests)
		} else {
			ginkgo.By("Skipping persistence check for non-persistent volume")
@ -207,7 +207,7 @@ func testScriptInPod(
	f *framework.Framework,
	volumeType string,
	source *v1.VolumeSource,
	config *storageapi.PerTestConfig) {
	config *storageframework.PerTestConfig) {

	const (
		volPath = "/vol1"
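All of the suites touched above implement the same three methods; the interface they satisfy now lives in test/e2e/storage/framework. Its approximate shape, inferred from the call sites in this diff rather than copied from the package itself:

// Sketch only: the rough contract these suites satisfy after the move.
type TestSuite interface {
	GetTestSuiteInfo() TestSuiteInfo
	SkipUnsupportedTests(driver TestDriver, pattern TestPattern)
	DefineTests(driver TestDriver, pattern TestPattern)
}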
@ -30,6 +30,7 @@ go_library(
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/storage/names:go_default_library",
        "//staging/src/k8s.io/client-go/dynamic:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
@ -21,8 +21,12 @@ import (
	"fmt"
	"time"

	"github.com/onsi/ginkgo"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apiserver/pkg/storage/names"
	"k8s.io/client-go/dynamic"
	"k8s.io/kubernetes/test/e2e/framework"
)
@ -73,3 +77,66 @@ func WaitForSnapshotReady(c dynamic.Interface, ns string, snapshotName string, p

	return fmt.Errorf("VolumeSnapshot %s is not ready within %v", snapshotName, timeout)
}

// GetSnapshotContentFromSnapshot returns the VolumeSnapshotContent object Bound to a
// given VolumeSnapshot
func GetSnapshotContentFromSnapshot(dc dynamic.Interface, snapshot *unstructured.Unstructured) *unstructured.Unstructured {
	defer ginkgo.GinkgoRecover()
	err := WaitForSnapshotReady(dc, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, framework.SnapshotCreateTimeout)
	framework.ExpectNoError(err)

	vs, err := dc.Resource(SnapshotGVR).Namespace(snapshot.GetNamespace()).Get(context.TODO(), snapshot.GetName(), metav1.GetOptions{})

	snapshotStatus := vs.Object["status"].(map[string]interface{})
	snapshotContentName := snapshotStatus["boundVolumeSnapshotContentName"].(string)
	framework.Logf("received snapshotStatus %v", snapshotStatus)
	framework.Logf("snapshotContentName %s", snapshotContentName)
	framework.ExpectNoError(err)

	vscontent, err := dc.Resource(SnapshotContentGVR).Get(context.TODO(), snapshotContentName, metav1.GetOptions{})
	framework.ExpectNoError(err)

	return vscontent

}

// DeleteAndWaitSnapshot deletes a VolumeSnapshot and waits for it to be deleted or until timeout occurs, whichever comes first
func DeleteAndWaitSnapshot(dc dynamic.Interface, ns string, snapshotName string, poll, timeout time.Duration) error {
	var err error
	ginkgo.By("deleting the snapshot")
	err = dc.Resource(SnapshotGVR).Namespace(ns).Delete(context.TODO(), snapshotName, metav1.DeleteOptions{})
	if err != nil && !apierrors.IsNotFound(err) {
		return err
	}

	ginkgo.By("checking the Snapshot has been deleted")
	err = WaitForNamespacedGVRDeletion(dc, SnapshotGVR, ns, snapshotName, poll, timeout)

	return err
}

// GenerateSnapshotClassSpec constructs a new SnapshotClass instance spec
// with a unique name that is based on namespace + suffix.
func GenerateSnapshotClassSpec(
	snapshotter string,
	parameters map[string]string,
	ns string,
	suffix string,
) *unstructured.Unstructured {
	snapshotClass := &unstructured.Unstructured{
		Object: map[string]interface{}{
			"kind":       "VolumeSnapshotClass",
			"apiVersion": SnapshotAPIVersion,
			"metadata": map[string]interface{}{
				// Name must be unique, so let's base it on namespace name and use GenerateName
				// TODO(#96234): Remove unnecessary suffix.
				"name": names.SimpleNameGenerator.GenerateName(ns + "-" + suffix),
			},
			"driver":         snapshotter,
			"parameters":     parameters,
			"deletionPolicy": "Delete",
		},
	}

	return snapshotClass
}
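A hedged sketch of how a test in the same package might drive the new snapshot helpers; dc, ns and snapshot are assumed to exist already, the driver name passed to GenerateSnapshotClassSpec is a placeholder, and the delete timeout is illustrative:

// Sketch only: exercising the helpers added above.
func exampleSnapshotFlow(dc dynamic.Interface, ns string, snapshot *unstructured.Unstructured) {
	// Resolve the VolumeSnapshotContent bound to the (ready) snapshot.
	content := GetSnapshotContentFromSnapshot(dc, snapshot)
	framework.Logf("snapshot %s is bound to content %s", snapshot.GetName(), content.GetName())

	// Build a VolumeSnapshotClass spec with a generated, namespace-based name.
	class := GenerateSnapshotClassSpec("hostpath.csi.k8s.io", map[string]string{}, ns, "vsc")
	framework.Logf("generated VolumeSnapshotClass spec %s", class.GetName())

	// Delete the snapshot and wait for it to disappear.
	framework.ExpectNoError(DeleteAndWaitSnapshot(dc, ns, snapshot.GetName(), framework.Poll, 2*time.Minute))
}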