Merge pull request #85540 from pohly/testsuites-api

e2e storage: public API for testsuites, support CSIInlineVolume type for generic resource
Kubernetes Prow Robot 2019-12-05 17:15:19 -08:00 committed by GitHub
commit d47e1364c9
17 changed files with 392 additions and 299 deletions
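In short, the test suite framework becomes a public API: TestSuiteInfo fields, the GetTestSuiteInfo/DefineTests/SkipRedundantSuite methods, and the generic volume resource are exported so out-of-tree CSI driver repositories can reuse them. A minimal sketch of how an external E2E binary might wire a driver into the exported suites; only names visible in this diff are used, and the driver value is assumed to be supplied by the caller:

package example

import (
	"github.com/onsi/ginkgo"

	"k8s.io/kubernetes/test/e2e/storage/testsuites"
)

// suites lists the exported test suites to run against a driver; the Init*
// constructors are part of the API made public by this PR.
var suites = []func() testsuites.TestSuite{
	testsuites.InitDisruptiveTestSuite,
	testsuites.InitEphemeralTestSuite,
	testsuites.InitSubPathTestSuite,
	testsuites.InitVolumeIOTestSuite,
}

// DefineDriverTests registers all suites for the given driver. It is meant
// to be called from a ginkgo.Describe in an out-of-tree driver's E2E binary.
func DefineDriverTests(driver testsuites.TestDriver) {
	ginkgo.Context(testsuites.GetDriverNameWithFeatureTags(driver), func() {
		testsuites.DefineTestSuite(driver, suites)
	})
}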

View File

@ -76,6 +76,11 @@ var (
Name: "Inline-volume (default fs)",
VolType: InlineVolume,
}
// DefaultFsEphemeralVolume is TestPattern for "Ephemeral-volume (default fs)"
DefaultFsEphemeralVolume = TestPattern{
Name: "Ephemeral-volume (default fs)",
VolType: CSIInlineVolume,
}
// DefaultFsPreprovisionedPV is TestPattern for "Pre-provisioned PV (default fs)"
DefaultFsPreprovisionedPV = TestPattern{
Name: "Pre-provisioned PV (default fs)",
@ -95,6 +100,12 @@ var (
VolType: InlineVolume,
FsType: "ext3",
}
// Ext3EphemeralVolume is TestPattern for "Ephemeral-volume (ext3)"
Ext3EphemeralVolume = TestPattern{
Name: "Ephemeral-volume (ext3)",
VolType: CSIInlineVolume,
FsType: "ext3",
}
// Ext3PreprovisionedPV is TestPattern for "Pre-provisioned PV (ext3)"
Ext3PreprovisionedPV = TestPattern{
Name: "Pre-provisioned PV (ext3)",
@ -116,6 +127,12 @@ var (
VolType: InlineVolume,
FsType: "ext4",
}
// Ext4EphemeralVolume is TestPattern for "Ephemeral-volume (ext4)"
Ext4EphemeralVolume = TestPattern{
Name: "Ephemeral-volume (ext4)",
VolType: CSIInlineVolume,
FsType: "ext4",
}
// Ext4PreprovisionedPV is TestPattern for "Pre-provisioned PV (ext4)"
Ext4PreprovisionedPV = TestPattern{
Name: "Pre-provisioned PV (ext4)",
@ -138,6 +155,13 @@ var (
FsType: "xfs",
FeatureTag: "[Slow]",
}
// XfsEphemeralVolume is TestPattern for "Ephemeral-volume (xfs)"
XfsEphemeralVolume = TestPattern{
Name: "Ephemeral-volume (xfs)",
VolType: CSIInlineVolume,
FsType: "xfs",
FeatureTag: "[Slow]",
}
// XfsPreprovisionedPV is TestPattern for "Pre-provisioned PV (xfs)"
XfsPreprovisionedPV = TestPattern{
Name: "Pre-provisioned PV (xfs)",
@ -162,6 +186,13 @@ var (
FsType: "ntfs",
FeatureTag: "[sig-windows]",
}
// NtfsEphemeralVolume is TestPattern for "Ephemeral-volume (ntfs)"
NtfsEphemeralVolume = TestPattern{
Name: "Ephemeral-volume (ntfs)",
VolType: CSIInlineVolume,
FsType: "ntfs",
FeatureTag: "[sig-windows]",
}
// NtfsPreprovisionedPV is TestPattern for "Pre-provisioned PV (ntfs)"
NtfsPreprovisionedPV = TestPattern{
Name: "Pre-provisioned PV (ntfs)",

View File

@ -76,7 +76,13 @@ filegroup(
go_test(
name = "go_default_test",
srcs = ["base_test.go"],
srcs = [
"api_test.go",
"base_test.go",
],
embed = [":go_default_library"],
deps = ["//test/e2e/framework/volume:go_default_library"],
deps = [
"//test/e2e/framework/volume:go_default_library",
"//test/e2e/storage/testpatterns:go_default_library",
],
)

View File

@ -0,0 +1,52 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package testsuites_test is used intentionally to ensure that the
// code below only has access to exported names. It doesn't have any
// actual test. That the custom volume test suite defined below
// compiles is the test.
//
// It's needed because we don't have any in-tree volume test
// suite implementations that aren't in the "testsuites" package itself.
// We don't need this for the "TestDriver" interface because there
// we have implementations in a separate package.
package testsuites_test
import (
"k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
)
type fakeSuite struct {
}
func (f *fakeSuite) GetTestSuiteInfo() testsuites.TestSuiteInfo {
return testsuites.TestSuiteInfo{
Name: "fake",
FeatureTag: "",
TestPatterns: []testpatterns.TestPattern{testpatterns.DefaultFsDynamicPV},
SupportedSizeRange: volume.SizeRange{Min: "1Mi", Max: "1Gi"},
}
}
func (f *fakeSuite) DefineTests(testsuites.TestDriver, testpatterns.TestPattern) {
}
func (f *fakeSuite) SkipRedundantSuite(testsuites.TestDriver, testpatterns.TestPattern) {
}
var _ testsuites.TestSuite = &fakeSuite{}
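With the interface methods exported, a suite like fakeSuite above can be handed to DefineTestSuite exactly like the in-tree suites. A hedged sketch that would live in the same file and reuses its imports and the fakeSuite type; the driver argument is assumed to come from the caller:

// defineFakeTests registers the fake suite for some TestDriver supplied by
// the caller (for example from a driver's PrepareTest setup).
func defineFakeTests(driver testsuites.TestDriver) {
	testsuites.DefineTestSuite(driver, []func() testsuites.TestSuite{
		func() testsuites.TestSuite { return &fakeSuite{} },
	})
}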

View File

@ -62,48 +62,42 @@ type opCounts map[string]int64
// TestSuite represents an interface for a set of tests which works with TestDriver
type TestSuite interface {
// getTestSuiteInfo returns the TestSuiteInfo for this TestSuite
getTestSuiteInfo() TestSuiteInfo
// defineTest defines tests of the testpattern for the driver.
// GetTestSuiteInfo returns the TestSuiteInfo for this TestSuite
GetTestSuiteInfo() TestSuiteInfo
// DefineTests defines tests of the testpattern for the driver.
// Called inside a Ginkgo context that reflects the current driver and test pattern,
// so the test suite can define tests directly with ginkgo.It.
defineTests(TestDriver, testpatterns.TestPattern)
// skipRedundantSuite will skip the test suite based on the given TestPattern and TestDriver
skipRedundantSuite(TestDriver, testpatterns.TestPattern)
DefineTests(TestDriver, testpatterns.TestPattern)
// SkipRedundantSuite will skip the test suite based on the given TestPattern and TestDriver
SkipRedundantSuite(TestDriver, testpatterns.TestPattern)
}
// TestSuiteInfo represents a set of parameters for TestSuite
type TestSuiteInfo struct {
name string // name of the TestSuite
featureTag string // featureTag for the TestSuite
testPatterns []testpatterns.TestPattern // Slice of TestPattern for the TestSuite
supportedSizeRange volume.SizeRange // Size range supported by the test suite
}
// TestResource represents an interface for resources that is used by TestSuite
type TestResource interface {
// cleanupResource cleans up the test resources created when setting up the resource
cleanupResource() error
Name string // name of the TestSuite
FeatureTag string // featureTag for the TestSuite
TestPatterns []testpatterns.TestPattern // Slice of TestPattern for the TestSuite
SupportedSizeRange volume.SizeRange // Size range supported by the test suite
}
func getTestNameStr(suite TestSuite, pattern testpatterns.TestPattern) string {
tsInfo := suite.getTestSuiteInfo()
return fmt.Sprintf("[Testpattern: %s]%s %s%s", pattern.Name, pattern.FeatureTag, tsInfo.name, tsInfo.featureTag)
tsInfo := suite.GetTestSuiteInfo()
return fmt.Sprintf("[Testpattern: %s]%s %s%s", pattern.Name, pattern.FeatureTag, tsInfo.Name, tsInfo.FeatureTag)
}
// DefineTestSuite defines tests for all testpatterns and all testSuites for a driver
func DefineTestSuite(driver TestDriver, tsInits []func() TestSuite) {
for _, testSuiteInit := range tsInits {
suite := testSuiteInit()
for _, pattern := range suite.getTestSuiteInfo().testPatterns {
for _, pattern := range suite.GetTestSuiteInfo().TestPatterns {
p := pattern
ginkgo.Context(getTestNameStr(suite, p), func() {
ginkgo.BeforeEach(func() {
// Skip unsupported tests to avoid unnecessary resource initialization
suite.skipRedundantSuite(driver, p)
suite.SkipRedundantSuite(driver, p)
skipUnsupportedTest(driver, p)
})
suite.defineTests(driver, p)
suite.DefineTests(driver, p)
})
}
}
@ -117,7 +111,7 @@ func DefineTestSuite(driver TestDriver, tsInits []func() TestSuite) {
// 3. Check if fsType is supported
// 4. Check with driver specific logic
//
// Test suites can also skip tests inside their own defineTests function or in
// Test suites can also skip tests inside their own DefineTests function or in
// individual tests.
func skipUnsupportedTest(driver TestDriver, pattern testpatterns.TestPattern) {
dInfo := driver.GetDriverInfo()
@ -170,54 +164,52 @@ func skipUnsupportedTest(driver TestDriver, pattern testpatterns.TestPattern) {
driver.SkipUnsupportedTest(pattern)
}
// genericVolumeTestResource is a generic implementation of TestResource that will be able to
// VolumeResource is a generic implementation of TestResource that will be able to
// be used in most of TestSuites.
// See volume_io.go or volumes.go in test/e2e/storage/testsuites/ for how to use this resource.
// Also, see subpath.go in the same directory for how to extend and use it.
type genericVolumeTestResource struct {
driver TestDriver
config *PerTestConfig
pattern testpatterns.TestPattern
volType string
volSource *v1.VolumeSource
pvc *v1.PersistentVolumeClaim
pv *v1.PersistentVolume
sc *storagev1.StorageClass
type VolumeResource struct {
Config *PerTestConfig
Pattern testpatterns.TestPattern
VolType string
VolSource *v1.VolumeSource
Pvc *v1.PersistentVolumeClaim
Pv *v1.PersistentVolume
Sc *storagev1.StorageClass
volume TestVolume
Volume TestVolume
}
var _ TestResource = &genericVolumeTestResource{}
func createGenericVolumeTestResource(driver TestDriver, config *PerTestConfig, pattern testpatterns.TestPattern, testVolumeSizeRange volume.SizeRange) *genericVolumeTestResource {
r := genericVolumeTestResource{
driver: driver,
config: config,
pattern: pattern,
// CreateVolumeResource constructs a VolumeResource for the current test. It knows how to deal with
// different test pattern volume types.
func CreateVolumeResource(driver TestDriver, config *PerTestConfig, pattern testpatterns.TestPattern, testVolumeSizeRange volume.SizeRange) *VolumeResource {
r := VolumeResource{
Config: config,
Pattern: pattern,
}
dInfo := driver.GetDriverInfo()
f := config.Framework
cs := f.ClientSet
// Create volume for pre-provisioned volume tests
r.volume = CreateVolume(driver, config, pattern.VolType)
r.Volume = CreateVolume(driver, config, pattern.VolType)
switch pattern.VolType {
case testpatterns.InlineVolume:
framework.Logf("Creating resource for inline volume")
if iDriver, ok := driver.(InlineVolumeTestDriver); ok {
r.volSource = iDriver.GetVolumeSource(false, pattern.FsType, r.volume)
r.volType = dInfo.Name
r.VolSource = iDriver.GetVolumeSource(false, pattern.FsType, r.Volume)
r.VolType = dInfo.Name
}
case testpatterns.PreprovisionedPV:
framework.Logf("Creating resource for pre-provisioned PV")
if pDriver, ok := driver.(PreprovisionedPVTestDriver); ok {
pvSource, volumeNodeAffinity := pDriver.GetPersistentVolumeSource(false, pattern.FsType, r.volume)
pvSource, volumeNodeAffinity := pDriver.GetPersistentVolumeSource(false, pattern.FsType, r.Volume)
if pvSource != nil {
r.pv, r.pvc = createPVCPV(f, dInfo.Name, pvSource, volumeNodeAffinity, pattern.VolMode, dInfo.RequiredAccessModes)
r.volSource = createVolumeSource(r.pvc.Name, false /* readOnly */)
r.Pv, r.Pvc = createPVCPV(f, dInfo.Name, pvSource, volumeNodeAffinity, pattern.VolMode, dInfo.RequiredAccessModes)
r.VolSource = createVolumeSource(r.Pvc.Name, false /* readOnly */)
}
r.volType = fmt.Sprintf("%s-preprovisionedPV", dInfo.Name)
r.VolType = fmt.Sprintf("%s-preprovisionedPV", dInfo.Name)
}
case testpatterns.DynamicPV:
framework.Logf("Creating resource for dynamic PV")
@ -227,32 +219,44 @@ func createGenericVolumeTestResource(driver TestDriver, config *PerTestConfig, p
claimSize, err := getSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange)
framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, driverVolumeSizeRange)
framework.Logf("Using claimSize:%s, test suite supported size:%v, driver(%s) supported size:%v ", claimSize, testVolumeSizeRange, dDriver.GetDriverInfo().Name, testVolumeSizeRange)
r.sc = dDriver.GetDynamicProvisionStorageClass(r.config, pattern.FsType)
r.Sc = dDriver.GetDynamicProvisionStorageClass(r.Config, pattern.FsType)
if pattern.BindingMode != "" {
r.sc.VolumeBindingMode = &pattern.BindingMode
r.Sc.VolumeBindingMode = &pattern.BindingMode
}
if pattern.AllowExpansion != false {
r.sc.AllowVolumeExpansion = &pattern.AllowExpansion
r.Sc.AllowVolumeExpansion = &pattern.AllowExpansion
}
ginkgo.By("creating a StorageClass " + r.sc.Name)
ginkgo.By("creating a StorageClass " + r.Sc.Name)
r.sc, err = cs.StorageV1().StorageClasses().Create(r.sc)
r.Sc, err = cs.StorageV1().StorageClasses().Create(r.Sc)
framework.ExpectNoError(err)
if r.sc != nil {
r.pv, r.pvc = createPVCPVFromDynamicProvisionSC(
f, dInfo.Name, claimSize, r.sc, pattern.VolMode, dInfo.RequiredAccessModes)
r.volSource = createVolumeSource(r.pvc.Name, false /* readOnly */)
if r.Sc != nil {
r.Pv, r.Pvc = createPVCPVFromDynamicProvisionSC(
f, dInfo.Name, claimSize, r.Sc, pattern.VolMode, dInfo.RequiredAccessModes)
r.VolSource = createVolumeSource(r.Pvc.Name, false /* readOnly */)
}
r.VolType = fmt.Sprintf("%s-dynamicPV", dInfo.Name)
}
case testpatterns.CSIInlineVolume:
framework.Logf("Creating resource for CSI ephemeral inline volume")
if eDriver, ok := driver.(EphemeralTestDriver); ok {
r.VolType = fmt.Sprintf("%s-ephemeral", dInfo.Name)
attributes, _, _ := eDriver.GetVolume(config, 0)
r.VolSource = &v1.VolumeSource{
CSI: &v1.CSIVolumeSource{
Driver: eDriver.GetCSIDriverName(config),
VolumeAttributes: attributes,
},
}
r.volType = fmt.Sprintf("%s-dynamicPV", dInfo.Name)
}
default:
framework.Failf("genericVolumeTestResource doesn't support: %s", pattern.VolType)
framework.Failf("VolumeResource doesn't support: %s", pattern.VolType)
}
if r.volSource == nil {
if r.VolSource == nil {
framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType)
}
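For the new CSIInlineVolume branch, VolSource ends up as an inline v1.CSIVolumeSource built from the driver's EphemeralTestDriver hooks. A sketch of how a test could mount that source in a pod; the pod, container, and volume names are placeholders, not part of this PR:

package example

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"k8s.io/kubernetes/test/e2e/storage/testsuites"
)

// podForInlineVolume returns a pod that mounts the VolumeSource produced by
// CreateVolumeResource for a CSIInlineVolume pattern.
func podForInlineVolume(ns string, r *testsuites.VolumeResource) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "inline-volume-tester", Namespace: ns},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:    "tester",
				Image:   "busybox",
				Command: []string{"sleep", "3600"},
				VolumeMounts: []v1.VolumeMount{{
					Name:      "test-volume",
					MountPath: "/mnt/test",
				}},
			}},
			Volumes: []v1.Volume{{
				Name:         "test-volume",
				VolumeSource: *r.VolSource, // inline CSI source built above
			}},
			RestartPolicy: v1.RestartPolicyNever,
		},
	}
}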
@ -269,52 +273,52 @@ func createVolumeSource(pvcName string, readOnly bool) *v1.VolumeSource {
}
// cleanupResource cleans up genericVolumeTestResource
func (r *genericVolumeTestResource) cleanupResource() error {
f := r.config.Framework
// CleanupResource cleans up VolumeResource
func (r *VolumeResource) CleanupResource() error {
f := r.Config.Framework
var cleanUpErrs []error
if r.pvc != nil || r.pv != nil {
switch r.pattern.VolType {
if r.Pvc != nil || r.Pv != nil {
switch r.Pattern.VolType {
case testpatterns.PreprovisionedPV:
ginkgo.By("Deleting pv and pvc")
if errs := e2epv.PVPVCCleanup(f.ClientSet, f.Namespace.Name, r.pv, r.pvc); len(errs) != 0 {
if errs := e2epv.PVPVCCleanup(f.ClientSet, f.Namespace.Name, r.Pv, r.Pvc); len(errs) != 0 {
framework.Failf("Failed to delete PVC or PV: %v", utilerrors.NewAggregate(errs))
}
case testpatterns.DynamicPV:
ginkgo.By("Deleting pvc")
// We only delete the PVC so that PV (and disk) can be cleaned up by dynamic provisioner
if r.pv != nil && r.pv.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimDelete {
if r.Pv != nil && r.Pv.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimDelete {
framework.Failf("Test framework does not currently support Dynamically Provisioned Persistent Volume %v specified with reclaim policy that isnt %v",
r.pv.Name, v1.PersistentVolumeReclaimDelete)
r.Pv.Name, v1.PersistentVolumeReclaimDelete)
}
if r.pvc != nil {
err := e2epv.DeletePersistentVolumeClaim(f.ClientSet, r.pvc.Name, f.Namespace.Name)
if r.Pvc != nil {
err := e2epv.DeletePersistentVolumeClaim(f.ClientSet, r.Pvc.Name, f.Namespace.Name)
if err != nil {
cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err, "Failed to delete PVC %v", r.pvc.Name))
cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err, "Failed to delete PVC %v", r.Pvc.Name))
}
if r.pv != nil {
err = framework.WaitForPersistentVolumeDeleted(f.ClientSet, r.pv.Name, 5*time.Second, 5*time.Minute)
if r.Pv != nil {
err = framework.WaitForPersistentVolumeDeleted(f.ClientSet, r.Pv.Name, 5*time.Second, 5*time.Minute)
if err != nil {
cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err,
"Persistent Volume %v not deleted by dynamic provisioner", r.pv.Name))
"Persistent Volume %v not deleted by dynamic provisioner", r.Pv.Name))
}
}
}
default:
framework.Failf("Found PVC (%v) or PV (%v) but not running Preprovisioned or Dynamic test pattern", r.pvc, r.pv)
framework.Failf("Found PVC (%v) or PV (%v) but not running Preprovisioned or Dynamic test pattern", r.Pvc, r.Pv)
}
}
if r.sc != nil {
if r.Sc != nil {
ginkgo.By("Deleting sc")
if err := deleteStorageClass(f.ClientSet, r.sc.Name); err != nil {
cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err, "Failed to delete StorageClass %v", r.sc.Name))
if err := deleteStorageClass(f.ClientSet, r.Sc.Name); err != nil {
cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err, "Failed to delete StorageClass %v", r.Sc.Name))
}
}
// Cleanup volume for pre-provisioned volume tests
if r.volume != nil {
if err := tryFunc(r.volume.DeleteVolume); err != nil {
if r.Volume != nil {
if err := tryFunc(r.Volume.DeleteVolume); err != nil {
cleanUpErrs = append(cleanUpErrs, errors.Wrap(err, "Failed to delete Volume"))
}
}
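Taken together, the exported helpers give external suites the same create/cleanup pairing the in-tree suites use. A minimal sketch, assuming driver and config come from the driver's PrepareTest and that the size range fits the driver; the wrapper function itself is illustrative:

package example

import (
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/framework/volume"
	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
	"k8s.io/kubernetes/test/e2e/storage/testsuites"
)

// withVolumeResource pairs CreateVolumeResource with CleanupResource the way
// the in-tree suites do, then runs the supplied test against the resource.
func withVolumeResource(driver testsuites.TestDriver, config *testsuites.PerTestConfig,
	pattern testpatterns.TestPattern, test func(*testsuites.VolumeResource)) {
	resource := testsuites.CreateVolumeResource(driver, config, pattern, volume.SizeRange{Min: "1Mi"})
	defer func() {
		framework.ExpectNoError(resource.CleanupResource(), "while cleaning up volume resource")
	}()
	test(resource)
}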

View File

@ -39,9 +39,9 @@ var _ TestSuite = &disruptiveTestSuite{}
func InitDisruptiveTestSuite() TestSuite {
return &disruptiveTestSuite{
tsInfo: TestSuiteInfo{
name: "disruptive",
featureTag: "[Disruptive]",
testPatterns: []testpatterns.TestPattern{
Name: "disruptive",
FeatureTag: "[Disruptive]",
TestPatterns: []testpatterns.TestPattern{
// FSVolMode is already covered in subpath testsuite
testpatterns.DefaultFsInlineVolume,
testpatterns.FsVolModePreprovisionedPV,
@ -52,15 +52,15 @@ func InitDisruptiveTestSuite() TestSuite {
},
}
}
func (s *disruptiveTestSuite) getTestSuiteInfo() TestSuiteInfo {
func (s *disruptiveTestSuite) GetTestSuiteInfo() TestSuiteInfo {
return s.tsInfo
}
func (s *disruptiveTestSuite) skipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) {
func (s *disruptiveTestSuite) SkipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) {
skipVolTypePatterns(pattern, driver, testpatterns.NewVolTypeMap(testpatterns.PreprovisionedPV))
}
func (s *disruptiveTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
func (s *disruptiveTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) {
type local struct {
config *PerTestConfig
driverCleanup func()
@ -68,8 +68,8 @@ func (s *disruptiveTestSuite) defineTests(driver TestDriver, pattern testpattern
cs clientset.Interface
ns *v1.Namespace
// genericVolumeTestResource contains pv, pvc, sc, etc., owns cleaning that up
resource *genericVolumeTestResource
// VolumeResource contains pv, pvc, sc, etc., owns cleaning that up
resource *VolumeResource
pod *v1.Pod
}
var l local
@ -94,8 +94,8 @@ func (s *disruptiveTestSuite) defineTests(driver TestDriver, pattern testpattern
framework.Skipf("Driver %s doesn't support %v -- skipping", driver.GetDriverInfo().Name, pattern.VolMode)
}
testVolumeSizeRange := s.getTestSuiteInfo().supportedSizeRange
l.resource = createGenericVolumeTestResource(driver, l.config, pattern, testVolumeSizeRange)
testVolumeSizeRange := s.GetTestSuiteInfo().SupportedSizeRange
l.resource = CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
}
cleanup := func() {
@ -108,7 +108,7 @@ func (s *disruptiveTestSuite) defineTests(driver TestDriver, pattern testpattern
}
if l.resource != nil {
err := l.resource.cleanupResource()
err := l.resource.CleanupResource()
errs = append(errs, err)
l.resource = nil
}
@ -154,9 +154,9 @@ func (s *disruptiveTestSuite) defineTests(driver TestDriver, pattern testpattern
var pvcs []*v1.PersistentVolumeClaim
var inlineSources []*v1.VolumeSource
if pattern.VolType == testpatterns.InlineVolume {
inlineSources = append(inlineSources, l.resource.volSource)
inlineSources = append(inlineSources, l.resource.VolSource)
} else {
pvcs = append(pvcs, l.resource.pvc)
pvcs = append(pvcs, l.resource.Pvc)
}
ginkgo.By("Creating a pod with pvc")
l.pod, err = e2epod.CreateSecPodWithNodeSelection(l.cs, l.ns.Name, pvcs, inlineSources, false, "", false, false, e2epv.SELinuxLabel, nil, e2epod.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)

View File

@ -37,7 +37,7 @@ func GetDriverNameWithFeatureTags(driver TestDriver) string {
return fmt.Sprintf("[Driver: %s]%s", dInfo.Name, dInfo.FeatureTag)
}
// CreateVolume creates volume for test unless dynamicPV test
// CreateVolume creates volume for test unless dynamicPV or CSI ephemeral inline volume test
func CreateVolume(driver TestDriver, config *PerTestConfig, volType testpatterns.TestVolType) TestVolume {
switch volType {
case testpatterns.InlineVolume:
@ -46,6 +46,8 @@ func CreateVolume(driver TestDriver, config *PerTestConfig, volType testpatterns
if pDriver, ok := driver.(PreprovisionedVolumeTestDriver); ok {
return pDriver.CreateVolume(config, volType)
}
case testpatterns.CSIInlineVolume:
fallthrough
case testpatterns.DynamicPV:
// No need to create volume
default:

View File

@ -45,8 +45,8 @@ var _ TestSuite = &ephemeralTestSuite{}
func InitEphemeralTestSuite() TestSuite {
return &ephemeralTestSuite{
tsInfo: TestSuiteInfo{
name: "ephemeral",
testPatterns: []testpatterns.TestPattern{
Name: "ephemeral",
TestPatterns: []testpatterns.TestPattern{
{
Name: "inline ephemeral CSI volume",
VolType: testpatterns.CSIInlineVolume,
@ -56,14 +56,14 @@ func InitEphemeralTestSuite() TestSuite {
}
}
func (p *ephemeralTestSuite) getTestSuiteInfo() TestSuiteInfo {
func (p *ephemeralTestSuite) GetTestSuiteInfo() TestSuiteInfo {
return p.tsInfo
}
func (p *ephemeralTestSuite) skipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) {
func (p *ephemeralTestSuite) SkipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) {
}
func (p *ephemeralTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) {
type local struct {
config *PerTestConfig
driverCleanup func()

View File

@ -45,29 +45,29 @@ var _ TestSuite = &multiVolumeTestSuite{}
func InitMultiVolumeTestSuite() TestSuite {
return &multiVolumeTestSuite{
tsInfo: TestSuiteInfo{
name: "multiVolume [Slow]",
testPatterns: []testpatterns.TestPattern{
Name: "multiVolume [Slow]",
TestPatterns: []testpatterns.TestPattern{
testpatterns.FsVolModePreprovisionedPV,
testpatterns.FsVolModeDynamicPV,
testpatterns.BlockVolModePreprovisionedPV,
testpatterns.BlockVolModeDynamicPV,
},
supportedSizeRange: volume.SizeRange{
SupportedSizeRange: volume.SizeRange{
Min: "1Mi",
},
},
}
}
func (t *multiVolumeTestSuite) getTestSuiteInfo() TestSuiteInfo {
func (t *multiVolumeTestSuite) GetTestSuiteInfo() TestSuiteInfo {
return t.tsInfo
}
func (t *multiVolumeTestSuite) skipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) {
func (t *multiVolumeTestSuite) SkipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) {
skipVolTypePatterns(pattern, driver, testpatterns.NewVolTypeMap(testpatterns.PreprovisionedPV))
}
func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) {
type local struct {
config *PerTestConfig
driverCleanup func()
@ -75,7 +75,7 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter
cs clientset.Interface
ns *v1.Namespace
driver TestDriver
resources []*genericVolumeTestResource
resources []*VolumeResource
intreeOps opCounts
migratedOps opCounts
@ -112,7 +112,7 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter
cleanup := func() {
var errs []error
for _, resource := range l.resources {
errs = append(errs, resource.cleanupResource())
errs = append(errs, resource.CleanupResource())
}
errs = append(errs, tryFunc(l.driverCleanup))
@ -141,10 +141,10 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter
numVols := 2
for i := 0; i < numVols; i++ {
testVolumeSizeRange := t.getTestSuiteInfo().supportedSizeRange
resource := createGenericVolumeTestResource(driver, l.config, pattern, testVolumeSizeRange)
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
resource := CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
l.resources = append(l.resources, resource)
pvcs = append(pvcs, resource.pvc)
pvcs = append(pvcs, resource.Pvc)
}
TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name,
@ -184,10 +184,10 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter
numVols := 2
for i := 0; i < numVols; i++ {
testVolumeSizeRange := t.getTestSuiteInfo().supportedSizeRange
resource := createGenericVolumeTestResource(driver, l.config, pattern, testVolumeSizeRange)
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
resource := CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
l.resources = append(l.resources, resource)
pvcs = append(pvcs, resource.pvc)
pvcs = append(pvcs, resource.Pvc)
}
TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name,
@ -223,10 +223,10 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter
// 1st volume should be block and set filesystem for 2nd and later volumes
curPattern.VolMode = v1.PersistentVolumeFilesystem
}
testVolumeSizeRange := t.getTestSuiteInfo().supportedSizeRange
resource := createGenericVolumeTestResource(driver, l.config, curPattern, testVolumeSizeRange)
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
resource := CreateVolumeResource(driver, l.config, curPattern, testVolumeSizeRange)
l.resources = append(l.resources, resource)
pvcs = append(pvcs, resource.pvc)
pvcs = append(pvcs, resource.Pvc)
}
TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name,
@ -275,10 +275,10 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter
// 1st volume should be block and set filesystem for 2nd and later volumes
curPattern.VolMode = v1.PersistentVolumeFilesystem
}
testVolumeSizeRange := t.getTestSuiteInfo().supportedSizeRange
resource := createGenericVolumeTestResource(driver, l.config, curPattern, testVolumeSizeRange)
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
resource := CreateVolumeResource(driver, l.config, curPattern, testVolumeSizeRange)
l.resources = append(l.resources, resource)
pvcs = append(pvcs, resource.pvc)
pvcs = append(pvcs, resource.Pvc)
}
TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name,
@ -301,13 +301,13 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter
}
// Create volume
testVolumeSizeRange := t.getTestSuiteInfo().supportedSizeRange
resource := createGenericVolumeTestResource(l.driver, l.config, pattern, testVolumeSizeRange)
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
resource := CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange)
l.resources = append(l.resources, resource)
// Test access to the volume from pods on different node
TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
e2epod.NodeSelection{Name: l.config.ClientNodeName}, resource.pvc, numPods, true /* sameNode */)
e2epod.NodeSelection{Name: l.config.ClientNodeName}, resource.Pvc, numPods, true /* sameNode */)
})
// This tests below configuration:
@ -336,13 +336,13 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter
}
// Create volume
testVolumeSizeRange := t.getTestSuiteInfo().supportedSizeRange
resource := createGenericVolumeTestResource(l.driver, l.config, pattern, testVolumeSizeRange)
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
resource := CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange)
l.resources = append(l.resources, resource)
// Test access to the volume from pods on different node
TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
e2epod.NodeSelection{Name: l.config.ClientNodeName}, resource.pvc, numPods, false /* sameNode */)
e2epod.NodeSelection{Name: l.config.ClientNodeName}, resource.Pvc, numPods, false /* sameNode */)
})
}

View File

@ -69,26 +69,26 @@ var _ TestSuite = &provisioningTestSuite{}
func InitProvisioningTestSuite() TestSuite {
return &provisioningTestSuite{
tsInfo: TestSuiteInfo{
name: "provisioning",
testPatterns: []testpatterns.TestPattern{
Name: "provisioning",
TestPatterns: []testpatterns.TestPattern{
testpatterns.DefaultFsDynamicPV,
testpatterns.NtfsDynamicPV,
},
supportedSizeRange: volume.SizeRange{
SupportedSizeRange: volume.SizeRange{
Min: "1Mi",
},
},
}
}
func (p *provisioningTestSuite) getTestSuiteInfo() TestSuiteInfo {
func (p *provisioningTestSuite) GetTestSuiteInfo() TestSuiteInfo {
return p.tsInfo
}
func (p *provisioningTestSuite) skipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) {
func (p *provisioningTestSuite) SkipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) {
}
func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) {
type local struct {
config *PerTestConfig
driverCleanup func()
@ -111,7 +111,7 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
ginkgo.BeforeEach(func() {
// Check preconditions.
if pattern.VolType != testpatterns.DynamicPV {
framework.Skipf("Suite %q does not support %v", p.tsInfo.name, pattern.VolType)
framework.Skipf("Suite %q does not support %v", p.tsInfo.Name, pattern.VolType)
}
ok := false
dDriver, ok = driver.(DynamicPVTestDriver)
@ -133,7 +133,7 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
l.config, l.driverCleanup = driver.PrepareTest(f)
l.intreeOps, l.migratedOps = getMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName)
l.cs = l.config.Framework.ClientSet
testVolumeSizeRange := p.getTestSuiteInfo().supportedSizeRange
testVolumeSizeRange := p.GetTestSuiteInfo().SupportedSizeRange
driverVolumeSizeRange := dDriver.GetDriverInfo().SupportedSizeRange
claimSize, err := getSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange)
framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, driverVolumeSizeRange)

View File

@ -56,25 +56,25 @@ var _ TestSuite = &snapshottableTestSuite{}
func InitSnapshottableTestSuite() TestSuite {
return &snapshottableTestSuite{
tsInfo: TestSuiteInfo{
name: "snapshottable",
testPatterns: []testpatterns.TestPattern{
Name: "snapshottable",
TestPatterns: []testpatterns.TestPattern{
testpatterns.DynamicSnapshot,
},
supportedSizeRange: volume.SizeRange{
SupportedSizeRange: volume.SizeRange{
Min: "1Mi",
},
},
}
}
func (s *snapshottableTestSuite) getTestSuiteInfo() TestSuiteInfo {
func (s *snapshottableTestSuite) GetTestSuiteInfo() TestSuiteInfo {
return s.tsInfo
}
func (s *snapshottableTestSuite) skipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) {
func (s *snapshottableTestSuite) SkipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) {
}
func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) {
var (
sDriver SnapshottableTestDriver
dDriver DynamicPVTestDriver
@ -117,7 +117,7 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt
if class == nil {
framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", driver.GetDriverInfo().Name)
}
testVolumeSizeRange := s.getTestSuiteInfo().supportedSizeRange
testVolumeSizeRange := s.GetTestSuiteInfo().SupportedSizeRange
driverVolumeSizeRange := dDriver.GetDriverInfo().SupportedSizeRange
claimSize, err := getSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange)
framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, driverVolumeSizeRange)

View File

@ -61,37 +61,37 @@ var _ TestSuite = &subPathTestSuite{}
func InitSubPathTestSuite() TestSuite {
return &subPathTestSuite{
tsInfo: TestSuiteInfo{
name: "subPath",
testPatterns: []testpatterns.TestPattern{
Name: "subPath",
TestPatterns: []testpatterns.TestPattern{
testpatterns.DefaultFsInlineVolume,
testpatterns.DefaultFsPreprovisionedPV,
testpatterns.DefaultFsDynamicPV,
testpatterns.NtfsDynamicPV,
},
supportedSizeRange: volume.SizeRange{
SupportedSizeRange: volume.SizeRange{
Min: "1Mi",
},
},
}
}
func (s *subPathTestSuite) getTestSuiteInfo() TestSuiteInfo {
func (s *subPathTestSuite) GetTestSuiteInfo() TestSuiteInfo {
return s.tsInfo
}
func (s *subPathTestSuite) skipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) {
func (s *subPathTestSuite) SkipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) {
skipVolTypePatterns(pattern, driver, testpatterns.NewVolTypeMap(
testpatterns.PreprovisionedPV,
testpatterns.InlineVolume))
}
func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
func (s *subPathTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) {
type local struct {
config *PerTestConfig
driverCleanup func()
hostExec utils.HostExec
resource *genericVolumeTestResource
resource *VolumeResource
roVolSource *v1.VolumeSource
pod *v1.Pod
formatPod *v1.Pod
@ -118,8 +118,8 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
// Now do the more expensive test initialization.
l.config, l.driverCleanup = driver.PrepareTest(f)
l.intreeOps, l.migratedOps = getMigrationVolumeOpCounts(f.ClientSet, driver.GetDriverInfo().InTreePluginName)
testVolumeSizeRange := s.getTestSuiteInfo().supportedSizeRange
l.resource = createGenericVolumeTestResource(driver, l.config, pattern, testVolumeSizeRange)
testVolumeSizeRange := s.GetTestSuiteInfo().SupportedSizeRange
l.resource = CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
l.hostExec = utils.NewHostExec(f)
// Setup subPath test dependent resource
@ -127,19 +127,19 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
switch volType {
case testpatterns.InlineVolume:
if iDriver, ok := driver.(InlineVolumeTestDriver); ok {
l.roVolSource = iDriver.GetVolumeSource(true, pattern.FsType, l.resource.volume)
l.roVolSource = iDriver.GetVolumeSource(true, pattern.FsType, l.resource.Volume)
}
case testpatterns.PreprovisionedPV:
l.roVolSource = &v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: l.resource.pvc.Name,
ClaimName: l.resource.Pvc.Name,
ReadOnly: true,
},
}
case testpatterns.DynamicPV:
l.roVolSource = &v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: l.resource.pvc.Name,
ClaimName: l.resource.Pvc.Name,
ReadOnly: true,
},
}
@ -148,11 +148,11 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
}
subPath := f.Namespace.Name
l.pod = SubpathTestPod(f, subPath, l.resource.volType, l.resource.volSource, true)
l.pod = SubpathTestPod(f, subPath, l.resource.VolType, l.resource.VolSource, true)
l.pod.Spec.NodeName = l.config.ClientNodeName
l.pod.Spec.NodeSelector = l.config.ClientNodeSelector
l.formatPod = volumeFormatPod(f, l.resource.volSource)
l.formatPod = volumeFormatPod(f, l.resource.VolSource)
l.formatPod.Spec.NodeName = l.config.ClientNodeName
l.formatPod.Spec.NodeSelector = l.config.ClientNodeSelector
@ -171,7 +171,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
}
if l.resource != nil {
errs = append(errs, l.resource.cleanupResource())
errs = append(errs, l.resource.CleanupResource())
l.resource = nil
}
@ -348,9 +348,9 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
init()
defer cleanup()
if strings.HasPrefix(l.resource.volType, "hostPath") || strings.HasPrefix(l.resource.volType, "csi-hostpath") {
if strings.HasPrefix(l.resource.VolType, "hostPath") || strings.HasPrefix(l.resource.VolType, "csi-hostpath") {
// TODO: This skip should be removed once #61446 is fixed
framework.Skipf("%s volume type does not support reconstruction, skipping", l.resource.volType)
framework.Skipf("%s volume type does not support reconstruction, skipping", l.resource.VolType)
}
testSubpathReconstruction(f, l.hostExec, l.pod, true)
@ -390,7 +390,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
init()
defer cleanup()
if l.roVolSource == nil {
framework.Skipf("Volume type %v doesn't support readOnly source", l.resource.volType)
framework.Skipf("Volume type %v doesn't support readOnly source", l.resource.VolType)
}
origpod := l.pod.DeepCopy()
@ -418,7 +418,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
init()
defer cleanup()
if l.roVolSource == nil {
framework.Skipf("Volume type %v doesn't support readOnly source", l.resource.volType)
framework.Skipf("Volume type %v doesn't support readOnly source", l.resource.VolType)
}
// Format the volume while it's writable

View File

@ -46,7 +46,7 @@ type topologyTest struct {
intreeOps opCounts
migratedOps opCounts
resource genericVolumeTestResource
resource VolumeResource
pod *v1.Pod
allTopologies []topology
}
@ -59,8 +59,8 @@ var _ TestSuite = &topologyTestSuite{}
func InitTopologyTestSuite() TestSuite {
return &topologyTestSuite{
tsInfo: TestSuiteInfo{
name: "topology",
testPatterns: []testpatterns.TestPattern{
Name: "topology",
TestPatterns: []testpatterns.TestPattern{
testpatterns.TopologyImmediate,
testpatterns.TopologyDelayed,
},
@ -68,14 +68,14 @@ func InitTopologyTestSuite() TestSuite {
}
}
func (t *topologyTestSuite) getTestSuiteInfo() TestSuiteInfo {
func (t *topologyTestSuite) GetTestSuiteInfo() TestSuiteInfo {
return t.tsInfo
}
func (t *topologyTestSuite) skipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) {
func (t *topologyTestSuite) SkipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) {
}
func (t *topologyTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
func (t *topologyTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) {
var (
dInfo = driver.GetDriverInfo()
dDriver DynamicPVTestDriver
@ -111,10 +111,9 @@ func (t *topologyTestSuite) defineTests(driver TestDriver, pattern testpatterns.
// Now do the more expensive test initialization.
l.config, l.driverCleanup = driver.PrepareTest(f)
l.resource = genericVolumeTestResource{
driver: driver,
config: l.config,
pattern: pattern,
l.resource = VolumeResource{
Config: l.config,
Pattern: pattern,
}
// After driver is installed, check driver topologies on nodes
@ -135,17 +134,17 @@ func (t *topologyTestSuite) defineTests(driver TestDriver, pattern testpatterns.
framework.Skipf("Not enough topologies in cluster -- skipping")
}
l.resource.sc = dDriver.GetDynamicProvisionStorageClass(l.config, pattern.FsType)
framework.ExpectNotEqual(l.resource.sc, nil, "driver failed to provide a StorageClass")
l.resource.sc.VolumeBindingMode = &pattern.BindingMode
l.resource.Sc = dDriver.GetDynamicProvisionStorageClass(l.config, pattern.FsType)
framework.ExpectNotEqual(l.resource.Sc, nil, "driver failed to provide a StorageClass")
l.resource.Sc.VolumeBindingMode = &pattern.BindingMode
testVolumeSizeRange := t.getTestSuiteInfo().supportedSizeRange
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
driverVolumeSizeRange := dDriver.GetDriverInfo().SupportedSizeRange
claimSize, err := getSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange)
framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, driverVolumeSizeRange)
l.resource.pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
l.resource.Pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: claimSize,
StorageClassName: &(l.resource.sc.Name),
StorageClassName: &(l.resource.Sc.Name),
}, l.config.Framework.Namespace.Name)
l.intreeOps, l.migratedOps = getMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName)
@ -153,7 +152,7 @@ func (t *topologyTestSuite) defineTests(driver TestDriver, pattern testpatterns.
}
cleanup := func(l topologyTest) {
t.cleanupResources(cs, &l)
t.CleanupResources(cs, &l)
err := tryFunc(l.driverCleanup)
l.driverCleanup = nil
framework.ExpectNoError(err, "while cleaning up driver")
@ -172,7 +171,7 @@ func (t *topologyTestSuite) defineTests(driver TestDriver, pattern testpatterns.
if len(l.allTopologies) > dInfo.NumAllowedTopologies {
excludedIndex = rand.Intn(len(l.allTopologies))
}
allowedTopologies := t.setAllowedTopologies(l.resource.sc, l.allTopologies, excludedIndex)
allowedTopologies := t.setAllowedTopologies(l.resource.Sc, l.allTopologies, excludedIndex)
t.createResources(cs, &l, nil)
@ -201,7 +200,7 @@ func (t *topologyTestSuite) defineTests(driver TestDriver, pattern testpatterns.
// Exclude one topology
excludedIndex := rand.Intn(len(l.allTopologies))
t.setAllowedTopologies(l.resource.sc, l.allTopologies, excludedIndex)
t.setAllowedTopologies(l.resource.Sc, l.allTopologies, excludedIndex)
// Set pod nodeSelector to the excluded topology
exprs := []v1.NodeSelectorRequirement{}
@ -322,19 +321,19 @@ func (t *topologyTestSuite) verifyNodeTopology(node *v1.Node, allowedTopos []top
func (t *topologyTestSuite) createResources(cs clientset.Interface, l *topologyTest, affinity *v1.Affinity) {
var err error
framework.Logf("Creating storage class object and pvc object for driver - sc: %v, pvc: %v", l.resource.sc, l.resource.pvc)
framework.Logf("Creating storage class object and pvc object for driver - sc: %v, pvc: %v", l.resource.Sc, l.resource.Pvc)
ginkgo.By("Creating sc")
l.resource.sc, err = cs.StorageV1().StorageClasses().Create(l.resource.sc)
l.resource.Sc, err = cs.StorageV1().StorageClasses().Create(l.resource.Sc)
framework.ExpectNoError(err)
ginkgo.By("Creating pvc")
l.resource.pvc, err = cs.CoreV1().PersistentVolumeClaims(l.resource.pvc.Namespace).Create(l.resource.pvc)
l.resource.Pvc, err = cs.CoreV1().PersistentVolumeClaims(l.resource.Pvc.Namespace).Create(l.resource.Pvc)
framework.ExpectNoError(err)
ginkgo.By("Creating pod")
l.pod = e2epod.MakeSecPod(l.config.Framework.Namespace.Name,
[]*v1.PersistentVolumeClaim{l.resource.pvc},
[]*v1.PersistentVolumeClaim{l.resource.Pvc},
nil,
false,
"",
@ -347,13 +346,13 @@ func (t *topologyTestSuite) createResources(cs clientset.Interface, l *topologyT
framework.ExpectNoError(err)
}
func (t *topologyTestSuite) cleanupResources(cs clientset.Interface, l *topologyTest) {
func (t *topologyTestSuite) CleanupResources(cs clientset.Interface, l *topologyTest) {
if l.pod != nil {
ginkgo.By("Deleting pod")
err := e2epod.DeletePodWithWait(cs, l.pod)
framework.ExpectNoError(err, "while deleting pod")
}
err := l.resource.cleanupResource()
err := l.resource.CleanupResource()
framework.ExpectNoError(err, "while clean up resource")
}

View File

@ -54,33 +54,33 @@ var _ TestSuite = &volumeExpandTestSuite{}
func InitVolumeExpandTestSuite() TestSuite {
return &volumeExpandTestSuite{
tsInfo: TestSuiteInfo{
name: "volume-expand",
testPatterns: []testpatterns.TestPattern{
Name: "volume-expand",
TestPatterns: []testpatterns.TestPattern{
testpatterns.DefaultFsDynamicPV,
testpatterns.BlockVolModeDynamicPV,
testpatterns.DefaultFsDynamicPVAllowExpansion,
testpatterns.BlockVolModeDynamicPVAllowExpansion,
},
supportedSizeRange: volume.SizeRange{
SupportedSizeRange: volume.SizeRange{
Min: "1Mi",
},
},
}
}
func (v *volumeExpandTestSuite) getTestSuiteInfo() TestSuiteInfo {
func (v *volumeExpandTestSuite) GetTestSuiteInfo() TestSuiteInfo {
return v.tsInfo
}
func (v *volumeExpandTestSuite) skipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) {
func (v *volumeExpandTestSuite) SkipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) {
}
func (v *volumeExpandTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
func (v *volumeExpandTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) {
type local struct {
config *PerTestConfig
driverCleanup func()
resource *genericVolumeTestResource
resource *VolumeResource
pod *v1.Pod
pod2 *v1.Pod
@ -111,8 +111,8 @@ func (v *volumeExpandTestSuite) defineTests(driver TestDriver, pattern testpatte
// Now do the more expensive test initialization.
l.config, l.driverCleanup = driver.PrepareTest(f)
l.intreeOps, l.migratedOps = getMigrationVolumeOpCounts(f.ClientSet, driver.GetDriverInfo().InTreePluginName)
testVolumeSizeRange := v.getTestSuiteInfo().supportedSizeRange
l.resource = createGenericVolumeTestResource(driver, l.config, pattern, testVolumeSizeRange)
testVolumeSizeRange := v.GetTestSuiteInfo().SupportedSizeRange
l.resource = CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
}
cleanup := func() {
@ -132,7 +132,7 @@ func (v *volumeExpandTestSuite) defineTests(driver TestDriver, pattern testpatte
}
if l.resource != nil {
errs = append(errs, l.resource.cleanupResource())
errs = append(errs, l.resource.CleanupResource())
l.resource = nil
}
@ -148,13 +148,13 @@ func (v *volumeExpandTestSuite) defineTests(driver TestDriver, pattern testpatte
defer cleanup()
var err error
gomega.Expect(l.resource.sc.AllowVolumeExpansion).To(gomega.BeNil())
gomega.Expect(l.resource.Sc.AllowVolumeExpansion).To(gomega.BeNil())
ginkgo.By("Expanding non-expandable pvc")
currentPvcSize := l.resource.pvc.Spec.Resources.Requests[v1.ResourceStorage]
currentPvcSize := l.resource.Pvc.Spec.Resources.Requests[v1.ResourceStorage]
newSize := currentPvcSize.DeepCopy()
newSize.Add(resource.MustParse("1Gi"))
framework.Logf("currentPvcSize %v, newSize %v", currentPvcSize, newSize)
_, err = ExpandPVCSize(l.resource.pvc, newSize, f.ClientSet)
_, err = ExpandPVCSize(l.resource.Pvc, newSize, f.ClientSet)
framework.ExpectError(err, "While updating non-expandable PVC")
})
} else {
@ -164,7 +164,7 @@ func (v *volumeExpandTestSuite) defineTests(driver TestDriver, pattern testpatte
var err error
ginkgo.By("Creating a pod with dynamically provisioned volume")
l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil, e2epod.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil, e2epod.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
defer func() {
err = e2epod.DeletePodWithWait(f.ClientSet, l.pod)
framework.ExpectNoError(err, "while cleaning up pod already deleted in resize test")
@ -177,31 +177,31 @@ func (v *volumeExpandTestSuite) defineTests(driver TestDriver, pattern testpatte
// We expand the PVC while no pod is using it to ensure offline expansion
ginkgo.By("Expanding current pvc")
currentPvcSize := l.resource.pvc.Spec.Resources.Requests[v1.ResourceStorage]
currentPvcSize := l.resource.Pvc.Spec.Resources.Requests[v1.ResourceStorage]
newSize := currentPvcSize.DeepCopy()
newSize.Add(resource.MustParse("1Gi"))
framework.Logf("currentPvcSize %v, newSize %v", currentPvcSize, newSize)
newPVC, err := ExpandPVCSize(l.resource.pvc, newSize, f.ClientSet)
newPVC, err := ExpandPVCSize(l.resource.Pvc, newSize, f.ClientSet)
framework.ExpectNoError(err, "While updating pvc for more size")
l.resource.pvc = newPVC
gomega.Expect(l.resource.pvc).NotTo(gomega.BeNil())
l.resource.Pvc = newPVC
gomega.Expect(l.resource.Pvc).NotTo(gomega.BeNil())
pvcSize := l.resource.pvc.Spec.Resources.Requests[v1.ResourceStorage]
pvcSize := l.resource.Pvc.Spec.Resources.Requests[v1.ResourceStorage]
if pvcSize.Cmp(newSize) != 0 {
framework.Failf("error updating pvc size %q", l.resource.pvc.Name)
framework.Failf("error updating pvc size %q", l.resource.Pvc.Name)
}
ginkgo.By("Waiting for cloudprovider resize to finish")
err = WaitForControllerVolumeResize(l.resource.pvc, f.ClientSet, totalResizeWaitPeriod)
err = WaitForControllerVolumeResize(l.resource.Pvc, f.ClientSet, totalResizeWaitPeriod)
framework.ExpectNoError(err, "While waiting for pvc resize to finish")
ginkgo.By("Checking for conditions on pvc")
npvc, err := WaitForPendingFSResizeCondition(l.resource.pvc, f.ClientSet)
npvc, err := WaitForPendingFSResizeCondition(l.resource.Pvc, f.ClientSet)
framework.ExpectNoError(err, "While waiting for pvc to have fs resizing condition")
l.resource.pvc = npvc
l.resource.Pvc = npvc
ginkgo.By("Creating a new pod with same volume")
l.pod2, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil, e2epod.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
l.pod2, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil, e2epod.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
defer func() {
err = e2epod.DeletePodWithWait(f.ClientSet, l.pod2)
framework.ExpectNoError(err, "while cleaning up pod before exiting resizing test")
@ -209,10 +209,10 @@ func (v *volumeExpandTestSuite) defineTests(driver TestDriver, pattern testpatte
framework.ExpectNoError(err, "while recreating pod for resizing")
ginkgo.By("Waiting for file system resize to finish")
l.resource.pvc, err = WaitForFSResize(l.resource.pvc, f.ClientSet)
l.resource.Pvc, err = WaitForFSResize(l.resource.Pvc, f.ClientSet)
framework.ExpectNoError(err, "while waiting for fs resize to finish")
pvcConditions := l.resource.pvc.Status.Conditions
pvcConditions := l.resource.Pvc.Status.Conditions
framework.ExpectEqual(len(pvcConditions), 0, "pvc should not have conditions")
})
@ -222,7 +222,7 @@ func (v *volumeExpandTestSuite) defineTests(driver TestDriver, pattern testpatte
var err error
ginkgo.By("Creating a pod with dynamically provisioned volume")
l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil, e2epod.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil, e2epod.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
defer func() {
err = e2epod.DeletePodWithWait(f.ClientSet, l.pod)
framework.ExpectNoError(err, "while cleaning up pod already deleted in resize test")
@ -231,29 +231,29 @@ func (v *volumeExpandTestSuite) defineTests(driver TestDriver, pattern testpatte
// We expand the PVC while no pod is using it to ensure offline expansion
ginkgo.By("Expanding current pvc")
currentPvcSize := l.resource.pvc.Spec.Resources.Requests[v1.ResourceStorage]
currentPvcSize := l.resource.Pvc.Spec.Resources.Requests[v1.ResourceStorage]
newSize := currentPvcSize.DeepCopy()
newSize.Add(resource.MustParse("1Gi"))
framework.Logf("currentPvcSize %v, newSize %v", currentPvcSize, newSize)
newPVC, err := ExpandPVCSize(l.resource.pvc, newSize, f.ClientSet)
newPVC, err := ExpandPVCSize(l.resource.Pvc, newSize, f.ClientSet)
framework.ExpectNoError(err, "While updating pvc for more size")
l.resource.pvc = newPVC
gomega.Expect(l.resource.pvc).NotTo(gomega.BeNil())
l.resource.Pvc = newPVC
gomega.Expect(l.resource.Pvc).NotTo(gomega.BeNil())
pvcSize := l.resource.pvc.Spec.Resources.Requests[v1.ResourceStorage]
pvcSize := l.resource.Pvc.Spec.Resources.Requests[v1.ResourceStorage]
if pvcSize.Cmp(newSize) != 0 {
framework.Failf("error updating pvc size %q", l.resource.pvc.Name)
framework.Failf("error updating pvc size %q", l.resource.Pvc.Name)
}
ginkgo.By("Waiting for cloudprovider resize to finish")
err = WaitForControllerVolumeResize(l.resource.pvc, f.ClientSet, totalResizeWaitPeriod)
err = WaitForControllerVolumeResize(l.resource.Pvc, f.ClientSet, totalResizeWaitPeriod)
framework.ExpectNoError(err, "While waiting for pvc resize to finish")
ginkgo.By("Waiting for file system resize to finish")
l.resource.pvc, err = WaitForFSResize(l.resource.pvc, f.ClientSet)
l.resource.Pvc, err = WaitForFSResize(l.resource.Pvc, f.ClientSet)
framework.ExpectNoError(err, "while waiting for fs resize to finish")
pvcConditions := l.resource.pvc.Status.Conditions
pvcConditions := l.resource.Pvc.Status.Conditions
framework.ExpectEqual(len(pvcConditions), 0, "pvc should not have conditions")
})

View File

@ -63,35 +63,35 @@ var _ TestSuite = &volumeIOTestSuite{}
func InitVolumeIOTestSuite() TestSuite {
return &volumeIOTestSuite{
tsInfo: TestSuiteInfo{
name: "volumeIO",
testPatterns: []testpatterns.TestPattern{
Name: "volumeIO",
TestPatterns: []testpatterns.TestPattern{
testpatterns.DefaultFsInlineVolume,
testpatterns.DefaultFsPreprovisionedPV,
testpatterns.DefaultFsDynamicPV,
},
supportedSizeRange: volume.SizeRange{
SupportedSizeRange: volume.SizeRange{
Min: "1Mi",
},
},
}
}
func (t *volumeIOTestSuite) getTestSuiteInfo() TestSuiteInfo {
func (t *volumeIOTestSuite) GetTestSuiteInfo() TestSuiteInfo {
return t.tsInfo
}
func (t *volumeIOTestSuite) skipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) {
func (t *volumeIOTestSuite) SkipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) {
skipVolTypePatterns(pattern, driver, testpatterns.NewVolTypeMap(
testpatterns.PreprovisionedPV,
testpatterns.InlineVolume))
}
func (t *volumeIOTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
func (t *volumeIOTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) {
type local struct {
config *PerTestConfig
driverCleanup func()
resource *genericVolumeTestResource
resource *VolumeResource
intreeOps opCounts
migratedOps opCounts
@ -116,9 +116,9 @@ func (t *volumeIOTestSuite) defineTests(driver TestDriver, pattern testpatterns.
l.config, l.driverCleanup = driver.PrepareTest(f)
l.intreeOps, l.migratedOps = getMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName)
testVolumeSizeRange := t.getTestSuiteInfo().supportedSizeRange
l.resource = createGenericVolumeTestResource(driver, l.config, pattern, testVolumeSizeRange)
if l.resource.volSource == nil {
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
l.resource = CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
if l.resource.VolSource == nil {
framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name)
}
@ -127,7 +127,7 @@ func (t *volumeIOTestSuite) defineTests(driver TestDriver, pattern testpatterns.
cleanup := func() {
var errs []error
if l.resource != nil {
errs = append(errs, l.resource.cleanupResource())
errs = append(errs, l.resource.CleanupResource())
l.resource = nil
}
@ -155,7 +155,7 @@ func (t *volumeIOTestSuite) defineTests(driver TestDriver, pattern testpatterns.
podSec := v1.PodSecurityContext{
FSGroup: fsGroup,
}
err := testVolumeIO(f, cs, convertTestConfig(l.config), *l.resource.volSource, &podSec, testFile, fileSizes)
err := testVolumeIO(f, cs, convertTestConfig(l.config), *l.resource.VolSource, &podSec, testFile, fileSizes)
framework.ExpectNoError(err)
})
}

View File

@ -60,30 +60,30 @@ var _ TestSuite = &volumeLimitsTestSuite{}
func InitVolumeLimitsTestSuite() TestSuite {
return &volumeLimitsTestSuite{
tsInfo: TestSuiteInfo{
name: "volumeLimits",
testPatterns: []testpatterns.TestPattern{
Name: "volumeLimits",
TestPatterns: []testpatterns.TestPattern{
testpatterns.FsVolModeDynamicPV,
},
},
}
}
func (t *volumeLimitsTestSuite) getTestSuiteInfo() TestSuiteInfo {
func (t *volumeLimitsTestSuite) GetTestSuiteInfo() TestSuiteInfo {
return t.tsInfo
}
func (t *volumeLimitsTestSuite) skipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) {
func (t *volumeLimitsTestSuite) SkipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) {
}
func (t *volumeLimitsTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) {
type local struct {
config *PerTestConfig
testCleanup func()
cs clientset.Interface
ns *v1.Namespace
// genericVolumeTestResource contains pv, pvc, sc, etc. of the first pod created
resource *genericVolumeTestResource
// VolumeResource contains pv, pvc, sc, etc. of the first pod created
resource *VolumeResource
// All created PVCs, incl. the one in resource
pvcs []*v1.PersistentVolumeClaim
@ -141,14 +141,14 @@ func (t *volumeLimitsTestSuite) defineTests(driver TestDriver, pattern testpatte
framework.Logf("Node %s can handle %d volumes of driver %s", nodeName, limit, driverInfo.Name)
// Create a storage class and generate a PVC. Do not instantiate the PVC yet, keep it for the last pod.
testVolumeSizeRange := t.getTestSuiteInfo().supportedSizeRange
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
driverVolumeSizeRange := dDriver.GetDriverInfo().SupportedSizeRange
claimSize, err := getSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange)
framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, dDriver)
l.resource = createGenericVolumeTestResource(driver, l.config, pattern, testVolumeSizeRange)
l.resource = CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
defer func() {
err := l.resource.cleanupResource()
err := l.resource.CleanupResource()
framework.ExpectNoError(err, "while cleaning up resource")
}()
defer func() {
@ -161,7 +161,7 @@ func (t *volumeLimitsTestSuite) defineTests(driver TestDriver, pattern testpatte
for i := 0; i < limit; i++ {
pvc := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: claimSize,
StorageClassName: &l.resource.sc.Name,
StorageClassName: &l.resource.Sc.Name,
}, l.ns.Name)
pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(pvc)
framework.ExpectNoError(err)
@ -186,7 +186,7 @@ func (t *volumeLimitsTestSuite) defineTests(driver TestDriver, pattern testpatte
framework.ExpectNoError(err)
ginkgo.By("Creating an extra pod with one volume to exceed the limit")
pod = e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.resource.pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil)
pod = e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.resource.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil)
// Use affinity to schedule everything on the right node
e2epod.SetAffinity(&selection, nodeName)
pod.Spec.Affinity = selection.Affinity
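Because VolumeResource now exposes Sc and Pvc as exported fields, an external caller can build additional claims against the same storage class, the way this suite fills a node up to its volume limit. A rough sketch under the same assumptions as before (hypothetical package and helper names; the clientset and e2epv import paths mirror the aliases used in this file):

package mytests

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
	"k8s.io/kubernetes/test/e2e/storage/testsuites"
)

// extraClaim creates one more PVC against the storage class held by an existing
// VolumeResource, mirroring how the volumeLimits suite creates its extra claims.
func extraClaim(cs clientset.Interface, ns, claimSize string, resource *testsuites.VolumeResource) (*v1.PersistentVolumeClaim, error) {
	pvc := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
		ClaimSize:        claimSize,
		StorageClassName: &resource.Sc.Name, // Sc is exported as of this change
	}, ns)
	return cs.CoreV1().PersistentVolumeClaims(ns).Create(pvc)
}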

View File

@ -57,36 +57,36 @@ var _ TestSuite = &volumeModeTestSuite{}
func InitVolumeModeTestSuite() TestSuite {
return &volumeModeTestSuite{
tsInfo: TestSuiteInfo{
name: "volumeMode",
testPatterns: []testpatterns.TestPattern{
Name: "volumeMode",
TestPatterns: []testpatterns.TestPattern{
testpatterns.FsVolModePreprovisionedPV,
testpatterns.FsVolModeDynamicPV,
testpatterns.BlockVolModePreprovisionedPV,
testpatterns.BlockVolModeDynamicPV,
},
supportedSizeRange: volume.SizeRange{
SupportedSizeRange: volume.SizeRange{
Min: "1Mi",
},
},
}
}
func (t *volumeModeTestSuite) getTestSuiteInfo() TestSuiteInfo {
func (t *volumeModeTestSuite) GetTestSuiteInfo() TestSuiteInfo {
return t.tsInfo
}
func (t *volumeModeTestSuite) skipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) {
func (t *volumeModeTestSuite) SkipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) {
}
func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) {
type local struct {
config *PerTestConfig
driverCleanup func()
cs clientset.Interface
ns *v1.Namespace
// genericVolumeTestResource contains pv, pvc, sc, etc., owns cleaning that up
genericVolumeTestResource
// VolumeResource contains pv, pvc, sc, etc., owns cleaning that up
VolumeResource
intreeOps opCounts
migratedOps opCounts
@ -114,7 +114,7 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
l.intreeOps, l.migratedOps = getMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName)
}
// manualInit initializes l.genericVolumeTestResource without creating the PV & PVC objects.
// manualInit initializes l.VolumeResource without creating the PV & PVC objects.
manualInit := func() {
init()
@ -127,14 +127,13 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
volumeNodeAffinity *v1.VolumeNodeAffinity
)
l.genericVolumeTestResource = genericVolumeTestResource{
driver: driver,
config: l.config,
pattern: pattern,
l.VolumeResource = VolumeResource{
Config: l.config,
Pattern: pattern,
}
// Create volume for pre-provisioned volume tests
l.volume = CreateVolume(driver, l.config, pattern.VolType)
l.Volume = CreateVolume(driver, l.config, pattern.VolType)
switch pattern.VolType {
case testpatterns.PreprovisionedPV:
@ -144,31 +143,31 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
scName = fmt.Sprintf("%s-%s-sc-for-file", l.ns.Name, dInfo.Name)
}
if pDriver, ok := driver.(PreprovisionedPVTestDriver); ok {
pvSource, volumeNodeAffinity = pDriver.GetPersistentVolumeSource(false, fsType, l.volume)
pvSource, volumeNodeAffinity = pDriver.GetPersistentVolumeSource(false, fsType, l.Volume)
if pvSource == nil {
framework.Skipf("Driver %q does not define PersistentVolumeSource - skipping", dInfo.Name)
}
storageClass, pvConfig, pvcConfig := generateConfigsForPreprovisionedPVTest(scName, volBindMode, pattern.VolMode, *pvSource, volumeNodeAffinity)
l.sc = storageClass
l.pv = e2epv.MakePersistentVolume(pvConfig)
l.pvc = e2epv.MakePersistentVolumeClaim(pvcConfig, l.ns.Name)
l.Sc = storageClass
l.Pv = e2epv.MakePersistentVolume(pvConfig)
l.Pvc = e2epv.MakePersistentVolumeClaim(pvcConfig, l.ns.Name)
}
case testpatterns.DynamicPV:
if dDriver, ok := driver.(DynamicPVTestDriver); ok {
l.sc = dDriver.GetDynamicProvisionStorageClass(l.config, fsType)
if l.sc == nil {
l.Sc = dDriver.GetDynamicProvisionStorageClass(l.config, fsType)
if l.Sc == nil {
framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name)
}
l.sc.VolumeBindingMode = &volBindMode
testVolumeSizeRange := t.getTestSuiteInfo().supportedSizeRange
l.Sc.VolumeBindingMode = &volBindMode
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
driverVolumeSizeRange := dInfo.SupportedSizeRange
claimSize, err := getSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange)
framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, driverVolumeSizeRange)
l.pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
l.Pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: claimSize,
StorageClassName: &(l.sc.Name),
StorageClassName: &(l.Sc.Name),
VolumeMode: &pattern.VolMode,
}, l.ns.Name)
}
@ -179,7 +178,7 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
cleanup := func() {
var errs []error
errs = append(errs, l.cleanupResource())
errs = append(errs, l.CleanupResource())
errs = append(errs, tryFunc(l.driverCleanup))
l.driverCleanup = nil
framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource")
@ -198,22 +197,22 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
var err error
ginkgo.By("Creating sc")
l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc)
l.Sc, err = l.cs.StorageV1().StorageClasses().Create(l.Sc)
framework.ExpectNoError(err, "Failed to create sc")
ginkgo.By("Creating pv and pvc")
l.pv, err = l.cs.CoreV1().PersistentVolumes().Create(l.pv)
l.Pv, err = l.cs.CoreV1().PersistentVolumes().Create(l.Pv)
framework.ExpectNoError(err, "Failed to create pv")
// Prebind pv
l.pvc.Spec.VolumeName = l.pv.Name
l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc)
l.Pvc.Spec.VolumeName = l.Pv.Name
l.Pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.Pvc)
framework.ExpectNoError(err, "Failed to create pvc")
framework.ExpectNoError(e2epv.WaitOnPVandPVC(l.cs, l.ns.Name, l.pv, l.pvc), "Failed to bind pv and pvc")
framework.ExpectNoError(e2epv.WaitOnPVandPVC(l.cs, l.ns.Name, l.Pv, l.Pvc), "Failed to bind pv and pvc")
ginkgo.By("Creating pod")
pod := e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil)
pod := e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil)
// Setting node
pod.Spec.NodeName = l.config.ClientNodeName
pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(pod)
@ -252,16 +251,16 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
var err error
ginkgo.By("Creating sc")
l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc)
l.Sc, err = l.cs.StorageV1().StorageClasses().Create(l.Sc)
framework.ExpectNoError(err, "Failed to create sc")
ginkgo.By("Creating pv and pvc")
l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc)
l.Pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.Pvc)
framework.ExpectNoError(err, "Failed to create pvc")
eventSelector := fields.Set{
"involvedObject.kind": "PersistentVolumeClaim",
"involvedObject.name": l.pvc.Name,
"involvedObject.name": l.Pvc.Name,
"involvedObject.namespace": l.ns.Name,
"reason": volevents.ProvisioningFailed,
}.AsSelector().String()
@ -274,7 +273,7 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
}
// Check the pvc is still pending
pvc, err := l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Get(l.pvc.Name, metav1.GetOptions{})
pvc, err := l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Get(l.Pvc.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to re-read the pvc after event (or timeout)")
framework.ExpectEqual(pvc.Status.Phase, v1.ClaimPending, "PVC phase isn't pending")
})
@ -286,13 +285,13 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
ginkgo.It("should fail to use a volume in a pod with mismatched mode [Slow]", func() {
skipTestIfBlockNotSupported(driver)
init()
testVolumeSizeRange := t.getTestSuiteInfo().supportedSizeRange
l.genericVolumeTestResource = *createGenericVolumeTestResource(driver, l.config, pattern, testVolumeSizeRange)
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
l.VolumeResource = *CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
defer cleanup()
ginkgo.By("Creating pod")
var err error
pod := e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil)
pod := e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil)
// Change volumeMounts to volumeDevices and the other way around
pod = swapVolumeMode(pod)
@ -335,13 +334,13 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
skipTestIfBlockNotSupported(driver)
}
init()
testVolumeSizeRange := t.getTestSuiteInfo().supportedSizeRange
l.genericVolumeTestResource = *createGenericVolumeTestResource(driver, l.config, pattern, testVolumeSizeRange)
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
l.VolumeResource = *CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
defer cleanup()
ginkgo.By("Creating pod")
var err error
pod := e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil)
pod := e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil)
for i := range pod.Spec.Containers {
pod.Spec.Containers[i].VolumeDevices = nil
pod.Spec.Containers[i].VolumeMounts = nil
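The exported Pvc field likewise lets a caller schedule a pod that consumes the claim, mirroring the MakeSecPod calls in the hunks above. Sketch only: the helper name podForResource and the surrounding package are invented for illustration, and the MakeSecPod argument list is copied from the calls in this diff.

package mytests

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
	"k8s.io/kubernetes/test/e2e/storage/testsuites"
)

// podForResource creates a pod that mounts the claim held by a VolumeResource,
// relying on the Pvc field now being exported.
func podForResource(cs clientset.Interface, ns string, resource *testsuites.VolumeResource, nodeName string) (*v1.Pod, error) {
	pod := e2epod.MakeSecPod(ns, []*v1.PersistentVolumeClaim{resource.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil)
	pod.Spec.NodeName = nodeName // pin to the node prepared by the driver, as the suite does
	return cs.CoreV1().Pods(ns).Create(pod)
}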

View File

@ -46,8 +46,8 @@ var _ TestSuite = &volumesTestSuite{}
func InitVolumesTestSuite() TestSuite {
return &volumesTestSuite{
tsInfo: TestSuiteInfo{
name: "volumes",
testPatterns: []testpatterns.TestPattern{
Name: "volumes",
TestPatterns: []testpatterns.TestPattern{
// Default fsType
testpatterns.DefaultFsInlineVolume,
testpatterns.DefaultFsPreprovisionedPV,
@ -72,18 +72,18 @@ func InitVolumesTestSuite() TestSuite {
testpatterns.BlockVolModePreprovisionedPV,
testpatterns.BlockVolModeDynamicPV,
},
supportedSizeRange: volume.SizeRange{
SupportedSizeRange: volume.SizeRange{
Min: "1Mi",
},
},
}
}
func (t *volumesTestSuite) getTestSuiteInfo() TestSuiteInfo {
func (t *volumesTestSuite) GetTestSuiteInfo() TestSuiteInfo {
return t.tsInfo
}
func (t *volumesTestSuite) skipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) {
func (t *volumesTestSuite) SkipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) {
}
func skipExecTest(driver TestDriver) {
@ -100,12 +100,12 @@ func skipTestIfBlockNotSupported(driver TestDriver) {
}
}
func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
func (t *volumesTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) {
type local struct {
config *PerTestConfig
driverCleanup func()
resource *genericVolumeTestResource
resource *VolumeResource
intreeOps opCounts
migratedOps opCounts
@ -127,9 +127,9 @@ func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
// Now do the more expensive test initialization.
l.config, l.driverCleanup = driver.PrepareTest(f)
l.intreeOps, l.migratedOps = getMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName)
testVolumeSizeRange := t.getTestSuiteInfo().supportedSizeRange
l.resource = createGenericVolumeTestResource(driver, l.config, pattern, testVolumeSizeRange)
if l.resource.volSource == nil {
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
l.resource = CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
if l.resource.VolSource == nil {
framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name)
}
}
@ -137,7 +137,7 @@ func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
cleanup := func() {
var errs []error
if l.resource != nil {
errs = append(errs, l.resource.cleanupResource())
errs = append(errs, l.resource.CleanupResource())
l.resource = nil
}
@ -160,7 +160,7 @@ func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
tests := []volume.Test{
{
Volume: *l.resource.volSource,
Volume: *l.resource.VolSource,
Mode: pattern.VolMode,
File: "index.html",
// Must match content
@ -193,7 +193,7 @@ func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
init()
defer cleanup()
testScriptInPod(f, l.resource.volType, l.resource.volSource, l.config)
testScriptInPod(f, l.resource.VolType, l.resource.VolSource, l.config)
})
}
}
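Taken together, the exported TestSuite methods (GetTestSuiteInfo, SkipRedundantSuite, DefineTests) and the exported TestSuiteInfo fields allow a suite to be defined entirely outside the testsuites package. A minimal sketch of such a custom suite, built only from names visible in this diff; the suite name "custom", the chosen test pattern and the import paths are assumptions for illustration:

package mytests

import (
	"k8s.io/kubernetes/test/e2e/framework/volume"
	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
	"k8s.io/kubernetes/test/e2e/storage/testsuites"
)

// customTestSuite is a minimal out-of-package suite that satisfies the
// now-exported TestSuite interface.
type customTestSuite struct {
	tsInfo testsuites.TestSuiteInfo
}

var _ testsuites.TestSuite = &customTestSuite{}

// InitCustomTestSuite mirrors the InitXxxTestSuite constructors above.
func InitCustomTestSuite() testsuites.TestSuite {
	return &customTestSuite{
		tsInfo: testsuites.TestSuiteInfo{
			Name: "custom",
			TestPatterns: []testpatterns.TestPattern{
				testpatterns.FsVolModeDynamicPV,
			},
			SupportedSizeRange: volume.SizeRange{
				Min: "1Mi",
			},
		},
	}
}

func (t *customTestSuite) GetTestSuiteInfo() testsuites.TestSuiteInfo { return t.tsInfo }

func (t *customTestSuite) SkipRedundantSuite(driver testsuites.TestDriver, pattern testpatterns.TestPattern) {
}

func (t *customTestSuite) DefineTests(driver testsuites.TestDriver, pattern testpatterns.TestPattern) {
	// The ginkgo.It blocks for the actual checks would go here, following the
	// same init/cleanup structure as the in-tree suites above.
}

The empty method bodies are deliberate: the point is only that every identifier the suite needs is reachable from outside the package after this change.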