Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-03 09:22:44 +00:00
e2e/storage: re-initialize all local variables
There is a risk that the init function does not reset one of the local variables that was set by a previous test. To avoid this, all variables set by init now live in a single struct, which is reset completely before anything else. A short sketch of the pattern follows the commit metadata below.
This commit is contained in:
parent e79cd9efe1
commit ec3655a1d4
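The idea is easiest to see in isolation. The following is a minimal, self-contained Go sketch of the reset pattern that the diff applies to the storage test suites; the struct fields and the init/cleanup closures here are illustrative stand-ins, not the real e2e framework types:

package main

import "fmt"

// local groups every variable that init() is responsible for, so that a
// single struct assignment wipes any state left over from a previous test.
type local struct {
    config      string
    testCleanup func()
}

func main() {
    var l local

    init := func() {
        l = local{} // reset *all* per-test state in one step
        l.config = "per-test config"
        l.testCleanup = func() { fmt.Println("cleaned up:", l.config) }
    }

    cleanup := func() {
        if l.testCleanup != nil {
            l.testCleanup()
            l.testCleanup = nil
        }
    }

    // Every iteration starts from a clean slate, even if an earlier
    // iteration forgot to reset one of the fields.
    for i := 0; i < 2; i++ {
        init()
        cleanup()
    }
}

The point of the l = local{} assignment is that adding a new per-test field later cannot silently leak state between tests, because the zeroing happens for the whole struct rather than field by field.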
@@ -80,15 +80,19 @@ func (p *provisioningTestSuite) getTestSuiteInfo() TestSuiteInfo {
 }
 
 func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
-    var (
-        dInfo = driver.GetDriverInfo()
-        dDriver DynamicPVTestDriver
+    type local struct {
         config *PerTestConfig
         testCleanup func()
-        testCase *StorageClassTest
-        cs clientset.Interface
-        pvc *v1.PersistentVolumeClaim
-        sc *storage.StorageClass
+
+        testCase *StorageClassTest
+        cs clientset.Interface
+        pvc *v1.PersistentVolumeClaim
+        sc *storage.StorageClass
+    }
+    var (
+        dInfo = driver.GetDriverInfo()
+        dDriver DynamicPVTestDriver
+        l local
     )
 
     BeforeEach(func() {
@@ -110,30 +114,32 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
     f := framework.NewDefaultFramework("provisioning")
 
     init := func() {
+        l = local{}
+
         // Now do the more expensive test initialization.
-        config, testCleanup = driver.PrepareTest(f)
-        cs = config.Framework.ClientSet
+        l.config, l.testCleanup = driver.PrepareTest(f)
+        l.cs = l.config.Framework.ClientSet
         claimSize := dDriver.GetClaimSize()
-        sc = dDriver.GetDynamicProvisionStorageClass(config, "")
-        if sc == nil {
+        l.sc = dDriver.GetDynamicProvisionStorageClass(l.config, "")
+        if l.sc == nil {
             framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name)
         }
-        pvc = getClaim(claimSize, config.Framework.Namespace.Name)
-        pvc.Spec.StorageClassName = &sc.Name
-        framework.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", sc, pvc)
-        testCase = &StorageClassTest{
-            Client: config.Framework.ClientSet,
-            Claim: pvc,
-            Class: sc,
+        l.pvc = getClaim(claimSize, l.config.Framework.Namespace.Name)
+        l.pvc.Spec.StorageClassName = &l.sc.Name
+        framework.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", l.sc, l.pvc)
+        l.testCase = &StorageClassTest{
+            Client: l.config.Framework.ClientSet,
+            Claim: l.pvc,
+            Class: l.sc,
             ClaimSize: claimSize,
             ExpectedSize: claimSize,
         }
     }
 
     cleanup := func() {
-        if testCleanup != nil {
-            testCleanup()
-            testCleanup = nil
+        if l.testCleanup != nil {
+            l.testCleanup()
+            l.testCleanup = nil
         }
     }
 
@@ -141,7 +147,7 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
         init()
         defer cleanup()
 
-        testCase.TestDynamicProvisioning()
+        l.testCase.TestDynamicProvisioning()
     })
 
     It("should provision storage with mount options", func() {
@@ -152,8 +158,8 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
         init()
         defer cleanup()
 
-        testCase.Class.MountOptions = dInfo.SupportedMountOption.Union(dInfo.RequiredMountOption).List()
-        testCase.TestDynamicProvisioning()
+        l.testCase.Class.MountOptions = dInfo.SupportedMountOption.Union(dInfo.RequiredMountOption).List()
+        l.testCase.TestDynamicProvisioning()
     })
 
     It("should access volume from different nodes", func() {
@@ -164,19 +170,19 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
         // locked onto a single node, then the driver is
         // usable on all of them *and* supports accessing a volume
         // from any node.
-        if config.ClientNodeName != "" {
+        if l.config.ClientNodeName != "" {
             framework.Skipf("Driver %q only supports testing on one node - skipping", dInfo.Name)
         }
 
         // Ensure that we actually have more than one node.
-        nodes := framework.GetReadySchedulableNodesOrDie(cs)
+        nodes := framework.GetReadySchedulableNodesOrDie(l.cs)
         if len(nodes.Items) <= 1 {
             framework.Skipf("need more than one node - skipping")
         }
-        testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
-            PVMultiNodeCheck(cs, claim, volume, NodeSelection{Name: config.ClientNodeName})
+        l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
+            PVMultiNodeCheck(l.cs, claim, volume, NodeSelection{Name: l.config.ClientNodeName})
         }
-        testCase.TestDynamicProvisioning()
+        l.testCase.TestDynamicProvisioning()
     })
 
     It("should create and delete block persistent volumes", func() {
@@ -188,9 +194,9 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
         defer cleanup()
 
         block := v1.PersistentVolumeBlock
-        testCase.VolumeMode = &block
-        pvc.Spec.VolumeMode = &block
-        testCase.TestDynamicProvisioning()
+        l.testCase.VolumeMode = &block
+        l.pvc.Spec.VolumeMode = &block
+        l.testCase.TestDynamicProvisioning()
     })
 
     It("should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]", func() {
@@ -206,18 +212,18 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
         init()
         defer cleanup()
 
-        dc := config.Framework.DynamicClient
-        vsc := sDriver.GetSnapshotClass(config)
-        dataSource, cleanupFunc := prepareDataSourceForProvisioning(NodeSelection{Name: config.ClientNodeName}, cs, dc, pvc, sc, vsc)
+        dc := l.config.Framework.DynamicClient
+        vsc := sDriver.GetSnapshotClass(l.config)
+        dataSource, cleanupFunc := prepareDataSourceForProvisioning(NodeSelection{Name: l.config.ClientNodeName}, l.cs, dc, l.pvc, l.sc, vsc)
         defer cleanupFunc()
 
-        pvc.Spec.DataSource = dataSource
-        testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
+        l.pvc.Spec.DataSource = dataSource
+        l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
             By("checking whether the created volume has the pre-populated data")
             command := fmt.Sprintf("grep '%s' /mnt/test/initialData", claim.Namespace)
-            RunInPodWithVolume(cs, claim.Namespace, claim.Name, "pvc-snapshot-tester", command, NodeSelection{Name: config.ClientNodeName})
+            RunInPodWithVolume(l.cs, claim.Namespace, claim.Name, "pvc-snapshot-tester", command, NodeSelection{Name: l.config.ClientNodeName})
         }
-        testCase.TestDynamicProvisioning()
+        l.testCase.TestDynamicProvisioning()
     })
 
     It("should allow concurrent writes on the same node", func() {
@@ -228,7 +234,7 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
         init()
         defer cleanup()
 
-        testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
+        l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
             // We start two pods concurrently on the same node,
             // using the same PVC. Both wait for other to create a
             // file before returning. The pods are forced onto the
@@ -241,7 +247,7 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
                 defer GinkgoRecover()
                 defer wg.Done()
                 node := NodeSelection{
-                    Name: config.ClientNodeName,
+                    Name: l.config.ClientNodeName,
                 }
                 if podName == secondPodName {
                     node.Affinity = &v1.Affinity{
@@ -259,13 +265,13 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
                         },
                     }
                 }
-                RunInPodWithVolume(cs, claim.Namespace, claim.Name, podName, command, node)
+                RunInPodWithVolume(l.cs, claim.Namespace, claim.Name, podName, command, node)
             }
             go run(firstPodName, "touch /mnt/test/first && while ! [ -f /mnt/test/second ]; do sleep 1; done")
             go run(secondPodName, "touch /mnt/test/second && while ! [ -f /mnt/test/first ]; do sleep 1; done")
             wg.Wait()
         }
-        testCase.TestDynamicProvisioning()
+        l.testCase.TestDynamicProvisioning()
     })
 }
 
@@ -26,7 +26,6 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/rand"
     "k8s.io/apimachinery/pkg/util/wait"
-    clientset "k8s.io/client-go/kubernetes"
     "k8s.io/kubernetes/test/e2e/framework"
     "k8s.io/kubernetes/test/e2e/storage/testpatterns"
    "k8s.io/kubernetes/test/e2e/storage/utils"
@@ -73,10 +72,10 @@ func (s *subPathTestSuite) getTestSuiteInfo() TestSuiteInfo {
 }
 
 func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
-    var (
+    type local struct {
         config *PerTestConfig
         testCleanup func()
-        cs clientset.Interface
+
         resource *genericVolumeTestResource
         roVolSource *v1.VolumeSource
         pod *v1.Pod
@@ -84,7 +83,8 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
         subPathDir string
         filePathInSubpath string
         filePathInVolume string
-    )
+    }
+    var l local
 
     // No preconditions to test. Normally they would be in a BeforeEach here.
 
@@ -95,33 +95,30 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
     f := framework.NewDefaultFramework("provisioning")
 
     init := func() {
-        cs = f.ClientSet // needed for cleanup, f.ClientSet itself gets reset too early
+        l = local{}
 
         // Now do the more expensive test initialization.
-        config, testCleanup = driver.PrepareTest(f)
-        fsType := pattern.FsType
-        volType := pattern.VolType
-
-        resource = createGenericVolumeTestResource(driver, config, pattern)
+        l.config, l.testCleanup = driver.PrepareTest(f)
+        l.resource = createGenericVolumeTestResource(driver, l.config, pattern)
 
         // Setup subPath test dependent resource
-        roVolSource = nil
+        volType := pattern.VolType
         switch volType {
         case testpatterns.InlineVolume:
             if iDriver, ok := driver.(InlineVolumeTestDriver); ok {
-                roVolSource = iDriver.GetVolumeSource(true, fsType, resource.volume)
+                l.roVolSource = iDriver.GetVolumeSource(true, pattern.FsType, l.resource.volume)
             }
         case testpatterns.PreprovisionedPV:
-            roVolSource = &v1.VolumeSource{
+            l.roVolSource = &v1.VolumeSource{
                 PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
-                    ClaimName: resource.pvc.Name,
+                    ClaimName: l.resource.pvc.Name,
                     ReadOnly: true,
                 },
             }
        case testpatterns.DynamicPV:
-            roVolSource = &v1.VolumeSource{
+            l.roVolSource = &v1.VolumeSource{
                 PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
-                    ClaimName: resource.pvc.Name,
+                    ClaimName: l.resource.pvc.Name,
                     ReadOnly: true,
                 },
             }
@@ -130,35 +127,35 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
         }
 
         subPath := f.Namespace.Name
-        pod = SubpathTestPod(f, subPath, resource.volType, resource.volSource, true)
-        pod.Spec.NodeName = config.ClientNodeName
-        pod.Spec.NodeSelector = config.ClientNodeSelector
+        l.pod = SubpathTestPod(f, subPath, l.resource.volType, l.resource.volSource, true)
+        l.pod.Spec.NodeName = l.config.ClientNodeName
+        l.pod.Spec.NodeSelector = l.config.ClientNodeSelector
 
-        formatPod = volumeFormatPod(f, resource.volSource)
-        formatPod.Spec.NodeName = config.ClientNodeName
-        formatPod.Spec.NodeSelector = config.ClientNodeSelector
+        l.formatPod = volumeFormatPod(f, l.resource.volSource)
+        l.formatPod.Spec.NodeName = l.config.ClientNodeName
+        l.formatPod.Spec.NodeSelector = l.config.ClientNodeSelector
 
-        subPathDir = filepath.Join(volumePath, subPath)
-        filePathInSubpath = filepath.Join(volumePath, fileName)
-        filePathInVolume = filepath.Join(subPathDir, fileName)
+        l.subPathDir = filepath.Join(volumePath, subPath)
+        l.filePathInSubpath = filepath.Join(volumePath, fileName)
+        l.filePathInVolume = filepath.Join(l.subPathDir, fileName)
     }
 
     cleanup := func() {
-        if pod != nil {
+        if l.pod != nil {
             By("Deleting pod")
-            err := framework.DeletePodWithWait(f, cs, pod)
+            err := framework.DeletePodWithWait(f, f.ClientSet, l.pod)
             Expect(err).ToNot(HaveOccurred(), "while deleting pod")
-            pod = nil
+            l.pod = nil
         }
 
-        if resource != nil {
-            resource.cleanupResource()
-            resource = nil
+        if l.resource != nil {
+            l.resource.cleanupResource()
+            l.resource = nil
         }
 
-        if testCleanup != nil {
-            testCleanup()
-            testCleanup = nil
+        if l.testCleanup != nil {
+            l.testCleanup()
+            l.testCleanup = nil
         }
     }
 
@@ -167,10 +164,10 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
         defer cleanup()
 
         // Write the file in the subPath from init container 1
-        setWriteCommand(filePathInSubpath, &pod.Spec.InitContainers[1])
+        setWriteCommand(l.filePathInSubpath, &l.pod.Spec.InitContainers[1])
 
         // Read it from outside the subPath from container 1
-        testReadFile(f, filePathInVolume, pod, 1)
+        testReadFile(f, l.filePathInVolume, l.pod, 1)
     })
 
     It("should support existing directory", func() {
@@ -178,13 +175,13 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
         defer cleanup()
 
         // Create the directory
-        setInitCommand(pod, fmt.Sprintf("mkdir -p %s", subPathDir))
+        setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s", l.subPathDir))
 
         // Write the file in the subPath from init container 1
-        setWriteCommand(filePathInSubpath, &pod.Spec.InitContainers[1])
+        setWriteCommand(l.filePathInSubpath, &l.pod.Spec.InitContainers[1])
 
         // Read it from outside the subPath from container 1
-        testReadFile(f, filePathInVolume, pod, 1)
+        testReadFile(f, l.filePathInVolume, l.pod, 1)
     })
 
     It("should support existing single file", func() {
@@ -192,10 +189,10 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
         defer cleanup()
 
         // Create the file in the init container
-        setInitCommand(pod, fmt.Sprintf("mkdir -p %s; echo \"mount-tester new file\" > %s", subPathDir, filePathInVolume))
+        setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s; echo \"mount-tester new file\" > %s", l.subPathDir, l.filePathInVolume))
 
         // Read it from inside the subPath from container 0
-        testReadFile(f, filePathInSubpath, pod, 0)
+        testReadFile(f, l.filePathInSubpath, l.pod, 0)
     })
 
     It("should support file as subpath", func() {
@@ -203,9 +200,9 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
         defer cleanup()
 
         // Create the file in the init container
-        setInitCommand(pod, fmt.Sprintf("echo %s > %s", f.Namespace.Name, subPathDir))
+        setInitCommand(l.pod, fmt.Sprintf("echo %s > %s", f.Namespace.Name, l.subPathDir))
 
-        TestBasicSubpath(f, f.Namespace.Name, pod)
+        TestBasicSubpath(f, f.Namespace.Name, l.pod)
     })
 
     It("should fail if subpath directory is outside the volume [Slow]", func() {
@@ -213,10 +210,10 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
         defer cleanup()
 
         // Create the subpath outside the volume
-        setInitCommand(pod, fmt.Sprintf("ln -s /bin %s", subPathDir))
+        setInitCommand(l.pod, fmt.Sprintf("ln -s /bin %s", l.subPathDir))
 
         // Pod should fail
-        testPodFailSubpath(f, pod, false)
+        testPodFailSubpath(f, l.pod, false)
     })
 
     It("should fail if subpath file is outside the volume [Slow]", func() {
@@ -224,10 +221,10 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
         defer cleanup()
 
         // Create the subpath outside the volume
-        setInitCommand(pod, fmt.Sprintf("ln -s /bin/sh %s", subPathDir))
+        setInitCommand(l.pod, fmt.Sprintf("ln -s /bin/sh %s", l.subPathDir))
 
         // Pod should fail
-        testPodFailSubpath(f, pod, false)
+        testPodFailSubpath(f, l.pod, false)
     })
 
     It("should fail if non-existent subpath is outside the volume [Slow]", func() {
@@ -235,10 +232,10 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
         defer cleanup()
 
         // Create the subpath outside the volume
-        setInitCommand(pod, fmt.Sprintf("ln -s /bin/notanexistingpath %s", subPathDir))
+        setInitCommand(l.pod, fmt.Sprintf("ln -s /bin/notanexistingpath %s", l.subPathDir))
 
         // Pod should fail
-        testPodFailSubpath(f, pod, false)
+        testPodFailSubpath(f, l.pod, false)
     })
 
     It("should fail if subpath with backstepping is outside the volume [Slow]", func() {
@@ -246,10 +243,10 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
         defer cleanup()
 
         // Create the subpath outside the volume
-        setInitCommand(pod, fmt.Sprintf("ln -s ../ %s", subPathDir))
+        setInitCommand(l.pod, fmt.Sprintf("ln -s ../ %s", l.subPathDir))
 
         // Pod should fail
-        testPodFailSubpath(f, pod, false)
+        testPodFailSubpath(f, l.pod, false)
     })
 
     It("should support creating multiple subpath from same volumes [Slow]", func() {
@@ -260,22 +257,22 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
         subpathDir2 := filepath.Join(volumePath, "subpath2")
         filepath1 := filepath.Join("/test-subpath1", fileName)
         filepath2 := filepath.Join("/test-subpath2", fileName)
-        setInitCommand(pod, fmt.Sprintf("mkdir -p %s; mkdir -p %s", subpathDir1, subpathDir2))
+        setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s; mkdir -p %s", subpathDir1, subpathDir2))
 
-        addSubpathVolumeContainer(&pod.Spec.Containers[0], v1.VolumeMount{
+        addSubpathVolumeContainer(&l.pod.Spec.Containers[0], v1.VolumeMount{
             Name: volumeName,
             MountPath: "/test-subpath1",
             SubPath: "subpath1",
         })
-        addSubpathVolumeContainer(&pod.Spec.Containers[0], v1.VolumeMount{
+        addSubpathVolumeContainer(&l.pod.Spec.Containers[0], v1.VolumeMount{
             Name: volumeName,
             MountPath: "/test-subpath2",
             SubPath: "subpath2",
         })
 
         // Write the files from container 0 and instantly read them back
-        addMultipleWrites(&pod.Spec.Containers[0], filepath1, filepath2)
-        testMultipleReads(f, pod, 0, filepath1, filepath2)
+        addMultipleWrites(&l.pod.Spec.Containers[0], filepath1, filepath2)
+        testMultipleReads(f, l.pod, 0, filepath1, filepath2)
     })
 
     It("should support restarting containers using directory as subpath [Slow]", func() {
@@ -283,9 +280,9 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
         defer cleanup()
 
         // Create the directory
-        setInitCommand(pod, fmt.Sprintf("mkdir -p %v; touch %v", subPathDir, probeFilePath))
+        setInitCommand(l.pod, fmt.Sprintf("mkdir -p %v; touch %v", l.subPathDir, probeFilePath))
 
-        testPodContainerRestart(f, pod)
+        testPodContainerRestart(f, l.pod)
     })
 
     It("should support restarting containers using file as subpath [Slow]", func() {
@@ -293,28 +290,28 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
         defer cleanup()
 
         // Create the file
-        setInitCommand(pod, fmt.Sprintf("touch %v; touch %v", subPathDir, probeFilePath))
+        setInitCommand(l.pod, fmt.Sprintf("touch %v; touch %v", l.subPathDir, probeFilePath))
 
-        testPodContainerRestart(f, pod)
+        testPodContainerRestart(f, l.pod)
     })
 
     It("should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow]", func() {
         init()
         defer cleanup()
 
-        testSubpathReconstruction(f, pod, false)
+        testSubpathReconstruction(f, l.pod, false)
     })
 
     It("should unmount if pod is force deleted while kubelet is down [Disruptive][Slow]", func() {
         init()
         defer cleanup()
 
-        if strings.HasPrefix(resource.volType, "hostPath") || strings.HasPrefix(resource.volType, "csi-hostpath") {
+        if strings.HasPrefix(l.resource.volType, "hostPath") || strings.HasPrefix(l.resource.volType, "csi-hostpath") {
             // TODO: This skip should be removed once #61446 is fixed
-            framework.Skipf("%s volume type does not support reconstruction, skipping", resource.volType)
+            framework.Skipf("%s volume type does not support reconstruction, skipping", l.resource.volType)
         }
 
-        testSubpathReconstruction(f, pod, true)
+        testSubpathReconstruction(f, l.pod, true)
     })
 
     It("should support readOnly directory specified in the volumeMount", func() {
@@ -322,14 +319,14 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
         defer cleanup()
 
         // Create the directory
-        setInitCommand(pod, fmt.Sprintf("mkdir -p %s", subPathDir))
+        setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s", l.subPathDir))
 
         // Write the file in the volume from init container 2
-        setWriteCommand(filePathInVolume, &pod.Spec.InitContainers[2])
+        setWriteCommand(l.filePathInVolume, &l.pod.Spec.InitContainers[2])
 
         // Read it from inside the subPath from container 0
-        pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
-        testReadFile(f, filePathInSubpath, pod, 0)
+        l.pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
+        testReadFile(f, l.filePathInSubpath, l.pod, 0)
     })
 
     It("should support readOnly file specified in the volumeMount", func() {
@@ -337,62 +334,62 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
         defer cleanup()
 
         // Create the file
-        setInitCommand(pod, fmt.Sprintf("touch %s", subPathDir))
+        setInitCommand(l.pod, fmt.Sprintf("touch %s", l.subPathDir))
 
         // Write the file in the volume from init container 2
-        setWriteCommand(subPathDir, &pod.Spec.InitContainers[2])
+        setWriteCommand(l.subPathDir, &l.pod.Spec.InitContainers[2])
 
         // Read it from inside the subPath from container 0
-        pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
-        testReadFile(f, volumePath, pod, 0)
+        l.pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
+        testReadFile(f, volumePath, l.pod, 0)
     })
 
     It("should support existing directories when readOnly specified in the volumeSource", func() {
         init()
         defer cleanup()
-        if roVolSource == nil {
-            framework.Skipf("Volume type %v doesn't support readOnly source", resource.volType)
+        if l.roVolSource == nil {
+            framework.Skipf("Volume type %v doesn't support readOnly source", l.resource.volType)
         }
 
-        origpod := pod.DeepCopy()
+        origpod := l.pod.DeepCopy()
 
         // Create the directory
-        setInitCommand(pod, fmt.Sprintf("mkdir -p %s", subPathDir))
+        setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s", l.subPathDir))
 
         // Write the file in the subPath from init container 1
-        setWriteCommand(filePathInSubpath, &pod.Spec.InitContainers[1])
+        setWriteCommand(l.filePathInSubpath, &l.pod.Spec.InitContainers[1])
 
         // Read it from inside the subPath from container 0
-        testReadFile(f, filePathInSubpath, pod, 0)
+        testReadFile(f, l.filePathInSubpath, l.pod, 0)
 
         // Reset the pod
-        pod = origpod
+        l.pod = origpod
 
         // Set volume source to read only
-        pod.Spec.Volumes[0].VolumeSource = *roVolSource
+        l.pod.Spec.Volumes[0].VolumeSource = *l.roVolSource
 
         // Read it from inside the subPath from container 0
-        testReadFile(f, filePathInSubpath, pod, 0)
+        testReadFile(f, l.filePathInSubpath, l.pod, 0)
     })
 
     It("should verify container cannot write to subpath readonly volumes", func() {
         init()
         defer cleanup()
-        if roVolSource == nil {
-            framework.Skipf("Volume type %v doesn't support readOnly source", resource.volType)
+        if l.roVolSource == nil {
+            framework.Skipf("Volume type %v doesn't support readOnly source", l.resource.volType)
         }
 
         // Format the volume while it's writable
-        formatVolume(f, formatPod)
+        formatVolume(f, l.formatPod)
 
         // Set volume source to read only
-        pod.Spec.Volumes[0].VolumeSource = *roVolSource
+        l.pod.Spec.Volumes[0].VolumeSource = *l.roVolSource
 
         // Write the file in the volume from container 0
-        setWriteCommand(subPathDir, &pod.Spec.Containers[0])
+        setWriteCommand(l.subPathDir, &l.pod.Spec.Containers[0])
 
         // Pod should fail
-        testPodFailSubpath(f, pod, true)
+        testPodFailSubpath(f, l.pod, true)
     })
 
     It("should be able to unmount after the subpath directory is deleted", func() {
@@ -400,12 +397,12 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
         defer cleanup()
 
         // Change volume container to busybox so we can exec later
-        pod.Spec.Containers[1].Image = imageutils.GetE2EImage(imageutils.BusyBox)
-        pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
+        l.pod.Spec.Containers[1].Image = imageutils.GetE2EImage(imageutils.BusyBox)
+        l.pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
 
-        By(fmt.Sprintf("Creating pod %s", pod.Name))
-        removeUnusedContainers(pod)
-        pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
+        By(fmt.Sprintf("Creating pod %s", l.pod.Name))
+        removeUnusedContainers(l.pod)
+        pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(l.pod)
         Expect(err).ToNot(HaveOccurred(), "while creating pod")
         defer func() {
             By(fmt.Sprintf("Deleting pod %s", pod.Name))
@@ -413,12 +410,12 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
         }()
 
         // Wait for pod to be running
-        err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod)
+        err = framework.WaitForPodRunningInNamespace(f.ClientSet, l.pod)
         Expect(err).ToNot(HaveOccurred(), "while waiting for pod to be running")
 
         // Exec into container that mounted the volume, delete subpath directory
-        rmCmd := fmt.Sprintf("rm -rf %s", subPathDir)
-        _, err = podContainerExec(pod, 1, rmCmd)
+        rmCmd := fmt.Sprintf("rm -rf %s", l.subPathDir)
+        _, err = podContainerExec(l.pod, 1, rmCmd)
         Expect(err).ToNot(HaveOccurred(), "while removing subpath directory")
 
         // Delete pod (from defer) and wait for it to be successfully deleted
@@ -75,11 +75,15 @@ func (t *volumeIOTestSuite) getTestSuiteInfo() TestSuiteInfo {
 }
 
 func (t *volumeIOTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
-    var (
-        dInfo = driver.GetDriverInfo()
+    type local struct {
         config *PerTestConfig
         testCleanup func()
-        resource *genericVolumeTestResource
+
+        resource *genericVolumeTestResource
+    }
+    var (
+        dInfo = driver.GetDriverInfo()
+        l local
     )
 
     // No preconditions to test. Normally they would be in a BeforeEach here.
@@ -91,23 +95,25 @@ func (t *volumeIOTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
     f := framework.NewDefaultFramework("volumeio")
 
     init := func() {
+        l = local{}
+
         // Now do the more expensive test initialization.
-        config, testCleanup = driver.PrepareTest(f)
-        resource = createGenericVolumeTestResource(driver, config, pattern)
-        if resource.volSource == nil {
+        l.config, l.testCleanup = driver.PrepareTest(f)
+        l.resource = createGenericVolumeTestResource(driver, l.config, pattern)
+        if l.resource.volSource == nil {
             framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name)
         }
     }
 
     cleanup := func() {
-        if resource != nil {
-            resource.cleanupResource()
-            resource = nil
+        if l.resource != nil {
+            l.resource.cleanupResource()
+            l.resource = nil
        }
 
-        if testCleanup != nil {
-            testCleanup()
-            testCleanup = nil
+        if l.testCleanup != nil {
+            l.testCleanup()
+            l.testCleanup = nil
         }
     }
 
@@ -126,7 +132,7 @@ func (t *volumeIOTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
         podSec := v1.PodSecurityContext{
             FSGroup: fsGroup,
         }
-        err := testVolumeIO(f, cs, convertTestConfig(config), *resource.volSource, &podSec, testFile, fileSizes)
+        err := testVolumeIO(f, cs, convertTestConfig(l.config), *l.resource.volSource, &podSec, testFile, fileSizes)
         Expect(err).NotTo(HaveOccurred())
     })
 }
@@ -26,6 +26,7 @@ import (
     storagev1 "k8s.io/api/storage/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     utilerrors "k8s.io/apimachinery/pkg/util/errors"
+    clientset "k8s.io/client-go/kubernetes"
     "k8s.io/kubernetes/test/e2e/framework"
     "k8s.io/kubernetes/test/e2e/storage/testpatterns"
     "k8s.io/kubernetes/test/e2e/storage/utils"
@@ -62,14 +63,20 @@ func (t *volumeModeTestSuite) getTestSuiteInfo() TestSuiteInfo {
 }
 
 func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
-    var (
-        dInfo = driver.GetDriverInfo()
+    type local struct {
         config *PerTestConfig
         testCleanup func()
-        sc *storagev1.StorageClass
-        pvc *v1.PersistentVolumeClaim
-        pv *v1.PersistentVolume
-        volume TestVolume
+
+        cs clientset.Interface
+        ns *v1.Namespace
+        sc *storagev1.StorageClass
+        pvc *v1.PersistentVolumeClaim
+        pv *v1.PersistentVolume
+        volume TestVolume
+    }
+    var (
+        dInfo = driver.GetDriverInfo()
+        l local
     )
 
     // No preconditions to test. Normally they would be in a BeforeEach here.
@@ -81,10 +88,13 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
     f := framework.NewDefaultFramework("volumemode")
 
     init := func() {
-        // Now do the more expensive test initialization.
-        config, testCleanup = driver.PrepareTest(f)
+        l = local{}
+        l.ns = f.Namespace
+        l.cs = f.ClientSet
+
+        // Now do the more expensive test initialization.
+        l.config, l.testCleanup = driver.PrepareTest(f)
 
-        ns := f.Namespace
         fsType := pattern.FsType
         volBindMode := storagev1.VolumeBindingImmediate
 
@@ -95,38 +105,38 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
        )
 
        // Create volume for pre-provisioned volume tests
-        volume = CreateVolume(driver, config, pattern.VolType)
+        l.volume = CreateVolume(driver, l.config, pattern.VolType)
 
        switch pattern.VolType {
        case testpatterns.PreprovisionedPV:
            if pattern.VolMode == v1.PersistentVolumeBlock {
-                scName = fmt.Sprintf("%s-%s-sc-for-block", ns.Name, dInfo.Name)
+                scName = fmt.Sprintf("%s-%s-sc-for-block", l.ns.Name, dInfo.Name)
            } else if pattern.VolMode == v1.PersistentVolumeFilesystem {
-                scName = fmt.Sprintf("%s-%s-sc-for-file", ns.Name, dInfo.Name)
+                scName = fmt.Sprintf("%s-%s-sc-for-file", l.ns.Name, dInfo.Name)
            }
            if pDriver, ok := driver.(PreprovisionedPVTestDriver); ok {
-                pvSource, volumeNodeAffinity = pDriver.GetPersistentVolumeSource(false, fsType, volume)
+                pvSource, volumeNodeAffinity = pDriver.GetPersistentVolumeSource(false, fsType, l.volume)
                if pvSource == nil {
                    framework.Skipf("Driver %q does not define PersistentVolumeSource - skipping", dInfo.Name)
                }
 
                storageClass, pvConfig, pvcConfig := generateConfigsForPreprovisionedPVTest(scName, volBindMode, pattern.VolMode, *pvSource, volumeNodeAffinity)
-                sc = storageClass
-                pv = framework.MakePersistentVolume(pvConfig)
-                pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns.Name)
+                l.sc = storageClass
+                l.pv = framework.MakePersistentVolume(pvConfig)
+                l.pvc = framework.MakePersistentVolumeClaim(pvcConfig, l.ns.Name)
            }
        case testpatterns.DynamicPV:
            if dDriver, ok := driver.(DynamicPVTestDriver); ok {
-                sc = dDriver.GetDynamicProvisionStorageClass(config, fsType)
-                if sc == nil {
+                l.sc = dDriver.GetDynamicProvisionStorageClass(l.config, fsType)
+                if l.sc == nil {
                    framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name)
                }
-                sc.VolumeBindingMode = &volBindMode
+                l.sc.VolumeBindingMode = &volBindMode
 
                claimSize := dDriver.GetClaimSize()
-                pvc = getClaim(claimSize, ns.Name)
-                pvc.Spec.StorageClassName = &sc.Name
-                pvc.Spec.VolumeMode = &pattern.VolMode
+                l.pvc = getClaim(claimSize, l.ns.Name)
+                l.pvc.Spec.StorageClassName = &l.sc.Name
+                l.pvc.Spec.VolumeMode = &pattern.VolMode
            }
        default:
            framework.Failf("Volume mode test doesn't support: %s", pattern.VolType)
@@ -134,30 +144,30 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
     }
 
     cleanup := func() {
-        if pv != nil || pvc != nil {
+        if l.pv != nil || l.pvc != nil {
             By("Deleting pv and pvc")
-            errs := framework.PVPVCCleanup(f.ClientSet, f.Namespace.Name, pv, pvc)
+            errs := framework.PVPVCCleanup(f.ClientSet, f.Namespace.Name, l.pv, l.pvc)
             if len(errs) > 0 {
                 framework.Logf("Failed to delete PV and/or PVC: %v", utilerrors.NewAggregate(errs))
             }
-            pv = nil
-            pvc = nil
+            l.pv = nil
+            l.pvc = nil
         }
 
-        if sc != nil {
+        if l.sc != nil {
             By("Deleting sc")
-            deleteStorageClass(f.ClientSet, sc.Name)
-            sc = nil
+            deleteStorageClass(f.ClientSet, l.sc.Name)
+            l.sc = nil
         }
 
-        if volume != nil {
-            volume.DeleteVolume()
-            volume = nil
+        if l.volume != nil {
+            l.volume.DeleteVolume()
+            l.volume = nil
         }
 
-        if testCleanup != nil {
-            testCleanup()
-            testCleanup = nil
+        if l.testCleanup != nil {
+            l.testCleanup()
+            l.testCleanup = nil
         }
     }
 
@@ -170,31 +180,29 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
            init()
            defer cleanup()
 
-            cs := f.ClientSet
-            ns := f.Namespace
            var err error
 
            By("Creating sc")
-            sc, err = cs.StorageV1().StorageClasses().Create(sc)
+            l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc)
            Expect(err).NotTo(HaveOccurred())
 
            By("Creating pv and pvc")
-            pv, err = cs.CoreV1().PersistentVolumes().Create(pv)
+            l.pv, err = l.cs.CoreV1().PersistentVolumes().Create(l.pv)
            Expect(err).NotTo(HaveOccurred())
 
            // Prebind pv
-            pvc.Spec.VolumeName = pv.Name
-            pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc)
+            l.pvc.Spec.VolumeName = l.pv.Name
+            l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc)
            Expect(err).NotTo(HaveOccurred())
 
-            framework.ExpectNoError(framework.WaitOnPVandPVC(cs, ns.Name, pv, pvc))
+            framework.ExpectNoError(framework.WaitOnPVandPVC(l.cs, l.ns.Name, l.pv, l.pvc))
 
            By("Creating pod")
-            pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{pvc},
+            pod, err := framework.CreateSecPodWithNodeName(l.cs, l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc},
                false, "", false, false, framework.SELinuxLabel,
-                nil, config.ClientNodeName, framework.PodStartTimeout)
+                nil, l.config.ClientNodeName, framework.PodStartTimeout)
            defer func() {
-                framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
+                framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod))
            }()
            Expect(err).To(HaveOccurred())
        })
@@ -203,31 +211,29 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
            init()
            defer cleanup()
 
-            cs := f.ClientSet
-            ns := f.Namespace
            var err error
 
            By("Creating sc")
-            sc, err = cs.StorageV1().StorageClasses().Create(sc)
+            l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc)
            Expect(err).NotTo(HaveOccurred())
 
            By("Creating pv and pvc")
-            pv, err = cs.CoreV1().PersistentVolumes().Create(pv)
+            l.pv, err = l.cs.CoreV1().PersistentVolumes().Create(l.pv)
            Expect(err).NotTo(HaveOccurred())
 
            // Prebind pv
-            pvc.Spec.VolumeName = pv.Name
-            pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc)
+            l.pvc.Spec.VolumeName = l.pv.Name
+            l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc)
            Expect(err).NotTo(HaveOccurred())
 
-            framework.ExpectNoError(framework.WaitOnPVandPVC(cs, ns.Name, pv, pvc))
+            framework.ExpectNoError(framework.WaitOnPVandPVC(l.cs, l.ns.Name, l.pv, l.pvc))
 
            By("Creating pod")
-            pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{pvc},
+            pod, err := framework.CreateSecPodWithNodeName(l.cs, l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc},
                false, "", false, false, framework.SELinuxLabel,
-                nil, config.ClientNodeName, framework.PodStartTimeout)
+                nil, l.config.ClientNodeName, framework.PodStartTimeout)
            defer func() {
-                framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
+                framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod))
            }()
            Expect(err).NotTo(HaveOccurred())
 
@@ -245,19 +251,17 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
            init()
            defer cleanup()
 
-            cs := f.ClientSet
-            ns := f.Namespace
            var err error
 
            By("Creating sc")
-            sc, err = cs.StorageV1().StorageClasses().Create(sc)
+            l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc)
            Expect(err).NotTo(HaveOccurred())
 
            By("Creating pv and pvc")
-            pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc)
+            l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc)
            Expect(err).NotTo(HaveOccurred())
 
-            err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
+            err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, l.cs, l.pvc.Namespace, l.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
            Expect(err).To(HaveOccurred())
        })
    } else {
@@ -265,33 +269,31 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
            init()
            defer cleanup()
 
-            cs := f.ClientSet
-            ns := f.Namespace
            var err error
 
            By("Creating sc")
-            sc, err = cs.StorageV1().StorageClasses().Create(sc)
+            l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc)
            Expect(err).NotTo(HaveOccurred())
 
            By("Creating pv and pvc")
-            pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc)
+            l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc)
            Expect(err).NotTo(HaveOccurred())
 
-            err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
+            err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, l.cs, l.pvc.Namespace, l.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
            Expect(err).NotTo(HaveOccurred())
 
-            pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
+            l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.pvc.Namespace).Get(l.pvc.Name, metav1.GetOptions{})
            Expect(err).NotTo(HaveOccurred())
 
-            pv, err = cs.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{})
+            l.pv, err = l.cs.CoreV1().PersistentVolumes().Get(l.pvc.Spec.VolumeName, metav1.GetOptions{})
            Expect(err).NotTo(HaveOccurred())
 
            By("Creating pod")
-            pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{pvc},
+            pod, err := framework.CreateSecPodWithNodeName(l.cs, l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc},
                false, "", false, false, framework.SELinuxLabel,
-                nil, config.ClientNodeName, framework.PodStartTimeout)
+                nil, l.config.ClientNodeName, framework.PodStartTimeout)
            defer func() {
-                framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
+                framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod))
            }()
            Expect(err).NotTo(HaveOccurred())
 
@@ -90,12 +90,14 @@ func skipExecTest(driver TestDriver) {
 }
 
 func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
-    var (
-        dInfo = driver.GetDriverInfo()
+    type local struct {
         config *PerTestConfig
         testCleanup func()
-        resource *genericVolumeTestResource
-    )
+
+        resource *genericVolumeTestResource
+    }
+    var dInfo = driver.GetDriverInfo()
+    var l local
 
     // No preconditions to test. Normally they would be in a BeforeEach here.
 
@@ -106,23 +108,25 @@ func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
     f := framework.NewDefaultFramework("volumeio")
 
     init := func() {
+        l = local{}
+
         // Now do the more expensive test initialization.
-        config, testCleanup = driver.PrepareTest(f)
-        resource = createGenericVolumeTestResource(driver, config, pattern)
-        if resource.volSource == nil {
+        l.config, l.testCleanup = driver.PrepareTest(f)
+        l.resource = createGenericVolumeTestResource(driver, l.config, pattern)
+        if l.resource.volSource == nil {
             framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name)
         }
     }
 
     cleanup := func() {
-        if resource != nil {
-            resource.cleanupResource()
-            resource = nil
+        if l.resource != nil {
+            l.resource.cleanupResource()
+            l.resource = nil
         }
 
-        if testCleanup != nil {
-            testCleanup()
-            testCleanup = nil
+        if l.testCleanup != nil {
+            l.testCleanup()
+            l.testCleanup = nil
         }
     }
 
@@ -130,20 +134,20 @@ func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
         skipPersistenceTest(driver)
         init()
         defer func() {
-            framework.VolumeTestCleanup(f, convertTestConfig(config))
+            framework.VolumeTestCleanup(f, convertTestConfig(l.config))
             cleanup()
         }()
 
         tests := []framework.VolumeTest{
             {
-                Volume: *resource.volSource,
+                Volume: *l.resource.volSource,
                 File: "index.html",
                 // Must match content
                 ExpectedContent: fmt.Sprintf("Hello from %s from namespace %s",
                     dInfo.Name, f.Namespace.Name),
             },
         }
-        config := convertTestConfig(config)
+        config := convertTestConfig(l.config)
         framework.InjectHtml(f.ClientSet, config, tests[0].Volume, tests[0].ExpectedContent)
         var fsGroup *int64
         if dInfo.Capabilities[CapFsGroup] {
@@ -158,7 +162,7 @@ func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
         init()
         defer cleanup()
 
-        testScriptInPod(f, resource.volType, resource.volSource, config.ClientNodeSelector)
+        testScriptInPod(f, l.resource.volType, l.resource.volSource, l.config.ClientNodeSelector)
     })
 }
 