e2e/storage: re-initialize all local variables

There is a risk that the init function fails to reset one of the local
variables that was set by a previous test. To avoid this, all
variables set by init are now grouped in a struct which gets reset
completely at the start of init.
Patrick Ohly 2019-02-15 10:50:18 +01:00
parent e79cd9efe1
commit ec3655a1d4
5 changed files with 255 additions and 240 deletions
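
The pattern applied to all five test suites can be illustrated with a small, self-contained sketch. Everything in it is a hypothetical stand-in: perTestConfig replaces the framework's *PerTestConfig, the closure assigned to testCleanup replaces what driver.PrepareTest would return, and the loop replaces the Ginkgo It blocks. Only the local struct, the variable l, and the initial l = local{} reset mirror the diff below.

// struct_reset_sketch.go - hypothetical illustration of the struct-reset
// pattern introduced by this commit; it does not use the Kubernetes e2e
// framework.
package main

import "fmt"

// perTestConfig stands in for the framework's *PerTestConfig.
type perTestConfig struct {
	clientNodeName string
}

// local groups every variable that init sets for a single test, so that one
// assignment wipes all of them at once.
type local struct {
	config      *perTestConfig
	testCleanup func()
	claimSize   string
}

func main() {
	var l local

	init := func() {
		// Reset the whole struct first: a field forgotten in the assignments
		// below can no longer leak a value from a previous test.
		l = local{}
		l.config = &perTestConfig{clientNodeName: "node-1"}
		l.testCleanup = func() { fmt.Println("cleanup ran") }
		l.claimSize = "5Gi"
	}

	cleanup := func() {
		if l.testCleanup != nil {
			l.testCleanup()
			l.testCleanup = nil
		}
	}

	// Two "tests" run back to back; the second starts from a clean struct
	// even though both share the same l variable.
	for i := 1; i <= 2; i++ {
		init()
		fmt.Printf("test %d: node=%s claimSize=%s\n", i, l.config.clientNodeName, l.claimSize)
		cleanup()
	}
}

Compared with resetting each field individually, the single l = local{} assignment cannot fall out of sync when a new per-test field is added later, which is exactly the risk the commit message describes.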

View File

@@ -80,15 +80,19 @@ func (p *provisioningTestSuite) getTestSuiteInfo() TestSuiteInfo {
}
func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
var (
dInfo = driver.GetDriverInfo()
dDriver DynamicPVTestDriver
type local struct {
config *PerTestConfig
testCleanup func()
testCase *StorageClassTest
cs clientset.Interface
pvc *v1.PersistentVolumeClaim
sc *storage.StorageClass
testCase *StorageClassTest
cs clientset.Interface
pvc *v1.PersistentVolumeClaim
sc *storage.StorageClass
}
var (
dInfo = driver.GetDriverInfo()
dDriver DynamicPVTestDriver
l local
)
BeforeEach(func() {
@@ -110,30 +114,32 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
f := framework.NewDefaultFramework("provisioning")
init := func() {
l = local{}
// Now do the more expensive test initialization.
config, testCleanup = driver.PrepareTest(f)
cs = config.Framework.ClientSet
l.config, l.testCleanup = driver.PrepareTest(f)
l.cs = l.config.Framework.ClientSet
claimSize := dDriver.GetClaimSize()
sc = dDriver.GetDynamicProvisionStorageClass(config, "")
if sc == nil {
l.sc = dDriver.GetDynamicProvisionStorageClass(l.config, "")
if l.sc == nil {
framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name)
}
pvc = getClaim(claimSize, config.Framework.Namespace.Name)
pvc.Spec.StorageClassName = &sc.Name
framework.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", sc, pvc)
testCase = &StorageClassTest{
Client: config.Framework.ClientSet,
Claim: pvc,
Class: sc,
l.pvc = getClaim(claimSize, l.config.Framework.Namespace.Name)
l.pvc.Spec.StorageClassName = &l.sc.Name
framework.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", l.sc, l.pvc)
l.testCase = &StorageClassTest{
Client: l.config.Framework.ClientSet,
Claim: l.pvc,
Class: l.sc,
ClaimSize: claimSize,
ExpectedSize: claimSize,
}
}
cleanup := func() {
if testCleanup != nil {
testCleanup()
testCleanup = nil
if l.testCleanup != nil {
l.testCleanup()
l.testCleanup = nil
}
}
@@ -141,7 +147,7 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
init()
defer cleanup()
testCase.TestDynamicProvisioning()
l.testCase.TestDynamicProvisioning()
})
It("should provision storage with mount options", func() {
@@ -152,8 +158,8 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
init()
defer cleanup()
testCase.Class.MountOptions = dInfo.SupportedMountOption.Union(dInfo.RequiredMountOption).List()
testCase.TestDynamicProvisioning()
l.testCase.Class.MountOptions = dInfo.SupportedMountOption.Union(dInfo.RequiredMountOption).List()
l.testCase.TestDynamicProvisioning()
})
It("should access volume from different nodes", func() {
@@ -164,19 +170,19 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
// locked onto a single node, then the driver is
// usable on all of them *and* supports accessing a volume
// from any node.
if config.ClientNodeName != "" {
if l.config.ClientNodeName != "" {
framework.Skipf("Driver %q only supports testing on one node - skipping", dInfo.Name)
}
// Ensure that we actually have more than one node.
nodes := framework.GetReadySchedulableNodesOrDie(cs)
nodes := framework.GetReadySchedulableNodesOrDie(l.cs)
if len(nodes.Items) <= 1 {
framework.Skipf("need more than one node - skipping")
}
testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
PVMultiNodeCheck(cs, claim, volume, NodeSelection{Name: config.ClientNodeName})
l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
PVMultiNodeCheck(l.cs, claim, volume, NodeSelection{Name: l.config.ClientNodeName})
}
testCase.TestDynamicProvisioning()
l.testCase.TestDynamicProvisioning()
})
It("should create and delete block persistent volumes", func() {
@@ -188,9 +194,9 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
defer cleanup()
block := v1.PersistentVolumeBlock
testCase.VolumeMode = &block
pvc.Spec.VolumeMode = &block
testCase.TestDynamicProvisioning()
l.testCase.VolumeMode = &block
l.pvc.Spec.VolumeMode = &block
l.testCase.TestDynamicProvisioning()
})
It("should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]", func() {
@@ -206,18 +212,18 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
init()
defer cleanup()
dc := config.Framework.DynamicClient
vsc := sDriver.GetSnapshotClass(config)
dataSource, cleanupFunc := prepareDataSourceForProvisioning(NodeSelection{Name: config.ClientNodeName}, cs, dc, pvc, sc, vsc)
dc := l.config.Framework.DynamicClient
vsc := sDriver.GetSnapshotClass(l.config)
dataSource, cleanupFunc := prepareDataSourceForProvisioning(NodeSelection{Name: l.config.ClientNodeName}, l.cs, dc, l.pvc, l.sc, vsc)
defer cleanupFunc()
pvc.Spec.DataSource = dataSource
testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
l.pvc.Spec.DataSource = dataSource
l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
By("checking whether the created volume has the pre-populated data")
command := fmt.Sprintf("grep '%s' /mnt/test/initialData", claim.Namespace)
RunInPodWithVolume(cs, claim.Namespace, claim.Name, "pvc-snapshot-tester", command, NodeSelection{Name: config.ClientNodeName})
RunInPodWithVolume(l.cs, claim.Namespace, claim.Name, "pvc-snapshot-tester", command, NodeSelection{Name: l.config.ClientNodeName})
}
testCase.TestDynamicProvisioning()
l.testCase.TestDynamicProvisioning()
})
It("should allow concurrent writes on the same node", func() {
@@ -228,7 +234,7 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
init()
defer cleanup()
testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
// We start two pods concurrently on the same node,
// using the same PVC. Both wait for other to create a
// file before returning. The pods are forced onto the
@@ -241,7 +247,7 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
defer GinkgoRecover()
defer wg.Done()
node := NodeSelection{
Name: config.ClientNodeName,
Name: l.config.ClientNodeName,
}
if podName == secondPodName {
node.Affinity = &v1.Affinity{
@@ -259,13 +265,13 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
},
}
}
RunInPodWithVolume(cs, claim.Namespace, claim.Name, podName, command, node)
RunInPodWithVolume(l.cs, claim.Namespace, claim.Name, podName, command, node)
}
go run(firstPodName, "touch /mnt/test/first && while ! [ -f /mnt/test/second ]; do sleep 1; done")
go run(secondPodName, "touch /mnt/test/second && while ! [ -f /mnt/test/first ]; do sleep 1; done")
wg.Wait()
}
testCase.TestDynamicProvisioning()
l.testCase.TestDynamicProvisioning()
})
}

View File

@@ -26,7 +26,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -73,10 +72,10 @@ func (s *subPathTestSuite) getTestSuiteInfo() TestSuiteInfo {
}
func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
var (
config *PerTestConfig
testCleanup func()
cs clientset.Interface
type local struct {
config *PerTestConfig
testCleanup func()
resource *genericVolumeTestResource
roVolSource *v1.VolumeSource
pod *v1.Pod
@@ -84,7 +83,8 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
subPathDir string
filePathInSubpath string
filePathInVolume string
)
}
var l local
// No preconditions to test. Normally they would be in a BeforeEach here.
@@ -95,33 +95,30 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
f := framework.NewDefaultFramework("provisioning")
init := func() {
cs = f.ClientSet // needed for cleanup, f.ClientSet itself gets reset too early
l = local{}
// Now do the more expensive test initialization.
config, testCleanup = driver.PrepareTest(f)
fsType := pattern.FsType
volType := pattern.VolType
resource = createGenericVolumeTestResource(driver, config, pattern)
l.config, l.testCleanup = driver.PrepareTest(f)
l.resource = createGenericVolumeTestResource(driver, l.config, pattern)
// Setup subPath test dependent resource
roVolSource = nil
volType := pattern.VolType
switch volType {
case testpatterns.InlineVolume:
if iDriver, ok := driver.(InlineVolumeTestDriver); ok {
roVolSource = iDriver.GetVolumeSource(true, fsType, resource.volume)
l.roVolSource = iDriver.GetVolumeSource(true, pattern.FsType, l.resource.volume)
}
case testpatterns.PreprovisionedPV:
roVolSource = &v1.VolumeSource{
l.roVolSource = &v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: resource.pvc.Name,
ClaimName: l.resource.pvc.Name,
ReadOnly: true,
},
}
case testpatterns.DynamicPV:
roVolSource = &v1.VolumeSource{
l.roVolSource = &v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: resource.pvc.Name,
ClaimName: l.resource.pvc.Name,
ReadOnly: true,
},
}
@@ -130,35 +127,35 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
}
subPath := f.Namespace.Name
pod = SubpathTestPod(f, subPath, resource.volType, resource.volSource, true)
pod.Spec.NodeName = config.ClientNodeName
pod.Spec.NodeSelector = config.ClientNodeSelector
l.pod = SubpathTestPod(f, subPath, l.resource.volType, l.resource.volSource, true)
l.pod.Spec.NodeName = l.config.ClientNodeName
l.pod.Spec.NodeSelector = l.config.ClientNodeSelector
formatPod = volumeFormatPod(f, resource.volSource)
formatPod.Spec.NodeName = config.ClientNodeName
formatPod.Spec.NodeSelector = config.ClientNodeSelector
l.formatPod = volumeFormatPod(f, l.resource.volSource)
l.formatPod.Spec.NodeName = l.config.ClientNodeName
l.formatPod.Spec.NodeSelector = l.config.ClientNodeSelector
subPathDir = filepath.Join(volumePath, subPath)
filePathInSubpath = filepath.Join(volumePath, fileName)
filePathInVolume = filepath.Join(subPathDir, fileName)
l.subPathDir = filepath.Join(volumePath, subPath)
l.filePathInSubpath = filepath.Join(volumePath, fileName)
l.filePathInVolume = filepath.Join(l.subPathDir, fileName)
}
cleanup := func() {
if pod != nil {
if l.pod != nil {
By("Deleting pod")
err := framework.DeletePodWithWait(f, cs, pod)
err := framework.DeletePodWithWait(f, f.ClientSet, l.pod)
Expect(err).ToNot(HaveOccurred(), "while deleting pod")
pod = nil
l.pod = nil
}
if resource != nil {
resource.cleanupResource()
resource = nil
if l.resource != nil {
l.resource.cleanupResource()
l.resource = nil
}
if testCleanup != nil {
testCleanup()
testCleanup = nil
if l.testCleanup != nil {
l.testCleanup()
l.testCleanup = nil
}
}
@@ -167,10 +164,10 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
defer cleanup()
// Write the file in the subPath from init container 1
setWriteCommand(filePathInSubpath, &pod.Spec.InitContainers[1])
setWriteCommand(l.filePathInSubpath, &l.pod.Spec.InitContainers[1])
// Read it from outside the subPath from container 1
testReadFile(f, filePathInVolume, pod, 1)
testReadFile(f, l.filePathInVolume, l.pod, 1)
})
It("should support existing directory", func() {
@@ -178,13 +175,13 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
defer cleanup()
// Create the directory
setInitCommand(pod, fmt.Sprintf("mkdir -p %s", subPathDir))
setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s", l.subPathDir))
// Write the file in the subPath from init container 1
setWriteCommand(filePathInSubpath, &pod.Spec.InitContainers[1])
setWriteCommand(l.filePathInSubpath, &l.pod.Spec.InitContainers[1])
// Read it from outside the subPath from container 1
testReadFile(f, filePathInVolume, pod, 1)
testReadFile(f, l.filePathInVolume, l.pod, 1)
})
It("should support existing single file", func() {
@@ -192,10 +189,10 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
defer cleanup()
// Create the file in the init container
setInitCommand(pod, fmt.Sprintf("mkdir -p %s; echo \"mount-tester new file\" > %s", subPathDir, filePathInVolume))
setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s; echo \"mount-tester new file\" > %s", l.subPathDir, l.filePathInVolume))
// Read it from inside the subPath from container 0
testReadFile(f, filePathInSubpath, pod, 0)
testReadFile(f, l.filePathInSubpath, l.pod, 0)
})
It("should support file as subpath", func() {
@@ -203,9 +200,9 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
defer cleanup()
// Create the file in the init container
setInitCommand(pod, fmt.Sprintf("echo %s > %s", f.Namespace.Name, subPathDir))
setInitCommand(l.pod, fmt.Sprintf("echo %s > %s", f.Namespace.Name, l.subPathDir))
TestBasicSubpath(f, f.Namespace.Name, pod)
TestBasicSubpath(f, f.Namespace.Name, l.pod)
})
It("should fail if subpath directory is outside the volume [Slow]", func() {
@@ -213,10 +210,10 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
defer cleanup()
// Create the subpath outside the volume
setInitCommand(pod, fmt.Sprintf("ln -s /bin %s", subPathDir))
setInitCommand(l.pod, fmt.Sprintf("ln -s /bin %s", l.subPathDir))
// Pod should fail
testPodFailSubpath(f, pod, false)
testPodFailSubpath(f, l.pod, false)
})
It("should fail if subpath file is outside the volume [Slow]", func() {
@@ -224,10 +221,10 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
defer cleanup()
// Create the subpath outside the volume
setInitCommand(pod, fmt.Sprintf("ln -s /bin/sh %s", subPathDir))
setInitCommand(l.pod, fmt.Sprintf("ln -s /bin/sh %s", l.subPathDir))
// Pod should fail
testPodFailSubpath(f, pod, false)
testPodFailSubpath(f, l.pod, false)
})
It("should fail if non-existent subpath is outside the volume [Slow]", func() {
@@ -235,10 +232,10 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
defer cleanup()
// Create the subpath outside the volume
setInitCommand(pod, fmt.Sprintf("ln -s /bin/notanexistingpath %s", subPathDir))
setInitCommand(l.pod, fmt.Sprintf("ln -s /bin/notanexistingpath %s", l.subPathDir))
// Pod should fail
testPodFailSubpath(f, pod, false)
testPodFailSubpath(f, l.pod, false)
})
It("should fail if subpath with backstepping is outside the volume [Slow]", func() {
@@ -246,10 +243,10 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
defer cleanup()
// Create the subpath outside the volume
setInitCommand(pod, fmt.Sprintf("ln -s ../ %s", subPathDir))
setInitCommand(l.pod, fmt.Sprintf("ln -s ../ %s", l.subPathDir))
// Pod should fail
testPodFailSubpath(f, pod, false)
testPodFailSubpath(f, l.pod, false)
})
It("should support creating multiple subpath from same volumes [Slow]", func() {
@@ -260,22 +257,22 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
subpathDir2 := filepath.Join(volumePath, "subpath2")
filepath1 := filepath.Join("/test-subpath1", fileName)
filepath2 := filepath.Join("/test-subpath2", fileName)
setInitCommand(pod, fmt.Sprintf("mkdir -p %s; mkdir -p %s", subpathDir1, subpathDir2))
setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s; mkdir -p %s", subpathDir1, subpathDir2))
addSubpathVolumeContainer(&pod.Spec.Containers[0], v1.VolumeMount{
addSubpathVolumeContainer(&l.pod.Spec.Containers[0], v1.VolumeMount{
Name: volumeName,
MountPath: "/test-subpath1",
SubPath: "subpath1",
})
addSubpathVolumeContainer(&pod.Spec.Containers[0], v1.VolumeMount{
addSubpathVolumeContainer(&l.pod.Spec.Containers[0], v1.VolumeMount{
Name: volumeName,
MountPath: "/test-subpath2",
SubPath: "subpath2",
})
// Write the files from container 0 and instantly read them back
addMultipleWrites(&pod.Spec.Containers[0], filepath1, filepath2)
testMultipleReads(f, pod, 0, filepath1, filepath2)
addMultipleWrites(&l.pod.Spec.Containers[0], filepath1, filepath2)
testMultipleReads(f, l.pod, 0, filepath1, filepath2)
})
It("should support restarting containers using directory as subpath [Slow]", func() {
@@ -283,9 +280,9 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
defer cleanup()
// Create the directory
setInitCommand(pod, fmt.Sprintf("mkdir -p %v; touch %v", subPathDir, probeFilePath))
setInitCommand(l.pod, fmt.Sprintf("mkdir -p %v; touch %v", l.subPathDir, probeFilePath))
testPodContainerRestart(f, pod)
testPodContainerRestart(f, l.pod)
})
It("should support restarting containers using file as subpath [Slow]", func() {
@@ -293,28 +290,28 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
defer cleanup()
// Create the file
setInitCommand(pod, fmt.Sprintf("touch %v; touch %v", subPathDir, probeFilePath))
setInitCommand(l.pod, fmt.Sprintf("touch %v; touch %v", l.subPathDir, probeFilePath))
testPodContainerRestart(f, pod)
testPodContainerRestart(f, l.pod)
})
It("should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow]", func() {
init()
defer cleanup()
testSubpathReconstruction(f, pod, false)
testSubpathReconstruction(f, l.pod, false)
})
It("should unmount if pod is force deleted while kubelet is down [Disruptive][Slow]", func() {
init()
defer cleanup()
if strings.HasPrefix(resource.volType, "hostPath") || strings.HasPrefix(resource.volType, "csi-hostpath") {
if strings.HasPrefix(l.resource.volType, "hostPath") || strings.HasPrefix(l.resource.volType, "csi-hostpath") {
// TODO: This skip should be removed once #61446 is fixed
framework.Skipf("%s volume type does not support reconstruction, skipping", resource.volType)
framework.Skipf("%s volume type does not support reconstruction, skipping", l.resource.volType)
}
testSubpathReconstruction(f, pod, true)
testSubpathReconstruction(f, l.pod, true)
})
It("should support readOnly directory specified in the volumeMount", func() {
@@ -322,14 +319,14 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
defer cleanup()
// Create the directory
setInitCommand(pod, fmt.Sprintf("mkdir -p %s", subPathDir))
setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s", l.subPathDir))
// Write the file in the volume from init container 2
setWriteCommand(filePathInVolume, &pod.Spec.InitContainers[2])
setWriteCommand(l.filePathInVolume, &l.pod.Spec.InitContainers[2])
// Read it from inside the subPath from container 0
pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
testReadFile(f, filePathInSubpath, pod, 0)
l.pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
testReadFile(f, l.filePathInSubpath, l.pod, 0)
})
It("should support readOnly file specified in the volumeMount", func() {
@@ -337,62 +334,62 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
defer cleanup()
// Create the file
setInitCommand(pod, fmt.Sprintf("touch %s", subPathDir))
setInitCommand(l.pod, fmt.Sprintf("touch %s", l.subPathDir))
// Write the file in the volume from init container 2
setWriteCommand(subPathDir, &pod.Spec.InitContainers[2])
setWriteCommand(l.subPathDir, &l.pod.Spec.InitContainers[2])
// Read it from inside the subPath from container 0
pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
testReadFile(f, volumePath, pod, 0)
l.pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
testReadFile(f, volumePath, l.pod, 0)
})
It("should support existing directories when readOnly specified in the volumeSource", func() {
init()
defer cleanup()
if roVolSource == nil {
framework.Skipf("Volume type %v doesn't support readOnly source", resource.volType)
if l.roVolSource == nil {
framework.Skipf("Volume type %v doesn't support readOnly source", l.resource.volType)
}
origpod := pod.DeepCopy()
origpod := l.pod.DeepCopy()
// Create the directory
setInitCommand(pod, fmt.Sprintf("mkdir -p %s", subPathDir))
setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s", l.subPathDir))
// Write the file in the subPath from init container 1
setWriteCommand(filePathInSubpath, &pod.Spec.InitContainers[1])
setWriteCommand(l.filePathInSubpath, &l.pod.Spec.InitContainers[1])
// Read it from inside the subPath from container 0
testReadFile(f, filePathInSubpath, pod, 0)
testReadFile(f, l.filePathInSubpath, l.pod, 0)
// Reset the pod
pod = origpod
l.pod = origpod
// Set volume source to read only
pod.Spec.Volumes[0].VolumeSource = *roVolSource
l.pod.Spec.Volumes[0].VolumeSource = *l.roVolSource
// Read it from inside the subPath from container 0
testReadFile(f, filePathInSubpath, pod, 0)
testReadFile(f, l.filePathInSubpath, l.pod, 0)
})
It("should verify container cannot write to subpath readonly volumes", func() {
init()
defer cleanup()
if roVolSource == nil {
framework.Skipf("Volume type %v doesn't support readOnly source", resource.volType)
if l.roVolSource == nil {
framework.Skipf("Volume type %v doesn't support readOnly source", l.resource.volType)
}
// Format the volume while it's writable
formatVolume(f, formatPod)
formatVolume(f, l.formatPod)
// Set volume source to read only
pod.Spec.Volumes[0].VolumeSource = *roVolSource
l.pod.Spec.Volumes[0].VolumeSource = *l.roVolSource
// Write the file in the volume from container 0
setWriteCommand(subPathDir, &pod.Spec.Containers[0])
setWriteCommand(l.subPathDir, &l.pod.Spec.Containers[0])
// Pod should fail
testPodFailSubpath(f, pod, true)
testPodFailSubpath(f, l.pod, true)
})
It("should be able to unmount after the subpath directory is deleted", func() {
@@ -400,12 +397,12 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
defer cleanup()
// Change volume container to busybox so we can exec later
pod.Spec.Containers[1].Image = imageutils.GetE2EImage(imageutils.BusyBox)
pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
l.pod.Spec.Containers[1].Image = imageutils.GetE2EImage(imageutils.BusyBox)
l.pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
By(fmt.Sprintf("Creating pod %s", pod.Name))
removeUnusedContainers(pod)
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
By(fmt.Sprintf("Creating pod %s", l.pod.Name))
removeUnusedContainers(l.pod)
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(l.pod)
Expect(err).ToNot(HaveOccurred(), "while creating pod")
defer func() {
By(fmt.Sprintf("Deleting pod %s", pod.Name))
@@ -413,12 +410,12 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
}()
// Wait for pod to be running
err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod)
err = framework.WaitForPodRunningInNamespace(f.ClientSet, l.pod)
Expect(err).ToNot(HaveOccurred(), "while waiting for pod to be running")
// Exec into container that mounted the volume, delete subpath directory
rmCmd := fmt.Sprintf("rm -rf %s", subPathDir)
_, err = podContainerExec(pod, 1, rmCmd)
rmCmd := fmt.Sprintf("rm -rf %s", l.subPathDir)
_, err = podContainerExec(l.pod, 1, rmCmd)
Expect(err).ToNot(HaveOccurred(), "while removing subpath directory")
// Delete pod (from defer) and wait for it to be successfully deleted

View File

@@ -75,11 +75,15 @@ func (t *volumeIOTestSuite) getTestSuiteInfo() TestSuiteInfo {
}
func (t *volumeIOTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
var (
dInfo = driver.GetDriverInfo()
type local struct {
config *PerTestConfig
testCleanup func()
resource *genericVolumeTestResource
resource *genericVolumeTestResource
}
var (
dInfo = driver.GetDriverInfo()
l local
)
// No preconditions to test. Normally they would be in a BeforeEach here.
@@ -91,23 +95,25 @@ func (t *volumeIOTestSuite) defineTests(driver TestDriver, pattern testpatterns.
f := framework.NewDefaultFramework("volumeio")
init := func() {
l = local{}
// Now do the more expensive test initialization.
config, testCleanup = driver.PrepareTest(f)
resource = createGenericVolumeTestResource(driver, config, pattern)
if resource.volSource == nil {
l.config, l.testCleanup = driver.PrepareTest(f)
l.resource = createGenericVolumeTestResource(driver, l.config, pattern)
if l.resource.volSource == nil {
framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name)
}
}
cleanup := func() {
if resource != nil {
resource.cleanupResource()
resource = nil
if l.resource != nil {
l.resource.cleanupResource()
l.resource = nil
}
if testCleanup != nil {
testCleanup()
testCleanup = nil
if l.testCleanup != nil {
l.testCleanup()
l.testCleanup = nil
}
}
@@ -126,7 +132,7 @@ func (t *volumeIOTestSuite) defineTests(driver TestDriver, pattern testpatterns.
podSec := v1.PodSecurityContext{
FSGroup: fsGroup,
}
err := testVolumeIO(f, cs, convertTestConfig(config), *resource.volSource, &podSec, testFile, fileSizes)
err := testVolumeIO(f, cs, convertTestConfig(l.config), *l.resource.volSource, &podSec, testFile, fileSizes)
Expect(err).NotTo(HaveOccurred())
})
}

View File

@@ -26,6 +26,7 @@ import (
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -62,14 +63,20 @@ func (t *volumeModeTestSuite) getTestSuiteInfo() TestSuiteInfo {
}
func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
var (
dInfo = driver.GetDriverInfo()
type local struct {
config *PerTestConfig
testCleanup func()
sc *storagev1.StorageClass
pvc *v1.PersistentVolumeClaim
pv *v1.PersistentVolume
volume TestVolume
cs clientset.Interface
ns *v1.Namespace
sc *storagev1.StorageClass
pvc *v1.PersistentVolumeClaim
pv *v1.PersistentVolume
volume TestVolume
}
var (
dInfo = driver.GetDriverInfo()
l local
)
// No preconditions to test. Normally they would be in a BeforeEach here.
@@ -81,10 +88,13 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
f := framework.NewDefaultFramework("volumemode")
init := func() {
// Now do the more expensive test initialization.
config, testCleanup = driver.PrepareTest(f)
l = local{}
l.ns = f.Namespace
l.cs = f.ClientSet
// Now do the more expensive test initialization.
l.config, l.testCleanup = driver.PrepareTest(f)
ns := f.Namespace
fsType := pattern.FsType
volBindMode := storagev1.VolumeBindingImmediate
@@ -95,38 +105,38 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
)
// Create volume for pre-provisioned volume tests
volume = CreateVolume(driver, config, pattern.VolType)
l.volume = CreateVolume(driver, l.config, pattern.VolType)
switch pattern.VolType {
case testpatterns.PreprovisionedPV:
if pattern.VolMode == v1.PersistentVolumeBlock {
scName = fmt.Sprintf("%s-%s-sc-for-block", ns.Name, dInfo.Name)
scName = fmt.Sprintf("%s-%s-sc-for-block", l.ns.Name, dInfo.Name)
} else if pattern.VolMode == v1.PersistentVolumeFilesystem {
scName = fmt.Sprintf("%s-%s-sc-for-file", ns.Name, dInfo.Name)
scName = fmt.Sprintf("%s-%s-sc-for-file", l.ns.Name, dInfo.Name)
}
if pDriver, ok := driver.(PreprovisionedPVTestDriver); ok {
pvSource, volumeNodeAffinity = pDriver.GetPersistentVolumeSource(false, fsType, volume)
pvSource, volumeNodeAffinity = pDriver.GetPersistentVolumeSource(false, fsType, l.volume)
if pvSource == nil {
framework.Skipf("Driver %q does not define PersistentVolumeSource - skipping", dInfo.Name)
}
storageClass, pvConfig, pvcConfig := generateConfigsForPreprovisionedPVTest(scName, volBindMode, pattern.VolMode, *pvSource, volumeNodeAffinity)
sc = storageClass
pv = framework.MakePersistentVolume(pvConfig)
pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns.Name)
l.sc = storageClass
l.pv = framework.MakePersistentVolume(pvConfig)
l.pvc = framework.MakePersistentVolumeClaim(pvcConfig, l.ns.Name)
}
case testpatterns.DynamicPV:
if dDriver, ok := driver.(DynamicPVTestDriver); ok {
sc = dDriver.GetDynamicProvisionStorageClass(config, fsType)
if sc == nil {
l.sc = dDriver.GetDynamicProvisionStorageClass(l.config, fsType)
if l.sc == nil {
framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name)
}
sc.VolumeBindingMode = &volBindMode
l.sc.VolumeBindingMode = &volBindMode
claimSize := dDriver.GetClaimSize()
pvc = getClaim(claimSize, ns.Name)
pvc.Spec.StorageClassName = &sc.Name
pvc.Spec.VolumeMode = &pattern.VolMode
l.pvc = getClaim(claimSize, l.ns.Name)
l.pvc.Spec.StorageClassName = &l.sc.Name
l.pvc.Spec.VolumeMode = &pattern.VolMode
}
default:
framework.Failf("Volume mode test doesn't support: %s", pattern.VolType)
@@ -134,30 +144,30 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
}
cleanup := func() {
if pv != nil || pvc != nil {
if l.pv != nil || l.pvc != nil {
By("Deleting pv and pvc")
errs := framework.PVPVCCleanup(f.ClientSet, f.Namespace.Name, pv, pvc)
errs := framework.PVPVCCleanup(f.ClientSet, f.Namespace.Name, l.pv, l.pvc)
if len(errs) > 0 {
framework.Logf("Failed to delete PV and/or PVC: %v", utilerrors.NewAggregate(errs))
}
pv = nil
pvc = nil
l.pv = nil
l.pvc = nil
}
if sc != nil {
if l.sc != nil {
By("Deleting sc")
deleteStorageClass(f.ClientSet, sc.Name)
sc = nil
deleteStorageClass(f.ClientSet, l.sc.Name)
l.sc = nil
}
if volume != nil {
volume.DeleteVolume()
volume = nil
if l.volume != nil {
l.volume.DeleteVolume()
l.volume = nil
}
if testCleanup != nil {
testCleanup()
testCleanup = nil
if l.testCleanup != nil {
l.testCleanup()
l.testCleanup = nil
}
}
@@ -170,31 +180,29 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
init()
defer cleanup()
cs := f.ClientSet
ns := f.Namespace
var err error
By("Creating sc")
sc, err = cs.StorageV1().StorageClasses().Create(sc)
l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc)
Expect(err).NotTo(HaveOccurred())
By("Creating pv and pvc")
pv, err = cs.CoreV1().PersistentVolumes().Create(pv)
l.pv, err = l.cs.CoreV1().PersistentVolumes().Create(l.pv)
Expect(err).NotTo(HaveOccurred())
// Prebind pv
pvc.Spec.VolumeName = pv.Name
pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc)
l.pvc.Spec.VolumeName = l.pv.Name
l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.WaitOnPVandPVC(cs, ns.Name, pv, pvc))
framework.ExpectNoError(framework.WaitOnPVandPVC(l.cs, l.ns.Name, l.pv, l.pvc))
By("Creating pod")
pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{pvc},
pod, err := framework.CreateSecPodWithNodeName(l.cs, l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc},
false, "", false, false, framework.SELinuxLabel,
nil, config.ClientNodeName, framework.PodStartTimeout)
nil, l.config.ClientNodeName, framework.PodStartTimeout)
defer func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod))
}()
Expect(err).To(HaveOccurred())
})
@@ -203,31 +211,29 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
init()
defer cleanup()
cs := f.ClientSet
ns := f.Namespace
var err error
By("Creating sc")
sc, err = cs.StorageV1().StorageClasses().Create(sc)
l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc)
Expect(err).NotTo(HaveOccurred())
By("Creating pv and pvc")
pv, err = cs.CoreV1().PersistentVolumes().Create(pv)
l.pv, err = l.cs.CoreV1().PersistentVolumes().Create(l.pv)
Expect(err).NotTo(HaveOccurred())
// Prebind pv
pvc.Spec.VolumeName = pv.Name
pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc)
l.pvc.Spec.VolumeName = l.pv.Name
l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.WaitOnPVandPVC(cs, ns.Name, pv, pvc))
framework.ExpectNoError(framework.WaitOnPVandPVC(l.cs, l.ns.Name, l.pv, l.pvc))
By("Creating pod")
pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{pvc},
pod, err := framework.CreateSecPodWithNodeName(l.cs, l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc},
false, "", false, false, framework.SELinuxLabel,
nil, config.ClientNodeName, framework.PodStartTimeout)
nil, l.config.ClientNodeName, framework.PodStartTimeout)
defer func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod))
}()
Expect(err).NotTo(HaveOccurred())
@@ -245,19 +251,17 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
init()
defer cleanup()
cs := f.ClientSet
ns := f.Namespace
var err error
By("Creating sc")
sc, err = cs.StorageV1().StorageClasses().Create(sc)
l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc)
Expect(err).NotTo(HaveOccurred())
By("Creating pv and pvc")
pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc)
l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, l.cs, l.pvc.Namespace, l.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).To(HaveOccurred())
})
} else {
@@ -265,33 +269,31 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
init()
defer cleanup()
cs := f.ClientSet
ns := f.Namespace
var err error
By("Creating sc")
sc, err = cs.StorageV1().StorageClasses().Create(sc)
l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc)
Expect(err).NotTo(HaveOccurred())
By("Creating pv and pvc")
pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc)
l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, l.cs, l.pvc.Namespace, l.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.pvc.Namespace).Get(l.pvc.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
pv, err = cs.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{})
l.pv, err = l.cs.CoreV1().PersistentVolumes().Get(l.pvc.Spec.VolumeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
By("Creating pod")
pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{pvc},
pod, err := framework.CreateSecPodWithNodeName(l.cs, l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc},
false, "", false, false, framework.SELinuxLabel,
nil, config.ClientNodeName, framework.PodStartTimeout)
nil, l.config.ClientNodeName, framework.PodStartTimeout)
defer func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod))
}()
Expect(err).NotTo(HaveOccurred())

View File

@@ -90,12 +90,14 @@ func skipExecTest(driver TestDriver) {
}
func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
var (
dInfo = driver.GetDriverInfo()
type local struct {
config *PerTestConfig
testCleanup func()
resource *genericVolumeTestResource
)
resource *genericVolumeTestResource
}
var dInfo = driver.GetDriverInfo()
var l local
// No preconditions to test. Normally they would be in a BeforeEach here.
@@ -106,23 +108,25 @@ func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
f := framework.NewDefaultFramework("volumeio")
init := func() {
l = local{}
// Now do the more expensive test initialization.
config, testCleanup = driver.PrepareTest(f)
resource = createGenericVolumeTestResource(driver, config, pattern)
if resource.volSource == nil {
l.config, l.testCleanup = driver.PrepareTest(f)
l.resource = createGenericVolumeTestResource(driver, l.config, pattern)
if l.resource.volSource == nil {
framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name)
}
}
cleanup := func() {
if resource != nil {
resource.cleanupResource()
resource = nil
if l.resource != nil {
l.resource.cleanupResource()
l.resource = nil
}
if testCleanup != nil {
testCleanup()
testCleanup = nil
if l.testCleanup != nil {
l.testCleanup()
l.testCleanup = nil
}
}
@@ -130,20 +134,20 @@ func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
skipPersistenceTest(driver)
init()
defer func() {
framework.VolumeTestCleanup(f, convertTestConfig(config))
framework.VolumeTestCleanup(f, convertTestConfig(l.config))
cleanup()
}()
tests := []framework.VolumeTest{
{
Volume: *resource.volSource,
Volume: *l.resource.volSource,
File: "index.html",
// Must match content
ExpectedContent: fmt.Sprintf("Hello from %s from namespace %s",
dInfo.Name, f.Namespace.Name),
},
}
config := convertTestConfig(config)
config := convertTestConfig(l.config)
framework.InjectHtml(f.ClientSet, config, tests[0].Volume, tests[0].ExpectedContent)
var fsGroup *int64
if dInfo.Capabilities[CapFsGroup] {
@@ -158,7 +162,7 @@ func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
init()
defer cleanup()
testScriptInPod(f, resource.volType, resource.volSource, config.ClientNodeSelector)
testScriptInPod(f, l.resource.volType, l.resource.volSource, l.config.ClientNodeSelector)
})
}