From 62b7516b636d71f345afd7af1296849977bbccf8 Mon Sep 17 00:00:00 2001
From: Jan Safranek
Date: Wed, 3 Jun 2020 20:36:39 +0200
Subject: [PATCH] Add e2e test for read-only volume used by multiple pods

---
 test/e2e/framework/pod/create.go           |  3 +-
 test/e2e/storage/testsuites/multivolume.go | 69 +++++++++++++++++++++-
 2 files changed, 68 insertions(+), 4 deletions(-)

diff --git a/test/e2e/framework/pod/create.go b/test/e2e/framework/pod/create.go
index 67bfa79bc0b..9b424de07bd 100644
--- a/test/e2e/framework/pod/create.go
+++ b/test/e2e/framework/pod/create.go
@@ -38,6 +38,7 @@ var (
 type Config struct {
 	NS                  string
 	PVCs                []*v1.PersistentVolumeClaim
+	PVCsReadOnly        bool
 	InlineVolumeSources []*v1.VolumeSource
 	IsPrivileged        bool
 	Command             string
@@ -224,7 +225,7 @@ func MakeSecPod(podConfig *Config) (*v1.Pod, error) {
 			volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename})
 		}
-		volumes[volumeIndex] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
+		volumes[volumeIndex] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: podConfig.PVCsReadOnly}}}
 		volumeIndex++
 	}
 	for _, src := range podConfig.InlineVolumeSources {
diff --git a/test/e2e/storage/testsuites/multivolume.go b/test/e2e/storage/testsuites/multivolume.go
index a7c31ecc68e..b90a2b31edd 100644
--- a/test/e2e/storage/testsuites/multivolume.go
+++ b/test/e2e/storage/testsuites/multivolume.go
@@ -323,7 +323,35 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 		// Test access to the volume from pods on the same node
 		TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
-			l.config.ClientNodeSelection, resource.Pvc, numPods, true /* sameNode */)
+			l.config.ClientNodeSelection, resource.Pvc, numPods, true /* sameNode */, false /* readOnly */)
 	})
+
+	// This tests the following configuration:
+	//          [pod1] [pod2]
+	//        [      node1      ]
+	//              \   /    <- same volume mode (read only)
+	//            [volume1]
+	ginkgo.It("should concurrently access the single read-only volume from pods on the same node", func() {
+		init()
+		defer cleanup()
+
+		numPods := 2
+
+		if !l.driver.GetDriverInfo().Capabilities[CapMultiPODs] {
+			e2eskipper.Skipf("Driver %q does not support multiple concurrent pods - skipping", dInfo.Name)
+		}
+
+		// Create the volume.
+		testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
+		resource := CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange)
+		l.resources = append(l.resources, resource)
+
+		// Initialize the volume with a filesystem - it's going to be mounted as read-only below.
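+		// A read-only mount would fail on a volume that has no filesystem yet,
+		// because nothing can create one through a read-only mount.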
+		initializeVolume(l.cs, l.ns.Name, resource.Pvc, l.config.ClientNodeSelection)
+
+		// Test access to the volume from pods on a single node.
+		TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
+			l.config.ClientNodeSelection, resource.Pvc, numPods, true /* sameNode */, true /* readOnly */)
+	})
 
 	// This tests the following configuration:
@@ -365,7 +393,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 		// Test access to the volume from pods on different nodes
 		TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
-			l.config.ClientNodeSelection, resource.Pvc, numPods, false /* sameNode */)
+			l.config.ClientNodeSelection, resource.Pvc, numPods, false /* sameNode */, false /* readOnly */)
 	})
 }
 
@@ -443,7 +471,8 @@ func TestAccessMultipleVolumesAcrossPodRecreation(f *framework.Framework, cs cli
 // pod deletion doesn't affect them. Pods are deployed on the same node or on different nodes, depending on requiresSameNode.
 // Read/write checks are done across pods: pod{n} verifies what both pod{n-1} and pod{n} wrote.
 func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Interface, ns string,
-	node e2epod.NodeSelection, pvc *v1.PersistentVolumeClaim, numPods int, requiresSameNode bool) {
+	node e2epod.NodeSelection, pvc *v1.PersistentVolumeClaim, numPods int, requiresSameNode bool,
+	readOnly bool) {
 
 	var pods []*v1.Pod
@@ -456,6 +485,7 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 			PVCs:          []*v1.PersistentVolumeClaim{pvc},
 			SeLinuxLabel:  e2epv.SELinuxLabel,
 			NodeSelection: node,
+			PVCsReadOnly:  readOnly,
 		}
 		pod, err := e2epod.CreateSecPodWithNodeSelection(cs, &podConfig, framework.PodStartTimeout)
 		defer func() {
@@ -484,6 +514,11 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 		ginkgo.By(fmt.Sprintf("Checking if the volume in pod%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
 		utils.CheckVolumeModeOfPath(f, pod, *pvc.Spec.VolumeMode, path)
 
+		if readOnly {
+			ginkgo.By("Skipping volume content checks, volume is read-only")
+			continue
+		}
+
 		if i != 0 {
 			ginkgo.By(fmt.Sprintf("From pod%d, checking if reading the data that pod%d wrote works properly", index, index-1))
 			// For the 1st pod, no one has written data yet, so pass the read check
@@ -515,6 +550,11 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 		ginkgo.By(fmt.Sprintf("Rechecking if the volume in pod%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
 		utils.CheckVolumeModeOfPath(f, pod, *pvc.Spec.VolumeMode, "/mnt/volume1")
 
+		if readOnly {
+			ginkgo.By("Skipping volume content checks, volume is read-only")
+			continue
+		}
+
 		if i == 0 {
 			// This time there should be data that the last pod wrote, even for the 1st pod
 			ginkgo.By(fmt.Sprintf("From pod%d, rechecking if reading the data that the last pod wrote works properly", index))
@@ -586,3 +626,26 @@ func ensureTopologyRequirements(nodeSelection *e2epod.NodeSelection, nodes *v1.N
 
 	return nil
 }
+
+// initializeVolume creates a filesystem on the given volume, so it can be used as read-only later.
+func initializeVolume(cs clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, node e2epod.NodeSelection) {
+	if pvc.Spec.VolumeMode != nil && *pvc.Spec.VolumeMode == v1.PersistentVolumeBlock {
+		// Block volumes do not need to be initialized.
+		return
+	}
+
+	ginkgo.By(fmt.Sprintf("Initializing a filesystem on PVC %s", pvc.Name))
+	// Just create a pod with the volume mounted read-write. Kubernetes will create a filesystem there
+	// if it does not exist yet.
+	podConfig := e2epod.Config{
+		NS:            ns,
+		PVCs:          []*v1.PersistentVolumeClaim{pvc},
+		SeLinuxLabel:  e2epv.SELinuxLabel,
+		NodeSelection: node,
+	}
+	pod, err := e2epod.CreateSecPod(cs, &podConfig, framework.PodStartTimeout)
+	defer func() {
+		framework.ExpectNoError(e2epod.DeletePodWithWait(cs, pod))
+	}()
+	framework.ExpectNoError(err)
+}
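
Reviewer sketch, not part of the patch: the per-PVC volume that MakeSecPod builds once Config.PVCsReadOnly is set, i.e. what the new flag actually changes in the pod spec. The package, the wrapper function, and the volume name are illustrative; the v1 types come from k8s.io/api/core/v1, and only the ClaimName/ReadOnly plumbing is from this change.

	package sketch

	import v1 "k8s.io/api/core/v1"

	// makeVolume mirrors the volume that MakeSecPod generates for each claim
	// after this patch: the claim is mounted read-only inside the pod whenever
	// readOnly (Config.PVCsReadOnly) is true.
	func makeVolume(pvclaim *v1.PersistentVolumeClaim, readOnly bool) v1.Volume {
		return v1.Volume{
			Name: "volume1", // illustrative; MakeSecPod names volumes volume1, volume2, ...
			VolumeSource: v1.VolumeSource{
				PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
					ClaimName: pvclaim.Name,
					ReadOnly:  readOnly,
				},
			},
		}
	}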