From 4a076578451aa27e8ac60beec1fd3f23918c5331 Mon Sep 17 00:00:00 2001
From: Saikat Roychowdhury
Date: Fri, 7 Jan 2022 01:46:54 +0000
Subject: [PATCH] e2e test

---
 test/e2e/storage/testsuites/provisioning.go | 82 +++++++++++++++++++++
 1 file changed, 82 insertions(+)

diff --git a/test/e2e/storage/testsuites/provisioning.go b/test/e2e/storage/testsuites/provisioning.go
index eab48a2092c..890b1a7cb6b 100644
--- a/test/e2e/storage/testsuites/provisioning.go
+++ b/test/e2e/storage/testsuites/provisioning.go
@@ -327,6 +327,26 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
 		}
 		wg.Wait()
 	})
+
+	ginkgo.It("should mount multiple PV pointing to the same storage on the same node", func() {
+		// The csi-hostpath driver does not support this test case. In this test case, we have 2 PVs pointing to the same underlying storage.
+		// During the NodeStage call for the second volume, csi-hostpath fails the call because it thinks the volume is already staged at a different path.
+		// Note: This is not an issue with drivers like PD CSI, where NodeStage is a no-op for block mode.
+		if pattern.VolMode == v1.PersistentVolumeBlock {
+			e2eskipper.Skipf("skipping multiple PV mount test for block mode")
+		}
+
+		init()
+		defer cleanup()
+
+		l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
+			MultiplePVMountSingleNodeCheck(l.cs, f.Timeouts, claim, l.config.ClientNodeSelection)
+		}
+		_, clearProvisionedStorageClass := SetupStorageClass(l.testCase.Client, l.testCase.Class)
+		defer clearProvisionedStorageClass()
+
+		l.testCase.TestDynamicProvisioning()
+	})
 }
 
 // SetupStorageClass ensures that a StorageClass from a spec exists, if the StorageClass already exists
@@ -946,3 +966,65 @@ func preparePVCDataSourceForProvisioning(
 
 	return dataSourceRef, cleanupFunc
 }
+
+// MultiplePVMountSingleNodeCheck checks that multiple PVs pointing to the same underlying storage can be mounted simultaneously on a single node.
+//
+// Steps:
+// - Start Pod1 using PVC1, PV1 (which points to an underlying volume v) on node N1.
+// - Create PVC2, PV2 and prebind them. PV2 points to the same underlying volume v.
+// - Start Pod2 using PVC2, PV2 (which points to an underlying volume v) on node N1.
+func MultiplePVMountSingleNodeCheck(client clientset.Interface, timeouts *framework.TimeoutContext, claim *v1.PersistentVolumeClaim, node e2epod.NodeSelection) {
+	pod1Config := e2epod.Config{
+		NS:            claim.Namespace,
+		NodeSelection: node,
+		PVCs:          []*v1.PersistentVolumeClaim{claim},
+	}
+	pod1, err := e2epod.CreateSecPodWithNodeSelection(client, &pod1Config, timeouts.PodStart)
+	framework.ExpectNoError(err)
+	defer func() {
+		ginkgo.By(fmt.Sprintf("Deleting Pod %s/%s", pod1.Namespace, pod1.Name))
+		framework.ExpectNoError(e2epod.DeletePodWithWait(client, pod1))
+	}()
+	ginkgo.By(fmt.Sprintf("Created Pod %s/%s on node %s", pod1.Namespace, pod1.Name, pod1.Spec.NodeName))
+
+	// Create a new PV which points to the same underlying storage. The Retain policy is used so that deletion of the second PVC does not trigger the deletion of its bound PV and underlying storage.
+	e2evolume, err := getBoundPV(client, claim)
+	framework.ExpectNoError(err)
+	pv2Config := e2epv.PersistentVolumeConfig{
+		NamePrefix:       fmt.Sprintf("%s-", "pv"),
+		StorageClassName: *claim.Spec.StorageClassName,
+		PVSource:         e2evolume.Spec.PersistentVolumeSource,
+		AccessModes:      e2evolume.Spec.AccessModes,
+		VolumeMode:       e2evolume.Spec.VolumeMode,
+		ReclaimPolicy:    v1.PersistentVolumeReclaimRetain,
+	}
+
+	pvc2Config := e2epv.PersistentVolumeClaimConfig{
+		NamePrefix:       fmt.Sprintf("%s-", "pvc"),
+		StorageClassName: claim.Spec.StorageClassName,
+		AccessModes:      e2evolume.Spec.AccessModes,
+		VolumeMode:       e2evolume.Spec.VolumeMode,
+	}
+
+	pv2, pvc2, err := e2epv.CreatePVCPV(client, timeouts, pv2Config, pvc2Config, claim.Namespace, true)
+	framework.ExpectNoError(err, "PVC, PV creation failed")
+	framework.Logf("Created PVC %s/%s and PV %s", pvc2.Namespace, pvc2.Name, pv2.Name)
+
+	pod2Config := e2epod.Config{
+		NS:            pvc2.Namespace,
+		NodeSelection: e2epod.NodeSelection{Name: pod1.Spec.NodeName},
+		PVCs:          []*v1.PersistentVolumeClaim{pvc2},
+	}
+	pod2, err := e2epod.CreateSecPodWithNodeSelection(client, &pod2Config, timeouts.PodStart)
+	framework.ExpectNoError(err)
+	ginkgo.By(fmt.Sprintf("Created Pod %s/%s on node %s", pod2.Namespace, pod2.Name, pod2.Spec.NodeName))
+
+	ginkgo.By(fmt.Sprintf("Deleting Pod %s/%s", pod2.Namespace, pod2.Name))
+	framework.ExpectNoError(e2epod.DeletePodWithWait(client, pod2))
+
+	err = e2epv.DeletePersistentVolumeClaim(client, pvc2.Name, pvc2.Namespace)
+	framework.ExpectNoError(err, "Failed to delete PVC: %s/%s", pvc2.Namespace, pvc2.Name)
+
+	err = e2epv.DeletePersistentVolume(client, pv2.Name)
+	framework.ExpectNoError(err, "Failed to delete PV: %s", pv2.Name)
+}
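
Note (editorial, not part of the patch): the core move in MultiplePVMountSingleNodeCheck is building a second PV that reuses the PersistentVolumeSource of the already-bound PV, with a Retain reclaim policy so that releasing the second PVC cannot delete the shared storage, and then pre-binding a fresh PVC to it. The sketch below shows that construction with plain k8s.io/api and apimachinery types, outside the e2e helpers; the helper name clonePVForSameStorage and the GenerateName prefixes are illustrative assumptions, not code from the patch.

package example

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// clonePVForSameStorage (hypothetical helper, for illustration only) builds a second PV
// that points at the same underlying storage as an existing bound PV, plus a PVC in the
// given namespace that can be pre-bound to it. Retain ensures that deleting the second
// PVC/PV later does not delete the shared underlying volume.
func clonePVForSameStorage(src *v1.PersistentVolume, ns string) (*v1.PersistentVolume, *v1.PersistentVolumeClaim) {
	pv2 := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{GenerateName: "pv-"},
		Spec: v1.PersistentVolumeSpec{
			StorageClassName:              src.Spec.StorageClassName,
			PersistentVolumeSource:        src.Spec.PersistentVolumeSource, // same underlying volume
			Capacity:                      src.Spec.Capacity,
			AccessModes:                   src.Spec.AccessModes,
			VolumeMode:                    src.Spec.VolumeMode,
			PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimRetain,
		},
	}
	pvc2 := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{GenerateName: "pvc-", Namespace: ns},
		Spec: v1.PersistentVolumeClaimSpec{
			StorageClassName: &src.Spec.StorageClassName,
			AccessModes:      src.Spec.AccessModes,
			VolumeMode:       src.Spec.VolumeMode,
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceStorage: src.Spec.Capacity[v1.ResourceStorage],
				},
			},
		},
	}
	// Pre-binding (what e2epv.CreatePVCPV does with preBind=true) is completed by pointing
	// pv2.Spec.ClaimRef at pvc2 once the objects have been created and pvc2 has a name/UID.
	return pv2, pvc2
}

In the patch itself, e2epv.CreatePVCPV performs the equivalent construction plus the actual API creation, pre-binding, and the wait for the pair to become Bound.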
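
Because the new ginkgo.It case lives inside the generic provisioning suite, it runs for every driver and test pattern that wires that suite up; no per-driver changes are needed beyond the usual suite registration. Below is a minimal sketch of such wiring under stated assumptions: newMyDriver is a hypothetical placeholder for a storageframework.TestDriver constructor, and the ginkgo import path shown is the v1 path in use at the time of this patch.

package mydriver

import (
	"github.com/onsi/ginkgo"

	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
	"k8s.io/kubernetes/test/e2e/storage/testsuites"
)

// newMyDriver is a hypothetical placeholder (not part of the patch); plug in any
// storageframework.TestDriver implementation here before the spec tree is built.
var newMyDriver func() storageframework.TestDriver

// Only the provisioning suite is listed; it is the suite that now carries the
// "should mount multiple PV pointing to the same storage on the same node" case.
var suitesToRun = []func() storageframework.TestSuite{
	testsuites.InitProvisioningTestSuite,
}

var _ = ginkgo.Describe("MyDriver CSI volumes", func() {
	driver := newMyDriver()

	ginkgo.Context(storageframework.GetDriverNameWithFeatureTags(driver), func() {
		storageframework.DefineTestSuites(driver, suitesToRun)
	})
})

Per-pattern skips, such as the block-mode Skipf in the patch, are still evaluated when the suite is instantiated for each volume mode, so drivers that only exercise block patterns will simply skip the new case.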