From f8536e8e429865bcc11b793b5d65269a0a69b8a2 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Sat, 9 Feb 2019 14:07:01 +0100 Subject: [PATCH 1/8] e2e: fix snapshot skip test Even if snapshots are supported by the driver interface, the driver or suite might still want to skip a particular test, so those checks still need to be executed. --- test/e2e/storage/testsuites/base.go | 47 ++++++++++++++--------------- 1 file changed, 23 insertions(+), 24 deletions(-) diff --git a/test/e2e/storage/testsuites/base.go b/test/e2e/storage/testsuites/base.go index 3c7e58d4ff1..75425b98d33 100644 --- a/test/e2e/storage/testsuites/base.go +++ b/test/e2e/storage/testsuites/base.go @@ -91,7 +91,7 @@ func skipUnsupportedTest(suite TestSuite, driver TestDriver, pattern testpattern var isSupported bool // 1. Check if Whether SnapshotType is supported by driver from its interface - // if isSupported, so it must be a snapshot test case, we just return. + // if isSupported, we still execute the driver and suite tests if len(pattern.SnapshotType) > 0 { switch pattern.SnapshotType { case testpatterns.DynamicCreatedSnapshot: @@ -102,31 +102,30 @@ func skipUnsupportedTest(suite TestSuite, driver TestDriver, pattern testpattern if !isSupported { framework.Skipf("Driver %s doesn't support snapshot type %v -- skipping", dInfo.Name, pattern.SnapshotType) } - return - } + } else { + // 2. Check if Whether volType is supported by driver from its interface + switch pattern.VolType { + case testpatterns.InlineVolume: + _, isSupported = driver.(InlineVolumeTestDriver) + case testpatterns.PreprovisionedPV: + _, isSupported = driver.(PreprovisionedPVTestDriver) + case testpatterns.DynamicPV: + _, isSupported = driver.(DynamicPVTestDriver) + default: + isSupported = false + } - // 2. Check if Whether volType is supported by driver from its interface - switch pattern.VolType { - case testpatterns.InlineVolume: - _, isSupported = driver.(InlineVolumeTestDriver) - case testpatterns.PreprovisionedPV: - _, isSupported = driver.(PreprovisionedPVTestDriver) - case testpatterns.DynamicPV: - _, isSupported = driver.(DynamicPVTestDriver) - default: - isSupported = false - } + if !isSupported { + framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType) + } - if !isSupported { - framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType) - } - - // 3. Check if fsType is supported - if !dInfo.SupportedFsType.Has(pattern.FsType) { - framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.FsType) - } - if pattern.FsType == "xfs" && framework.NodeOSDistroIs("gci") { - framework.Skipf("Distro doesn't support xfs -- skipping") + // 3. Check if fsType is supported + if !dInfo.SupportedFsType.Has(pattern.FsType) { + framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.FsType) + } + if pattern.FsType == "xfs" && framework.NodeOSDistroIs("gci") { + framework.Skipf("Distro doesn't support xfs -- skipping") + } } // 4. 
Check with driver specific logic From 315266b25e01967e1956f252386d633ba39677c2 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Fri, 8 Feb 2019 20:47:08 +0100 Subject: [PATCH 2/8] e2e: refine snapshot test This addresses the two remaining change requests from https://github.com/kubernetes/kubernetes/pull/69036: - replace "csi-hostpath-v0" name check with capability check (cleaner that way) - add feature tag to "should create snapshot with defaults" because that is an alpha feature Signed-off-by: Patrick Ohly --- test/e2e/storage/testsuites/snapshottable.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/test/e2e/storage/testsuites/snapshottable.go b/test/e2e/storage/testsuites/snapshottable.go index cd3c73a50b8..a6969cb888d 100644 --- a/test/e2e/storage/testsuites/snapshottable.go +++ b/test/e2e/storage/testsuites/snapshottable.go @@ -80,6 +80,10 @@ func (s *snapshottableTestSuite) getTestSuiteInfo() TestSuiteInfo { } func (s *snapshottableTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver TestDriver) { + dInfo := driver.GetDriverInfo() + if !dInfo.Capabilities[CapDataSource] { + framework.Skipf("Driver %q does not support snapshots - skipping", dInfo.Name) + } } func createSnapshottableTestInput(driver TestDriver, pattern testpatterns.TestPattern) (snapshottableTestResource, snapshottableTestInput) { @@ -187,10 +191,7 @@ type snapshottableTestInput struct { } func testSnapshot(input *snapshottableTestInput) { - It("should create snapshot with defaults", func() { - if input.dInfo.Name == "csi-hostpath-v0" { - framework.Skipf("skip test when using driver csi-hostpath-v0 - skipping") - } + It("should create snapshot with defaults [Feature:VolumeSnapshotDataSource]", func() { TestCreateSnapshot(input.testCase, input.cs, input.dc, input.pvc, input.sc, input.vsc) }) } From e92bdd14cc3f5b19e8b29155ffebd5e96d31884e Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Fri, 21 Dec 2018 10:33:48 +0100 Subject: [PATCH 3/8] e2e/storage: remove unnecessary empty string checks There is no need to check for empty strings, we can also directly initialize structs with the value. The end result is the same when the value is empty (empty string in the struct). 
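To make the pattern this commit describes concrete, here is a minimal, hedged sketch (not taken from the patch; testCase and clientNodeName are hypothetical stand-ins for StorageClassTest and Config.ClientNodeName):

    package main

    import "fmt"

    // testCase stands in for structs such as StorageClassTest that carry an optional NodeName.
    type testCase struct {
    	NodeName string
    }

    func main() {
    	clientNodeName := "" // may legitimately be empty

    	// Old pattern: construct first, assign only when non-empty.
    	a := testCase{}
    	if clientNodeName != "" {
    		a.NodeName = clientNodeName
    	}

    	// New pattern: initialize the field directly; an empty string leaves
    	// the field at its zero value, so both results are identical.
    	b := testCase{NodeName: clientNodeName}

    	fmt.Println(a == b) // prints "true"
    }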
--- test/e2e/storage/testsuites/provisioning.go | 9 ++------- test/e2e/storage/testsuites/snapshottable.go | 21 ++++++++++---------- 2 files changed, 12 insertions(+), 18 deletions(-) diff --git a/test/e2e/storage/testsuites/provisioning.go b/test/e2e/storage/testsuites/provisioning.go index 4c03b002807..e5d83e8588d 100644 --- a/test/e2e/storage/testsuites/provisioning.go +++ b/test/e2e/storage/testsuites/provisioning.go @@ -89,6 +89,7 @@ func createProvisioningTestInput(driver TestDriver, pattern testpatterns.TestPat testCase: StorageClassTest{ ClaimSize: resource.claimSize, ExpectedSize: resource.claimSize, + NodeName: driver.GetDriverInfo().Config.ClientNodeName, }, cs: driver.GetDriverInfo().Config.Framework.ClientSet, dc: driver.GetDriverInfo().Config.Framework.DynamicClient, @@ -98,10 +99,6 @@ func createProvisioningTestInput(driver TestDriver, pattern testpatterns.TestPat dInfo: driver.GetDriverInfo(), } - if driver.GetDriverInfo().Config.ClientNodeName != "" { - input.testCase.NodeName = driver.GetDriverInfo().Config.ClientNodeName - } - return resource, input } @@ -440,6 +437,7 @@ func runInPodWithVolume(c clientset.Interface, ns, claimName, nodeName, command GenerateName: "pvc-volume-tester-", }, Spec: v1.PodSpec{ + NodeName: nodeName, Containers: []v1.Container{ { Name: "volume-tester", @@ -470,9 +468,6 @@ func runInPodWithVolume(c clientset.Interface, ns, claimName, nodeName, command }, } - if len(nodeName) != 0 { - pod.Spec.NodeName = nodeName - } pod, err := c.CoreV1().Pods(ns).Create(pod) framework.ExpectNoError(err, "Failed to create pod: %v", err) defer func() { diff --git a/test/e2e/storage/testsuites/snapshottable.go b/test/e2e/storage/testsuites/snapshottable.go index a6969cb888d..98f4ca0c8e6 100644 --- a/test/e2e/storage/testsuites/snapshottable.go +++ b/test/e2e/storage/testsuites/snapshottable.go @@ -91,18 +91,17 @@ func createSnapshottableTestInput(driver TestDriver, pattern testpatterns.TestPa resource := snapshottableTestResource{} resource.setupResource(driver, pattern) + dInfo := driver.GetDriverInfo() input := snapshottableTestInput{ - testCase: SnapshotClassTest{}, - cs: driver.GetDriverInfo().Config.Framework.ClientSet, - dc: driver.GetDriverInfo().Config.Framework.DynamicClient, - pvc: resource.pvc, - sc: resource.sc, - vsc: resource.vsc, - dInfo: driver.GetDriverInfo(), - } - - if driver.GetDriverInfo().Config.ClientNodeName != "" { - input.testCase.NodeName = driver.GetDriverInfo().Config.ClientNodeName + testCase: SnapshotClassTest{ + NodeName: dInfo.Config.ClientNodeName, + }, + cs: dInfo.Config.Framework.ClientSet, + dc: dInfo.Config.Framework.DynamicClient, + pvc: resource.pvc, + sc: resource.sc, + vsc: resource.vsc, + dInfo: dInfo, } return resource, input From 5b8826b610eab2918ac7b8b7b06d6eaa56c96c89 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Fri, 21 Dec 2018 10:36:14 +0100 Subject: [PATCH 4/8] e2e/storage: use different names for test pods When the provisioning test gets stuck, the log fills up with messages about waiting for a certain pod to run. Now the pod names are pvc-[volume-tester|snapshot]-[writer|reader] plus the random number appended by Kubernetes. This makes it easier to see where the test is stuck. 
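As a rough illustration of the naming mechanism this commit relies on (a sketch, not part of the patch; the prefix shown is just one of the names listed above), ObjectMeta.GenerateName makes the API server append a random suffix on creation, so the "waiting for pod to run" log lines now reveal the pod's role:

    package main

    import (
    	"fmt"

    	v1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
    	pod := &v1.Pod{
    		ObjectMeta: metav1.ObjectMeta{
    			// On Create, the API server expands this prefix into a unique name
    			// such as "pvc-volume-tester-writer-x7k2q", which is what the
    			// "waiting for pod ... to run" messages then print.
    			GenerateName: "pvc-volume-tester-writer-",
    		},
    	}
    	fmt.Println(pod.ObjectMeta.GenerateName)
    }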
--- test/e2e/storage/testsuites/provisioning.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/test/e2e/storage/testsuites/provisioning.go b/test/e2e/storage/testsuites/provisioning.go index e5d83e8588d..6f41c87cb2f 100644 --- a/test/e2e/storage/testsuites/provisioning.go +++ b/test/e2e/storage/testsuites/provisioning.go @@ -295,7 +295,7 @@ func TestDynamicProvisioning(t StorageClassTest, client clientset.Interface, cla if claim.Spec.DataSource != nil { By("checking the created volume whether has the pre-populated data") command := fmt.Sprintf("grep '%s' /mnt/test/initialData", claim.Namespace) - runInPodWithVolume(client, claim.Namespace, claim.Name, t.NodeName, command, t.NodeSelector, t.ExpectUnschedulable) + runInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-snapshot-reader", t.NodeName, command, t.NodeSelector, t.ExpectUnschedulable) } if !t.SkipWriteReadCheck { @@ -313,10 +313,10 @@ func TestDynamicProvisioning(t StorageClassTest, client clientset.Interface, cla command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option) } command += " || (mount | grep 'on /mnt/test'; false)" - runInPodWithVolume(client, claim.Namespace, claim.Name, t.NodeName, command, t.NodeSelector, t.ExpectUnschedulable) + runInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-writer", t.NodeName, command, t.NodeSelector, t.ExpectUnschedulable) By("checking the created volume is readable and retains data") - runInPodWithVolume(client, claim.Namespace, claim.Name, t.NodeName, "grep 'hello world' /mnt/test/data", t.NodeSelector, t.ExpectUnschedulable) + runInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-reader", t.NodeName, "grep 'hello world' /mnt/test/data", t.NodeSelector, t.ExpectUnschedulable) } By(fmt.Sprintf("deleting claim %q/%q", claim.Namespace, claim.Name)) @@ -427,14 +427,14 @@ func TestBindingWaitForFirstConsumerMultiPVC(t StorageClassTest, client clientse } // runInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory. -func runInPodWithVolume(c clientset.Interface, ns, claimName, nodeName, command string, nodeSelector map[string]string, unschedulable bool) { +func runInPodWithVolume(c clientset.Interface, ns, claimName, podName, nodeName, command string, nodeSelector map[string]string, unschedulable bool) { pod := &v1.Pod{ TypeMeta: metav1.TypeMeta{ Kind: "Pod", APIVersion: "v1", }, ObjectMeta: metav1.ObjectMeta{ - GenerateName: "pvc-volume-tester-", + GenerateName: podName + "-", }, Spec: v1.PodSpec{ NodeName: nodeName, @@ -525,7 +525,7 @@ func prepareDataSourceForProvisioning( // write namespace to the /mnt/test (= the volume). 
By("[Initialize dataSource]write data to volume") command := fmt.Sprintf("echo '%s' > /mnt/test/initialData", updatedClaim.GetNamespace()) - runInPodWithVolume(client, updatedClaim.Namespace, updatedClaim.Name, t.NodeName, command, t.NodeSelector, t.ExpectUnschedulable) + runInPodWithVolume(client, updatedClaim.Namespace, updatedClaim.Name, "pvc-snapshot-writer", t.NodeName, command, t.NodeSelector, t.ExpectUnschedulable) By("[Initialize dataSource]creating a SnapshotClass") snapshotClass, err = dynamicClient.Resource(snapshotClassGVR).Create(snapshotClass, metav1.CreateOptions{}) From 54d8f1648fbd1862bfce5f1c6f35419fbfc6dc4e Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Wed, 16 Jan 2019 12:26:38 +0100 Subject: [PATCH 5/8] e2e/storage: improve PV checking TestDynamicProvisioning had multiple ways of choosing additional checks: - the PvCheck callback - the builtin write/read check controlled by a boolean - the snapshot testing Complicating matters further, that builtin write/read test had been more customizable with new fields `NodeSelector` and `ExpectUnschedulable` which were only set by one particular test (see https://github.com/kubernetes/kubernetes/pull/70941). That is confusing and will only get more confusing when adding more checks in the future. Therefore the write/read check is now a separate function that must be enabled explicitly by tests that want to run it. The snapshot checking is also defined only for the snapshot test. The test that expects unschedulable pods now also checks for that particular situation itself. Instead of testing it with two pods (the behavior from the write/read check) that both fail to start, only a single unschedulable pod is created. Because node name, node selector and the `ExpectUnschedulable` were only used for checking, it is possible to simplify `StorageClassTest` by removing all of these fields. Expect(err).NotTo(HaveOccurred()) is an anti-pattern in Ginkgo testing because a test failure doesn't explain what failed (see https://github.com/kubernetes/kubernetes/issues/34059). We avoid it now by making the check function itself responsible for checking errors and including more information in those checks. --- test/e2e/storage/csi_volumes.go | 38 ++-- test/e2e/storage/regional_pd.go | 36 ++-- test/e2e/storage/testsuites/provisioning.go | 182 +++++++++++--------- test/e2e/storage/volume_provisioning.go | 77 ++++++--- 4 files changed, 195 insertions(+), 138 deletions(-) diff --git a/test/e2e/storage/csi_volumes.go b/test/e2e/storage/csi_volumes.go index 4886ae16afc..10d3d126793 100644 --- a/test/e2e/storage/csi_volumes.go +++ b/test/e2e/storage/csi_volumes.go @@ -261,9 +261,11 @@ var _ = utils.SIGDescribe("CSI Volumes", func() { Parameters: sc.Parameters, ClaimSize: "1Gi", ExpectedSize: "1Gi", - NodeName: nodeName, } - class, claim, pod := startPausePod(cs, scTest, ns.Name) + nodeSelection := testsuites.NodeSelection{ + Name: nodeName, + } + class, claim, pod := startPausePod(cs, scTest, nodeSelection, ns.Name) if class != nil { defer cs.StorageV1().StorageClasses().Delete(class.Name, nil) } @@ -381,16 +383,16 @@ var _ = utils.SIGDescribe("CSI Volumes", func() { Parameters: sc.Parameters, ClaimSize: "1Gi", ExpectedSize: "1Gi", - // The mock driver only works when everything runs on a single node. - NodeName: nodeName, // Provisioner and storage class name must match what's used in // csi-storageclass.yaml, plus the test-specific suffix. 
Provisioner: sc.Provisioner, StorageClassName: "csi-mock-sc-" + f.UniqueName, - // Mock driver does not provide any persistency. - SkipWriteReadCheck: true, } - class, claim, pod := startPausePod(cs, scTest, ns.Name) + nodeSelection := testsuites.NodeSelection{ + // The mock driver only works when everything runs on a single node. + Name: nodeName, + } + class, claim, pod := startPausePod(cs, scTest, nodeSelection, ns.Name) if class != nil { defer cs.StorageV1().StorageClasses().Delete(class.Name, nil) } @@ -429,7 +431,7 @@ func testTopologyPositive(cs clientset.Interface, suffix, namespace string, dela claim.Spec.StorageClassName = &class.Name if delayBinding { - _, node := testsuites.TestBindingWaitForFirstConsumer(test, cs, claim, class) + _, node := testsuites.TestBindingWaitForFirstConsumer(test, cs, claim, class, nil /* node selector */, false /* expect unschedulable */) Expect(node).ToNot(BeNil(), "Unexpected nil node found") } else { testsuites.TestDynamicProvisioning(test, cs, claim, class) @@ -450,16 +452,22 @@ func testTopologyNegative(cs clientset.Interface, suffix, namespace string, dela test := createGCEPDStorageClassTest() test.DelayBinding = delayBinding - test.NodeSelector = map[string]string{v1.LabelZoneFailureDomain: podZone} - test.ExpectUnschedulable = true + nodeSelector := map[string]string{v1.LabelZoneFailureDomain: podZone} class := newStorageClass(test, namespace, suffix) addSingleCSIZoneAllowedTopologyToStorageClass(cs, class, pvZone) claim := newClaim(test, namespace, suffix) claim.Spec.StorageClassName = &class.Name if delayBinding { - testsuites.TestBindingWaitForFirstConsumer(test, cs, claim, class) + testsuites.TestBindingWaitForFirstConsumer(test, cs, claim, class, nodeSelector, true /* expect unschedulable */) } else { + test.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { + // Ensure that a pod cannot be scheduled in an unsuitable zone. 
+ pod := testsuites.StartInPodWithVolume(cs, namespace, claim.Name, "pvc-tester-unschedulable", "sleep 100000", + testsuites.NodeSelection{Selector: nodeSelector}) + defer testsuites.StopPod(cs, pod) + framework.ExpectNoError(framework.WaitForPodNameUnschedulableInNamespace(cs, pod.Name, pod.Namespace), "pod should be unschedulable") + } testsuites.TestDynamicProvisioning(test, cs, claim, class) } } @@ -500,7 +508,7 @@ func getVolumeHandle(cs clientset.Interface, claim *v1.PersistentVolumeClaim) st return pv.Spec.CSI.VolumeHandle } -func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) { +func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node testsuites.NodeSelection, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) { class := newStorageClass(t, ns, "") class, err := cs.StorageV1().StorageClasses().Create(class) framework.ExpectNoError(err, "Failed to create class : %v", err) @@ -514,6 +522,9 @@ func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, ns str GenerateName: "pvc-volume-tester-", }, Spec: v1.PodSpec{ + NodeName: node.Name, + NodeSelector: node.Selector, + Affinity: node.Affinity, Containers: []v1.Container{ { Name: "volume-tester", @@ -541,9 +552,6 @@ func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, ns str }, } - if len(t.NodeName) != 0 { - pod.Spec.NodeName = t.NodeName - } pod, err = cs.CoreV1().Pods(ns).Create(pod) framework.ExpectNoError(err, "Failed to create pod: %v", err) return class, claim, pod diff --git a/test/e2e/storage/regional_pd.go b/test/e2e/storage/regional_pd.go index 176d380f88e..6673fedbe5e 100644 --- a/test/e2e/storage/regional_pd.go +++ b/test/e2e/storage/regional_pd.go @@ -108,12 +108,14 @@ func testVolumeProvisioning(c clientset.Interface, ns string) { }, ClaimSize: "1.5Gi", ExpectedSize: "2Gi", - PvCheck: func(volume *v1.PersistentVolume) error { - err := checkGCEPD(volume, "pd-standard") - if err != nil { - return err - } - return verifyZonesInPV(volume, sets.NewString(cloudZones...), true /* match */) + PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { + var err error + err = checkGCEPD(volume, "pd-standard") + Expect(err).NotTo(HaveOccurred(), "checkGCEPD") + err = verifyZonesInPV(volume, sets.NewString(cloudZones...), true /* match */) + Expect(err).NotTo(HaveOccurred(), "verifyZonesInPV") + + testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{}) }, }, { @@ -126,16 +128,16 @@ func testVolumeProvisioning(c clientset.Interface, ns string) { }, ClaimSize: "1.5Gi", ExpectedSize: "2Gi", - PvCheck: func(volume *v1.PersistentVolume) error { - err := checkGCEPD(volume, "pd-standard") - if err != nil { - return err - } + PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { + var err error + err = checkGCEPD(volume, "pd-standard") + Expect(err).NotTo(HaveOccurred(), "checkGCEPD") zones, err := framework.GetClusterZones(c) - if err != nil { - return err - } - return verifyZonesInPV(volume, zones, false /* match */) + Expect(err).NotTo(HaveOccurred(), "GetClusterZones") + err = verifyZonesInPV(volume, zones, false /* match */) + Expect(err).NotTo(HaveOccurred(), "verifyZonesInPV") + + testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{}) }, }, } @@ -317,7 +319,7 @@ func testRegionalDelayedBinding(c clientset.Interface, ns string, pvcCount int) claim.Spec.StorageClassName = &class.Name 
claims = append(claims, claim) } - pvs, node := testsuites.TestBindingWaitForFirstConsumerMultiPVC(test, c, claims, class) + pvs, node := testsuites.TestBindingWaitForFirstConsumerMultiPVC(test, c, claims, class, nil /* node selector */, false /* expect unschedulable */) if node == nil { framework.Failf("unexpected nil node found") } @@ -374,7 +376,7 @@ func testRegionalAllowedTopologiesWithDelayedBinding(c clientset.Interface, ns s claim.Spec.StorageClassName = &class.Name claims = append(claims, claim) } - pvs, node := testsuites.TestBindingWaitForFirstConsumerMultiPVC(test, c, claims, class) + pvs, node := testsuites.TestBindingWaitForFirstConsumerMultiPVC(test, c, claims, class, nil /* node selector */, false /* expect unschedulable */) if node == nil { framework.Failf("unexpected nil node found") } diff --git a/test/e2e/storage/testsuites/provisioning.go b/test/e2e/storage/testsuites/provisioning.go index 6f41c87cb2f..6ff13dcfa5b 100644 --- a/test/e2e/storage/testsuites/provisioning.go +++ b/test/e2e/storage/testsuites/provisioning.go @@ -37,22 +37,19 @@ import ( imageutils "k8s.io/kubernetes/test/utils/image" ) -// StorageClassTest represents parameters to be used by provisioning tests +// StorageClassTest represents parameters to be used by provisioning tests. +// Not all parameters are used by all tests. type StorageClassTest struct { - Name string - CloudProviders []string - Provisioner string - StorageClassName string - Parameters map[string]string - DelayBinding bool - ClaimSize string - ExpectedSize string - PvCheck func(volume *v1.PersistentVolume) error - NodeName string - SkipWriteReadCheck bool - VolumeMode *v1.PersistentVolumeMode - NodeSelector map[string]string // NodeSelector for the pod - ExpectUnschedulable bool // Whether the test pod is expected to be unschedulable + Name string + CloudProviders []string + Provisioner string + StorageClassName string + Parameters map[string]string + DelayBinding bool + ClaimSize string + ExpectedSize string + PvCheck func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) + VolumeMode *v1.PersistentVolumeMode } type provisioningTestSuite struct { @@ -89,14 +86,14 @@ func createProvisioningTestInput(driver TestDriver, pattern testpatterns.TestPat testCase: StorageClassTest{ ClaimSize: resource.claimSize, ExpectedSize: resource.claimSize, - NodeName: driver.GetDriverInfo().Config.ClientNodeName, }, - cs: driver.GetDriverInfo().Config.Framework.ClientSet, - dc: driver.GetDriverInfo().Config.Framework.DynamicClient, - pvc: resource.pvc, - sc: resource.sc, - vsc: resource.vsc, - dInfo: driver.GetDriverInfo(), + cs: driver.GetDriverInfo().Config.Framework.ClientSet, + dc: driver.GetDriverInfo().Config.Framework.DynamicClient, + pvc: resource.pvc, + sc: resource.sc, + vsc: resource.vsc, + dInfo: driver.GetDriverInfo(), + nodeName: driver.GetDriverInfo().Config.ClientNodeName, } return resource, input @@ -179,10 +176,17 @@ type provisioningTestInput struct { sc *storage.StorageClass vsc *unstructured.Unstructured dInfo *DriverInfo + nodeName string } func testProvisioning(input *provisioningTestInput) { + // common checker for most of the test cases below + pvcheck := func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { + PVWriteReadCheck(input.cs, claim, volume, NodeSelection{Name: input.nodeName}) + } + It("should provision storage with defaults", func() { + input.testCase.PvCheck = pvcheck TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc) }) @@ -192,6 +196,7 @@ func testProvisioning(input 
*provisioningTestInput) { } input.sc.MountOptions = input.dInfo.SupportedMountOption.Union(input.dInfo.RequiredMountOption).List() + input.testCase.PvCheck = pvcheck TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc) }) @@ -201,7 +206,6 @@ func testProvisioning(input *provisioningTestInput) { } block := v1.PersistentVolumeBlock input.testCase.VolumeMode = &block - input.testCase.SkipWriteReadCheck = true input.pvc.Spec.VolumeMode = &block TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc) }) @@ -211,11 +215,15 @@ func testProvisioning(input *provisioningTestInput) { framework.Skipf("Driver %q does not support populate data from snapshot - skipping", input.dInfo.Name) } - input.testCase.SkipWriteReadCheck = true - dataSource, cleanupFunc := prepareDataSourceForProvisioning(input.testCase, input.cs, input.dc, input.pvc, input.sc, input.vsc) + dataSource, cleanupFunc := prepareDataSourceForProvisioning(NodeSelection{Name: input.nodeName}, input.cs, input.dc, input.pvc, input.sc, input.vsc) defer cleanupFunc() input.pvc.Spec.DataSource = dataSource + input.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { + By("checking whether the created volume has the pre-populated data") + command := fmt.Sprintf("grep '%s' /mnt/test/initialData", claim.Namespace) + RunInPodWithVolume(input.cs, claim.Namespace, claim.Name, "pvc-snapshot-tester", command, NodeSelection{Name: input.nodeName}) + } TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc) }) } @@ -288,35 +296,7 @@ func TestDynamicProvisioning(t StorageClassTest, client clientset.Interface, cla // Run the checker if t.PvCheck != nil { - err = t.PvCheck(pv) - Expect(err).NotTo(HaveOccurred()) - } - - if claim.Spec.DataSource != nil { - By("checking the created volume whether has the pre-populated data") - command := fmt.Sprintf("grep '%s' /mnt/test/initialData", claim.Namespace) - runInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-snapshot-reader", t.NodeName, command, t.NodeSelector, t.ExpectUnschedulable) - } - - if !t.SkipWriteReadCheck { - // We start two pods: - // - The first writes 'hello word' to the /mnt/test (= the volume). - // - The second one runs grep 'hello world' on /mnt/test. - // If both succeed, Kubernetes actually allocated something that is - // persistent across pods. 
- By("checking the created volume is writable and has the PV's mount options") - command := "echo 'hello world' > /mnt/test/data" - // We give the first pod the secondary responsibility of checking the volume has - // been mounted with the PV's mount options, if the PV was provisioned with any - for _, option := range pv.Spec.MountOptions { - // Get entry, get mount options at 6th word, replace brackets with commas - command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option) - } - command += " || (mount | grep 'on /mnt/test'; false)" - runInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-writer", t.NodeName, command, t.NodeSelector, t.ExpectUnschedulable) - - By("checking the created volume is readable and retains data") - runInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-reader", t.NodeName, "grep 'hello world' /mnt/test/data", t.NodeSelector, t.ExpectUnschedulable) + t.PvCheck(claim, pv) } By(fmt.Sprintf("deleting claim %q/%q", claim.Namespace, claim.Name)) @@ -337,15 +317,41 @@ func TestDynamicProvisioning(t StorageClassTest, client clientset.Interface, cla return pv } -func TestBindingWaitForFirstConsumer(t StorageClassTest, client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storage.StorageClass) (*v1.PersistentVolume, *v1.Node) { - pvs, node := TestBindingWaitForFirstConsumerMultiPVC(t, client, []*v1.PersistentVolumeClaim{claim}, class) +// PVWriteReadCheck checks that a PV retains data. +// +// It starts two pods: +// - The first writes 'hello word' to the /mnt/test (= the volume). +// - The second one runs grep 'hello world' on /mnt/test. +// If both succeed, Kubernetes actually allocated something that is +// persistent across pods. +// +// This is a common test that can be called from a StorageClassTest.PvCheck. 
+func PVWriteReadCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume, node NodeSelection) { + By(fmt.Sprintf("checking the created volume is writable and has the PV's mount options on node %+v", node)) + command := "echo 'hello world' > /mnt/test/data" + // We give the first pod the secondary responsibility of checking the volume has + // been mounted with the PV's mount options, if the PV was provisioned with any + for _, option := range volume.Spec.MountOptions { + // Get entry, get mount options at 6th word, replace brackets with commas + command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option) + } + command += " || (mount | grep 'on /mnt/test'; false)" + RunInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-writer", command, node) + + By(fmt.Sprintf("checking the created volume is readable and retains data on the same node %+v", node)) + command = "grep 'hello world' /mnt/test/data" + RunInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-reader", command, node) +} + +func TestBindingWaitForFirstConsumer(t StorageClassTest, client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storage.StorageClass, nodeSelector map[string]string, expectUnschedulable bool) (*v1.PersistentVolume, *v1.Node) { + pvs, node := TestBindingWaitForFirstConsumerMultiPVC(t, client, []*v1.PersistentVolumeClaim{claim}, class, nodeSelector, expectUnschedulable) if pvs == nil { return nil, node } return pvs[0], node } -func TestBindingWaitForFirstConsumerMultiPVC(t StorageClassTest, client clientset.Interface, claims []*v1.PersistentVolumeClaim, class *storage.StorageClass) ([]*v1.PersistentVolume, *v1.Node) { +func TestBindingWaitForFirstConsumerMultiPVC(t StorageClassTest, client clientset.Interface, claims []*v1.PersistentVolumeClaim, class *storage.StorageClass, nodeSelector map[string]string, expectUnschedulable bool) ([]*v1.PersistentVolume, *v1.Node) { var err error Expect(len(claims)).ToNot(Equal(0)) namespace := claims[0].Namespace @@ -388,8 +394,8 @@ func TestBindingWaitForFirstConsumerMultiPVC(t StorageClassTest, client clientse By("creating a pod referring to the claims") // Create a pod referring to the claim and wait for it to get to running var pod *v1.Pod - if t.ExpectUnschedulable { - pod, err = framework.CreateUnschedulablePod(client, namespace, t.NodeSelector, createdClaims, true /* isPrivileged */, "" /* command */) + if expectUnschedulable { + pod, err = framework.CreateUnschedulablePod(client, namespace, nodeSelector, createdClaims, true /* isPrivileged */, "" /* command */) } else { pod, err = framework.CreatePod(client, namespace, nil /* nodeSelector */, createdClaims, true /* isPrivileged */, "" /* command */) } @@ -398,7 +404,7 @@ func TestBindingWaitForFirstConsumerMultiPVC(t StorageClassTest, client clientse framework.DeletePodOrFail(client, pod.Namespace, pod.Name) framework.WaitForPodToDisappear(client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout) }() - if t.ExpectUnschedulable { + if expectUnschedulable { // Verify that no claims are provisioned. verifyPVCsPending(client, createdClaims) return nil, nil @@ -426,8 +432,25 @@ func TestBindingWaitForFirstConsumerMultiPVC(t StorageClassTest, client clientse return pvs, node } -// runInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory. 
-func runInPodWithVolume(c clientset.Interface, ns, claimName, podName, nodeName, command string, nodeSelector map[string]string, unschedulable bool) { +// NodeSelection specifies where to run a pod, using a combination of fixed node name, +// node selector and/or affinity. +type NodeSelection struct { + Name string + Selector map[string]string + Affinity *v1.Affinity +} + +// RunInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory. +// It starts, checks, collects output and stops it. +func RunInPodWithVolume(c clientset.Interface, ns, claimName, podName, command string, node NodeSelection) { + pod := StartInPodWithVolume(c, ns, claimName, podName, command, node) + defer StopPod(c, pod) + framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace)) +} + +// StartInPodWithVolume starts a command in a pod with given claim mounted to /mnt directory +// The caller is responsible for checking the pod and deleting it. +func StartInPodWithVolume(c clientset.Interface, ns, claimName, podName, command string, node NodeSelection) *v1.Pod { pod := &v1.Pod{ TypeMeta: metav1.TypeMeta{ Kind: "Pod", @@ -437,7 +460,9 @@ func runInPodWithVolume(c clientset.Interface, ns, claimName, podName, nodeName, GenerateName: podName + "-", }, Spec: v1.PodSpec{ - NodeName: nodeName, + NodeName: node.Name, + NodeSelector: node.Selector, + Affinity: node.Affinity, Containers: []v1.Container{ { Name: "volume-tester", @@ -464,27 +489,26 @@ func runInPodWithVolume(c clientset.Interface, ns, claimName, podName, nodeName, }, }, }, - NodeSelector: nodeSelector, }, } pod, err := c.CoreV1().Pods(ns).Create(pod) framework.ExpectNoError(err, "Failed to create pod: %v", err) - defer func() { - body, err := c.CoreV1().Pods(ns).GetLogs(pod.Name, &v1.PodLogOptions{}).Do().Raw() - if err != nil { - framework.Logf("Error getting logs for pod %s: %v", pod.Name, err) - } else { - framework.Logf("Pod %s has the following logs: %s", pod.Name, body) - } - framework.DeletePodOrFail(c, ns, pod.Name) - }() + return pod +} - if unschedulable { - framework.ExpectNoError(framework.WaitForPodNameUnschedulableInNamespace(c, pod.Name, pod.Namespace)) - } else { - framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace)) +// StopPod first tries to log the output of the pod's container, then deletes the pod. +func StopPod(c clientset.Interface, pod *v1.Pod) { + if pod == nil { + return } + body, err := c.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{}).Do().Raw() + if err != nil { + framework.Logf("Error getting logs for pod %s: %v", pod.Name, err) + } else { + framework.Logf("Pod %s has the following logs: %s", pod.Name, body) + } + framework.DeletePodOrFail(c, pod.Namespace, pod.Name) } func verifyPVCsPending(client clientset.Interface, pvcs []*v1.PersistentVolumeClaim) { @@ -497,7 +521,7 @@ func verifyPVCsPending(client clientset.Interface, pvcs []*v1.PersistentVolumeCl } func prepareDataSourceForProvisioning( - t StorageClassTest, + node NodeSelection, client clientset.Interface, dynamicClient dynamic.Interface, initClaim *v1.PersistentVolumeClaim, @@ -525,7 +549,7 @@ func prepareDataSourceForProvisioning( // write namespace to the /mnt/test (= the volume). 
By("[Initialize dataSource]write data to volume") command := fmt.Sprintf("echo '%s' > /mnt/test/initialData", updatedClaim.GetNamespace()) - runInPodWithVolume(client, updatedClaim.Namespace, updatedClaim.Name, "pvc-snapshot-writer", t.NodeName, command, t.NodeSelector, t.ExpectUnschedulable) + RunInPodWithVolume(client, updatedClaim.Namespace, updatedClaim.Name, "pvc-snapshot-writer", command, node) By("[Initialize dataSource]creating a SnapshotClass") snapshotClass, err = dynamicClient.Resource(snapshotClassGVR).Create(snapshotClass, metav1.CreateOptions{}) diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go index 983c5070402..e5f840f344d 100644 --- a/test/e2e/storage/volume_provisioning.go +++ b/test/e2e/storage/volume_provisioning.go @@ -226,7 +226,7 @@ func testZonalDelayedBinding(c clientset.Interface, ns string, specifyAllowedTop claim.Spec.StorageClassName = &class.Name claims = append(claims, claim) } - pvs, node := testsuites.TestBindingWaitForFirstConsumerMultiPVC(test, c, claims, class) + pvs, node := testsuites.TestBindingWaitForFirstConsumerMultiPVC(test, c, claims, class, nil /* node selector */, false /* expect unschedulable */) if node == nil { framework.Failf("unexpected nil node found") } @@ -273,8 +273,10 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { }, ClaimSize: "1.5Gi", ExpectedSize: "2Gi", - PvCheck: func(volume *v1.PersistentVolume) error { - return checkGCEPD(volume, "pd-ssd") + PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { + err := checkGCEPD(volume, "pd-ssd") + Expect(err).NotTo(HaveOccurred(), "checkGCEPD pd-ssd") + testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{}) }, }, { @@ -286,8 +288,10 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { }, ClaimSize: "1.5Gi", ExpectedSize: "2Gi", - PvCheck: func(volume *v1.PersistentVolume) error { - return checkGCEPD(volume, "pd-standard") + PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { + err := checkGCEPD(volume, "pd-standard") + Expect(err).NotTo(HaveOccurred(), "checkGCEPD pd-standard") + testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{}) }, }, // AWS @@ -301,8 +305,10 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { }, ClaimSize: "1.5Gi", ExpectedSize: "2Gi", - PvCheck: func(volume *v1.PersistentVolume) error { - return checkAWSEBS(volume, "gp2", false) + PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { + err := checkAWSEBS(volume, "gp2", false) + Expect(err).NotTo(HaveOccurred(), "checkAWSEBS gp2") + testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{}) }, }, { @@ -315,8 +321,10 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { }, ClaimSize: "3.5Gi", ExpectedSize: "4Gi", // 4 GiB is minimum for io1 - PvCheck: func(volume *v1.PersistentVolume) error { - return checkAWSEBS(volume, "io1", false) + PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { + err := checkAWSEBS(volume, "io1", false) + Expect(err).NotTo(HaveOccurred(), "checkAWSEBS io1") + testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{}) }, }, { @@ -328,8 +336,10 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { }, ClaimSize: "500Gi", // minimum for sc1 ExpectedSize: "500Gi", - PvCheck: func(volume *v1.PersistentVolume) error { - return checkAWSEBS(volume, "sc1", false) + PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { + 
err := checkAWSEBS(volume, "sc1", false) + Expect(err).NotTo(HaveOccurred(), "checkAWSEBS sc1") + testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{}) }, }, { @@ -341,8 +351,10 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { }, ClaimSize: "500Gi", // minimum for st1 ExpectedSize: "500Gi", - PvCheck: func(volume *v1.PersistentVolume) error { - return checkAWSEBS(volume, "st1", false) + PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { + err := checkAWSEBS(volume, "st1", false) + Expect(err).NotTo(HaveOccurred(), "checkAWSEBS st1") + testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{}) }, }, { @@ -354,8 +366,10 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { }, ClaimSize: "1Gi", ExpectedSize: "1Gi", - PvCheck: func(volume *v1.PersistentVolume) error { - return checkAWSEBS(volume, "gp2", true) + PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { + err := checkAWSEBS(volume, "gp2", true) + Expect(err).NotTo(HaveOccurred(), "checkAWSEBS gp2 encrypted") + testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{}) }, }, // OpenStack generic tests (works on all OpenStack deployments) @@ -366,7 +380,9 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { Parameters: map[string]string{}, ClaimSize: "1.5Gi", ExpectedSize: "2Gi", - PvCheck: nil, // there is currently nothing to check on OpenStack + PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { + testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{}) + }, }, { Name: "Cinder volume with empty volume type and zone on OpenStack", @@ -378,7 +394,9 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { }, ClaimSize: "1.5Gi", ExpectedSize: "2Gi", - PvCheck: nil, // there is currently nothing to check on OpenStack + PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { + testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{}) + }, }, // vSphere generic test { @@ -388,7 +406,9 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { Parameters: map[string]string{}, ClaimSize: "1.5Gi", ExpectedSize: "1.5Gi", - PvCheck: nil, + PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { + testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{}) + }, }, // Azure { @@ -398,7 +418,9 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { Parameters: map[string]string{}, ClaimSize: "1Gi", ExpectedSize: "1Gi", - PvCheck: nil, + PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { + testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{}) + }, }, } @@ -451,8 +473,10 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { }, ClaimSize: "1Gi", ExpectedSize: "1Gi", - PvCheck: func(volume *v1.PersistentVolume) error { - return checkGCEPD(volume, "pd-standard") + PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { + err := checkGCEPD(volume, "pd-standard") + Expect(err).NotTo(HaveOccurred(), "checkGCEPD") + testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{}) }, } class := newStorageClass(test, ns, "reclaimpolicy") @@ -793,12 +817,11 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { serverUrl := "https://" + pod.Status.PodIP + ":8081" By("creating a StorageClass") test := testsuites.StorageClassTest{ - Name: "Gluster Dynamic provisioner test", - Provisioner: 
"kubernetes.io/glusterfs", - ClaimSize: "2Gi", - ExpectedSize: "2Gi", - Parameters: map[string]string{"resturl": serverUrl}, - SkipWriteReadCheck: true, + Name: "Gluster Dynamic provisioner test", + Provisioner: "kubernetes.io/glusterfs", + ClaimSize: "2Gi", + ExpectedSize: "2Gi", + Parameters: map[string]string{"resturl": serverUrl}, } suffix := fmt.Sprintf("glusterdptest") class := newStorageClass(test, ns, suffix) From ca42cf4993fe21feb0105b56e62bec5b5ab95877 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Fri, 21 Dec 2018 11:18:34 +0100 Subject: [PATCH 6/8] e2e/storage: test provisioned volume on multiple nodes Whether the read test after writing was done on the same node was random for drivers that weren't locked onto a single node. Now it is deterministic: it always happens on the same node. The case with reading on another node is covered separately for test configurations that support it (not locked onto a single node, more than one node in the test cluster). As before, the TestConfig.ClientNodeSelector is ignored by the provisioning testsuite. --- test/e2e/storage/regional_pd.go | 4 +- test/e2e/storage/testsuites/provisioning.go | 119 ++++++++++++++++++-- test/e2e/storage/volume_provisioning.go | 24 ++-- 3 files changed, 125 insertions(+), 22 deletions(-) diff --git a/test/e2e/storage/regional_pd.go b/test/e2e/storage/regional_pd.go index 6673fedbe5e..57e7eaf8ae3 100644 --- a/test/e2e/storage/regional_pd.go +++ b/test/e2e/storage/regional_pd.go @@ -115,7 +115,7 @@ func testVolumeProvisioning(c clientset.Interface, ns string) { err = verifyZonesInPV(volume, sets.NewString(cloudZones...), true /* match */) Expect(err).NotTo(HaveOccurred(), "verifyZonesInPV") - testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{}) + testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{}) }, }, { @@ -137,7 +137,7 @@ func testVolumeProvisioning(c clientset.Interface, ns string) { err = verifyZonesInPV(volume, zones, false /* match */) Expect(err).NotTo(HaveOccurred(), "verifyZonesInPV") - testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{}) + testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{}) }, }, } diff --git a/test/e2e/storage/testsuites/provisioning.go b/test/e2e/storage/testsuites/provisioning.go index 6ff13dcfa5b..b1ef197cc4f 100644 --- a/test/e2e/storage/testsuites/provisioning.go +++ b/test/e2e/storage/testsuites/provisioning.go @@ -182,7 +182,7 @@ type provisioningTestInput struct { func testProvisioning(input *provisioningTestInput) { // common checker for most of the test cases below pvcheck := func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { - PVWriteReadCheck(input.cs, claim, volume, NodeSelection{Name: input.nodeName}) + PVWriteReadSingleNodeCheck(input.cs, claim, volume, NodeSelection{Name: input.nodeName}) } It("should provision storage with defaults", func() { @@ -200,6 +200,25 @@ func testProvisioning(input *provisioningTestInput) { TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc) }) + It("should access volume from different nodes", func() { + // The assumption is that if the test hasn't been + // locked onto a single node, then the driver is + // usable on all of them *and* supports accessing a volume + // from any node. + if input.nodeName != "" { + framework.Skipf("Driver %q only supports testing on one node - skipping", input.dInfo.Name) + } + // Ensure that we actually have more than one node. 
+ nodes := framework.GetReadySchedulableNodesOrDie(input.cs) + if len(nodes.Items) <= 1 { + framework.Skipf("need more than one node - skipping") + } + input.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { + PVMultiNodeCheck(input.cs, claim, volume, NodeSelection{Name: input.nodeName}) + } + TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc) + }) + It("should create and delete block persistent volumes", func() { if !input.dInfo.Capabilities[CapBlock] { framework.Skipf("Driver %q does not support BlockVolume - skipping", input.dInfo.Name) @@ -317,16 +336,20 @@ func TestDynamicProvisioning(t StorageClassTest, client clientset.Interface, cla return pv } -// PVWriteReadCheck checks that a PV retains data. +// PVWriteReadSingleNodeCheck checks that a PV retains data on a single node. // // It starts two pods: -// - The first writes 'hello word' to the /mnt/test (= the volume). -// - The second one runs grep 'hello world' on /mnt/test. +// - The first pod writes 'hello world' to the /mnt/test (= the volume) on one node. +// - The second pod runs grep 'hello world' on /mnt/test on the same node. +// +// The node is selected by Kubernetes when scheduling the first +// pod. It's then selected via its name for the second pod. +// // If both succeed, Kubernetes actually allocated something that is // persistent across pods. // // This is a common test that can be called from a StorageClassTest.PvCheck. -func PVWriteReadCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume, node NodeSelection) { +func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume, node NodeSelection) { By(fmt.Sprintf("checking the created volume is writable and has the PV's mount options on node %+v", node)) command := "echo 'hello world' > /mnt/test/data" // We give the first pod the secondary responsibility of checking the volume has // been mounted with the PV's mount options, if the PV was provisioned with any @@ -336,11 +359,91 @@ func PVWriteReadCheck(client clientset.Interface, claim *v1.PersistentVolumeClai command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option) } command += " || (mount | grep 'on /mnt/test'; false)" - RunInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-writer", command, node) + pod := StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-writer", command, node) + defer func() { + // pod might be nil now. + StopPod(client, pod) + }() + framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace)) + runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred(), "get pod") + actualNodeName := runningPod.Spec.NodeName + StopPod(client, pod) + pod = nil // Don't stop twice. - By(fmt.Sprintf("checking the created volume is readable and retains data on the same node %+v", node)) + By(fmt.Sprintf("checking the created volume is readable and retains data on the same node %q", actualNodeName)) command = "grep 'hello world' /mnt/test/data" - RunInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-reader", command, node) + RunInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-reader", command, NodeSelection{Name: actualNodeName}) +} + +// PVMultiNodeCheck checks that a PV retains data when moved between nodes.
+// +// It starts these pods: +// - The first pod writes 'hello world' to the /mnt/test (= the volume) on one node. +// - The second pod runs grep 'hello world' on /mnt/test on another node. +// +// The first node is selected by Kubernetes when scheduling the first pod. The second pod uses the same criteria, except that a special anti-affinity +// for the first node gets added. This test can only pass if the cluster has more than one +// suitable node. The caller has to ensure that. +// +// If all of this succeeds, Kubernetes actually allocated something that is +// persistent across pods and across nodes. +// +// This is a common test that can be called from a StorageClassTest.PvCheck. +func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume, node NodeSelection) { + Expect(node.Name).To(Equal(""), "this test only works when not locked onto a single node") + + var pod *v1.Pod + defer func() { + // passing pod = nil is okay. + StopPod(client, pod) + }() + + By(fmt.Sprintf("checking the created volume is writable and has the PV's mount options on node %+v", node)) + command := "echo 'hello world' > /mnt/test/data" + pod = StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-writer-node1", command, node) + framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace)) + runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred(), "get pod") + actualNodeName := runningPod.Spec.NodeName + StopPod(client, pod) + pod = nil // Don't stop twice. + + // Add node-anti-affinity. + secondNode := node + if secondNode.Affinity == nil { + secondNode.Affinity = &v1.Affinity{} + } + if secondNode.Affinity.NodeAffinity == nil { + secondNode.Affinity.NodeAffinity = &v1.NodeAffinity{} + } + if secondNode.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil { + secondNode.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{} + } + secondNode.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = append(secondNode.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, + v1.NodeSelectorTerm{ + // https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity warns + // that "the value of kubernetes.io/hostname may be the same as the Node name in some environments and a different value in other environments". + // So this might be cleaner: + // MatchFields: []v1.NodeSelectorRequirement{ + // {Key: "name", Operator: v1.NodeSelectorOpNotIn, Values: []string{actualNodeName}}, + // }, + // However, "name", "Name", "ObjectMeta.Name" all got rejected with "not a valid field selector key".
+ + MatchExpressions: []v1.NodeSelectorRequirement{ + {Key: "kubernetes.io/hostname", Operator: v1.NodeSelectorOpNotIn, Values: []string{actualNodeName}}, + }, + }) + + By(fmt.Sprintf("checking the created volume is readable and retains data on another node %+v", secondNode)) + command = "grep 'hello world' /mnt/test/data" + pod = StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-reader-node2", command, secondNode) + framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace)) + runningPod, err = client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred(), "get pod") + Expect(runningPod.Spec.NodeName).NotTo(Equal(actualNodeName), "second pod should have run on a different node") + StopPod(client, pod) + pod = nil } func TestBindingWaitForFirstConsumer(t StorageClassTest, client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storage.StorageClass, nodeSelector map[string]string, expectUnschedulable bool) (*v1.PersistentVolume, *v1.Node) { diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go index e5f840f344d..3793b765417 100644 --- a/test/e2e/storage/volume_provisioning.go +++ b/test/e2e/storage/volume_provisioning.go @@ -276,7 +276,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { err := checkGCEPD(volume, "pd-ssd") Expect(err).NotTo(HaveOccurred(), "checkGCEPD pd-ssd") - testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{}) + testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{}) }, }, { @@ -291,7 +291,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { err := checkGCEPD(volume, "pd-standard") Expect(err).NotTo(HaveOccurred(), "checkGCEPD pd-standard") - testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{}) + testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{}) }, }, // AWS @@ -308,7 +308,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { err := checkAWSEBS(volume, "gp2", false) Expect(err).NotTo(HaveOccurred(), "checkAWSEBS gp2") - testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{}) + testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{}) }, }, { @@ -324,7 +324,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { err := checkAWSEBS(volume, "io1", false) Expect(err).NotTo(HaveOccurred(), "checkAWSEBS io1") - testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{}) + testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{}) }, }, { @@ -339,7 +339,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { err := checkAWSEBS(volume, "sc1", false) Expect(err).NotTo(HaveOccurred(), "checkAWSEBS sc1") - testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{}) + testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{}) }, }, { @@ -354,7 +354,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { PvCheck: func(claim *v1.PersistentVolumeClaim, volume 
*v1.PersistentVolume) { err := checkAWSEBS(volume, "st1", false) Expect(err).NotTo(HaveOccurred(), "checkAWSEBS st1") - testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{}) + testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{}) }, }, { @@ -369,7 +369,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { err := checkAWSEBS(volume, "gp2", true) Expect(err).NotTo(HaveOccurred(), "checkAWSEBS gp2 encrypted") - testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{}) + testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{}) }, }, // OpenStack generic tests (works on all OpenStack deployments) @@ -381,7 +381,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ClaimSize: "1.5Gi", ExpectedSize: "2Gi", PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { - testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{}) + testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{}) }, }, { @@ -395,7 +395,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ClaimSize: "1.5Gi", ExpectedSize: "2Gi", PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { - testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{}) + testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{}) }, }, // vSphere generic test @@ -407,7 +407,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ClaimSize: "1.5Gi", ExpectedSize: "1.5Gi", PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { - testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{}) + testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{}) }, }, // Azure @@ -419,7 +419,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ClaimSize: "1Gi", ExpectedSize: "1Gi", PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { - testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{}) + testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{}) }, }, } @@ -476,7 +476,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { err := checkGCEPD(volume, "pd-standard") Expect(err).NotTo(HaveOccurred(), "checkGCEPD") - testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{}) + testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{}) }, } class := newStorageClass(test, ns, "reclaimpolicy") From 03d352f7aa505b5a799fd95935837a5d37634ef6 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Wed, 12 Dec 2018 19:48:24 +0100 Subject: [PATCH 7/8] e2e/storage: test usage of volume in multiple pods at once This is a special case that both kubelet and the volume driver should support, because users might expect it. One Kubernetes mechanism to deploy pods like this is via pod affinity. However, strictly speaking the CSI spec does not allow this usage mode (see https://github.com/container-storage-interface/spec/pull/150) and there is an on-going debate to enable it (see https://github.com/container-storage-interface/spec/issues/178). Therefore this test gets skipped unless explicitly enabled for a driver. 
diff --git a/test/e2e/storage/drivers/csi.go b/test/e2e/storage/drivers/csi.go
index c15e94c5f78..c2b13313ecb 100644
--- a/test/e2e/storage/drivers/csi.go
+++ b/test/e2e/storage/drivers/csi.go
@@ -84,7 +84,7 @@ var _ testsuites.SnapshottableTestDriver = &hostpathCSIDriver{}
 // InitHostPathCSIDriver returns hostpathCSIDriver that implements TestDriver interface
 func InitHostPathCSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
 	return initHostPathCSIDriver("csi-hostpath", config,
-		map[testsuites.Capability]bool{testsuites.CapPersistence: true, testsuites.CapDataSource: true},
+		map[testsuites.Capability]bool{testsuites.CapPersistence: true, testsuites.CapDataSource: true, testsuites.CapMultiPODs: true},
 		"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
 		"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
 		"test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml",
@@ -259,7 +259,7 @@ func (m *mockCSIDriver) CleanupDriver() {
 // InitHostPathV0CSIDriver returns a variant of hostpathCSIDriver with different manifests.
 func InitHostPathV0CSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
 	return initHostPathCSIDriver("csi-hostpath-v0", config,
-		map[testsuites.Capability]bool{testsuites.CapPersistence: true},
+		map[testsuites.Capability]bool{testsuites.CapPersistence: true, testsuites.CapMultiPODs: true},
 		"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
 		"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
 		"test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml",
diff --git a/test/e2e/storage/testsuites/provisioning.go b/test/e2e/storage/testsuites/provisioning.go
index b1ef197cc4f..74072542de8 100644
--- a/test/e2e/storage/testsuites/provisioning.go
+++ b/test/e2e/storage/testsuites/provisioning.go
@@ -18,6 +18,7 @@ package testsuites
 
 import (
 	"fmt"
+	"sync"
 	"time"
 
 	. "github.com/onsi/ginkgo"
@@ -245,6 +246,50 @@ func testProvisioning(input *provisioningTestInput) {
 		}
 		TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
 	})
+
+	It("should allow concurrent writes on the same node", func() {
+		if !input.dInfo.Capabilities[CapMultiPODs] {
+			framework.Skipf("Driver %q does not support multiple concurrent pods - skipping", input.dInfo.Name)
+		}
+		input.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
+			// We start two pods concurrently on the same node,
+			// using the same PVC. Both wait for other to create a
+			// file before returning. The pods are forced onto the
+			// same node via pod affinity.
+			wg := sync.WaitGroup{}
+			wg.Add(2)
+			firstPodName := "pvc-tester-first"
+			secondPodName := "pvc-tester-second"
+			run := func(podName, command string) {
+				defer GinkgoRecover()
+				defer wg.Done()
+				node := NodeSelection{
+					Name: input.nodeName,
+				}
+				if podName == secondPodName {
+					node.Affinity = &v1.Affinity{
+						PodAffinity: &v1.PodAffinity{
+							RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
+								{LabelSelector: &metav1.LabelSelector{
+									MatchLabels: map[string]string{
+										// Set by RunInPodWithVolume.
+										"app": firstPodName,
+									},
+								},
+									TopologyKey: "kubernetes.io/hostname",
+								},
+							},
+						},
+					}
+				}
+				RunInPodWithVolume(input.cs, claim.Namespace, claim.Name, podName, command, node)
+			}
+			go run(firstPodName, "touch /mnt/test/first && while ! [ -f /mnt/test/second ]; do sleep 1; done")
+			go run(secondPodName, "touch /mnt/test/second && while ! [ -f /mnt/test/first ]; do sleep 1; done")
+			wg.Wait()
+		}
+		TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
+	})
 }
 
 // TestDynamicProvisioning tests dynamic provisioning with specified StorageClassTest and storageClass
@@ -561,6 +606,9 @@ func StartInPodWithVolume(c clientset.Interface, ns, claimName, podName, command
 		},
 		ObjectMeta: metav1.ObjectMeta{
 			GenerateName: podName + "-",
+			Labels: map[string]string{
+				"app": podName,
+			},
 		},
 		Spec: v1.PodSpec{
 			NodeName: node.Name,
diff --git a/test/e2e/storage/testsuites/testdriver.go b/test/e2e/storage/testsuites/testdriver.go
index 7e483dc2443..4e35afc2da8 100644
--- a/test/e2e/storage/testsuites/testdriver.go
+++ b/test/e2e/storage/testsuites/testdriver.go
@@ -97,6 +97,13 @@ const (
 	CapFsGroup    Capability = "fsGroup"    // volume ownership via fsGroup
 	CapExec       Capability = "exec"       // exec a file in the volume
 	CapDataSource Capability = "dataSource" // support populate data from snapshot
+
+	// multiple pods on a node can use the same volume concurrently;
+	// for CSI, see:
+	// - https://github.com/container-storage-interface/spec/pull/150
+	// - https://github.com/container-storage-interface/spec/issues/178
+	// - NodeStageVolume in the spec
+	CapMultiPODs Capability = "multipods"
 )
 
 // DriverInfo represents a combination of parameters to be used in implementation of TestDriver

From ecc0c4e4b4730e01874feb324ff01b1ce6b15343 Mon Sep 17 00:00:00 2001
From: Patrick Ohly
Date: Wed, 16 Jan 2019 14:54:21 +0100
Subject: [PATCH 8/8] e2e/storage: enable concurrent writes for gcepd

The driver should support multiple pods using the same volume on the
same node.
---
 test/e2e/storage/drivers/csi.go | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/test/e2e/storage/drivers/csi.go b/test/e2e/storage/drivers/csi.go
index c2b13313ecb..8bfbabf579f 100644
--- a/test/e2e/storage/drivers/csi.go
+++ b/test/e2e/storage/drivers/csi.go
@@ -297,6 +297,7 @@ func InitGcePDCSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
 			testsuites.CapPersistence: true,
 			testsuites.CapFsGroup:     true,
 			testsuites.CapExec:        true,
+			testsuites.CapMultiPODs:   true,
 		},
 
 		Config: config,
@@ -409,6 +410,7 @@ func InitGcePDExternalCSIDriver(config testsuites.TestConfig) testsuites.TestDri
 			testsuites.CapPersistence: true,
 			testsuites.CapFsGroup:     true,
 			testsuites.CapExec:        true,
+			testsuites.CapMultiPODs:   true,
 		},
 
 		Config: config,