Add e2e test for a volume + its clone used on the same node

A CSI driver needs to pass special mount options to an XFS filesystem to be
able to mount a volume and its clone or its restored snapshot on the same
node. Add a test that exhibits this behavior.

The test is optional for now, giving CSI drivers time to fix it.
Jan Safranek 2021-06-02 11:04:52 +02:00
parent 31f6cca256
commit 28511e82ad
3 changed files with 186 additions and 26 deletions
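The "special mount opts" mentioned in the commit message are, in practice, XFS's "nouuid"
option: a volume cloned or restored from a snapshot keeps the same XFS filesystem UUID as
its source, and the kernel refuses to mount two XFS filesystems with the same UUID on one
node unless "nouuid" is passed. Below is a minimal sketch of how a CSI node plugin might
apply it; the helper is hypothetical and not part of this commit's diff, and real drivers
would wire this into their NodeStageVolume/NodePublishVolume paths.

    // Sketch only: add "nouuid" for XFS so a volume and its clone (or a
    // volume restored from its snapshot) can be mounted on the same node.
    package main

    import "fmt"

    // appendXFSMountOptions is a hypothetical helper, not a Kubernetes or CSI API.
    func appendXFSMountOptions(fsType string, opts []string) []string {
        if fsType == "xfs" {
            return append(opts, "nouuid")
        }
        return opts
    }

    func main() {
        // Roughly equivalent to: mount -t xfs -o defaults,nouuid <device> <target>
        fmt.Println(appendXFSMountOptions("xfs", []string{"defaults"}))
    }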


@@ -179,9 +179,11 @@ var (
}
// Ext4DynamicPV is TestPattern for "Dynamic PV (ext4)"
Ext4DynamicPV = TestPattern{
Name: "Dynamic PV (ext4)",
VolType: DynamicPV,
FsType: "ext4",
Name: "Dynamic PV (ext4)",
VolType: DynamicPV,
FsType: "ext4",
SnapshotType: DynamicCreatedSnapshot,
SnapshotDeletionPolicy: DeleteSnapshot,
}
// Definitions for xfs
@@ -216,10 +218,12 @@ var (
}
// XfsDynamicPV is TestPattern for "Dynamic PV (xfs)"
XfsDynamicPV = TestPattern{
Name: "Dynamic PV (xfs)",
VolType: DynamicPV,
FsType: "xfs",
FeatureTag: "[Slow]",
Name: "Dynamic PV (xfs)",
VolType: DynamicPV,
FsType: "xfs",
FeatureTag: "[Slow]",
SnapshotType: DynamicCreatedSnapshot,
SnapshotDeletionPolicy: DeleteSnapshot,
}
// Definitions for ntfs


@@ -22,7 +22,6 @@ import (
"time"
"github.com/onsi/ginkgo"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/errors"
@@ -66,6 +65,8 @@ func InitMultiVolumeTestSuite() storageframework.TestSuite {
storageframework.FsVolModeDynamicPV,
storageframework.BlockVolModePreprovisionedPV,
storageframework.BlockVolModeDynamicPV,
+ storageframework.Ext4DynamicPV,
+ storageframework.XfsDynamicPV,
}
return InitCustomMultiVolumeTestSuite(patterns)
}
@@ -329,6 +330,106 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p
l.config.ClientNodeSelection, resource.Pvc, numPods, true /* sameNode */, false /* readOnly */)
})
// This tests below configuration:
// [pod1] [pod2]
// [ node1 ]
// | | <- same volume mode
// [volume1] -> [restored volume1 snapshot]
ginkgo.It("should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly][Feature:VolumeSnapshotDataSource][Feature:VolumeSourceXFS]", func() {
init()
defer cleanup()
if !l.driver.GetDriverInfo().Capabilities[storageframework.CapSnapshotDataSource] {
e2eskipper.Skipf("Driver %q does not support volume snapshots - skipping", dInfo.Name)
}
if pattern.SnapshotType == "" {
e2eskipper.Skipf("Driver %q does not support snapshots - skipping", dInfo.Name)
}
// Create a volume
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
resource := storageframework.CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange)
l.resources = append(l.resources, resource)
pvcs := []*v1.PersistentVolumeClaim{resource.Pvc}
// Create snapshot of it
sDriver, ok := driver.(storageframework.SnapshottableTestDriver)
if !ok {
framework.Failf("Driver %q has CapSnapshotDataSource but does not implement SnapshottableTestDriver", dInfo.Name)
}
testConfig := storageframework.ConvertTestConfig(l.config)
dc := l.config.Framework.DynamicClient
dataSource, cleanupFunc := prepareSnapshotDataSourceForProvisioning(f, testConfig, l.config, pattern, l.cs, dc, resource.Pvc, resource.Sc, sDriver, pattern.VolMode, "injected content")
defer cleanupFunc()
// Create 2nd PVC for testing
pvc2 := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: resource.Pvc.Name + "-restored",
Namespace: resource.Pvc.Namespace,
},
}
resource.Pvc.Spec.DeepCopyInto(&pvc2.Spec)
pvc2.Spec.VolumeName = ""
pvc2.Spec.DataSource = dataSource
pvc2, err := l.cs.CoreV1().PersistentVolumeClaims(pvc2.Namespace).Create(context.TODO(), pvc2, metav1.CreateOptions{})
framework.ExpectNoError(err)
pvcs = append(pvcs, pvc2)
defer func() {
l.cs.CoreV1().PersistentVolumeClaims(pvc2.Namespace).Delete(context.TODO(), pvc2.Name, metav1.DeleteOptions{})
}()
// Test access to both volumes on the same node.
TestConcurrentAccessToRelatedVolumes(l.config.Framework, l.cs, l.ns.Name,
l.config.ClientNodeSelection, pvcs, true /* sameNode */, false /* readOnly */)
})
// This tests below configuration:
// [pod1] [pod2]
// [ node1 ]
// | | <- same volume mode
// [volume1] -> [cloned volume1]
ginkgo.It("should concurrently access the volume and its clone from pods on the same node [LinuxOnly][Feature:VolumeSnapshotDataSource][Feature:VolumeSourceXFS]", func() {
init()
defer cleanup()
if !l.driver.GetDriverInfo().Capabilities[storageframework.CapPVCDataSource] {
e2eskipper.Skipf("Driver %q does not support volume clone - skipping", dInfo.Name)
}
// Create a volume
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
resource := storageframework.CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange)
l.resources = append(l.resources, resource)
pvcs := []*v1.PersistentVolumeClaim{resource.Pvc}
testConfig := storageframework.ConvertTestConfig(l.config)
dataSource, cleanupFunc := preparePVCDataSourceForProvisioning(f, testConfig, l.cs, resource.Pvc, resource.Sc, pattern.VolMode, "injected content")
defer cleanupFunc()
// Create 2nd PVC for testing
pvc2 := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: resource.Pvc.Name + "-cloned",
Namespace: resource.Pvc.Namespace,
},
}
resource.Pvc.Spec.DeepCopyInto(&pvc2.Spec)
pvc2.Spec.VolumeName = ""
pvc2.Spec.DataSource = dataSource
pvc2, err := l.cs.CoreV1().PersistentVolumeClaims(pvc2.Namespace).Create(context.TODO(), pvc2, metav1.CreateOptions{})
framework.ExpectNoError(err)
pvcs = append(pvcs, pvc2)
defer func() {
l.cs.CoreV1().PersistentVolumeClaims(pvc2.Namespace).Delete(context.TODO(), pvc2.Name, metav1.DeleteOptions{})
}()
// Test access to both volumes on the same node.
TestConcurrentAccessToRelatedVolumes(l.config.Framework, l.cs, l.ns.Name,
l.config.ClientNodeSelection, pvcs, true /* sameNode */, false /* readOnly */)
})
// This tests below configuration:
// [pod1] [pod2]
// [ node1 ]
@@ -552,10 +653,10 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, directIO, path, byteLen, seed)
}
- // Delete the last pod and remove from slice of pods
if len(pods) < 2 {
framework.Failf("Number of pods shouldn't be less than 2, but got %d", len(pods))
}
+ // Delete the last pod and remove from slice of pods
lastPod := pods[len(pods)-1]
framework.ExpectNoError(e2epod.DeletePodWithWait(cs, lastPod))
pods = pods[:len(pods)-1]
@@ -591,6 +692,49 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
}
}
// TestConcurrentAccessToRelatedVolumes tests access to multiple volumes from multiple pods.
// Each provided PVC is used by a single pod. The test ensures that volumes created from
// another volume (=clone) or volume snapshot can be used together with the original volume.
func TestConcurrentAccessToRelatedVolumes(f *framework.Framework, cs clientset.Interface, ns string,
node e2epod.NodeSelection, pvcs []*v1.PersistentVolumeClaim, requiresSameNode bool,
readOnly bool) {
var pods []*v1.Pod
// Create each pod with pvc
for i := range pvcs {
index := i + 1
ginkgo.By(fmt.Sprintf("Creating pod%d with a volume on %+v", index, node))
podConfig := e2epod.Config{
NS: ns,
PVCs: []*v1.PersistentVolumeClaim{pvcs[i]},
SeLinuxLabel: e2epod.GetLinuxLabel(),
NodeSelection: node,
PVCsReadOnly: readOnly,
ImageID: e2epod.GetTestImageID(imageutils.JessieDnsutils),
}
pod, err := e2epod.CreateSecPodWithNodeSelection(cs, &podConfig, f.Timeouts.PodStart)
defer func() {
framework.ExpectNoError(e2epod.DeletePodWithWait(cs, pod))
}()
framework.ExpectNoError(err)
pods = append(pods, pod)
actualNodeName := pod.Spec.NodeName
// Set affinity depending on requiresSameNode
if requiresSameNode {
e2epod.SetAffinity(&node, actualNodeName)
} else {
e2epod.SetAntiAffinity(&node, actualNodeName)
}
}
// Delete the last pod and remove from slice of pods
if len(pods) < len(pvcs) {
framework.Failf("Number of pods shouldn't be less than %d, but got %d", len(pvcs), len(pods))
}
}
// getCurrentTopologies() goes through all Nodes and returns unique driver topologies and count of Nodes per topology
func getCurrentTopologiesNumber(cs clientset.Interface, nodes *v1.NodeList, keys []string) ([]topology, []int, error) {
topos := []topology{}


@@ -837,14 +837,22 @@ func prepareSnapshotDataSourceForProvisioning(
) (*v1.TypedLocalObjectReference, func()) {
_, clearComputedStorageClass := SetupStorageClass(client, class)
ginkgo.By("[Initialize dataSource]creating a initClaim")
updatedClaim, err := client.CoreV1().PersistentVolumeClaims(initClaim.Namespace).Create(context.TODO(), initClaim, metav1.CreateOptions{})
framework.ExpectNoError(err)
if initClaim.ResourceVersion != "" {
ginkgo.By("Skipping creation of PVC, it already exists")
} else {
ginkgo.By("[Initialize dataSource]creating a initClaim")
updatedClaim, err := client.CoreV1().PersistentVolumeClaims(initClaim.Namespace).Create(context.TODO(), initClaim, metav1.CreateOptions{})
if apierrors.IsAlreadyExists(err) {
err = nil
}
framework.ExpectNoError(err)
initClaim = updatedClaim
}
// write namespace to the /mnt/test (= the volume).
tests := []e2evolume.Test{
{
- Volume: *storageutils.CreateVolumeSource(updatedClaim.Name, false /* readOnly */),
+ Volume: *storageutils.CreateVolumeSource(initClaim.Name, false /* readOnly */),
Mode: mode,
File: "index.html",
ExpectedContent: injectContent,
@@ -853,8 +861,7 @@
e2evolume.InjectContent(f, config, nil, "", tests)
parameters := map[string]string{}
- snapshotResource := storageframework.CreateSnapshotResource(sDriver, perTestConfig, pattern, updatedClaim.GetName(), updatedClaim.GetNamespace(), f.Timeouts, parameters)
+ snapshotResource := storageframework.CreateSnapshotResource(sDriver, perTestConfig, pattern, initClaim.GetName(), initClaim.GetNamespace(), f.Timeouts, parameters)
group := "snapshot.storage.k8s.io"
dataSourceRef := &v1.TypedLocalObjectReference{
APIGroup: &group,
@@ -863,10 +870,10 @@
}
cleanupFunc := func() {
framework.Logf("deleting initClaim %q/%q", updatedClaim.Namespace, updatedClaim.Name)
err = client.CoreV1().PersistentVolumeClaims(updatedClaim.Namespace).Delete(context.TODO(), updatedClaim.Name, metav1.DeleteOptions{})
framework.Logf("deleting initClaim %q/%q", initClaim.Namespace, initClaim.Name)
err := client.CoreV1().PersistentVolumeClaims(initClaim.Namespace).Delete(context.TODO(), initClaim.Name, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
framework.Failf("Error deleting initClaim %q. Error: %v", updatedClaim.Name, err)
framework.Failf("Error deleting initClaim %q. Error: %v", initClaim.Name, err)
}
err = snapshotResource.CleanupResource(f.Timeouts)
@@ -890,13 +897,18 @@ func preparePVCDataSourceForProvisioning(
) (*v1.TypedLocalObjectReference, func()) {
_, clearComputedStorageClass := SetupStorageClass(client, class)
ginkgo.By("[Initialize dataSource]creating a source PVC")
sourcePVC, err := client.CoreV1().PersistentVolumeClaims(source.Namespace).Create(context.TODO(), source, metav1.CreateOptions{})
framework.ExpectNoError(err)
if source.ResourceVersion != "" {
ginkgo.By("Skipping creation of PVC, it already exists")
} else {
ginkgo.By("[Initialize dataSource]creating a source PVC")
var err error
source, err = client.CoreV1().PersistentVolumeClaims(source.Namespace).Create(context.TODO(), source, metav1.CreateOptions{})
framework.ExpectNoError(err)
}
tests := []e2evolume.Test{
{
- Volume: *storageutils.CreateVolumeSource(sourcePVC.Name, false /* readOnly */),
+ Volume: *storageutils.CreateVolumeSource(source.Name, false /* readOnly */),
Mode: mode,
File: "index.html",
ExpectedContent: injectContent,
@@ -906,14 +918,14 @@
dataSourceRef := &v1.TypedLocalObjectReference{
Kind: "PersistentVolumeClaim",
- Name: sourcePVC.GetName(),
+ Name: source.GetName(),
}
cleanupFunc := func() {
framework.Logf("deleting source PVC %q/%q", sourcePVC.Namespace, sourcePVC.Name)
err := client.CoreV1().PersistentVolumeClaims(sourcePVC.Namespace).Delete(context.TODO(), sourcePVC.Name, metav1.DeleteOptions{})
framework.Logf("deleting source PVC %q/%q", source.Namespace, source.Name)
err := client.CoreV1().PersistentVolumeClaims(source.Namespace).Delete(context.TODO(), source.Name, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
framework.Failf("Error deleting source PVC %q. Error: %v", sourcePVC.Name, err)
framework.Failf("Error deleting source PVC %q. Error: %v", source.Name, err)
}
clearComputedStorageClass()