storage e2e: check raw block support for generic ephemeral volumes

This adds a new test pattern and uses it for the inline volume tests. Because
the kind of volume now varies more, validation of the mount or block device is
now always done by the caller of TestEphemeral.
This commit is contained in:
Patrick Ohly 2021-10-14 18:04:31 +02:00
parent a40d2eb18c
commit a90a3c6a9c
3 changed files with 42 additions and 13 deletions

View File

@ -297,6 +297,13 @@ var (
SnapshotType: DynamicCreatedSnapshot, SnapshotType: DynamicCreatedSnapshot,
SnapshotDeletionPolicy: DeleteSnapshot, SnapshotDeletionPolicy: DeleteSnapshot,
} }
// BlockVolModeGenericEphemeralVolume is for generic ephemeral inline volumes in raw block mode.
BlockVolModeGenericEphemeralVolume = TestPattern{
Name: "Generic Ephemeral-volume (block volmode) (late-binding)",
VolType: GenericEphemeralVolume,
VolMode: v1.PersistentVolumeBlock,
BindingMode: storagev1.VolumeBindingWaitForFirstConsumer,
}
// Definitions for snapshot case // Definitions for snapshot case

View File

@ -108,7 +108,7 @@ func CreateVolumeResource(driver TestDriver, config *PerTestConfig, pattern Test
driverVolumeSizeRange := dDriver.GetDriverInfo().SupportedSizeRange driverVolumeSizeRange := dDriver.GetDriverInfo().SupportedSizeRange
claimSize, err := storageutils.GetSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange) claimSize, err := storageutils.GetSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange)
framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, driverVolumeSizeRange) framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, driverVolumeSizeRange)
r.VolSource = createEphemeralVolumeSource(r.Sc.Name, dInfo.RequiredAccessModes, claimSize) r.VolSource = createEphemeralVolumeSource(r.Sc.Name, pattern.VolMode, dInfo.RequiredAccessModes, claimSize)
} }
} }
case CSIInlineVolume: case CSIInlineVolume:
@ -133,16 +133,20 @@ func CreateVolumeResource(driver TestDriver, config *PerTestConfig, pattern Test
return &r return &r
} }
func createEphemeralVolumeSource(scName string, accessModes []v1.PersistentVolumeAccessMode, claimSize string) *v1.VolumeSource { func createEphemeralVolumeSource(scName string, volMode v1.PersistentVolumeMode, accessModes []v1.PersistentVolumeAccessMode, claimSize string) *v1.VolumeSource {
if len(accessModes) == 0 { if len(accessModes) == 0 {
accessModes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce} accessModes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
} }
if volMode == "" {
volMode = v1.PersistentVolumeFilesystem
}
return &v1.VolumeSource{ return &v1.VolumeSource{
Ephemeral: &v1.EphemeralVolumeSource{ Ephemeral: &v1.EphemeralVolumeSource{
VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{ VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
Spec: v1.PersistentVolumeClaimSpec{ Spec: v1.PersistentVolumeClaimSpec{
StorageClassName: &scName, StorageClassName: &scName,
AccessModes: accessModes, AccessModes: accessModes,
VolumeMode: &volMode,
Resources: v1.ResourceRequirements{ Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{ Requests: v1.ResourceList{
v1.ResourceStorage: resource.MustParse(claimSize), v1.ResourceStorage: resource.MustParse(claimSize),

View File

@ -67,6 +67,7 @@ func GenericEphemeralTestPatterns() []storageframework.TestPattern {
return []storageframework.TestPattern{ return []storageframework.TestPattern{
genericLateBinding, genericLateBinding,
genericImmediateBinding, genericImmediateBinding,
storageframework.BlockVolModeGenericEphemeralVolume,
} }
} }
@ -95,6 +96,9 @@ func (p *ephemeralTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo {
} }
func (p *ephemeralTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { func (p *ephemeralTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
if pattern.VolMode == v1.PersistentVolumeBlock {
skipTestIfBlockNotSupported(driver)
}
} }
func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
@ -164,6 +168,10 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat
} }
ginkgo.It("should create read-only inline ephemeral volume", func() { ginkgo.It("should create read-only inline ephemeral volume", func() {
if pattern.VolMode == v1.PersistentVolumeBlock {
e2eskipper.Skipf("raw block volumes cannot be read-only")
}
init() init()
defer cleanup() defer cleanup()
@ -191,6 +199,9 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat
// attempt to create a dummy file and expect for it to be created // attempt to create a dummy file and expect for it to be created
command = "ls /mnt/test* && touch /mnt/test-0/hello-world && [ -f /mnt/test-0/hello-world ]" command = "ls /mnt/test* && touch /mnt/test-0/hello-world && [ -f /mnt/test-0/hello-world ]"
} }
if pattern.VolMode == v1.PersistentVolumeBlock {
command = "if ! [ -b /mnt/test-0 ]; then echo /mnt/test-0 is not a block device; exit 1; fi"
}
e2evolume.VerifyExecInPodSucceed(f, pod, command) e2evolume.VerifyExecInPodSucceed(f, pod, command)
return nil return nil
} }
@ -222,7 +233,7 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat
// between pods, then we can check whether // between pods, then we can check whether
// data written in one pod is really not // data written in one pod is really not
// visible in the other. // visible in the other.
if !readOnly && !shared { if pattern.VolMode != v1.PersistentVolumeBlock && !readOnly && !shared {
ginkgo.By("writing data in one pod and checking for it in the second") ginkgo.By("writing data in one pod and checking for it in the second")
e2evolume.VerifyExecInPodSucceed(f, pod, "touch /mnt/test-0/hello-world") e2evolume.VerifyExecInPodSucceed(f, pod, "touch /mnt/test-0/hello-world")
e2evolume.VerifyExecInPodSucceed(f, pod2, "[ ! -f /mnt/test-0/hello-world ]") e2evolume.VerifyExecInPodSucceed(f, pod2, "[ ! -f /mnt/test-0/hello-world ]")
@ -299,10 +310,7 @@ func (t EphemeralTest) TestEphemeral() {
gomega.Expect(client).NotTo(gomega.BeNil(), "EphemeralTest.Client is required") gomega.Expect(client).NotTo(gomega.BeNil(), "EphemeralTest.Client is required")
ginkgo.By(fmt.Sprintf("checking the requested inline volume exists in the pod running on node %+v", t.Node)) ginkgo.By(fmt.Sprintf("checking the requested inline volume exists in the pod running on node %+v", t.Node))
command := "mount | grep /mnt/test && sleep 10000" command := "sleep 10000"
if framework.NodeOSDistroIs("windows") {
command = "ls /mnt/test* && sleep 10000"
}
var volumes []v1.VolumeSource var volumes []v1.VolumeSource
numVolumes := t.NumInlineVolumes numVolumes := t.NumInlineVolumes
@ -390,12 +398,22 @@ func StartInPodWithInlineVolume(c clientset.Interface, ns, podName, command stri
for i, volume := range volumes { for i, volume := range volumes {
name := fmt.Sprintf("my-volume-%d", i) name := fmt.Sprintf("my-volume-%d", i)
pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts, path := fmt.Sprintf("/mnt/test-%d", i)
v1.VolumeMount{ if volume.Ephemeral != nil && volume.Ephemeral.VolumeClaimTemplate.Spec.VolumeMode != nil &&
Name: name, *volume.Ephemeral.VolumeClaimTemplate.Spec.VolumeMode == v1.PersistentVolumeBlock {
MountPath: fmt.Sprintf("/mnt/test-%d", i), pod.Spec.Containers[0].VolumeDevices = append(pod.Spec.Containers[0].VolumeDevices,
ReadOnly: readOnly, v1.VolumeDevice{
}) Name: name,
DevicePath: path,
})
} else {
pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts,
v1.VolumeMount{
Name: name,
MountPath: path,
ReadOnly: readOnly,
})
}
pod.Spec.Volumes = append(pod.Spec.Volumes, pod.Spec.Volumes = append(pod.Spec.Volumes,
v1.Volume{ v1.Volume{
Name: name, Name: name,