From a40d2eb18c7a256f33b4839a3a46172321128aa8 Mon Sep 17 00:00:00 2001
From: Patrick Ohly
Date: Thu, 14 Oct 2021 18:01:09 +0200
Subject: [PATCH 1/2] storage validation: accept generic ephemeral volumes as
 volume device

Raw block devices are possible with generic ephemeral volumes, so
rejecting a pod with that combination is wrong.
---
 pkg/apis/core/validation/validation.go      | 11 +++++---
 pkg/apis/core/validation/validation_test.go | 28 ++++++++++++++++++++-
 2 files changed, 34 insertions(+), 5 deletions(-)
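Illustration (editor's sketch, not part of the patch): the combination that
this change starts to accept is a generic ephemeral inline volume consumed
through volumeDevices with volumeMode Block. The names, image, and size
below are made up; the API types mirror the ones used in the tests.

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// blockModeEphemeralPod builds a pod whose container sees the ephemeral
// volume as a raw block device instead of a mounted filesystem.
func blockModeEphemeralPod() *v1.Pod {
	block := v1.PersistentVolumeBlock
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "ephemeral-block-example"},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:  "app",
				Image: "busybox",
				// The device shows up at this path; there is no mount point.
				VolumeDevices: []v1.VolumeDevice{{Name: "scratch", DevicePath: "/dev/block0"}},
			}},
			Volumes: []v1.Volume{{
				Name: "scratch",
				VolumeSource: v1.VolumeSource{
					Ephemeral: &v1.EphemeralVolumeSource{
						VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
							Spec: v1.PersistentVolumeClaimSpec{
								AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
								VolumeMode:  &block,
								Resources: v1.ResourceRequirements{
									Requests: v1.ResourceList{
										v1.ResourceStorage: resource.MustParse("1Gi"),
									},
								},
							},
						},
					},
				},
			}},
		},
	}
}

Before this patch, ValidateVolumeDevices rejected such a pod because the
volume named in volumeDevices was not a PersistentVolumeClaim source, even
though the ephemeral volume is backed by a PVC at runtime.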
SubPath: ""}, {Name: "abc-123", MountPath: "/bac", SubPath: ".baz"}, {Name: "abc-123", MountPath: "/bad", SubPath: "..baz"}, + {Name: "ephemeral", MountPath: "/foobar"}, } goodVolumeDevices := []core.VolumeDevice{ {Name: "xyz", DevicePath: "/foofoo"}, @@ -5851,6 +5864,18 @@ func TestAlphaValidateVolumeDevices(t *testing.T) { {Name: "abc", VolumeSource: core.VolumeSource{PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{ClaimName: "testclaim1"}}}, {Name: "abc-123", VolumeSource: core.VolumeSource{PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{ClaimName: "testclaim2"}}}, {Name: "def", VolumeSource: core.VolumeSource{HostPath: &core.HostPathVolumeSource{Path: "/foo/baz", Type: newHostPathType(string(core.HostPathUnset))}}}, + {Name: "ephemeral", VolumeSource: core.VolumeSource{Ephemeral: &core.EphemeralVolumeSource{VolumeClaimTemplate: &core.PersistentVolumeClaimTemplate{ + Spec: core.PersistentVolumeClaimSpec{ + AccessModes: []core.PersistentVolumeAccessMode{ + core.ReadWriteOnce, + }, + Resources: core.ResourceRequirements{ + Requests: core.ResourceList{ + core.ResourceName(core.ResourceStorage): resource.MustParse("10G"), + }, + }, + }, + }}}}, } vols, v1err := ValidateVolumes(volumes, nil, field.NewPath("field"), PodValidationOptions{}) @@ -5862,6 +5887,7 @@ func TestAlphaValidateVolumeDevices(t *testing.T) { successCase := []core.VolumeDevice{ {Name: "abc", DevicePath: "/foo"}, {Name: "abc-123", DevicePath: "/usr/share/test"}, + {Name: "ephemeral", DevicePath: "/disk"}, } goodVolumeMounts := []core.VolumeMount{ {Name: "xyz", MountPath: "/foofoo"}, @@ -5887,7 +5913,7 @@ func TestAlphaValidateVolumeDevices(t *testing.T) { } // Success Cases: - // Validate normal success cases - only PVC volumeSource + // Validate normal success cases - only PVC volumeSource or generic ephemeral volume if errs := ValidateVolumeDevices(successCase, GetVolumeMountMap(goodVolumeMounts), vols, field.NewPath("field")); len(errs) != 0 { t.Errorf("expected success: %v", errs) } From a90a3c6a9c06f6b4ad12ee1fa0da7b4c3ed01275 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Thu, 14 Oct 2021 18:04:31 +0200 Subject: [PATCH 2/2] storage e2e: check raw block support for generic ephemeral volumes This adds a new test pattern and uses it for the inline volume tests. Because the kind of volume now varies more, validation of the mount or block device is always done by the caller of TestEphemeral. --- test/e2e/storage/framework/testpattern.go | 7 ++++ test/e2e/storage/framework/volume_resource.go | 8 +++- test/e2e/storage/testsuites/ephemeral.go | 40 ++++++++++++++----- 3 files changed, 42 insertions(+), 13 deletions(-) diff --git a/test/e2e/storage/framework/testpattern.go b/test/e2e/storage/framework/testpattern.go index 3a61d1ba1c6..d9dbfe293aa 100644 --- a/test/e2e/storage/framework/testpattern.go +++ b/test/e2e/storage/framework/testpattern.go @@ -297,6 +297,13 @@ var ( SnapshotType: DynamicCreatedSnapshot, SnapshotDeletionPolicy: DeleteSnapshot, } + // BlockVolModeGenericEphemeralVolume is for generic ephemeral inline volumes in raw block mode. 
diff --git a/test/e2e/storage/framework/testpattern.go b/test/e2e/storage/framework/testpattern.go
index 3a61d1ba1c6..d9dbfe293aa 100644
--- a/test/e2e/storage/framework/testpattern.go
+++ b/test/e2e/storage/framework/testpattern.go
@@ -297,6 +297,13 @@ var (
 		SnapshotType:           DynamicCreatedSnapshot,
 		SnapshotDeletionPolicy: DeleteSnapshot,
 	}
+	// BlockVolModeGenericEphemeralVolume is for generic ephemeral inline volumes in raw block mode.
+	BlockVolModeGenericEphemeralVolume = TestPattern{
+		Name:        "Generic Ephemeral-volume (block volmode) (late-binding)",
+		VolType:     GenericEphemeralVolume,
+		VolMode:     v1.PersistentVolumeBlock,
+		BindingMode: storagev1.VolumeBindingWaitForFirstConsumer,
+	}
 
 	// Definitions for snapshot case
 
diff --git a/test/e2e/storage/framework/volume_resource.go b/test/e2e/storage/framework/volume_resource.go
index e65e98a842e..0b9d3896a9e 100644
--- a/test/e2e/storage/framework/volume_resource.go
+++ b/test/e2e/storage/framework/volume_resource.go
@@ -108,7 +108,7 @@ func CreateVolumeResource(driver TestDriver, config *PerTestConfig, pattern TestPattern, testVolumeSizeRange e2evolume.SizeRange) *VolumeResource {
 				driverVolumeSizeRange := dDriver.GetDriverInfo().SupportedSizeRange
 				claimSize, err := storageutils.GetSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange)
 				framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, driverVolumeSizeRange)
-				r.VolSource = createEphemeralVolumeSource(r.Sc.Name, dInfo.RequiredAccessModes, claimSize)
+				r.VolSource = createEphemeralVolumeSource(r.Sc.Name, pattern.VolMode, dInfo.RequiredAccessModes, claimSize)
 			}
 		}
 	case CSIInlineVolume:
@@ -133,16 +133,20 @@ func CreateVolumeResource(driver TestDriver, config *PerTestConfig, pattern TestPattern, testVolumeSizeRange e2evolume.SizeRange) *VolumeResource {
 	return &r
 }
 
-func createEphemeralVolumeSource(scName string, accessModes []v1.PersistentVolumeAccessMode, claimSize string) *v1.VolumeSource {
+func createEphemeralVolumeSource(scName string, volMode v1.PersistentVolumeMode, accessModes []v1.PersistentVolumeAccessMode, claimSize string) *v1.VolumeSource {
 	if len(accessModes) == 0 {
 		accessModes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
 	}
+	if volMode == "" {
+		volMode = v1.PersistentVolumeFilesystem
+	}
 	return &v1.VolumeSource{
 		Ephemeral: &v1.EphemeralVolumeSource{
 			VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
 				Spec: v1.PersistentVolumeClaimSpec{
 					StorageClassName: &scName,
 					AccessModes:      accessModes,
+					VolumeMode:       &volMode,
 					Resources: v1.ResourceRequirements{
 						Requests: v1.ResourceList{
 							v1.ResourceStorage: resource.MustParse(claimSize),
diff --git a/test/e2e/storage/testsuites/ephemeral.go b/test/e2e/storage/testsuites/ephemeral.go
index 655f517eabf..3598024dfff 100644
--- a/test/e2e/storage/testsuites/ephemeral.go
+++ b/test/e2e/storage/testsuites/ephemeral.go
@@ -67,6 +67,7 @@ func GenericEphemeralTestPatterns() []storageframework.TestPattern {
 	return []storageframework.TestPattern{
 		genericLateBinding,
 		genericImmediateBinding,
+		storageframework.BlockVolModeGenericEphemeralVolume,
 	}
 }
 
@@ -95,6 +96,9 @@ func (p *ephemeralTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo {
 }
 
 func (p *ephemeralTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
+	if pattern.VolMode == v1.PersistentVolumeBlock {
+		skipTestIfBlockNotSupported(driver)
+	}
 }
 
 func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
@@ -164,6 +168,10 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
 	}
 
 	ginkgo.It("should create read-only inline ephemeral volume", func() {
+		if pattern.VolMode == v1.PersistentVolumeBlock {
+			e2eskipper.Skipf("raw block volumes cannot be read-only")
+		}
+
 		init()
 		defer cleanup()
 
@@ -191,6 +199,9 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
 				// attempt to create a dummy file and expect for it to be created
 				command = "ls /mnt/test* && touch /mnt/test-0/hello-world && [ -f /mnt/test-0/hello-world ]"
 			}
+			if pattern.VolMode == v1.PersistentVolumeBlock {
+				command = "if ! [ -b /mnt/test-0 ]; then echo /mnt/test-0 is not a block device; exit 1; fi"
+			}
 			e2evolume.VerifyExecInPodSucceed(f, pod, command)
 			return nil
 		}
@@ -222,7 +233,7 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
 			// between pods, then we can check whether
 			// data written in one pod is really not
 			// visible in the other.
-			if !readOnly && !shared {
+			if pattern.VolMode != v1.PersistentVolumeBlock && !readOnly && !shared {
 				ginkgo.By("writing data in one pod and checking for it in the second")
 				e2evolume.VerifyExecInPodSucceed(f, pod, "touch /mnt/test-0/hello-world")
 				e2evolume.VerifyExecInPodSucceed(f, pod2, "[ ! -f /mnt/test-0/hello-world ]")
@@ -299,10 +310,7 @@ func (t EphemeralTest) TestEphemeral() {
 	gomega.Expect(client).NotTo(gomega.BeNil(), "EphemeralTest.Client is required")
 
 	ginkgo.By(fmt.Sprintf("checking the requested inline volume exists in the pod running on node %+v", t.Node))
-	command := "mount | grep /mnt/test && sleep 10000"
-	if framework.NodeOSDistroIs("windows") {
-		command = "ls /mnt/test* && sleep 10000"
-	}
+	command := "sleep 10000"
 
 	var volumes []v1.VolumeSource
 	numVolumes := t.NumInlineVolumes
@@ -390,12 +398,22 @@ func StartInPodWithInlineVolume(c clientset.Interface, ns, podName, command string, volumes []v1.VolumeSource, readOnly bool, node e2epod.NodeSelection) *v1.Pod {
 
 	for i, volume := range volumes {
 		name := fmt.Sprintf("my-volume-%d", i)
-		pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts,
-			v1.VolumeMount{
-				Name:      name,
-				MountPath: fmt.Sprintf("/mnt/test-%d", i),
-				ReadOnly:  readOnly,
-			})
+		path := fmt.Sprintf("/mnt/test-%d", i)
+		if volume.Ephemeral != nil && volume.Ephemeral.VolumeClaimTemplate.Spec.VolumeMode != nil &&
+			*volume.Ephemeral.VolumeClaimTemplate.Spec.VolumeMode == v1.PersistentVolumeBlock {
+			pod.Spec.Containers[0].VolumeDevices = append(pod.Spec.Containers[0].VolumeDevices,
+				v1.VolumeDevice{
+					Name:       name,
+					DevicePath: path,
+				})
+		} else {
+			pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts,
+				v1.VolumeMount{
+					Name:      name,
+					MountPath: path,
+					ReadOnly:  readOnly,
+				})
+		}
 		pod.Spec.Volumes = append(pod.Spec.Volumes,
 			v1.Volume{
 				Name:         name,