Merge pull request #105682 from pohly/generic-ephemeral-volume-raw-block

storage validation: accept generic ephemeral volumes as volume device
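
In effect, a pod may now reference a generic ephemeral inline volume from volumeDevices, not only a volume that points at a PersistentVolumeClaim directly. A minimal sketch of such a pod, built with the core/v1 Go types; the pod name, image, storage class "fast", device path, and size are illustrative, not taken from this PR:

	package example

	import (
		v1 "k8s.io/api/core/v1"
		"k8s.io/apimachinery/pkg/api/resource"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	)

	// blockModeEphemeralPod returns a pod that consumes a generic ephemeral
	// inline volume as a raw block device. Before this change, validation
	// rejected the "scratch" entry in VolumeDevices because only direct
	// PersistentVolumeClaim volume sources were accepted there.
	func blockModeEphemeralPod() *v1.Pod {
		block := v1.PersistentVolumeBlock
		scName := "fast" // assumed storage class
		return &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{Name: "example"},
			Spec: v1.PodSpec{
				Containers: []v1.Container{{
					Name:          "app",
					Image:         "busybox",
					VolumeDevices: []v1.VolumeDevice{{Name: "scratch", DevicePath: "/dev/xvda"}},
				}},
				Volumes: []v1.Volume{{
					Name: "scratch",
					VolumeSource: v1.VolumeSource{
						Ephemeral: &v1.EphemeralVolumeSource{
							VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
								Spec: v1.PersistentVolumeClaimSpec{
									AccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
									StorageClassName: &scName,
									VolumeMode:       &block,
									Resources: v1.ResourceRequirements{
										Requests: v1.ResourceList{
											v1.ResourceStorage: resource.MustParse("10G"),
										},
									},
								},
							},
						},
					},
				}},
			},
		}
	}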
commit 7fbb384e15
Kubernetes Prow Robot, 2021-10-22 18:04:50 -07:00 (committed by GitHub)
5 changed files with 76 additions and 18 deletions

pkg/apis/core/validation/validation.go

@@ -423,9 +423,12 @@ func IsMatchedVolume(name string, volumes map[string]core.VolumeSource) bool {
 	return false
 }
 
-func isMatchedDevice(name string, volumes map[string]core.VolumeSource) (bool, bool) {
+// isMatched checks whether the volume with the given name is used by a
+// container and if so, if it involves a PVC.
+func isMatchedDevice(name string, volumes map[string]core.VolumeSource) (isMatched bool, isPVC bool) {
 	if source, ok := volumes[name]; ok {
-		if source.PersistentVolumeClaim != nil {
+		if source.PersistentVolumeClaim != nil ||
+			source.Ephemeral != nil {
 			return true, true
 		}
 		return true, false
@@ -2616,9 +2619,9 @@ func ValidateVolumeDevices(devices []core.VolumeDevice, volmounts map[string]string
 		if devicename.Has(devName) {
 			allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), devName, "must be unique"))
 		}
-		// Must be PersistentVolumeClaim volume source
+		// Must be based on PersistentVolumeClaim (PVC reference or generic ephemeral inline volume)
 		if didMatch && !isPVC {
-			allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), devName, "can only use volume source type of PersistentVolumeClaim for block mode"))
+			allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), devName, "can only use volume source type of PersistentVolumeClaim or Ephemeral for block mode"))
 		}
 		if !didMatch {
 			allErrs = append(allErrs, field.NotFound(idxPath.Child("name"), devName))
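
For context, the two results drive the checks above: isMatched reports that some pod volume has the device's name, and isPVC reports that this volume is ultimately backed by a PersistentVolumeClaim, either via a direct PVC reference or via a generic ephemeral volume (which provisions a PVC for the pod). A hypothetical caller sketch, not code from this commit; devName and volumes stand in for the validation loop's locals:

	didMatch, isPVC := isMatchedDevice(devName, volumes)
	switch {
	case !didMatch:
		// no pod volume with this name: reported as field.NotFound
	case !isPVC:
		// the volume exists but is not PVC-backed: invalid for block mode
	}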

pkg/apis/core/validation/validation_test.go

@@ -5533,6 +5533,18 @@ func TestValidateVolumeMounts(t *testing.T) {
 		{Name: "abc", VolumeSource: core.VolumeSource{PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{ClaimName: "testclaim1"}}},
 		{Name: "abc-123", VolumeSource: core.VolumeSource{PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{ClaimName: "testclaim2"}}},
 		{Name: "123", VolumeSource: core.VolumeSource{HostPath: &core.HostPathVolumeSource{Path: "/foo/baz", Type: newHostPathType(string(core.HostPathUnset))}}},
+		{Name: "ephemeral", VolumeSource: core.VolumeSource{Ephemeral: &core.EphemeralVolumeSource{VolumeClaimTemplate: &core.PersistentVolumeClaimTemplate{
+			Spec: core.PersistentVolumeClaimSpec{
+				AccessModes: []core.PersistentVolumeAccessMode{
+					core.ReadWriteOnce,
+				},
+				Resources: core.ResourceRequirements{
+					Requests: core.ResourceList{
+						core.ResourceName(core.ResourceStorage): resource.MustParse("10G"),
+					},
+				},
+			},
+		}}}},
 	}
 	vols, v1err := ValidateVolumes(volumes, nil, field.NewPath("field"), PodValidationOptions{})
 	if len(v1err) > 0 {
@@ -5555,6 +5567,7 @@ func TestValidateVolumeMounts(t *testing.T) {
 		{Name: "abc-123", MountPath: "G:\\mount", SubPath: ""},
 		{Name: "abc-123", MountPath: "/bac", SubPath: ".baz"},
 		{Name: "abc-123", MountPath: "/bad", SubPath: "..baz"},
+		{Name: "ephemeral", MountPath: "/foobar"},
 	}
 	goodVolumeDevices := []core.VolumeDevice{
 		{Name: "xyz", DevicePath: "/foofoo"},
@@ -5852,6 +5865,18 @@ func TestAlphaValidateVolumeDevices(t *testing.T) {
 		{Name: "abc", VolumeSource: core.VolumeSource{PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{ClaimName: "testclaim1"}}},
 		{Name: "abc-123", VolumeSource: core.VolumeSource{PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{ClaimName: "testclaim2"}}},
 		{Name: "def", VolumeSource: core.VolumeSource{HostPath: &core.HostPathVolumeSource{Path: "/foo/baz", Type: newHostPathType(string(core.HostPathUnset))}}},
+		{Name: "ephemeral", VolumeSource: core.VolumeSource{Ephemeral: &core.EphemeralVolumeSource{VolumeClaimTemplate: &core.PersistentVolumeClaimTemplate{
+			Spec: core.PersistentVolumeClaimSpec{
+				AccessModes: []core.PersistentVolumeAccessMode{
+					core.ReadWriteOnce,
+				},
+				Resources: core.ResourceRequirements{
+					Requests: core.ResourceList{
+						core.ResourceName(core.ResourceStorage): resource.MustParse("10G"),
+					},
+				},
+			},
+		}}}},
 	}
 	vols, v1err := ValidateVolumes(volumes, nil, field.NewPath("field"), PodValidationOptions{})
@@ -5863,6 +5888,7 @@ func TestAlphaValidateVolumeDevices(t *testing.T) {
 	successCase := []core.VolumeDevice{
 		{Name: "abc", DevicePath: "/foo"},
 		{Name: "abc-123", DevicePath: "/usr/share/test"},
+		{Name: "ephemeral", DevicePath: "/disk"},
 	}
 	goodVolumeMounts := []core.VolumeMount{
 		{Name: "xyz", MountPath: "/foofoo"},
@@ -5888,7 +5914,7 @@ func TestAlphaValidateVolumeDevices(t *testing.T) {
 	}
 
 	// Success Cases:
-	// Validate normal success cases - only PVC volumeSource
+	// Validate normal success cases - only PVC volumeSource or generic ephemeral volume
 	if errs := ValidateVolumeDevices(successCase, GetVolumeMountMap(goodVolumeMounts), vols, field.NewPath("field")); len(errs) != 0 {
 		t.Errorf("expected success: %v", errs)
 	}
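
Assuming a kubernetes source checkout, the updated cases can be exercised with the standard Go tooling, for example: go test ./pkg/apis/core/validation/ -run 'TestValidateVolumeMounts|TestAlphaValidateVolumeDevices'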

test/e2e/storage/framework/testpattern.go

@@ -297,6 +297,13 @@ var (
 		SnapshotType:           DynamicCreatedSnapshot,
 		SnapshotDeletionPolicy: DeleteSnapshot,
 	}
+	// BlockVolModeGenericEphemeralVolume is for generic ephemeral inline volumes in raw block mode.
+	BlockVolModeGenericEphemeralVolume = TestPattern{
+		Name:        "Generic Ephemeral-volume (block volmode) (late-binding)",
+		VolType:     GenericEphemeralVolume,
+		VolMode:     v1.PersistentVolumeBlock,
+		BindingMode: storagev1.VolumeBindingWaitForFirstConsumer,
+	}
+
 	// Definitions for snapshot case

test/e2e/storage/framework/volume_resource.go

@@ -108,7 +108,7 @@ func CreateVolumeResource(driver TestDriver, config *PerTestConfig, pattern TestPattern
 				driverVolumeSizeRange := dDriver.GetDriverInfo().SupportedSizeRange
 				claimSize, err := storageutils.GetSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange)
 				framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, driverVolumeSizeRange)
-				r.VolSource = createEphemeralVolumeSource(r.Sc.Name, dInfo.RequiredAccessModes, claimSize)
+				r.VolSource = createEphemeralVolumeSource(r.Sc.Name, pattern.VolMode, dInfo.RequiredAccessModes, claimSize)
 			}
 		}
 	case CSIInlineVolume:
@@ -133,16 +133,20 @@ func CreateVolumeResource(driver TestDriver, config *PerTestConfig, pattern TestPattern
 	return &r
 }
 
-func createEphemeralVolumeSource(scName string, accessModes []v1.PersistentVolumeAccessMode, claimSize string) *v1.VolumeSource {
+func createEphemeralVolumeSource(scName string, volMode v1.PersistentVolumeMode, accessModes []v1.PersistentVolumeAccessMode, claimSize string) *v1.VolumeSource {
 	if len(accessModes) == 0 {
 		accessModes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
 	}
+	if volMode == "" {
+		volMode = v1.PersistentVolumeFilesystem
+	}
 	return &v1.VolumeSource{
 		Ephemeral: &v1.EphemeralVolumeSource{
 			VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
 				Spec: v1.PersistentVolumeClaimSpec{
 					StorageClassName: &scName,
 					AccessModes:      accessModes,
+					VolumeMode:       &volMode,
 					Resources: v1.ResourceRequirements{
 						Requests: v1.ResourceList{
 							v1.ResourceStorage: resource.MustParse(claimSize),
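
The new parameter defaults the mode to filesystem when unset, which keeps existing callers working. A hypothetical call for a raw block test, with "standard" and "1Gi" as illustrative values:

	src := createEphemeralVolumeSource("standard", v1.PersistentVolumeBlock,
		[]v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, "1Gi")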

test/e2e/storage/testsuites/ephemeral.go

@@ -67,6 +67,7 @@ func GenericEphemeralTestPatterns() []storageframework.TestPattern {
 	return []storageframework.TestPattern{
 		genericLateBinding,
 		genericImmediateBinding,
+		storageframework.BlockVolModeGenericEphemeralVolume,
 	}
 }
@@ -95,6 +96,9 @@ func (p *ephemeralTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo {
 }
 
 func (p *ephemeralTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
+	if pattern.VolMode == v1.PersistentVolumeBlock {
+		skipTestIfBlockNotSupported(driver)
+	}
 }
 
 func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
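
skipTestIfBlockNotSupported is an existing helper in this package; roughly, and as an assumption about its exact wording, it skips the test when the driver does not declare raw block support in its capabilities:

	// Sketch of the helper's behavior, not a verbatim copy:
	func skipTestIfBlockNotSupported(driver storageframework.TestDriver) {
		if !driver.GetDriverInfo().Capabilities[storageframework.CapBlock] {
			e2eskipper.Skipf("driver %q does not support raw block volumes", driver.GetDriverInfo().Name)
		}
	}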
@@ -164,6 +168,10 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
 	}
 
 	ginkgo.It("should create read-only inline ephemeral volume", func() {
+		if pattern.VolMode == v1.PersistentVolumeBlock {
+			e2eskipper.Skipf("raw block volumes cannot be read-only")
+		}
+
 		init()
 		defer cleanup()
@@ -191,6 +199,9 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
 			// attempt to create a dummy file and expect for it to be created
 			command = "ls /mnt/test* && touch /mnt/test-0/hello-world && [ -f /mnt/test-0/hello-world ]"
 		}
+		if pattern.VolMode == v1.PersistentVolumeBlock {
+			command = "if ! [ -b /mnt/test-0 ]; then echo /mnt/test-0 is not a block device; exit 1; fi"
+		}
 		e2evolume.VerifyExecInPodSucceed(f, pod, command)
 		return nil
 	}
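
The shell test [ -b /mnt/test-0 ] succeeds only if the path is a block special file, i.e. the volume was attached as a device rather than mounted as a filesystem. A rough Go equivalent of that check, assuming Linux and the golang.org/x/sys/unix package:

	// isBlockDevice reports whether path is a block special file.
	func isBlockDevice(path string) (bool, error) {
		var st unix.Stat_t
		if err := unix.Stat(path, &st); err != nil {
			return false, err
		}
		return st.Mode&unix.S_IFMT == unix.S_IFBLK, nil
	}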
@@ -222,7 +233,7 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
 			// between pods, then we can check whether
 			// data written in one pod is really not
 			// visible in the other.
-			if !readOnly && !shared {
+			if pattern.VolMode != v1.PersistentVolumeBlock && !readOnly && !shared {
 				ginkgo.By("writing data in one pod and checking for it in the second")
 				e2evolume.VerifyExecInPodSucceed(f, pod, "touch /mnt/test-0/hello-world")
 				e2evolume.VerifyExecInPodSucceed(f, pod2, "[ ! -f /mnt/test-0/hello-world ]")
@@ -299,10 +310,7 @@ func (t EphemeralTest) TestEphemeral() {
 	gomega.Expect(client).NotTo(gomega.BeNil(), "EphemeralTest.Client is required")
 
 	ginkgo.By(fmt.Sprintf("checking the requested inline volume exists in the pod running on node %+v", t.Node))
-	command := "mount | grep /mnt/test && sleep 10000"
-	if framework.NodeOSDistroIs("windows") {
-		command = "ls /mnt/test* && sleep 10000"
-	}
+	command := "sleep 10000"
 
 	var volumes []v1.VolumeSource
 	numVolumes := t.NumInlineVolumes
@@ -390,12 +398,22 @@ func StartInPodWithInlineVolume(c clientset.Interface, ns, podName, command string
 	for i, volume := range volumes {
 		name := fmt.Sprintf("my-volume-%d", i)
+		path := fmt.Sprintf("/mnt/test-%d", i)
+		if volume.Ephemeral != nil && volume.Ephemeral.VolumeClaimTemplate.Spec.VolumeMode != nil &&
+			*volume.Ephemeral.VolumeClaimTemplate.Spec.VolumeMode == v1.PersistentVolumeBlock {
+			pod.Spec.Containers[0].VolumeDevices = append(pod.Spec.Containers[0].VolumeDevices,
+				v1.VolumeDevice{
+					Name:       name,
+					DevicePath: path,
+				})
+		} else {
 			pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts,
 				v1.VolumeMount{
 					Name:      name,
-					MountPath: fmt.Sprintf("/mnt/test-%d", i),
+					MountPath: path,
 					ReadOnly:  readOnly,
 				})
+		}
 		pod.Spec.Volumes = append(pod.Spec.Volumes,
 			v1.Volume{
 				Name: name,