diff --git a/test/e2e/storage/testsuites/volumemode.go b/test/e2e/storage/testsuites/volumemode.go
index 89dadb3ca73..c9f705567e1 100644
--- a/test/e2e/storage/testsuites/volumemode.go
+++ b/test/e2e/storage/testsuites/volumemode.go
@@ -27,6 +27,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
 )
 
@@ -94,6 +95,11 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
 		// Now do the more expensive test initialization.
 		l.config, l.testCleanup = driver.PrepareTest(f)
 		l.intreeOps, l.migratedOps = getMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName)
+	}
+
+	// manualInit initializes l.genericVolumeTestResource without creating the PV & PVC objects.
+	manualInit := func() {
+		init()
 
 		fsType := pattern.FsType
 		volBindMode := storagev1.VolumeBindingImmediate
@@ -167,7 +173,7 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
 	case testpatterns.PreprovisionedPV:
 		if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported {
 			ginkgo.It("should fail to create pod by failing to mount volume [Slow]", func() {
-				init()
+				manualInit()
 				defer cleanup()
 
 				var err error
@@ -196,13 +202,12 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
 				}()
 				framework.ExpectError(err)
 			})
-			// TODO(mkimuram): Add more tests
 		}
 	case testpatterns.DynamicPV:
 		if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported {
 			ginkgo.It("should fail in binding dynamic provisioned PV to PVC [Slow]", func() {
-				init()
+				manualInit()
 				defer cleanup()
 
 				var err error
@@ -218,12 +223,36 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
 				err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, l.cs, l.pvc.Namespace, l.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
 				framework.ExpectError(err)
 			})
-			// TODO(mkimuram): Add more tests
 		}
 	default:
 		e2elog.Failf("Volume mode test doesn't support volType: %v", pattern.VolType)
 	}
 
+	ginkgo.It("should fail to use a volume in a pod with mismatched mode [Slow]", func() {
+		skipBlockTest(driver)
+		init()
+		l.genericVolumeTestResource = *createGenericVolumeTestResource(driver, l.config, pattern)
+		defer cleanup()
+
+		ginkgo.By("Creating pod")
+		var err error
+		pod := framework.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc}, false, "", false, false, framework.SELinuxLabel, nil)
+		// Change volumeMounts to volumeDevices and the other way around
+		pod = swapVolumeMode(pod)
+
+		// Run the pod
+		pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(pod)
+		framework.ExpectNoError(err)
+		defer func() {
+			framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod))
+		}()
+
+		// TODO: find a faster way to check that the pod can't start,
+		// perhaps when https://github.com/kubernetes/kubernetes/issues/79794 is fixed.
+		err = e2epod.WaitTimeoutForPodRunningInNamespace(l.cs, pod.Name, l.ns.Name, framework.PodStartTimeout)
+		framework.ExpectError(err, "pod with mismatched block/filesystem volumes should not start")
+	})
+
 }
 
 func generateConfigsForPreprovisionedPVTest(scName string, volBindMode storagev1.VolumeBindingMode,
@@ -254,3 +283,29 @@ func generateConfigsForPreprovisionedPVTest(scName string, volBindMode storagev1
 
 	return scConfig, pvConfig, pvcConfig
 }
+
+// swapVolumeMode changes volumeMounts to volumeDevices and the other way around
+func swapVolumeMode(podTemplate *v1.Pod) *v1.Pod {
+	pod := podTemplate.DeepCopy()
+	for c := range pod.Spec.Containers {
+		container := &pod.Spec.Containers[c]
+		container.VolumeDevices = []v1.VolumeDevice{}
+		container.VolumeMounts = []v1.VolumeMount{}
+
+		// Change VolumeMounts to VolumeDevices
+		for _, volumeMount := range podTemplate.Spec.Containers[c].VolumeMounts {
+			container.VolumeDevices = append(container.VolumeDevices, v1.VolumeDevice{
+				Name:       volumeMount.Name,
+				DevicePath: volumeMount.MountPath,
+			})
+		}
+		// Change VolumeDevices to VolumeMounts
+		for _, volumeDevice := range podTemplate.Spec.Containers[c].VolumeDevices {
+			container.VolumeMounts = append(container.VolumeMounts, v1.VolumeMount{
+				Name:      volumeDevice.Name,
+				MountPath: volumeDevice.DevicePath,
+			})
+		}
+	}
+	return pod
+}