diff --git a/test/e2e/storage/csi_mock_volume.go b/test/e2e/storage/csi_mock_volume.go
index 8973f8f5691..f7a3babc3d9 100644
--- a/test/e2e/storage/csi_mock_volume.go
+++ b/test/e2e/storage/csi_mock_volume.go
@@ -21,6 +21,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"regexp"
+	"strconv"
 	"strings"
 	"time"
 
@@ -131,7 +132,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 		}
 	}
 
-	createPod := func() (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
+	createPod := func(ephemeral bool) (class *storagev1.StorageClass, claim *v1.PersistentVolumeClaim, pod *v1.Pod) {
 		ginkgo.By("Creating pod")
 		var sc *storagev1.StorageClass
 		if dDriver, ok := m.driver.(testsuites.DynamicPVTestDriver); ok {
@@ -162,17 +163,24 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 				Selector: m.nodeLabel,
 			}
 		}
-		class, claim, pod := startPausePod(f.ClientSet, scTest, nodeSelection, f.Namespace.Name)
-		if class != nil {
-			m.sc[class.Name] = class
+		if ephemeral {
+			pod = startPausePodInline(f.ClientSet, scTest, nodeSelection, f.Namespace.Name)
+			if pod != nil {
+				m.pods = append(m.pods, pod)
+			}
+		} else {
+			class, claim, pod = startPausePod(f.ClientSet, scTest, nodeSelection, f.Namespace.Name)
+			if class != nil {
+				m.sc[class.Name] = class
+			}
+			if claim != nil {
+				m.pvcs = append(m.pvcs, claim)
+			}
+			if pod != nil {
+				m.pods = append(m.pods, pod)
+			}
 		}
-		if claim != nil {
-			m.pvcs = append(m.pvcs, claim)
-		}
-		if pod != nil {
-			m.pods = append(m.pods, pod)
-		}
-		return class, claim, pod
+		return // result variables set above
 	}
 
 	createPodWithPVC := func(pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
@@ -257,7 +265,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 			init(testParameters{registerDriver: test.deployClusterRegistrar, disableAttach: test.disableAttach})
 			defer cleanup()
 
-			_, claim, pod := createPod()
+			_, claim, pod := createPod(false)
 			if pod == nil {
 				return
 			}
@@ -297,29 +305,42 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 			podInfoOnMount         *bool
 			deployClusterRegistrar bool
 			expectPodInfo          bool
+			expectEphemeral        bool
 		}{
 			{
 				name:                   "should not be passed when podInfoOnMount=nil",
 				podInfoOnMount:         nil,
 				deployClusterRegistrar: true,
 				expectPodInfo:          false,
+				expectEphemeral:        false,
 			},
 			{
 				name:                   "should be passed when podInfoOnMount=true",
 				podInfoOnMount:         &podInfoTrue,
 				deployClusterRegistrar: true,
 				expectPodInfo:          true,
+				expectEphemeral:        false,
+			},
+			{
+				// TODO(pohly): remove the feature tag when moving to beta
+				name:                   "contain ephemeral=true when using inline volume [Feature:CSIInlineVolume]",
+				podInfoOnMount:         &podInfoTrue,
+				deployClusterRegistrar: true,
+				expectPodInfo:          true,
+				expectEphemeral:        true,
 			},
 			{
 				name:                   "should not be passed when podInfoOnMount=false",
 				podInfoOnMount:         &podInfoFalse,
 				deployClusterRegistrar: true,
 				expectPodInfo:          false,
+				expectEphemeral:        false,
 			},
 			{
 				name:                   "should not be passed when CSIDriver does not exist",
 				deployClusterRegistrar: false,
 				expectPodInfo:          false,
+				expectEphemeral:        false,
 			},
 		}
 		for _, t := range tests {
@@ -332,17 +353,27 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 
 			defer cleanup()
 
-			_, _, pod := createPod()
+			_, _, pod := createPod(test.expectEphemeral)
 			if pod == nil {
 				return
 			}
 			err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
 			framework.ExpectNoError(err, "Failed to start pod: %v", err)
 
-			ginkgo.By("Checking CSI driver logs")
+			// If we expect an ephemeral volume, the feature has to be enabled.
+			// Otherwise we need to check if we expect pod info, because the content
+			// of that depends on whether the feature is enabled or not.
+			csiInlineVolumesEnabled := test.expectEphemeral
+			if test.expectPodInfo {
+				ginkgo.By("checking for CSIInlineVolumes feature")
+				csiInlineVolumesEnabled, err = testsuites.CSIInlineVolumesEnabled(m.cs, f.Namespace.Name)
+				framework.ExpectNoError(err, "failed to test for CSIInlineVolumes")
+			}
+
+			ginkgo.By("Checking CSI driver logs")
 			// The driver is deployed as a statefulset with stable pod names
 			driverPodName := "csi-mockplugin-0"
-			err = checkPodInfo(m.cs, f.Namespace.Name, driverPodName, "mock", pod, test.expectPodInfo)
+			err = checkPodInfo(m.cs, f.Namespace.Name, driverPodName, "mock", pod, test.expectPodInfo, test.expectEphemeral, csiInlineVolumesEnabled)
 			framework.ExpectNoError(err)
 		})
 	}
@@ -364,19 +395,19 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 
 			gomega.Expect(csiNodeAttachLimit).To(gomega.BeNumerically("==", 2))
 
-			_, _, pod1 := createPod()
+			_, _, pod1 := createPod(false)
 			gomega.Expect(pod1).NotTo(gomega.BeNil(), "while creating first pod")
 
 			err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod1.Name, pod1.Namespace)
 			framework.ExpectNoError(err, "Failed to start pod1: %v", err)
 
-			_, _, pod2 := createPod()
+			_, _, pod2 := createPod(false)
 			gomega.Expect(pod2).NotTo(gomega.BeNil(), "while creating second pod")
 
 			err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod2.Name, pod2.Namespace)
 			framework.ExpectNoError(err, "Failed to start pod2: %v", err)
 
-			_, _, pod3 := createPod()
+			_, _, pod3 := createPod(false)
 			gomega.Expect(pod3).NotTo(gomega.BeNil(), "while creating third pod")
 			err = waitForMaxVolumeCondition(pod3, m.cs)
 			framework.ExpectNoError(err, "while waiting for max volume condition on pod : %+v", pod3)
@@ -429,7 +460,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 			defer cleanup()
 
 			ns := f.Namespace.Name
-			sc, pvc, pod := createPod()
+			sc, pvc, pod := createPod(false)
 			gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing")
 
 			gomega.Expect(*sc.AllowVolumeExpansion).To(gomega.BeTrue(), "failed creating sc with allowed expansion")
@@ -520,7 +551,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 
 			defer cleanup()
 
-			sc, pvc, pod := createPod()
+			sc, pvc, pod := createPod(false)
 			gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing")
 
 			gomega.Expect(*sc.AllowVolumeExpansion).To(gomega.BeTrue(), "failed creating sc with allowed expansion")
@@ -611,51 +642,41 @@ func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node f
 	_, err = framework.WaitForPVClaimBoundPhase(cs, pvcClaims, framework.ClaimProvisionTimeout)
 	framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
 
-	pod := &v1.Pod{
-		ObjectMeta: metav1.ObjectMeta{
-			GenerateName: "pvc-volume-tester-",
-		},
-		Spec: v1.PodSpec{
-			Containers: []v1.Container{
-				{
-					Name:  "volume-tester",
-					Image: imageutils.GetE2EImage(imageutils.Pause),
-					VolumeMounts: []v1.VolumeMount{
-						{
-							Name:      "my-volume",
-							MountPath: "/mnt/test",
-						},
-					},
-				},
-			},
-			RestartPolicy: v1.RestartPolicyNever,
-			Volumes: []v1.Volume{
-				{
-					Name: "my-volume",
-					VolumeSource: v1.VolumeSource{
-						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
-							ClaimName: claim.Name,
-							ReadOnly:  false,
-						},
-					},
-				},
-			},
-		},
-	}
-
-	if node.Name != "" {
-		pod.Spec.NodeName = node.Name
-	}
-	if len(node.Selector) != 0 {
-		pod.Spec.NodeSelector = node.Selector
-	}
-
-	pod, err = cs.CoreV1().Pods(ns).Create(pod)
+	pod, err := startPausePodWithClaim(cs, claim, node, ns)
 	framework.ExpectNoError(err, "Failed to create pod: %v", err)
 	return class, claim, pod
 }
 
+func startPausePodInline(cs clientset.Interface, t testsuites.StorageClassTest, node framework.NodeSelection, ns string) *v1.Pod {
+	pod, err := startPausePodWithInlineVolume(cs,
+		&v1.CSIVolumeSource{
+			Driver: t.Provisioner,
+		},
+		node, ns)
+	framework.ExpectNoError(err, "Failed to create pod: %v", err)
+	return pod
+}
+
 func startPausePodWithClaim(cs clientset.Interface, pvc *v1.PersistentVolumeClaim, node framework.NodeSelection, ns string) (*v1.Pod, error) {
+	return startPausePodWithVolumeSource(cs,
+		v1.VolumeSource{
+			PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+				ClaimName: pvc.Name,
+				ReadOnly:  false,
+			},
+		},
+		node, ns)
+}
+
+func startPausePodWithInlineVolume(cs clientset.Interface, inlineVolume *v1.CSIVolumeSource, node framework.NodeSelection, ns string) (*v1.Pod, error) {
+	return startPausePodWithVolumeSource(cs,
+		v1.VolumeSource{
+			CSI: inlineVolume,
+		},
+		node, ns)
+}
+
+func startPausePodWithVolumeSource(cs clientset.Interface, volumeSource v1.VolumeSource, node framework.NodeSelection, ns string) (*v1.Pod, error) {
 	pod := &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			GenerateName: "pvc-volume-tester-",
@@ -676,13 +697,8 @@ func startPausePodWithClaim(cs clientset.Interface, pvc *v1.PersistentVolumeClai
 			RestartPolicy: v1.RestartPolicyNever,
 			Volumes: []v1.Volume{
 				{
-					Name: "my-volume",
-					VolumeSource: v1.VolumeSource{
-						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
-							ClaimName: pvc.Name,
-							ReadOnly:  false,
-						},
-					},
+					Name:         "my-volume",
+					VolumeSource: volumeSource,
 				},
 			},
 		},
@@ -699,13 +715,18 @@ func startPausePodWithClaim(cs clientset.Interface, pvc *v1.PersistentVolumeClai
 }
 
 // checkPodInfo tests that NodePublish was called with expected volume_context
-func checkPodInfo(cs clientset.Interface, namespace, driverPodName, driverContainerName string, pod *v1.Pod, expectPodInfo bool) error {
+func checkPodInfo(cs clientset.Interface, namespace, driverPodName, driverContainerName string, pod *v1.Pod, expectPodInfo, ephemeralVolume, csiInlineVolumesEnabled bool) error {
 	expectedAttributes := map[string]string{
 		"csi.storage.k8s.io/pod.name":            pod.Name,
 		"csi.storage.k8s.io/pod.namespace":       namespace,
 		"csi.storage.k8s.io/pod.uid":             string(pod.UID),
 		"csi.storage.k8s.io/serviceAccount.name": "default",
 	}
+	if csiInlineVolumesEnabled {
+		// This is only passed in 1.15 when the CSIInlineVolume feature gate is set.
+		expectedAttributes["csi.storage.k8s.io/ephemeral"] = strconv.FormatBool(ephemeralVolume)
+	}
+
 	// Load logs of driver pod
 	log, err := e2epod.GetPodLogs(cs, namespace, driverPodName, driverContainerName)
 	if err != nil {
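
For context, and not part of the patch itself: the csi.storage.k8s.io/ephemeral entry that checkPodInfo verifies above reaches the driver in the NodePublishVolume volume context. A minimal sketch of how a driver might consume it, assuming the CSI spec Go bindings (github.com/container-storage-interface/spec/lib/go/csi); the package and function names here are illustrative:

package driver

import (
	"strconv"

	csi "github.com/container-storage-interface/spec/lib/go/csi"
)

// isEphemeral reports whether a NodePublishVolume call is for an inline
// ephemeral volume. The key is only present when the driver runs with
// podInfoOnMount=true on a cluster with the CSIInlineVolume feature gate
// enabled (1.15+), which is exactly the combination the test above probes.
func isEphemeral(req *csi.NodePublishVolumeRequest) bool {
	value, ok := req.GetVolumeContext()["csi.storage.k8s.io/ephemeral"]
	if !ok {
		return false
	}
	ephemeral, err := strconv.ParseBool(value)
	return err == nil && ephemeral
}
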
diff --git a/test/e2e/storage/testsuites/ephemeral.go b/test/e2e/storage/testsuites/ephemeral.go
index 674e7e83032..ee9cfcbf71a 100644
--- a/test/e2e/storage/testsuites/ephemeral.go
+++ b/test/e2e/storage/testsuites/ephemeral.go
@@ -23,6 +23,7 @@ import (
 	"github.com/onsi/gomega"
 
 	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
@@ -228,3 +229,57 @@ func StartInPodWithInlineVolume(c clientset.Interface, ns, podName, command stri
 	framework.ExpectNoError(err, "failed to create pod")
 	return pod
 }
+
+// CSIInlineVolumesEnabled checks whether the running cluster has the CSIInlineVolume feature gate enabled.
+// It does that by trying to create a pod that uses that feature.
+func CSIInlineVolumesEnabled(c clientset.Interface, ns string) (bool, error) {
+	pod := &v1.Pod{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "Pod",
+			APIVersion: "v1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			GenerateName: "csi-inline-volume-",
+		},
+		Spec: v1.PodSpec{
+			Containers: []v1.Container{
+				{
+					Name:  "csi-volume-tester",
+					Image: "no-such-registry/no-such-image",
+					VolumeMounts: []v1.VolumeMount{
+						{
+							Name:      "my-volume",
+							MountPath: "/mnt/test",
+						},
+					},
+				},
+			},
+			RestartPolicy: v1.RestartPolicyNever,
+			Volumes: []v1.Volume{
+				{
+					Name: "my-volume",
+					VolumeSource: v1.VolumeSource{
+						CSI: &v1.CSIVolumeSource{
+							Driver: "no-such-driver.example.com",
+						},
+					},
+				},
+			},
+		},
+	}
+
+	pod, err := c.CoreV1().Pods(ns).Create(pod)
+
+	switch {
+	case err == nil:
+		// Pod was created, feature supported.
+		StopPod(c, pod)
+		return true, nil
+	case errors.IsInvalid(err):
+		// "Invalid" because it uses a feature that isn't supported.
+		return false, nil
+	default:
+		// Unexpected error.
+		return false, err
+	}
+}
diff --git a/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml b/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml
index edc71b011e0..af4b06c1a56 100644
--- a/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml
+++ b/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml
@@ -48,7 +48,9 @@ spec:
         - mountPath: /registration
           name: registration-dir
       - name: mock
-        image: quay.io/k8scsi/mock-driver:v1.1.1
+        image: quay.io/k8scsi/mock-driver:v2.1.0
+        args:
+        - "--permissive-target-path" # because of https://github.com/kubernetes/kubernetes/issues/75535
         env:
         - name: CSI_ENDPOINT
           value: /csi/csi.sock
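
A note on the probe above: CSIInlineVolumesEnabled relies purely on API server validation. The pod references a nonexistent image and driver, so it can never actually run; if the create succeeds, the feature is supported and StopPod cleans up again, while an "Invalid" rejection of the CSIVolumeSource means it is not. A test that prefers to skip rather than adapt its expectations, as the mock-driver test does, could build on the helper along these lines (a hypothetical sketch, not part of the patch):

enabled, err := testsuites.CSIInlineVolumesEnabled(f.ClientSet, f.Namespace.Name)
framework.ExpectNoError(err, "failed to check for CSI inline volume support")
if !enabled {
	framework.Skipf("cluster does not have the CSIInlineVolume feature enabled")
}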