e2e storage: csi-mock tests for ephemeral inline volumes

The PodInfo tests can be extended to also cover the new
csi.storage.k8s.io/ephemeral flag. However, the presence of that flag
depends on whether inline volume support is enabled, so tests that run
with and without the feature have to detect that at runtime.

Other tests have a feature tag and thus can assume that they only run
when that feature is enabled. However, we need a newer csi-mock driver
before we can actually ask it to publish an ephemeral inline volume.
This commit is contained in:
Patrick Ohly 2019-07-19 16:02:02 +02:00
parent 555ff7ef10
commit 608b0e7097
3 changed files with 147 additions and 69 deletions

View File

@ -21,6 +21,7 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"regexp" "regexp"
"strconv"
"strings" "strings"
"time" "time"
@ -131,7 +132,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
} }
} }
createPod := func() (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) { createPod := func(ephemeral bool) (class *storagev1.StorageClass, claim *v1.PersistentVolumeClaim, pod *v1.Pod) {
ginkgo.By("Creating pod") ginkgo.By("Creating pod")
var sc *storagev1.StorageClass var sc *storagev1.StorageClass
if dDriver, ok := m.driver.(testsuites.DynamicPVTestDriver); ok { if dDriver, ok := m.driver.(testsuites.DynamicPVTestDriver); ok {
@ -162,17 +163,24 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
Selector: m.nodeLabel, Selector: m.nodeLabel,
} }
} }
class, claim, pod := startPausePod(f.ClientSet, scTest, nodeSelection, f.Namespace.Name) if ephemeral {
if class != nil { pod = startPausePodInline(f.ClientSet, scTest, nodeSelection, f.Namespace.Name)
m.sc[class.Name] = class if pod != nil {
m.pods = append(m.pods, pod)
}
} else {
class, claim, pod = startPausePod(f.ClientSet, scTest, nodeSelection, f.Namespace.Name)
if class != nil {
m.sc[class.Name] = class
}
if claim != nil {
m.pvcs = append(m.pvcs, claim)
}
if pod != nil {
m.pods = append(m.pods, pod)
}
} }
if claim != nil { return // result variables set above
m.pvcs = append(m.pvcs, claim)
}
if pod != nil {
m.pods = append(m.pods, pod)
}
return class, claim, pod
} }
createPodWithPVC := func(pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) { createPodWithPVC := func(pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
@ -257,7 +265,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
init(testParameters{registerDriver: test.deployClusterRegistrar, disableAttach: test.disableAttach}) init(testParameters{registerDriver: test.deployClusterRegistrar, disableAttach: test.disableAttach})
defer cleanup() defer cleanup()
_, claim, pod := createPod() _, claim, pod := createPod(false)
if pod == nil { if pod == nil {
return return
} }
@ -297,29 +305,42 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
podInfoOnMount *bool podInfoOnMount *bool
deployClusterRegistrar bool deployClusterRegistrar bool
expectPodInfo bool expectPodInfo bool
expectEphemeral bool
}{ }{
{ {
name: "should not be passed when podInfoOnMount=nil", name: "should not be passed when podInfoOnMount=nil",
podInfoOnMount: nil, podInfoOnMount: nil,
deployClusterRegistrar: true, deployClusterRegistrar: true,
expectPodInfo: false, expectPodInfo: false,
expectEphemeral: false,
}, },
{ {
name: "should be passed when podInfoOnMount=true", name: "should be passed when podInfoOnMount=true",
podInfoOnMount: &podInfoTrue, podInfoOnMount: &podInfoTrue,
deployClusterRegistrar: true, deployClusterRegistrar: true,
expectPodInfo: true, expectPodInfo: true,
expectEphemeral: false,
},
{
// TODO(pohly): remove the feature tag when moving to beta
name: "contain ephemeral=true when using inline volume [Feature:CSIInlineVolume]",
podInfoOnMount: &podInfoTrue,
deployClusterRegistrar: true,
expectPodInfo: true,
expectEphemeral: true,
}, },
{ {
name: "should not be passed when podInfoOnMount=false", name: "should not be passed when podInfoOnMount=false",
podInfoOnMount: &podInfoFalse, podInfoOnMount: &podInfoFalse,
deployClusterRegistrar: true, deployClusterRegistrar: true,
expectPodInfo: false, expectPodInfo: false,
expectEphemeral: false,
}, },
{ {
name: "should not be passed when CSIDriver does not exist", name: "should not be passed when CSIDriver does not exist",
deployClusterRegistrar: false, deployClusterRegistrar: false,
expectPodInfo: false, expectPodInfo: false,
expectEphemeral: false,
}, },
} }
for _, t := range tests { for _, t := range tests {
@ -332,17 +353,27 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
defer cleanup() defer cleanup()
_, _, pod := createPod() _, _, pod := createPod(test.expectEphemeral)
if pod == nil { if pod == nil {
return return
} }
err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace) err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
framework.ExpectNoError(err, "Failed to start pod: %v", err) framework.ExpectNoError(err, "Failed to start pod: %v", err)
ginkgo.By("Checking CSI driver logs")
// If we expect an ephemeral volume, the feature has to be enabled.
// Otherwise we need to check whether we expect pod info, because the content
// of that depends on whether the feature is enabled or not.
csiInlineVolumesEnabled := test.expectEphemeral
if test.expectPodInfo {
ginkgo.By("checking for CSIInlineVolumes feature")
csiInlineVolumesEnabled, err = testsuites.CSIInlineVolumesEnabled(m.cs, f.Namespace.Name)
framework.ExpectNoError(err, "failed to test for CSIInlineVolumes")
}
ginkgo.By("Checking CSI driver logs")
// The driver is deployed as a statefulset with stable pod names // The driver is deployed as a statefulset with stable pod names
driverPodName := "csi-mockplugin-0" driverPodName := "csi-mockplugin-0"
err = checkPodInfo(m.cs, f.Namespace.Name, driverPodName, "mock", pod, test.expectPodInfo) err = checkPodInfo(m.cs, f.Namespace.Name, driverPodName, "mock", pod, test.expectPodInfo, test.expectEphemeral, csiInlineVolumesEnabled)
framework.ExpectNoError(err) framework.ExpectNoError(err)
}) })
} }
@ -364,19 +395,19 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
gomega.Expect(csiNodeAttachLimit).To(gomega.BeNumerically("==", 2)) gomega.Expect(csiNodeAttachLimit).To(gomega.BeNumerically("==", 2))
_, _, pod1 := createPod() _, _, pod1 := createPod(false)
gomega.Expect(pod1).NotTo(gomega.BeNil(), "while creating first pod") gomega.Expect(pod1).NotTo(gomega.BeNil(), "while creating first pod")
err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod1.Name, pod1.Namespace) err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod1.Name, pod1.Namespace)
framework.ExpectNoError(err, "Failed to start pod1: %v", err) framework.ExpectNoError(err, "Failed to start pod1: %v", err)
_, _, pod2 := createPod() _, _, pod2 := createPod(false)
gomega.Expect(pod2).NotTo(gomega.BeNil(), "while creating second pod") gomega.Expect(pod2).NotTo(gomega.BeNil(), "while creating second pod")
err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod2.Name, pod2.Namespace) err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod2.Name, pod2.Namespace)
framework.ExpectNoError(err, "Failed to start pod2: %v", err) framework.ExpectNoError(err, "Failed to start pod2: %v", err)
_, _, pod3 := createPod() _, _, pod3 := createPod(false)
gomega.Expect(pod3).NotTo(gomega.BeNil(), "while creating third pod") gomega.Expect(pod3).NotTo(gomega.BeNil(), "while creating third pod")
err = waitForMaxVolumeCondition(pod3, m.cs) err = waitForMaxVolumeCondition(pod3, m.cs)
framework.ExpectNoError(err, "while waiting for max volume condition on pod : %+v", pod3) framework.ExpectNoError(err, "while waiting for max volume condition on pod : %+v", pod3)
@ -429,7 +460,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
defer cleanup() defer cleanup()
ns := f.Namespace.Name ns := f.Namespace.Name
sc, pvc, pod := createPod() sc, pvc, pod := createPod(false)
gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing") gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing")
gomega.Expect(*sc.AllowVolumeExpansion).To(gomega.BeTrue(), "failed creating sc with allowed expansion") gomega.Expect(*sc.AllowVolumeExpansion).To(gomega.BeTrue(), "failed creating sc with allowed expansion")
@ -520,7 +551,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
defer cleanup() defer cleanup()
sc, pvc, pod := createPod() sc, pvc, pod := createPod(false)
gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing") gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing")
gomega.Expect(*sc.AllowVolumeExpansion).To(gomega.BeTrue(), "failed creating sc with allowed expansion") gomega.Expect(*sc.AllowVolumeExpansion).To(gomega.BeTrue(), "failed creating sc with allowed expansion")
@ -611,51 +642,41 @@ func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node f
_, err = framework.WaitForPVClaimBoundPhase(cs, pvcClaims, framework.ClaimProvisionTimeout) _, err = framework.WaitForPVClaimBoundPhase(cs, pvcClaims, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err) framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
pod := &v1.Pod{ pod, err := startPausePodWithClaim(cs, claim, node, ns)
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-volume-tester-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "volume-tester",
Image: imageutils.GetE2EImage(imageutils.Pause),
VolumeMounts: []v1.VolumeMount{
{
Name: "my-volume",
MountPath: "/mnt/test",
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: "my-volume",
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: claim.Name,
ReadOnly: false,
},
},
},
},
},
}
if node.Name != "" {
pod.Spec.NodeName = node.Name
}
if len(node.Selector) != 0 {
pod.Spec.NodeSelector = node.Selector
}
pod, err = cs.CoreV1().Pods(ns).Create(pod)
framework.ExpectNoError(err, "Failed to create pod: %v", err) framework.ExpectNoError(err, "Failed to create pod: %v", err)
return class, claim, pod return class, claim, pod
} }
// startPausePodInline creates a pause pod whose single volume is an
// ephemeral inline CSI volume provided by the driver under test.
// Creation failures abort the test via framework.ExpectNoError, so the
// returned pod is always non-nil on return.
func startPausePodInline(cs clientset.Interface, t testsuites.StorageClassTest, node framework.NodeSelection, ns string) *v1.Pod {
	source := &v1.CSIVolumeSource{
		Driver: t.Provisioner,
	}
	pod, err := startPausePodWithInlineVolume(cs, source, node, ns)
	framework.ExpectNoError(err, "Failed to create pod: %v", err)
	return pod
}
func startPausePodWithClaim(cs clientset.Interface, pvc *v1.PersistentVolumeClaim, node framework.NodeSelection, ns string) (*v1.Pod, error) { func startPausePodWithClaim(cs clientset.Interface, pvc *v1.PersistentVolumeClaim, node framework.NodeSelection, ns string) (*v1.Pod, error) {
return startPausePodWithVolumeSource(cs,
v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: pvc.Name,
ReadOnly: false,
},
},
node, ns)
}
// startPausePodWithInlineVolume starts a pause pod that mounts the given
// inline CSI volume source as its only volume. It returns the created pod
// or the API error from pod creation.
func startPausePodWithInlineVolume(cs clientset.Interface, inlineVolume *v1.CSIVolumeSource, node framework.NodeSelection, ns string) (*v1.Pod, error) {
	volSrc := v1.VolumeSource{CSI: inlineVolume}
	return startPausePodWithVolumeSource(cs, volSrc, node, ns)
}
func startPausePodWithVolumeSource(cs clientset.Interface, volumeSource v1.VolumeSource, node framework.NodeSelection, ns string) (*v1.Pod, error) {
pod := &v1.Pod{ pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-volume-tester-", GenerateName: "pvc-volume-tester-",
@ -676,13 +697,8 @@ func startPausePodWithClaim(cs clientset.Interface, pvc *v1.PersistentVolumeClai
RestartPolicy: v1.RestartPolicyNever, RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{ Volumes: []v1.Volume{
{ {
Name: "my-volume", Name: "my-volume",
VolumeSource: v1.VolumeSource{ VolumeSource: volumeSource,
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: pvc.Name,
ReadOnly: false,
},
},
}, },
}, },
}, },
@ -699,13 +715,18 @@ func startPausePodWithClaim(cs clientset.Interface, pvc *v1.PersistentVolumeClai
} }
// checkPodInfo tests that NodePublish was called with expected volume_context // checkPodInfo tests that NodePublish was called with expected volume_context
func checkPodInfo(cs clientset.Interface, namespace, driverPodName, driverContainerName string, pod *v1.Pod, expectPodInfo bool) error { func checkPodInfo(cs clientset.Interface, namespace, driverPodName, driverContainerName string, pod *v1.Pod, expectPodInfo, ephemeralVolume, csiInlineVolumesEnabled bool) error {
expectedAttributes := map[string]string{ expectedAttributes := map[string]string{
"csi.storage.k8s.io/pod.name": pod.Name, "csi.storage.k8s.io/pod.name": pod.Name,
"csi.storage.k8s.io/pod.namespace": namespace, "csi.storage.k8s.io/pod.namespace": namespace,
"csi.storage.k8s.io/pod.uid": string(pod.UID), "csi.storage.k8s.io/pod.uid": string(pod.UID),
"csi.storage.k8s.io/serviceAccount.name": "default", "csi.storage.k8s.io/serviceAccount.name": "default",
} }
if csiInlineVolumesEnabled {
// This is only passed in 1.15 when the CSIInlineVolume feature gate is set.
expectedAttributes["csi.storage.k8s.io/ephemeral"] = strconv.FormatBool(ephemeralVolume)
}
// Load logs of driver pod // Load logs of driver pod
log, err := e2epod.GetPodLogs(cs, namespace, driverPodName, driverContainerName) log, err := e2epod.GetPodLogs(cs, namespace, driverPodName, driverContainerName)
if err != nil { if err != nil {

View File

@ -23,6 +23,7 @@ import (
"github.com/onsi/gomega" "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -228,3 +229,57 @@ func StartInPodWithInlineVolume(c clientset.Interface, ns, podName, command stri
framework.ExpectNoError(err, "failed to create pod") framework.ExpectNoError(err, "failed to create pod")
return pod return pod
} }
// CSIInlineVolumesEnabled checks whether the running cluster has the CSIInlineVolumes feature gate enabled.
// It does that by trying to create a pod that uses that feature.
func CSIInlineVolumesEnabled(c clientset.Interface, ns string) (bool, error) {
	// The probe pod is never meant to actually run: the image cannot be
	// pulled and the driver does not exist. Only API server validation of
	// the inline CSI volume source matters here.
	probe := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "csi-inline-volume-",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "csi-volume-tester",
					Image: "no-such-registry/no-such-image",
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      "my-volume",
							MountPath: "/mnt/test",
						},
					},
				},
			},
			RestartPolicy: v1.RestartPolicyNever,
			Volumes: []v1.Volume{
				{
					Name: "my-volume",
					VolumeSource: v1.VolumeSource{
						CSI: &v1.CSIVolumeSource{
							Driver: "no-such-driver.example.com",
						},
					},
				},
			},
		},
	}

	created, err := c.CoreV1().Pods(ns).Create(probe)
	if err == nil {
		// Pod was created, feature supported.
		StopPod(c, created)
		return true, nil
	}
	if errors.IsInvalid(err) {
		// "Invalid" because it uses a feature that isn't supported.
		return false, nil
	}
	// Unexpected error.
	return false, err
}

View File

@ -48,7 +48,9 @@ spec:
- mountPath: /registration - mountPath: /registration
name: registration-dir name: registration-dir
- name: mock - name: mock
image: quay.io/k8scsi/mock-driver:v1.1.1 image: quay.io/k8scsi/mock-driver:v2.1.0
args:
- "--permissive-target-path" # because of https://github.com/kubernetes/kubernetes/issues/75535
env: env:
- name: CSI_ENDPOINT - name: CSI_ENDPOINT
value: /csi/csi.sock value: /csi/csi.sock