Merge pull request #79983 from pohly/persistent-and-ephemeral-csi-volumes

persistent and ephemeral csi volumes
Kubernetes Prow Robot 2019-07-25 16:01:54 -07:00 committed by GitHub
commit a3750501b0
13 changed files with 561 additions and 177 deletions

View File

@ -23,6 +23,7 @@ import (
"os"
"path"
"path/filepath"
"strconv"
"k8s.io/klog"
@ -44,14 +45,14 @@ var (
driverName,
nodeName,
attachmentID,
driverMode string
csiVolumeMode string
}{
"specVolID",
"volumeHandle",
"driverName",
"nodeName",
"attachmentID",
"driverMode",
"csiVolumeMode",
}
)
@ -60,7 +61,7 @@ type csiMountMgr struct {
k8s kubernetes.Interface
plugin *csiPlugin
driverName csiDriverName
driverMode driverMode
csiVolumeMode csiVolumeMode
volumeID string
specVolumeID string
readOnly bool
@ -146,8 +147,8 @@ func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error
if !utilfeature.DefaultFeatureGate.Enabled(features.CSIInlineVolume) {
return fmt.Errorf("CSIInlineVolume feature required")
}
if c.driverMode != ephemeralDriverMode {
return fmt.Errorf("unexpected driver mode: %s", c.driverMode)
if c.csiVolumeMode != ephemeralVolumeMode {
return fmt.Errorf("unexpected volume mode: %s", c.csiVolumeMode)
}
if volSrc.FSType != nil {
fsType = *volSrc.FSType
@ -161,8 +162,8 @@ func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error
secretRef = &api.SecretReference{Name: secretName, Namespace: ns}
}
case pvSrc != nil:
if c.driverMode != persistentDriverMode {
return fmt.Errorf("unexpected driver mode: %s", c.driverMode)
if c.csiVolumeMode != persistentVolumeMode {
return fmt.Errorf("unexpected driver mode: %s", c.csiVolumeMode)
}
fsType = pvSrc.FSType
@ -324,6 +325,10 @@ func (c *csiMountMgr) podAttributes() (map[string]string, error) {
"csi.storage.k8s.io/pod.uid": string(c.pod.UID),
"csi.storage.k8s.io/serviceAccount.name": c.pod.Spec.ServiceAccountName,
}
if utilfeature.DefaultFeatureGate.Enabled(features.CSIInlineVolume) {
attrs["csi.storage.k8s.io/ephemeral"] = strconv.FormatBool(c.csiVolumeMode == ephemeralVolumeMode)
}
klog.V(4).Infof(log("CSIDriver %q requires pod information", c.driverName))
return attrs, nil
}
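The new "csi.storage.k8s.io/ephemeral" entry lets a driver that serves both persistent and inline volumes tell the two apart on each NodePublishVolume call. Below is a minimal sketch of how a driver might consume it, assuming only that the driver already has the volume context as a plain map; the helper name is hypothetical, and the key name and the default-to-persistent fallback follow the code above.

package main

import (
	"fmt"
	"strconv"
)

// isEphemeralRequest reports whether a publish request is for an inline
// ephemeral volume, based on the "csi.storage.k8s.io/ephemeral" entry that
// the kubelet adds to the volume context when pod info on mount and the
// CSIInlineVolume feature are enabled. A missing or unparsable value falls
// back to false, i.e. the volume is treated as persistent.
func isEphemeralRequest(volumeContext map[string]string) bool {
	v, ok := volumeContext["csi.storage.k8s.io/ephemeral"]
	if !ok {
		return false
	}
	ephemeral, err := strconv.ParseBool(v)
	if err != nil {
		return false
	}
	return ephemeral
}

func main() {
	ctx := map[string]string{"csi.storage.k8s.io/ephemeral": "true"}
	fmt.Println(isEphemeralRequest(ctx)) // true
}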

View File

@ -99,6 +99,7 @@ func MounterSetUpTests(t *testing.T, podInfoEnabled bool) {
driver string
volumeContext map[string]string
expectedVolumeContext map[string]string
csiInlineVolume bool
}{
{
name: "no pod info",
@ -136,6 +137,13 @@ func MounterSetUpTests(t *testing.T, podInfoEnabled bool) {
volumeContext: map[string]string{"foo": "bar"},
expectedVolumeContext: map[string]string{"foo": "bar", "csi.storage.k8s.io/pod.uid": "test-pod", "csi.storage.k8s.io/serviceAccount.name": "test-service-account", "csi.storage.k8s.io/pod.name": "test-pod", "csi.storage.k8s.io/pod.namespace": "test-ns"},
},
{
name: "CSIInlineVolume pod info",
driver: "info",
volumeContext: nil,
expectedVolumeContext: map[string]string{"csi.storage.k8s.io/pod.uid": "test-pod", "csi.storage.k8s.io/serviceAccount.name": "test-service-account", "csi.storage.k8s.io/pod.name": "test-pod", "csi.storage.k8s.io/pod.namespace": "test-ns", "csi.storage.k8s.io/ephemeral": "false"},
csiInlineVolume: true,
},
}
noPodMountInfo := false
@ -143,6 +151,9 @@ func MounterSetUpTests(t *testing.T, podInfoEnabled bool) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
klog.Infof("Starting test %s", test.name)
if test.csiInlineVolume {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIInlineVolume, true)()
}
fakeClient := fakeclient.NewSimpleClientset(
getTestCSIDriver("no-info", &noPodMountInfo, nil),
getTestCSIDriver("info", &currentPodInfoMount, nil),
@ -267,7 +278,7 @@ func TestMounterSetUpSimple(t *testing.T) {
testCases := []struct {
name string
podUID types.UID
mode driverMode
mode csiVolumeMode
fsType string
options []string
spec func(string, []string) *volume.Spec
@ -276,7 +287,7 @@ func TestMounterSetUpSimple(t *testing.T) {
{
name: "setup with vol source",
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
mode: ephemeralDriverMode,
mode: ephemeralVolumeMode,
fsType: "ext4",
shouldFail: true,
spec: func(fsType string, options []string) *volume.Spec {
@ -288,7 +299,7 @@ func TestMounterSetUpSimple(t *testing.T) {
{
name: "setup with persistent source",
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
mode: persistentDriverMode,
mode: persistentVolumeMode,
fsType: "zfs",
spec: func(fsType string, options []string) *volume.Spec {
pvSrc := makeTestPV("pv1", 20, testDriver, "vol1")
@ -300,7 +311,7 @@ func TestMounterSetUpSimple(t *testing.T) {
{
name: "setup with persistent source without unspecified fstype and options",
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
mode: persistentDriverMode,
mode: persistentVolumeMode,
spec: func(fsType string, options []string) *volume.Spec {
return volume.NewSpecFromPersistentVolume(makeTestPV("pv1", 20, testDriver, "vol2"), false)
},
@ -334,8 +345,8 @@ func TestMounterSetUpSimple(t *testing.T) {
csiMounter := mounter.(*csiMountMgr)
csiMounter.csiClient = setupClient(t, true)
if csiMounter.driverMode != persistentDriverMode {
t.Fatal("unexpected driver mode: ", csiMounter.driverMode)
if csiMounter.csiVolumeMode != persistentVolumeMode {
t.Fatal("unexpected volume mode: ", csiMounter.csiVolumeMode)
}
attachID := getAttachmentName(csiMounter.volumeID, string(csiMounter.driverName), string(plug.host.GetNodeName()))
@ -393,7 +404,7 @@ func TestMounterSetUpWithInline(t *testing.T) {
testCases := []struct {
name string
podUID types.UID
mode driverMode
mode csiVolumeMode
fsType string
options []string
spec func(string, []string) *volume.Spec
@ -402,7 +413,7 @@ func TestMounterSetUpWithInline(t *testing.T) {
{
name: "setup with vol source",
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
mode: ephemeralDriverMode,
mode: ephemeralVolumeMode,
fsType: "ext4",
spec: func(fsType string, options []string) *volume.Spec {
volSrc := makeTestVol("pv1", testDriver)
@ -413,7 +424,7 @@ func TestMounterSetUpWithInline(t *testing.T) {
{
name: "setup with persistent source",
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
mode: persistentDriverMode,
mode: persistentVolumeMode,
fsType: "zfs",
spec: func(fsType string, options []string) *volume.Spec {
pvSrc := makeTestPV("pv1", 20, testDriver, "vol1")
@ -425,7 +436,7 @@ func TestMounterSetUpWithInline(t *testing.T) {
{
name: "setup with persistent source without unspecified fstype and options",
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
mode: persistentDriverMode,
mode: persistentVolumeMode,
spec: func(fsType string, options []string) *volume.Spec {
return volume.NewSpecFromPersistentVolume(makeTestPV("pv1", 20, testDriver, "vol2"), false)
},
@ -459,15 +470,15 @@ func TestMounterSetUpWithInline(t *testing.T) {
csiMounter := mounter.(*csiMountMgr)
csiMounter.csiClient = setupClient(t, true)
if csiMounter.driverMode != tc.mode {
t.Fatal("unexpected driver mode: ", csiMounter.driverMode)
if csiMounter.csiVolumeMode != tc.mode {
t.Fatal("unexpected volume mode: ", csiMounter.csiVolumeMode)
}
if csiMounter.driverMode == ephemeralDriverMode && csiMounter.volumeID != makeVolumeHandle(string(tc.podUID), csiMounter.specVolumeID) {
if csiMounter.csiVolumeMode == ephemeralVolumeMode && csiMounter.volumeID != makeVolumeHandle(string(tc.podUID), csiMounter.specVolumeID) {
t.Fatal("unexpected generated volumeHandle:", csiMounter.volumeID)
}
if csiMounter.driverMode == persistentDriverMode {
if csiMounter.csiVolumeMode == persistentVolumeMode {
attachID := getAttachmentName(csiMounter.volumeID, string(csiMounter.driverName), string(plug.host.GetNodeName()))
attachment := makeTestAttachment(attachID, "test-node", csiMounter.spec.Name())
_, err = csiMounter.k8s.StorageV1().VolumeAttachments().Create(attachment)
@ -492,10 +503,10 @@ func TestMounterSetUpWithInline(t *testing.T) {
}
// validate stagingTargetPath
if tc.mode == ephemeralDriverMode && vol.DeviceMountPath != "" {
if tc.mode == ephemeralVolumeMode && vol.DeviceMountPath != "" {
t.Errorf("unexpected devicePathTarget sent to driver: %s", vol.DeviceMountPath)
}
if tc.mode == persistentDriverMode {
if tc.mode == persistentVolumeMode {
devicePath, err := makeDeviceMountPath(plug, csiMounter.spec)
if err != nil {
t.Fatal(err)

View File

@ -77,6 +77,12 @@ type driverMode string
const persistentDriverMode driverMode = "persistent"
const ephemeralDriverMode driverMode = "ephemeral"
const combinedDriverMode driverMode = "persistent+ephemeral"
type csiVolumeMode string
const persistentVolumeMode csiVolumeMode = "persistent"
const ephemeralVolumeMode csiVolumeMode = "ephemeral"
// ProbeVolumePlugins returns implemented plugins
func ProbeVolumePlugins() []volume.VolumePlugin {
@ -381,11 +387,16 @@ func (p *csiPlugin) NewMounter(
return nil, fmt.Errorf("volume source not found in volume.Spec")
}
driverMode, err := p.getDriverMode(spec)
csiVolumeMode, err := p.getCSIVolumeMode(spec)
if err != nil {
return nil, err
}
// TODO(pohly): check CSIDriver.Spec.Mode to ensure that the CSI driver
// supports the current csiVolumeMode.
// In alpha it is assumed that drivers are used correctly without
// the additional sanity check.
k8s := p.host.GetKubeClient()
if k8s == nil {
klog.Error(log("failed to get a kubernetes client"))
@ -399,17 +410,17 @@ func (p *csiPlugin) NewMounter(
}
mounter := &csiMountMgr{
plugin: p,
k8s: k8s,
spec: spec,
pod: pod,
podUID: pod.UID,
driverName: csiDriverName(driverName),
driverMode: driverMode,
volumeID: volumeHandle,
specVolumeID: spec.Name(),
readOnly: readOnly,
kubeVolHost: kvh,
plugin: p,
k8s: k8s,
spec: spec,
pod: pod,
podUID: pod.UID,
driverName: csiDriverName(driverName),
csiVolumeMode: csiVolumeMode,
volumeID: volumeHandle,
specVolumeID: spec.Name(),
readOnly: readOnly,
kubeVolHost: kvh,
}
mounter.csiClientGetter.driverName = csiDriverName(driverName)
@ -428,11 +439,11 @@ func (p *csiPlugin) NewMounter(
// persist volume info data for teardown
node := string(p.host.GetNodeName())
volData := map[string]string{
volDataKey.specVolID: spec.Name(),
volDataKey.volHandle: volumeHandle,
volDataKey.driverName: driverName,
volDataKey.nodeName: node,
volDataKey.driverMode: string(driverMode),
volDataKey.specVolID: spec.Name(),
volDataKey.volHandle: volumeHandle,
volDataKey.driverName: driverName,
volDataKey.nodeName: node,
volDataKey.csiVolumeMode: string(csiVolumeMode),
}
attachID := getAttachmentName(volumeHandle, driverName, node)
@ -496,16 +507,13 @@ func (p *csiPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.S
var spec *volume.Spec
inlineEnabled := utilfeature.DefaultFeatureGate.Enabled(features.CSIInlineVolume)
// If inlineEnabled is true and mode is ephemeralDriverMode,
// If inlineEnabled is true and mode is ephemeralVolumeMode,
// use constructVolSourceSpec to construct volume source spec.
// If inlineEnabled is false or mode is persistentDriverMode,
// If inlineEnabled is false or mode is persistentVolumeMode,
// use constructPVSourceSpec to construct the PV source spec.
if inlineEnabled {
if driverMode(volData[volDataKey.driverMode]) == ephemeralDriverMode {
spec = p.constructVolSourceSpec(volData[volDataKey.specVolID], volData[volDataKey.driverName])
return spec, nil
}
if inlineEnabled && csiVolumeMode(volData[volDataKey.csiVolumeMode]) == ephemeralVolumeMode {
spec = p.constructVolSourceSpec(volData[volDataKey.specVolID], volData[volDataKey.driverName])
return spec, nil
}
spec = p.constructPVSourceSpec(volData[volDataKey.specVolID], volData[volDataKey.driverName], volData[volDataKey.volHandle])
@ -576,14 +584,17 @@ func (p *csiPlugin) NewDetacher() (volume.Detacher, error) {
}
func (p *csiPlugin) CanAttach(spec *volume.Spec) (bool, error) {
driverMode, err := p.getDriverMode(spec)
if err != nil {
return false, err
}
inlineEnabled := utilfeature.DefaultFeatureGate.Enabled(features.CSIInlineVolume)
if inlineEnabled {
csiVolumeMode, err := p.getCSIVolumeMode(spec)
if err != nil {
return false, err
}
if driverMode == ephemeralDriverMode {
klog.V(5).Info(log("plugin.CanAttach = false, ephemeral mode detected for spec %v", spec.Name()))
return false, nil
if csiVolumeMode == ephemeralVolumeMode {
klog.V(5).Info(log("plugin.CanAttach = false, ephemeral mode detected for spec %v", spec.Name()))
return false, nil
}
}
pvSrc, err := getCSISourceFromSpec(spec)
@ -603,16 +614,23 @@ func (p *csiPlugin) CanAttach(spec *volume.Spec) (bool, error) {
// CanDeviceMount returns true if the spec supports device mount
func (p *csiPlugin) CanDeviceMount(spec *volume.Spec) (bool, error) {
driverMode, err := p.getDriverMode(spec)
inlineEnabled := utilfeature.DefaultFeatureGate.Enabled(features.CSIInlineVolume)
if !inlineEnabled {
// No need to check anything, we assume it is a persistent volume.
return true, nil
}
csiVolumeMode, err := p.getCSIVolumeMode(spec)
if err != nil {
return false, err
}
if driverMode == ephemeralDriverMode {
if csiVolumeMode == ephemeralVolumeMode {
klog.V(5).Info(log("plugin.CanDeviceMount skipped ephemeral mode detected for spec %v", spec.Name()))
return false, nil
}
// Persistent volumes support device mount.
return true, nil
}
@ -783,13 +801,11 @@ func (p *csiPlugin) skipAttach(driver string) (bool, error) {
return false, nil
}
// getDriverMode returns the driver mode for the specified spec: {persistent|ephemeral}.
// getCSIVolumeMode returns the mode for the specified spec: {persistent|ephemeral}.
// 1) If the mode cannot be determined, it defaults to "persistent".
// 2) If the mode cannot be resolved to either {persistent | ephemeral}, an error is returned.
// See https://github.com/kubernetes/enhancements/blob/master/keps/sig-storage/20190122-csi-inline-volumes.md
func (p *csiPlugin) getDriverMode(spec *volume.Spec) (driverMode, error) {
// TODO (vladimirvivien) ultimately, mode will be retrieved from CSIDriver.Spec.Mode.
// However, in alpha version, mode is determined by the volume source:
func (p *csiPlugin) getCSIVolumeMode(spec *volume.Spec) (csiVolumeMode, error) {
// 1) if volume.Spec.Volume.CSI != nil -> mode is ephemeral
// 2) if volume.Spec.PersistentVolume.Spec.CSI != nil -> persistent
volSrc, _, err := getSourceFromSpec(spec)
@ -798,9 +814,9 @@ func (p *csiPlugin) getDriverMode(spec *volume.Spec) (driverMode, error) {
}
if volSrc != nil && utilfeature.DefaultFeatureGate.Enabled(features.CSIInlineVolume) {
return ephemeralDriverMode, nil
return ephemeralVolumeMode, nil
}
return persistentDriverMode, nil
return persistentVolumeMode, nil
}
func (p *csiPlugin) getPublishContext(client clientset.Interface, handle, driver, nodeName string) (map[string]string, error) {
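For quick reference, here is a standalone sketch of the decision rule that getCSIVolumeMode implements, assuming the caller already knows whether the spec carries a pod-level (inline) CSI volume source and whether the CSIInlineVolume gate is enabled; the booleans and the function name stand in for the spec inspection and feature-gate lookup and are not part of this change.

package main

import "fmt"

type csiVolumeMode string

const (
	persistentVolumeMode csiVolumeMode = "persistent" // mirrors csi_plugin.go
	ephemeralVolumeMode  csiVolumeMode = "ephemeral"
)

// volumeModeFor returns ephemeral only for an inline volume source while the
// feature gate is on; in every other case the volume is treated as persistent.
func volumeModeFor(hasInlineVolumeSource, inlineFeatureEnabled bool) csiVolumeMode {
	if hasInlineVolumeSource && inlineFeatureEnabled {
		return ephemeralVolumeMode
	}
	return persistentVolumeMode
}

func main() {
	fmt.Println(volumeModeFor(true, true))  // ephemeral
	fmt.Println(volumeModeFor(true, false)) // persistent: gate disabled
	fmt.Println(volumeModeFor(false, true)) // persistent
}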

View File

@ -520,27 +520,27 @@ func TestPluginNewMounter(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
tests := []struct {
name string
spec *volume.Spec
podUID types.UID
namespace string
driverMode driverMode
shouldFail bool
name string
spec *volume.Spec
podUID types.UID
namespace string
csiVolumeMode csiVolumeMode
shouldFail bool
}{
{
name: "mounter from persistent volume source",
spec: volume.NewSpecFromPersistentVolume(makeTestPV("test-pv1", 20, testDriver, testVol), true),
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
namespace: "test-ns1",
driverMode: persistentDriverMode,
name: "mounter from persistent volume source",
spec: volume.NewSpecFromPersistentVolume(makeTestPV("test-pv1", 20, testDriver, testVol), true),
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
namespace: "test-ns1",
csiVolumeMode: persistentVolumeMode,
},
{
name: "mounter from volume source",
spec: volume.NewSpecFromVolume(makeTestVol("test-vol1", testDriver)),
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
namespace: "test-ns2",
driverMode: ephemeralDriverMode,
shouldFail: true, // csi inline not enabled
name: "mounter from volume source",
spec: volume.NewSpecFromVolume(makeTestVol("test-vol1", testDriver)),
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
namespace: "test-ns2",
csiVolumeMode: ephemeralVolumeMode,
shouldFail: true, // csi inline not enabled
},
{
name: "mounter from no spec provided",
@ -590,8 +590,8 @@ func TestPluginNewMounter(t *testing.T) {
if csiClient == nil {
t.Error("mounter csiClient is nil")
}
if csiMounter.driverMode != test.driverMode {
t.Error("unexpected driver mode:", csiMounter.driverMode)
if csiMounter.csiVolumeMode != test.csiVolumeMode {
t.Error("unexpected driver mode:", csiMounter.csiVolumeMode)
}
// ensure data file is created
@ -620,8 +620,8 @@ func TestPluginNewMounter(t *testing.T) {
if data[volDataKey.nodeName] != string(csiMounter.plugin.host.GetNodeName()) {
t.Error("volume data file unexpected nodeName:", data[volDataKey.nodeName])
}
if data[volDataKey.driverMode] != string(test.driverMode) {
t.Error("volume data file unexpected driverMode:", data[volDataKey.driverMode])
if data[volDataKey.csiVolumeMode] != string(test.csiVolumeMode) {
t.Error("volume data file unexpected csiVolumeMode:", data[volDataKey.csiVolumeMode])
}
})
}
@ -631,12 +631,12 @@ func TestPluginNewMounterWithInline(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIInlineVolume, true)()
tests := []struct {
name string
spec *volume.Spec
podUID types.UID
namespace string
driverMode driverMode
shouldFail bool
name string
spec *volume.Spec
podUID types.UID
namespace string
csiVolumeMode csiVolumeMode
shouldFail bool
}{
{
name: "mounter with missing spec",
@ -652,18 +652,18 @@ func TestPluginNewMounterWithInline(t *testing.T) {
shouldFail: true,
},
{
name: "mounter with persistent volume source",
spec: volume.NewSpecFromPersistentVolume(makeTestPV("test-pv1", 20, testDriver, testVol), true),
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
namespace: "test-ns1",
driverMode: persistentDriverMode,
name: "mounter with persistent volume source",
spec: volume.NewSpecFromPersistentVolume(makeTestPV("test-pv1", 20, testDriver, testVol), true),
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
namespace: "test-ns1",
csiVolumeMode: persistentVolumeMode,
},
{
name: "mounter with volume source",
spec: volume.NewSpecFromVolume(makeTestVol("test-vol1", testDriver)),
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
namespace: "test-ns2",
driverMode: ephemeralDriverMode,
name: "mounter with volume source",
spec: volume.NewSpecFromVolume(makeTestVol("test-vol1", testDriver)),
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
namespace: "test-ns2",
csiVolumeMode: ephemeralVolumeMode,
},
}
@ -709,8 +709,8 @@ func TestPluginNewMounterWithInline(t *testing.T) {
if csiClient == nil {
t.Error("mounter csiClient is nil")
}
if csiMounter.driverMode != test.driverMode {
t.Error("unexpected driver mode:", csiMounter.driverMode)
if csiMounter.csiVolumeMode != test.csiVolumeMode {
t.Error("unexpected driver mode:", csiMounter.csiVolumeMode)
}
// ensure data file is created
@ -739,8 +739,8 @@ func TestPluginNewMounterWithInline(t *testing.T) {
if data[volDataKey.nodeName] != string(csiMounter.plugin.host.GetNodeName()) {
t.Error("volume data file unexpected nodeName:", data[volDataKey.nodeName])
}
if data[volDataKey.driverMode] != string(csiMounter.driverMode) {
t.Error("volume data file unexpected driverMode:", data[volDataKey.driverMode])
if data[volDataKey.csiVolumeMode] != string(csiMounter.csiVolumeMode) {
t.Error("volume data file unexpected csiVolumeMode:", data[volDataKey.csiVolumeMode])
}
})
}

View File

@ -21,6 +21,7 @@ import (
"encoding/json"
"fmt"
"regexp"
"strconv"
"strings"
"time"
@ -131,7 +132,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
}
}
createPod := func() (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
createPod := func(ephemeral bool) (class *storagev1.StorageClass, claim *v1.PersistentVolumeClaim, pod *v1.Pod) {
ginkgo.By("Creating pod")
var sc *storagev1.StorageClass
if dDriver, ok := m.driver.(testsuites.DynamicPVTestDriver); ok {
@ -162,17 +163,24 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
Selector: m.nodeLabel,
}
}
class, claim, pod := startPausePod(f.ClientSet, scTest, nodeSelection, f.Namespace.Name)
if class != nil {
m.sc[class.Name] = class
if ephemeral {
pod = startPausePodInline(f.ClientSet, scTest, nodeSelection, f.Namespace.Name)
if pod != nil {
m.pods = append(m.pods, pod)
}
} else {
class, claim, pod = startPausePod(f.ClientSet, scTest, nodeSelection, f.Namespace.Name)
if class != nil {
m.sc[class.Name] = class
}
if claim != nil {
m.pvcs = append(m.pvcs, claim)
}
if pod != nil {
m.pods = append(m.pods, pod)
}
}
if claim != nil {
m.pvcs = append(m.pvcs, claim)
}
if pod != nil {
m.pods = append(m.pods, pod)
}
return class, claim, pod
return // result variables set above
}
createPodWithPVC := func(pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
@ -257,7 +265,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
init(testParameters{registerDriver: test.deployClusterRegistrar, disableAttach: test.disableAttach})
defer cleanup()
_, claim, pod := createPod()
_, claim, pod := createPod(false)
if pod == nil {
return
}
@ -297,29 +305,42 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
podInfoOnMount *bool
deployClusterRegistrar bool
expectPodInfo bool
expectEphemeral bool
}{
{
name: "should not be passed when podInfoOnMount=nil",
podInfoOnMount: nil,
deployClusterRegistrar: true,
expectPodInfo: false,
expectEphemeral: false,
},
{
name: "should be passed when podInfoOnMount=true",
podInfoOnMount: &podInfoTrue,
deployClusterRegistrar: true,
expectPodInfo: true,
expectEphemeral: false,
},
{
// TODO(pohly): remove the feature tag when moving to beta
name: "contain ephemeral=true when using inline volume [Feature:CSIInlineVolume]",
podInfoOnMount: &podInfoTrue,
deployClusterRegistrar: true,
expectPodInfo: true,
expectEphemeral: true,
},
{
name: "should not be passed when podInfoOnMount=false",
podInfoOnMount: &podInfoFalse,
deployClusterRegistrar: true,
expectPodInfo: false,
expectEphemeral: false,
},
{
name: "should not be passed when CSIDriver does not exist",
deployClusterRegistrar: false,
expectPodInfo: false,
expectEphemeral: false,
},
}
for _, t := range tests {
@ -332,17 +353,27 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
defer cleanup()
_, _, pod := createPod()
_, _, pod := createPod(test.expectEphemeral)
if pod == nil {
return
}
err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
framework.ExpectNoError(err, "Failed to start pod: %v", err)
ginkgo.By("Checking CSI driver logs")
// If we expect an ephemeral volume, the feature has to be enabled.
// Otherwise we need to check whether we expect pod info, because the content
// of that depends on whether the feature is enabled or not.
csiInlineVolumesEnabled := test.expectEphemeral
if test.expectPodInfo {
ginkgo.By("checking for CSIInlineVolumes feature")
csiInlineVolumesEnabled, err = testsuites.CSIInlineVolumesEnabled(m.cs, f.Namespace.Name)
framework.ExpectNoError(err, "failed to test for CSIInlineVolumes")
}
ginkgo.By("Checking CSI driver logs")
// The driver is deployed as a statefulset with stable pod names
driverPodName := "csi-mockplugin-0"
err = checkPodInfo(m.cs, f.Namespace.Name, driverPodName, "mock", pod, test.expectPodInfo)
err = checkPodInfo(m.cs, f.Namespace.Name, driverPodName, "mock", pod, test.expectPodInfo, test.expectEphemeral, csiInlineVolumesEnabled)
framework.ExpectNoError(err)
})
}
@ -364,19 +395,19 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
gomega.Expect(csiNodeAttachLimit).To(gomega.BeNumerically("==", 2))
_, _, pod1 := createPod()
_, _, pod1 := createPod(false)
gomega.Expect(pod1).NotTo(gomega.BeNil(), "while creating first pod")
err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod1.Name, pod1.Namespace)
framework.ExpectNoError(err, "Failed to start pod1: %v", err)
_, _, pod2 := createPod()
_, _, pod2 := createPod(false)
gomega.Expect(pod2).NotTo(gomega.BeNil(), "while creating second pod")
err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod2.Name, pod2.Namespace)
framework.ExpectNoError(err, "Failed to start pod2: %v", err)
_, _, pod3 := createPod()
_, _, pod3 := createPod(false)
gomega.Expect(pod3).NotTo(gomega.BeNil(), "while creating third pod")
err = waitForMaxVolumeCondition(pod3, m.cs)
framework.ExpectNoError(err, "while waiting for max volume condition on pod : %+v", pod3)
@ -429,7 +460,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
defer cleanup()
ns := f.Namespace.Name
sc, pvc, pod := createPod()
sc, pvc, pod := createPod(false)
gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing")
gomega.Expect(*sc.AllowVolumeExpansion).To(gomega.BeTrue(), "failed creating sc with allowed expansion")
@ -520,7 +551,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
defer cleanup()
sc, pvc, pod := createPod()
sc, pvc, pod := createPod(false)
gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing")
gomega.Expect(*sc.AllowVolumeExpansion).To(gomega.BeTrue(), "failed creating sc with allowed expansion")
@ -614,51 +645,41 @@ func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node f
_, err = framework.WaitForPVClaimBoundPhase(cs, pvcClaims, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-volume-tester-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "volume-tester",
Image: imageutils.GetE2EImage(imageutils.Pause),
VolumeMounts: []v1.VolumeMount{
{
Name: "my-volume",
MountPath: "/mnt/test",
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: "my-volume",
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: claim.Name,
ReadOnly: false,
},
},
},
},
},
}
if node.Name != "" {
pod.Spec.NodeName = node.Name
}
if len(node.Selector) != 0 {
pod.Spec.NodeSelector = node.Selector
}
pod, err = cs.CoreV1().Pods(ns).Create(pod)
pod, err := startPausePodWithClaim(cs, claim, node, ns)
framework.ExpectNoError(err, "Failed to create pod: %v", err)
return class, claim, pod
}
func startPausePodInline(cs clientset.Interface, t testsuites.StorageClassTest, node framework.NodeSelection, ns string) *v1.Pod {
pod, err := startPausePodWithInlineVolume(cs,
&v1.CSIVolumeSource{
Driver: t.Provisioner,
},
node, ns)
framework.ExpectNoError(err, "Failed to create pod: %v", err)
return pod
}
func startPausePodWithClaim(cs clientset.Interface, pvc *v1.PersistentVolumeClaim, node framework.NodeSelection, ns string) (*v1.Pod, error) {
return startPausePodWithVolumeSource(cs,
v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: pvc.Name,
ReadOnly: false,
},
},
node, ns)
}
func startPausePodWithInlineVolume(cs clientset.Interface, inlineVolume *v1.CSIVolumeSource, node framework.NodeSelection, ns string) (*v1.Pod, error) {
return startPausePodWithVolumeSource(cs,
v1.VolumeSource{
CSI: inlineVolume,
},
node, ns)
}
func startPausePodWithVolumeSource(cs clientset.Interface, volumeSource v1.VolumeSource, node framework.NodeSelection, ns string) (*v1.Pod, error) {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-volume-tester-",
@ -679,13 +700,8 @@ func startPausePodWithClaim(cs clientset.Interface, pvc *v1.PersistentVolumeClai
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: "my-volume",
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: pvc.Name,
ReadOnly: false,
},
},
Name: "my-volume",
VolumeSource: volumeSource,
},
},
},
@ -702,13 +718,18 @@ func startPausePodWithClaim(cs clientset.Interface, pvc *v1.PersistentVolumeClai
}
// checkPodInfo tests that NodePublish was called with expected volume_context
func checkPodInfo(cs clientset.Interface, namespace, driverPodName, driverContainerName string, pod *v1.Pod, expectPodInfo bool) error {
func checkPodInfo(cs clientset.Interface, namespace, driverPodName, driverContainerName string, pod *v1.Pod, expectPodInfo, ephemeralVolume, csiInlineVolumesEnabled bool) error {
expectedAttributes := map[string]string{
"csi.storage.k8s.io/pod.name": pod.Name,
"csi.storage.k8s.io/pod.namespace": namespace,
"csi.storage.k8s.io/pod.uid": string(pod.UID),
"csi.storage.k8s.io/serviceAccount.name": "default",
}
if csiInlineVolumesEnabled {
// This is only passed in 1.15 when the CSIInlineVolume feature gate is set.
expectedAttributes["csi.storage.k8s.io/ephemeral"] = strconv.FormatBool(ephemeralVolume)
}
// Load logs of driver pod
log, err := e2epod.GetPodLogs(cs, namespace, driverPodName, driverContainerName)
if err != nil {

View File

@ -40,6 +40,7 @@ import (
// List of testSuites to be executed for each external driver.
var csiTestSuites = []func() testsuites.TestSuite{
testsuites.InitEphemeralTestSuite,
testsuites.InitMultiVolumeTestSuite,
testsuites.InitProvisioningTestSuite,
testsuites.InitSnapshottableTestSuite,
@ -128,6 +129,9 @@ var _ testsuites.DynamicPVTestDriver = &driverDefinition{}
// Same for snapshotting.
var _ testsuites.SnapshottableTestDriver = &driverDefinition{}
// And for ephemeral volumes.
var _ testsuites.EphemeralTestDriver = &driverDefinition{}
// runtime.DecodeInto needs a runtime.Object but doesn't do any
// deserialization of it and therefore none of the methods below need
// an implementation.
@ -144,9 +148,6 @@ type driverDefinition struct {
// the default file system are enabled.
DriverInfo testsuites.DriverInfo
// ShortName is used to create unique names for test cases and test resources.
ShortName string
// StorageClass must be set to enable dynamic provisioning tests.
// The default is to not run those tests.
StorageClass struct {
@ -177,6 +178,15 @@ type driverDefinition struct {
// TODO (?): load from file
}
// InlineVolumeAttributes defines one or more set of attributes for
// use as inline ephemeral volumes. At least one set of attributes
// has to be defined to enable testing of inline ephemeral volumes.
// If a test needs more volumes than defined, some of the defined
// volumes will be used multiple times.
//
// DriverInfo.Name is used as name of the driver in the inline volume.
InlineVolumeAttributes []map[string]string
// ClaimSize defines the desired size of dynamically
// provisioned volumes. Default is "5GiB".
ClaimSize string
@ -209,6 +219,8 @@ func (d *driverDefinition) SkipUnsupportedTest(pattern testpatterns.TestPattern)
if d.StorageClass.FromName || d.StorageClass.FromFile != "" {
supported = true
}
case testpatterns.CSIInlineVolume:
supported = len(d.InlineVolumeAttributes) != 0
}
if !supported {
framework.Skipf("Driver %q does not support volume type %q - skipping", d.DriverInfo.Name, pattern.VolType)
@ -281,6 +293,17 @@ func (d *driverDefinition) GetClaimSize() string {
return d.ClaimSize
}
func (d *driverDefinition) GetVolumeAttributes(config *testsuites.PerTestConfig, volumeNumber int) map[string]string {
if len(d.InlineVolumeAttributes) == 0 {
framework.Skipf("%s does not have any InlineVolumeAttributes defined", d.DriverInfo.Name)
}
return d.InlineVolumeAttributes[volumeNumber%len(d.InlineVolumeAttributes)]
}
func (d *driverDefinition) GetCSIDriverName(config *testsuites.PerTestConfig) string {
return d.DriverInfo.Name
}
func (d *driverDefinition) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
config := &testsuites.PerTestConfig{
Driver: d,

View File

@ -33,7 +33,6 @@ func TestDriverParameter(t *testing.T) {
"", // Default fsType
),
},
ShortName: "foo",
ClaimSize: "5Gi",
}
testcases := []struct {

View File

@ -44,6 +44,8 @@ var (
PreprovisionedPV TestVolType = "PreprovisionedPV"
// DynamicPV represents a volume type for dynamic provisioned Persistent Volume
DynamicPV TestVolType = "DynamicPV"
// CSIInlineVolume represents a volume type that is defined inline and provided by a CSI driver.
CSIInlineVolume TestVolType = "CSIInlineVolume"
)
// TestSnapshotType represents a snapshot type to be tested in a TestSuite

View File

@ -5,6 +5,7 @@ go_library(
srcs = [
"base.go",
"driveroperations.go",
"ephemeral.go",
"multivolume.go",
"provisioning.go",
"snapshottable.go",

View File

@ -133,6 +133,8 @@ func skipUnsupportedTest(driver TestDriver, pattern testpatterns.TestPattern) {
_, isSupported = driver.(PreprovisionedPVTestDriver)
case testpatterns.DynamicPV:
_, isSupported = driver.(DynamicPVTestDriver)
case testpatterns.CSIInlineVolume:
_, isSupported = driver.(EphemeralTestDriver)
default:
isSupported = false
}

View File

@ -0,0 +1,285 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testsuites
import (
"fmt"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
)
type ephemeralTestSuite struct {
tsInfo TestSuiteInfo
}
var _ TestSuite = &ephemeralTestSuite{}
// InitEphemeralTestSuite returns ephemeralTestSuite that implements TestSuite interface
func InitEphemeralTestSuite() TestSuite {
return &ephemeralTestSuite{
tsInfo: TestSuiteInfo{
name: "ephemeral [Feature:CSIInlineVolume]",
testPatterns: []testpatterns.TestPattern{
{
Name: "inline ephemeral CSI volume",
VolType: testpatterns.CSIInlineVolume,
},
},
},
}
}
func (p *ephemeralTestSuite) getTestSuiteInfo() TestSuiteInfo {
return p.tsInfo
}
func (p *ephemeralTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
type local struct {
config *PerTestConfig
testCleanup func()
testCase *EphemeralTest
}
var (
dInfo = driver.GetDriverInfo()
eDriver EphemeralTestDriver
l local
)
ginkgo.BeforeEach(func() {
ok := false
eDriver, ok = driver.(EphemeralTestDriver)
if !ok {
framework.Skipf("Driver %s doesn't support ephemeral inline volumes -- skipping", dInfo.Name)
}
})
// This intentionally comes after checking the preconditions because it
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("ephemeral")
init := func() {
l = local{}
// Now do the more expensive test initialization.
l.config, l.testCleanup = driver.PrepareTest(f)
l.testCase = &EphemeralTest{
Client: l.config.Framework.ClientSet,
Namespace: f.Namespace.Name,
DriverName: eDriver.GetCSIDriverName(l.config),
Node: framework.NodeSelection{Name: l.config.ClientNodeName},
GetVolumeAttributes: func(volumeNumber int) map[string]string {
return eDriver.GetVolumeAttributes(l.config, volumeNumber)
},
}
}
cleanup := func() {
if l.testCleanup != nil {
l.testCleanup()
l.testCleanup = nil
}
}
ginkgo.It("should create inline ephemeral volume", func() {
init()
defer cleanup()
l.testCase.TestEphemeral()
})
}
// EphemeralTest represents parameters to be used by tests for inline volumes.
// Not all parameters are used by all tests.
type EphemeralTest struct {
Client clientset.Interface
Namespace string
DriverName string
Node framework.NodeSelection
// GetVolumeAttributes returns the volume attributes for a
// certain inline ephemeral volume, enumerated starting with
// #0. Some tests might require more than one volume. They can
// all be the same or different, depending on what the driver supports
// and/or wants to test.
GetVolumeAttributes func(volumeNumber int) map[string]string
// RunningPodCheck is invoked while a pod using an inline volume is running.
// It can execute additional checks on the pod and its volume(s). Any data
// returned by it is passed to StoppedPodCheck.
RunningPodCheck func(pod *v1.Pod) interface{}
// StoppedPodCheck is invoked after ensuring that the pod is gone.
// It is passed the data gathered by RunningPodCheck or nil if that
// isn't defined and then can do additional checks on the node,
// like for example verifying that the ephemeral volume was really
// removed. How to do such a check is driver-specific and not
// covered by the generic storage test suite.
StoppedPodCheck func(nodeName string, runningPodData interface{})
}
// TestEphemeral tests pod creation with one ephemeral volume.
func (t EphemeralTest) TestEphemeral() {
client := t.Client
gomega.Expect(client).NotTo(gomega.BeNil(), "EphemeralTest.Client is required")
gomega.Expect(t.GetVolumeAttributes).NotTo(gomega.BeNil(), "EphemeralTest.GetVolumeAttributes is required")
gomega.Expect(t.DriverName).NotTo(gomega.BeEmpty(), "EphemeralTest.DriverName is required")
ginkgo.By(fmt.Sprintf("checking the requested inline volume exists in the pod running on node %+v", t.Node))
command := "mount | grep /mnt/test"
pod := StartInPodWithInlineVolume(client, t.Namespace, "inline-volume-tester", command,
v1.CSIVolumeSource{
Driver: t.DriverName,
VolumeAttributes: t.GetVolumeAttributes(0),
},
t.Node)
defer func() {
// pod might be nil now.
StopPod(client, pod)
}()
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace), "waiting for pod with inline volume")
runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "get pod")
actualNodeName := runningPod.Spec.NodeName
// Run the checker of the running pod.
var runningPodData interface{}
if t.RunningPodCheck != nil {
runningPodData = t.RunningPodCheck(pod)
}
StopPod(client, pod)
pod = nil // Don't stop twice.
if t.StoppedPodCheck != nil {
t.StoppedPodCheck(actualNodeName, runningPodData)
}
}
// StartInPodWithInlineVolume starts a command in a pod with given volume mounted to /mnt/test directory.
// The caller is responsible for checking the pod and deleting it.
func StartInPodWithInlineVolume(c clientset.Interface, ns, podName, command string, csiVolume v1.CSIVolumeSource, node framework.NodeSelection) *v1.Pod {
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: podName + "-",
Labels: map[string]string{
"app": podName,
},
},
Spec: v1.PodSpec{
NodeName: node.Name,
NodeSelector: node.Selector,
Affinity: node.Affinity,
Containers: []v1.Container{
{
Name: "csi-volume-tester",
Image: volume.GetTestImage(framework.BusyBoxImage),
Command: volume.GenerateScriptCmd(command),
VolumeMounts: []v1.VolumeMount{
{
Name: "my-volume",
MountPath: "/mnt/test",
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: "my-volume",
VolumeSource: v1.VolumeSource{
CSI: &csiVolume,
},
},
},
},
}
pod, err := c.CoreV1().Pods(ns).Create(pod)
framework.ExpectNoError(err, "failed to create pod")
return pod
}
// CSIInlineVolumesEnabled checks whether the running cluster has the CSIInlineVolumes feature gate enabled.
// It does that by trying to create a pod that uses that feature.
func CSIInlineVolumesEnabled(c clientset.Interface, ns string) (bool, error) {
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "csi-inline-volume-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "csi-volume-tester",
Image: "no-such-registry/no-such-image",
VolumeMounts: []v1.VolumeMount{
{
Name: "my-volume",
MountPath: "/mnt/test",
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: "my-volume",
VolumeSource: v1.VolumeSource{
CSI: &v1.CSIVolumeSource{
Driver: "no-such-driver.example.com",
},
},
},
},
},
}
pod, err := c.CoreV1().Pods(ns).Create(pod)
switch {
case err == nil:
// Pod was created, feature supported.
StopPod(c, pod)
return true, nil
case errors.IsInvalid(err):
// "Invalid" because it uses a feature that isn't supported.
return false, nil
default:
// Unexpected error.
return false, err
}
}
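The RunningPodCheck and StoppedPodCheck hooks defined earlier in this file are deliberately left to the driver. The sketch below shows how an out-of-tree driver's e2e test might wire up EphemeralTest from inside a ginkgo.It body; the helper name, the hostpath driver name, the attributes, and the node-side verification are illustrative assumptions, not part of this change.

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/storage/testsuites"
)

// runEphemeralCheck is a sketch only; f is the usual *framework.Framework of
// the calling test.
func runEphemeralCheck(f *framework.Framework) {
	test := &testsuites.EphemeralTest{
		Client:     f.ClientSet,
		Namespace:  f.Namespace.Name,
		DriverName: "hostpath.csi.k8s.io", // assumed example driver
		Node:       framework.NodeSelection{},
		GetVolumeAttributes: func(volumeNumber int) map[string]string {
			// Driver-specific; here every volume reuses the same attributes.
			return map[string]string{"foo": "bar"}
		},
		RunningPodCheck: func(pod *v1.Pod) interface{} {
			// Gather data while the pod runs, e.g. the node it landed on.
			return pod.Spec.NodeName
		},
		StoppedPodCheck: func(nodeName string, runningPodData interface{}) {
			// Driver-specific: verify on nodeName that the volume is gone.
		},
	}
	test.TestEphemeral()
}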

View File

@ -101,6 +101,23 @@ type DynamicPVTestDriver interface {
GetClaimSize() string
}
// EphemeralTestDriver represents an interface for a TestDriver that supports ephemeral inline volumes.
type EphemeralTestDriver interface {
TestDriver
// GetVolumeAttributes returns the volume attributes for a
// certain inline ephemeral volume, enumerated starting with
// #0. Some tests might require more than one volume. They can
// all be the same or different, depending on what the driver supports
// and/or wants to test.
GetVolumeAttributes(config *PerTestConfig, volumeNumber int) map[string]string
// GetCSIDriverName returns the name that was used when registering with
// kubelet. Depending on how the driver was deployed, this can be different
// from DriverInfo.Name.
GetCSIDriverName(config *PerTestConfig) string
}
// SnapshottableTestDriver represents an interface for a TestDriver that supports DynamicSnapshot
type SnapshottableTestDriver interface {
TestDriver

View File

@ -48,7 +48,9 @@ spec:
- mountPath: /registration
name: registration-dir
- name: mock
image: quay.io/k8scsi/mock-driver:v1.1.1
image: quay.io/k8scsi/mock-driver:v2.1.0
args:
- "--permissive-target-path" # because of https://github.com/kubernetes/kubernetes/issues/75535
env:
- name: CSI_ENDPOINT
value: /csi/csi.sock