mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-07-23 19:56:01 +00:00
Merge pull request #79983 from pohly/persistent-and-ephemeral-csi-volumes
persistent and ephemeral csi volumes
This commit is contained in:
commit
a3750501b0
@ -23,6 +23,7 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
"k8s.io/klog"
|
"k8s.io/klog"
|
||||||
|
|
||||||
@ -44,14 +45,14 @@ var (
|
|||||||
driverName,
|
driverName,
|
||||||
nodeName,
|
nodeName,
|
||||||
attachmentID,
|
attachmentID,
|
||||||
driverMode string
|
csiVolumeMode string
|
||||||
}{
|
}{
|
||||||
"specVolID",
|
"specVolID",
|
||||||
"volumeHandle",
|
"volumeHandle",
|
||||||
"driverName",
|
"driverName",
|
||||||
"nodeName",
|
"nodeName",
|
||||||
"attachmentID",
|
"attachmentID",
|
||||||
"driverMode",
|
"csiVolumeMode",
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -60,7 +61,7 @@ type csiMountMgr struct {
|
|||||||
k8s kubernetes.Interface
|
k8s kubernetes.Interface
|
||||||
plugin *csiPlugin
|
plugin *csiPlugin
|
||||||
driverName csiDriverName
|
driverName csiDriverName
|
||||||
driverMode driverMode
|
csiVolumeMode csiVolumeMode
|
||||||
volumeID string
|
volumeID string
|
||||||
specVolumeID string
|
specVolumeID string
|
||||||
readOnly bool
|
readOnly bool
|
||||||
@ -146,8 +147,8 @@ func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error
|
|||||||
if !utilfeature.DefaultFeatureGate.Enabled(features.CSIInlineVolume) {
|
if !utilfeature.DefaultFeatureGate.Enabled(features.CSIInlineVolume) {
|
||||||
return fmt.Errorf("CSIInlineVolume feature required")
|
return fmt.Errorf("CSIInlineVolume feature required")
|
||||||
}
|
}
|
||||||
if c.driverMode != ephemeralDriverMode {
|
if c.csiVolumeMode != ephemeralVolumeMode {
|
||||||
return fmt.Errorf("unexpected driver mode: %s", c.driverMode)
|
return fmt.Errorf("unexpected volume mode: %s", c.csiVolumeMode)
|
||||||
}
|
}
|
||||||
if volSrc.FSType != nil {
|
if volSrc.FSType != nil {
|
||||||
fsType = *volSrc.FSType
|
fsType = *volSrc.FSType
|
||||||
@ -161,8 +162,8 @@ func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error
|
|||||||
secretRef = &api.SecretReference{Name: secretName, Namespace: ns}
|
secretRef = &api.SecretReference{Name: secretName, Namespace: ns}
|
||||||
}
|
}
|
||||||
case pvSrc != nil:
|
case pvSrc != nil:
|
||||||
if c.driverMode != persistentDriverMode {
|
if c.csiVolumeMode != persistentVolumeMode {
|
||||||
return fmt.Errorf("unexpected driver mode: %s", c.driverMode)
|
return fmt.Errorf("unexpected driver mode: %s", c.csiVolumeMode)
|
||||||
}
|
}
|
||||||
|
|
||||||
fsType = pvSrc.FSType
|
fsType = pvSrc.FSType
|
||||||
@ -324,6 +325,10 @@ func (c *csiMountMgr) podAttributes() (map[string]string, error) {
|
|||||||
"csi.storage.k8s.io/pod.uid": string(c.pod.UID),
|
"csi.storage.k8s.io/pod.uid": string(c.pod.UID),
|
||||||
"csi.storage.k8s.io/serviceAccount.name": c.pod.Spec.ServiceAccountName,
|
"csi.storage.k8s.io/serviceAccount.name": c.pod.Spec.ServiceAccountName,
|
||||||
}
|
}
|
||||||
|
if utilfeature.DefaultFeatureGate.Enabled(features.CSIInlineVolume) {
|
||||||
|
attrs["csi.storage.k8s.io/ephemeral"] = strconv.FormatBool(c.csiVolumeMode == ephemeralVolumeMode)
|
||||||
|
}
|
||||||
|
|
||||||
klog.V(4).Infof(log("CSIDriver %q requires pod information", c.driverName))
|
klog.V(4).Infof(log("CSIDriver %q requires pod information", c.driverName))
|
||||||
return attrs, nil
|
return attrs, nil
|
||||||
}
|
}
|
||||||
|
@ -99,6 +99,7 @@ func MounterSetUpTests(t *testing.T, podInfoEnabled bool) {
|
|||||||
driver string
|
driver string
|
||||||
volumeContext map[string]string
|
volumeContext map[string]string
|
||||||
expectedVolumeContext map[string]string
|
expectedVolumeContext map[string]string
|
||||||
|
csiInlineVolume bool
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
name: "no pod info",
|
name: "no pod info",
|
||||||
@ -136,6 +137,13 @@ func MounterSetUpTests(t *testing.T, podInfoEnabled bool) {
|
|||||||
volumeContext: map[string]string{"foo": "bar"},
|
volumeContext: map[string]string{"foo": "bar"},
|
||||||
expectedVolumeContext: map[string]string{"foo": "bar", "csi.storage.k8s.io/pod.uid": "test-pod", "csi.storage.k8s.io/serviceAccount.name": "test-service-account", "csi.storage.k8s.io/pod.name": "test-pod", "csi.storage.k8s.io/pod.namespace": "test-ns"},
|
expectedVolumeContext: map[string]string{"foo": "bar", "csi.storage.k8s.io/pod.uid": "test-pod", "csi.storage.k8s.io/serviceAccount.name": "test-service-account", "csi.storage.k8s.io/pod.name": "test-pod", "csi.storage.k8s.io/pod.namespace": "test-ns"},
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
name: "CSIInlineVolume pod info",
|
||||||
|
driver: "info",
|
||||||
|
volumeContext: nil,
|
||||||
|
expectedVolumeContext: map[string]string{"csi.storage.k8s.io/pod.uid": "test-pod", "csi.storage.k8s.io/serviceAccount.name": "test-service-account", "csi.storage.k8s.io/pod.name": "test-pod", "csi.storage.k8s.io/pod.namespace": "test-ns", "csi.storage.k8s.io/ephemeral": "false"},
|
||||||
|
csiInlineVolume: true,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
noPodMountInfo := false
|
noPodMountInfo := false
|
||||||
@ -143,6 +151,9 @@ func MounterSetUpTests(t *testing.T, podInfoEnabled bool) {
|
|||||||
for _, test := range tests {
|
for _, test := range tests {
|
||||||
t.Run(test.name, func(t *testing.T) {
|
t.Run(test.name, func(t *testing.T) {
|
||||||
klog.Infof("Starting test %s", test.name)
|
klog.Infof("Starting test %s", test.name)
|
||||||
|
if test.csiInlineVolume {
|
||||||
|
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIInlineVolume, true)()
|
||||||
|
}
|
||||||
fakeClient := fakeclient.NewSimpleClientset(
|
fakeClient := fakeclient.NewSimpleClientset(
|
||||||
getTestCSIDriver("no-info", &noPodMountInfo, nil),
|
getTestCSIDriver("no-info", &noPodMountInfo, nil),
|
||||||
getTestCSIDriver("info", ¤tPodInfoMount, nil),
|
getTestCSIDriver("info", ¤tPodInfoMount, nil),
|
||||||
@ -267,7 +278,7 @@ func TestMounterSetUpSimple(t *testing.T) {
|
|||||||
testCases := []struct {
|
testCases := []struct {
|
||||||
name string
|
name string
|
||||||
podUID types.UID
|
podUID types.UID
|
||||||
mode driverMode
|
mode csiVolumeMode
|
||||||
fsType string
|
fsType string
|
||||||
options []string
|
options []string
|
||||||
spec func(string, []string) *volume.Spec
|
spec func(string, []string) *volume.Spec
|
||||||
@ -276,7 +287,7 @@ func TestMounterSetUpSimple(t *testing.T) {
|
|||||||
{
|
{
|
||||||
name: "setup with vol source",
|
name: "setup with vol source",
|
||||||
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
|
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
|
||||||
mode: ephemeralDriverMode,
|
mode: ephemeralVolumeMode,
|
||||||
fsType: "ext4",
|
fsType: "ext4",
|
||||||
shouldFail: true,
|
shouldFail: true,
|
||||||
spec: func(fsType string, options []string) *volume.Spec {
|
spec: func(fsType string, options []string) *volume.Spec {
|
||||||
@ -288,7 +299,7 @@ func TestMounterSetUpSimple(t *testing.T) {
|
|||||||
{
|
{
|
||||||
name: "setup with persistent source",
|
name: "setup with persistent source",
|
||||||
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
|
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
|
||||||
mode: persistentDriverMode,
|
mode: persistentVolumeMode,
|
||||||
fsType: "zfs",
|
fsType: "zfs",
|
||||||
spec: func(fsType string, options []string) *volume.Spec {
|
spec: func(fsType string, options []string) *volume.Spec {
|
||||||
pvSrc := makeTestPV("pv1", 20, testDriver, "vol1")
|
pvSrc := makeTestPV("pv1", 20, testDriver, "vol1")
|
||||||
@ -300,7 +311,7 @@ func TestMounterSetUpSimple(t *testing.T) {
|
|||||||
{
|
{
|
||||||
name: "setup with persistent source without unspecified fstype and options",
|
name: "setup with persistent source without unspecified fstype and options",
|
||||||
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
|
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
|
||||||
mode: persistentDriverMode,
|
mode: persistentVolumeMode,
|
||||||
spec: func(fsType string, options []string) *volume.Spec {
|
spec: func(fsType string, options []string) *volume.Spec {
|
||||||
return volume.NewSpecFromPersistentVolume(makeTestPV("pv1", 20, testDriver, "vol2"), false)
|
return volume.NewSpecFromPersistentVolume(makeTestPV("pv1", 20, testDriver, "vol2"), false)
|
||||||
},
|
},
|
||||||
@ -334,8 +345,8 @@ func TestMounterSetUpSimple(t *testing.T) {
|
|||||||
csiMounter := mounter.(*csiMountMgr)
|
csiMounter := mounter.(*csiMountMgr)
|
||||||
csiMounter.csiClient = setupClient(t, true)
|
csiMounter.csiClient = setupClient(t, true)
|
||||||
|
|
||||||
if csiMounter.driverMode != persistentDriverMode {
|
if csiMounter.csiVolumeMode != persistentVolumeMode {
|
||||||
t.Fatal("unexpected driver mode: ", csiMounter.driverMode)
|
t.Fatal("unexpected volume mode: ", csiMounter.csiVolumeMode)
|
||||||
}
|
}
|
||||||
|
|
||||||
attachID := getAttachmentName(csiMounter.volumeID, string(csiMounter.driverName), string(plug.host.GetNodeName()))
|
attachID := getAttachmentName(csiMounter.volumeID, string(csiMounter.driverName), string(plug.host.GetNodeName()))
|
||||||
@ -393,7 +404,7 @@ func TestMounterSetUpWithInline(t *testing.T) {
|
|||||||
testCases := []struct {
|
testCases := []struct {
|
||||||
name string
|
name string
|
||||||
podUID types.UID
|
podUID types.UID
|
||||||
mode driverMode
|
mode csiVolumeMode
|
||||||
fsType string
|
fsType string
|
||||||
options []string
|
options []string
|
||||||
spec func(string, []string) *volume.Spec
|
spec func(string, []string) *volume.Spec
|
||||||
@ -402,7 +413,7 @@ func TestMounterSetUpWithInline(t *testing.T) {
|
|||||||
{
|
{
|
||||||
name: "setup with vol source",
|
name: "setup with vol source",
|
||||||
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
|
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
|
||||||
mode: ephemeralDriverMode,
|
mode: ephemeralVolumeMode,
|
||||||
fsType: "ext4",
|
fsType: "ext4",
|
||||||
spec: func(fsType string, options []string) *volume.Spec {
|
spec: func(fsType string, options []string) *volume.Spec {
|
||||||
volSrc := makeTestVol("pv1", testDriver)
|
volSrc := makeTestVol("pv1", testDriver)
|
||||||
@ -413,7 +424,7 @@ func TestMounterSetUpWithInline(t *testing.T) {
|
|||||||
{
|
{
|
||||||
name: "setup with persistent source",
|
name: "setup with persistent source",
|
||||||
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
|
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
|
||||||
mode: persistentDriverMode,
|
mode: persistentVolumeMode,
|
||||||
fsType: "zfs",
|
fsType: "zfs",
|
||||||
spec: func(fsType string, options []string) *volume.Spec {
|
spec: func(fsType string, options []string) *volume.Spec {
|
||||||
pvSrc := makeTestPV("pv1", 20, testDriver, "vol1")
|
pvSrc := makeTestPV("pv1", 20, testDriver, "vol1")
|
||||||
@ -425,7 +436,7 @@ func TestMounterSetUpWithInline(t *testing.T) {
|
|||||||
{
|
{
|
||||||
name: "setup with persistent source without unspecified fstype and options",
|
name: "setup with persistent source without unspecified fstype and options",
|
||||||
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
|
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
|
||||||
mode: persistentDriverMode,
|
mode: persistentVolumeMode,
|
||||||
spec: func(fsType string, options []string) *volume.Spec {
|
spec: func(fsType string, options []string) *volume.Spec {
|
||||||
return volume.NewSpecFromPersistentVolume(makeTestPV("pv1", 20, testDriver, "vol2"), false)
|
return volume.NewSpecFromPersistentVolume(makeTestPV("pv1", 20, testDriver, "vol2"), false)
|
||||||
},
|
},
|
||||||
@ -459,15 +470,15 @@ func TestMounterSetUpWithInline(t *testing.T) {
|
|||||||
csiMounter := mounter.(*csiMountMgr)
|
csiMounter := mounter.(*csiMountMgr)
|
||||||
csiMounter.csiClient = setupClient(t, true)
|
csiMounter.csiClient = setupClient(t, true)
|
||||||
|
|
||||||
if csiMounter.driverMode != tc.mode {
|
if csiMounter.csiVolumeMode != tc.mode {
|
||||||
t.Fatal("unexpected driver mode: ", csiMounter.driverMode)
|
t.Fatal("unexpected volume mode: ", csiMounter.csiVolumeMode)
|
||||||
}
|
}
|
||||||
|
|
||||||
if csiMounter.driverMode == ephemeralDriverMode && csiMounter.volumeID != makeVolumeHandle(string(tc.podUID), csiMounter.specVolumeID) {
|
if csiMounter.csiVolumeMode == ephemeralVolumeMode && csiMounter.volumeID != makeVolumeHandle(string(tc.podUID), csiMounter.specVolumeID) {
|
||||||
t.Fatal("unexpected generated volumeHandle:", csiMounter.volumeID)
|
t.Fatal("unexpected generated volumeHandle:", csiMounter.volumeID)
|
||||||
}
|
}
|
||||||
|
|
||||||
if csiMounter.driverMode == persistentDriverMode {
|
if csiMounter.csiVolumeMode == persistentVolumeMode {
|
||||||
attachID := getAttachmentName(csiMounter.volumeID, string(csiMounter.driverName), string(plug.host.GetNodeName()))
|
attachID := getAttachmentName(csiMounter.volumeID, string(csiMounter.driverName), string(plug.host.GetNodeName()))
|
||||||
attachment := makeTestAttachment(attachID, "test-node", csiMounter.spec.Name())
|
attachment := makeTestAttachment(attachID, "test-node", csiMounter.spec.Name())
|
||||||
_, err = csiMounter.k8s.StorageV1().VolumeAttachments().Create(attachment)
|
_, err = csiMounter.k8s.StorageV1().VolumeAttachments().Create(attachment)
|
||||||
@ -492,10 +503,10 @@ func TestMounterSetUpWithInline(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// validate stagingTargetPath
|
// validate stagingTargetPath
|
||||||
if tc.mode == ephemeralDriverMode && vol.DeviceMountPath != "" {
|
if tc.mode == ephemeralVolumeMode && vol.DeviceMountPath != "" {
|
||||||
t.Errorf("unexpected devicePathTarget sent to driver: %s", vol.DeviceMountPath)
|
t.Errorf("unexpected devicePathTarget sent to driver: %s", vol.DeviceMountPath)
|
||||||
}
|
}
|
||||||
if tc.mode == persistentDriverMode {
|
if tc.mode == persistentVolumeMode {
|
||||||
devicePath, err := makeDeviceMountPath(plug, csiMounter.spec)
|
devicePath, err := makeDeviceMountPath(plug, csiMounter.spec)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
|
@ -77,6 +77,12 @@ type driverMode string
|
|||||||
|
|
||||||
const persistentDriverMode driverMode = "persistent"
|
const persistentDriverMode driverMode = "persistent"
|
||||||
const ephemeralDriverMode driverMode = "ephemeral"
|
const ephemeralDriverMode driverMode = "ephemeral"
|
||||||
|
const combinedDriverMode driverMode = "persistent+ephemeral"
|
||||||
|
|
||||||
|
type csiVolumeMode string
|
||||||
|
|
||||||
|
const persistentVolumeMode csiVolumeMode = "persistent"
|
||||||
|
const ephemeralVolumeMode csiVolumeMode = "ephemeral"
|
||||||
|
|
||||||
// ProbeVolumePlugins returns implemented plugins
|
// ProbeVolumePlugins returns implemented plugins
|
||||||
func ProbeVolumePlugins() []volume.VolumePlugin {
|
func ProbeVolumePlugins() []volume.VolumePlugin {
|
||||||
@ -381,11 +387,16 @@ func (p *csiPlugin) NewMounter(
|
|||||||
return nil, fmt.Errorf("volume source not found in volume.Spec")
|
return nil, fmt.Errorf("volume source not found in volume.Spec")
|
||||||
}
|
}
|
||||||
|
|
||||||
driverMode, err := p.getDriverMode(spec)
|
csiVolumeMode, err := p.getCSIVolumeMode(spec)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO(pohly): check CSIDriver.Spec.Mode to ensure that the CSI driver
|
||||||
|
// supports the current csiVolumeMode.
|
||||||
|
// In alpha it is assumed that drivers are used correctly without
|
||||||
|
// the additional sanity check.
|
||||||
|
|
||||||
k8s := p.host.GetKubeClient()
|
k8s := p.host.GetKubeClient()
|
||||||
if k8s == nil {
|
if k8s == nil {
|
||||||
klog.Error(log("failed to get a kubernetes client"))
|
klog.Error(log("failed to get a kubernetes client"))
|
||||||
@ -399,17 +410,17 @@ func (p *csiPlugin) NewMounter(
|
|||||||
}
|
}
|
||||||
|
|
||||||
mounter := &csiMountMgr{
|
mounter := &csiMountMgr{
|
||||||
plugin: p,
|
plugin: p,
|
||||||
k8s: k8s,
|
k8s: k8s,
|
||||||
spec: spec,
|
spec: spec,
|
||||||
pod: pod,
|
pod: pod,
|
||||||
podUID: pod.UID,
|
podUID: pod.UID,
|
||||||
driverName: csiDriverName(driverName),
|
driverName: csiDriverName(driverName),
|
||||||
driverMode: driverMode,
|
csiVolumeMode: csiVolumeMode,
|
||||||
volumeID: volumeHandle,
|
volumeID: volumeHandle,
|
||||||
specVolumeID: spec.Name(),
|
specVolumeID: spec.Name(),
|
||||||
readOnly: readOnly,
|
readOnly: readOnly,
|
||||||
kubeVolHost: kvh,
|
kubeVolHost: kvh,
|
||||||
}
|
}
|
||||||
mounter.csiClientGetter.driverName = csiDriverName(driverName)
|
mounter.csiClientGetter.driverName = csiDriverName(driverName)
|
||||||
|
|
||||||
@ -428,11 +439,11 @@ func (p *csiPlugin) NewMounter(
|
|||||||
// persist volume info data for teardown
|
// persist volume info data for teardown
|
||||||
node := string(p.host.GetNodeName())
|
node := string(p.host.GetNodeName())
|
||||||
volData := map[string]string{
|
volData := map[string]string{
|
||||||
volDataKey.specVolID: spec.Name(),
|
volDataKey.specVolID: spec.Name(),
|
||||||
volDataKey.volHandle: volumeHandle,
|
volDataKey.volHandle: volumeHandle,
|
||||||
volDataKey.driverName: driverName,
|
volDataKey.driverName: driverName,
|
||||||
volDataKey.nodeName: node,
|
volDataKey.nodeName: node,
|
||||||
volDataKey.driverMode: string(driverMode),
|
volDataKey.csiVolumeMode: string(csiVolumeMode),
|
||||||
}
|
}
|
||||||
|
|
||||||
attachID := getAttachmentName(volumeHandle, driverName, node)
|
attachID := getAttachmentName(volumeHandle, driverName, node)
|
||||||
@ -496,16 +507,13 @@ func (p *csiPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.S
|
|||||||
var spec *volume.Spec
|
var spec *volume.Spec
|
||||||
inlineEnabled := utilfeature.DefaultFeatureGate.Enabled(features.CSIInlineVolume)
|
inlineEnabled := utilfeature.DefaultFeatureGate.Enabled(features.CSIInlineVolume)
|
||||||
|
|
||||||
// If inlineEnabled is true and mode is ephemeralDriverMode,
|
// If inlineEnabled is true and mode is ephemeralVolumeMode,
|
||||||
// use constructVolSourceSpec to construct volume source spec.
|
// use constructVolSourceSpec to construct volume source spec.
|
||||||
// If inlineEnabled is false or mode is persistentDriverMode,
|
// If inlineEnabled is false or mode is persistentVolumeMode,
|
||||||
// use constructPVSourceSpec to construct volume construct pv source spec.
|
// use constructPVSourceSpec to construct volume construct pv source spec.
|
||||||
if inlineEnabled {
|
if inlineEnabled && csiVolumeMode(volData[volDataKey.csiVolumeMode]) == ephemeralVolumeMode {
|
||||||
if driverMode(volData[volDataKey.driverMode]) == ephemeralDriverMode {
|
spec = p.constructVolSourceSpec(volData[volDataKey.specVolID], volData[volDataKey.driverName])
|
||||||
spec = p.constructVolSourceSpec(volData[volDataKey.specVolID], volData[volDataKey.driverName])
|
return spec, nil
|
||||||
return spec, nil
|
|
||||||
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
spec = p.constructPVSourceSpec(volData[volDataKey.specVolID], volData[volDataKey.driverName], volData[volDataKey.volHandle])
|
spec = p.constructPVSourceSpec(volData[volDataKey.specVolID], volData[volDataKey.driverName], volData[volDataKey.volHandle])
|
||||||
|
|
||||||
@ -576,14 +584,17 @@ func (p *csiPlugin) NewDetacher() (volume.Detacher, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (p *csiPlugin) CanAttach(spec *volume.Spec) (bool, error) {
|
func (p *csiPlugin) CanAttach(spec *volume.Spec) (bool, error) {
|
||||||
driverMode, err := p.getDriverMode(spec)
|
inlineEnabled := utilfeature.DefaultFeatureGate.Enabled(features.CSIInlineVolume)
|
||||||
if err != nil {
|
if inlineEnabled {
|
||||||
return false, err
|
csiVolumeMode, err := p.getCSIVolumeMode(spec)
|
||||||
}
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
if driverMode == ephemeralDriverMode {
|
if csiVolumeMode == ephemeralVolumeMode {
|
||||||
klog.V(5).Info(log("plugin.CanAttach = false, ephemeral mode detected for spec %v", spec.Name()))
|
klog.V(5).Info(log("plugin.CanAttach = false, ephemeral mode detected for spec %v", spec.Name()))
|
||||||
return false, nil
|
return false, nil
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pvSrc, err := getCSISourceFromSpec(spec)
|
pvSrc, err := getCSISourceFromSpec(spec)
|
||||||
@ -603,16 +614,23 @@ func (p *csiPlugin) CanAttach(spec *volume.Spec) (bool, error) {
|
|||||||
|
|
||||||
// CanDeviceMount returns true if the spec supports device mount
|
// CanDeviceMount returns true if the spec supports device mount
|
||||||
func (p *csiPlugin) CanDeviceMount(spec *volume.Spec) (bool, error) {
|
func (p *csiPlugin) CanDeviceMount(spec *volume.Spec) (bool, error) {
|
||||||
driverMode, err := p.getDriverMode(spec)
|
inlineEnabled := utilfeature.DefaultFeatureGate.Enabled(features.CSIInlineVolume)
|
||||||
|
if !inlineEnabled {
|
||||||
|
// No need to check anything, we assume it is a persistent volume.
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
csiVolumeMode, err := p.getCSIVolumeMode(spec)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if driverMode == ephemeralDriverMode {
|
if csiVolumeMode == ephemeralVolumeMode {
|
||||||
klog.V(5).Info(log("plugin.CanDeviceMount skipped ephemeral mode detected for spec %v", spec.Name()))
|
klog.V(5).Info(log("plugin.CanDeviceMount skipped ephemeral mode detected for spec %v", spec.Name()))
|
||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Persistent volumes support device mount.
|
||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -783,13 +801,11 @@ func (p *csiPlugin) skipAttach(driver string) (bool, error) {
|
|||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// getDriverMode returns the driver mode for the specified spec: {persistent|ephemeral}.
|
// getCSIVolumeMode returns the mode for the specified spec: {persistent|ephemeral}.
|
||||||
// 1) If mode cannot be determined, it will default to "persistent".
|
// 1) If mode cannot be determined, it will default to "persistent".
|
||||||
// 2) If Mode cannot be resolved to either {persistent | ephemeral}, an error is returned
|
// 2) If Mode cannot be resolved to either {persistent | ephemeral}, an error is returned
|
||||||
// See https://github.com/kubernetes/enhancements/blob/master/keps/sig-storage/20190122-csi-inline-volumes.md
|
// See https://github.com/kubernetes/enhancements/blob/master/keps/sig-storage/20190122-csi-inline-volumes.md
|
||||||
func (p *csiPlugin) getDriverMode(spec *volume.Spec) (driverMode, error) {
|
func (p *csiPlugin) getCSIVolumeMode(spec *volume.Spec) (csiVolumeMode, error) {
|
||||||
// TODO (vladimirvivien) ultimately, mode will be retrieved from CSIDriver.Spec.Mode.
|
|
||||||
// However, in alpha version, mode is determined by the volume source:
|
|
||||||
// 1) if volume.Spec.Volume.CSI != nil -> mode is ephemeral
|
// 1) if volume.Spec.Volume.CSI != nil -> mode is ephemeral
|
||||||
// 2) if volume.Spec.PersistentVolume.Spec.CSI != nil -> persistent
|
// 2) if volume.Spec.PersistentVolume.Spec.CSI != nil -> persistent
|
||||||
volSrc, _, err := getSourceFromSpec(spec)
|
volSrc, _, err := getSourceFromSpec(spec)
|
||||||
@ -798,9 +814,9 @@ func (p *csiPlugin) getDriverMode(spec *volume.Spec) (driverMode, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if volSrc != nil && utilfeature.DefaultFeatureGate.Enabled(features.CSIInlineVolume) {
|
if volSrc != nil && utilfeature.DefaultFeatureGate.Enabled(features.CSIInlineVolume) {
|
||||||
return ephemeralDriverMode, nil
|
return ephemeralVolumeMode, nil
|
||||||
}
|
}
|
||||||
return persistentDriverMode, nil
|
return persistentVolumeMode, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *csiPlugin) getPublishContext(client clientset.Interface, handle, driver, nodeName string) (map[string]string, error) {
|
func (p *csiPlugin) getPublishContext(client clientset.Interface, handle, driver, nodeName string) (map[string]string, error) {
|
||||||
|
@ -520,27 +520,27 @@ func TestPluginNewMounter(t *testing.T) {
|
|||||||
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
|
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
spec *volume.Spec
|
spec *volume.Spec
|
||||||
podUID types.UID
|
podUID types.UID
|
||||||
namespace string
|
namespace string
|
||||||
driverMode driverMode
|
csiVolumeMode csiVolumeMode
|
||||||
shouldFail bool
|
shouldFail bool
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
name: "mounter from persistent volume source",
|
name: "mounter from persistent volume source",
|
||||||
spec: volume.NewSpecFromPersistentVolume(makeTestPV("test-pv1", 20, testDriver, testVol), true),
|
spec: volume.NewSpecFromPersistentVolume(makeTestPV("test-pv1", 20, testDriver, testVol), true),
|
||||||
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
|
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
|
||||||
namespace: "test-ns1",
|
namespace: "test-ns1",
|
||||||
driverMode: persistentDriverMode,
|
csiVolumeMode: persistentVolumeMode,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "mounter from volume source",
|
name: "mounter from volume source",
|
||||||
spec: volume.NewSpecFromVolume(makeTestVol("test-vol1", testDriver)),
|
spec: volume.NewSpecFromVolume(makeTestVol("test-vol1", testDriver)),
|
||||||
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
|
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
|
||||||
namespace: "test-ns2",
|
namespace: "test-ns2",
|
||||||
driverMode: ephemeralDriverMode,
|
csiVolumeMode: ephemeralVolumeMode,
|
||||||
shouldFail: true, // csi inline not enabled
|
shouldFail: true, // csi inline not enabled
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "mounter from no spec provided",
|
name: "mounter from no spec provided",
|
||||||
@ -590,8 +590,8 @@ func TestPluginNewMounter(t *testing.T) {
|
|||||||
if csiClient == nil {
|
if csiClient == nil {
|
||||||
t.Error("mounter csiClient is nil")
|
t.Error("mounter csiClient is nil")
|
||||||
}
|
}
|
||||||
if csiMounter.driverMode != test.driverMode {
|
if csiMounter.csiVolumeMode != test.csiVolumeMode {
|
||||||
t.Error("unexpected driver mode:", csiMounter.driverMode)
|
t.Error("unexpected driver mode:", csiMounter.csiVolumeMode)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ensure data file is created
|
// ensure data file is created
|
||||||
@ -620,8 +620,8 @@ func TestPluginNewMounter(t *testing.T) {
|
|||||||
if data[volDataKey.nodeName] != string(csiMounter.plugin.host.GetNodeName()) {
|
if data[volDataKey.nodeName] != string(csiMounter.plugin.host.GetNodeName()) {
|
||||||
t.Error("volume data file unexpected nodeName:", data[volDataKey.nodeName])
|
t.Error("volume data file unexpected nodeName:", data[volDataKey.nodeName])
|
||||||
}
|
}
|
||||||
if data[volDataKey.driverMode] != string(test.driverMode) {
|
if data[volDataKey.csiVolumeMode] != string(test.csiVolumeMode) {
|
||||||
t.Error("volume data file unexpected driverMode:", data[volDataKey.driverMode])
|
t.Error("volume data file unexpected csiVolumeMode:", data[volDataKey.csiVolumeMode])
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@ -631,12 +631,12 @@ func TestPluginNewMounterWithInline(t *testing.T) {
|
|||||||
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
|
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
|
||||||
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIInlineVolume, true)()
|
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIInlineVolume, true)()
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
spec *volume.Spec
|
spec *volume.Spec
|
||||||
podUID types.UID
|
podUID types.UID
|
||||||
namespace string
|
namespace string
|
||||||
driverMode driverMode
|
csiVolumeMode csiVolumeMode
|
||||||
shouldFail bool
|
shouldFail bool
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
name: "mounter with missing spec",
|
name: "mounter with missing spec",
|
||||||
@ -652,18 +652,18 @@ func TestPluginNewMounterWithInline(t *testing.T) {
|
|||||||
shouldFail: true,
|
shouldFail: true,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "mounter with persistent volume source",
|
name: "mounter with persistent volume source",
|
||||||
spec: volume.NewSpecFromPersistentVolume(makeTestPV("test-pv1", 20, testDriver, testVol), true),
|
spec: volume.NewSpecFromPersistentVolume(makeTestPV("test-pv1", 20, testDriver, testVol), true),
|
||||||
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
|
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
|
||||||
namespace: "test-ns1",
|
namespace: "test-ns1",
|
||||||
driverMode: persistentDriverMode,
|
csiVolumeMode: persistentVolumeMode,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "mounter with volume source",
|
name: "mounter with volume source",
|
||||||
spec: volume.NewSpecFromVolume(makeTestVol("test-vol1", testDriver)),
|
spec: volume.NewSpecFromVolume(makeTestVol("test-vol1", testDriver)),
|
||||||
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
|
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
|
||||||
namespace: "test-ns2",
|
namespace: "test-ns2",
|
||||||
driverMode: ephemeralDriverMode,
|
csiVolumeMode: ephemeralVolumeMode,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -709,8 +709,8 @@ func TestPluginNewMounterWithInline(t *testing.T) {
|
|||||||
if csiClient == nil {
|
if csiClient == nil {
|
||||||
t.Error("mounter csiClient is nil")
|
t.Error("mounter csiClient is nil")
|
||||||
}
|
}
|
||||||
if csiMounter.driverMode != test.driverMode {
|
if csiMounter.csiVolumeMode != test.csiVolumeMode {
|
||||||
t.Error("unexpected driver mode:", csiMounter.driverMode)
|
t.Error("unexpected driver mode:", csiMounter.csiVolumeMode)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ensure data file is created
|
// ensure data file is created
|
||||||
@ -739,8 +739,8 @@ func TestPluginNewMounterWithInline(t *testing.T) {
|
|||||||
if data[volDataKey.nodeName] != string(csiMounter.plugin.host.GetNodeName()) {
|
if data[volDataKey.nodeName] != string(csiMounter.plugin.host.GetNodeName()) {
|
||||||
t.Error("volume data file unexpected nodeName:", data[volDataKey.nodeName])
|
t.Error("volume data file unexpected nodeName:", data[volDataKey.nodeName])
|
||||||
}
|
}
|
||||||
if data[volDataKey.driverMode] != string(csiMounter.driverMode) {
|
if data[volDataKey.csiVolumeMode] != string(csiMounter.csiVolumeMode) {
|
||||||
t.Error("volume data file unexpected driverMode:", data[volDataKey.driverMode])
|
t.Error("volume data file unexpected csiVolumeMode:", data[volDataKey.csiVolumeMode])
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -21,6 +21,7 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"regexp"
|
"regexp"
|
||||||
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@ -131,7 +132,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
createPod := func() (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
|
createPod := func(ephemeral bool) (class *storagev1.StorageClass, claim *v1.PersistentVolumeClaim, pod *v1.Pod) {
|
||||||
ginkgo.By("Creating pod")
|
ginkgo.By("Creating pod")
|
||||||
var sc *storagev1.StorageClass
|
var sc *storagev1.StorageClass
|
||||||
if dDriver, ok := m.driver.(testsuites.DynamicPVTestDriver); ok {
|
if dDriver, ok := m.driver.(testsuites.DynamicPVTestDriver); ok {
|
||||||
@ -162,17 +163,24 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
|
|||||||
Selector: m.nodeLabel,
|
Selector: m.nodeLabel,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
class, claim, pod := startPausePod(f.ClientSet, scTest, nodeSelection, f.Namespace.Name)
|
if ephemeral {
|
||||||
if class != nil {
|
pod = startPausePodInline(f.ClientSet, scTest, nodeSelection, f.Namespace.Name)
|
||||||
m.sc[class.Name] = class
|
if pod != nil {
|
||||||
|
m.pods = append(m.pods, pod)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
class, claim, pod = startPausePod(f.ClientSet, scTest, nodeSelection, f.Namespace.Name)
|
||||||
|
if class != nil {
|
||||||
|
m.sc[class.Name] = class
|
||||||
|
}
|
||||||
|
if claim != nil {
|
||||||
|
m.pvcs = append(m.pvcs, claim)
|
||||||
|
}
|
||||||
|
if pod != nil {
|
||||||
|
m.pods = append(m.pods, pod)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if claim != nil {
|
return // result variables set above
|
||||||
m.pvcs = append(m.pvcs, claim)
|
|
||||||
}
|
|
||||||
if pod != nil {
|
|
||||||
m.pods = append(m.pods, pod)
|
|
||||||
}
|
|
||||||
return class, claim, pod
|
|
||||||
}
|
}
|
||||||
|
|
||||||
createPodWithPVC := func(pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
|
createPodWithPVC := func(pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
|
||||||
@ -257,7 +265,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
|
|||||||
init(testParameters{registerDriver: test.deployClusterRegistrar, disableAttach: test.disableAttach})
|
init(testParameters{registerDriver: test.deployClusterRegistrar, disableAttach: test.disableAttach})
|
||||||
defer cleanup()
|
defer cleanup()
|
||||||
|
|
||||||
_, claim, pod := createPod()
|
_, claim, pod := createPod(false)
|
||||||
if pod == nil {
|
if pod == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@ -297,29 +305,42 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
|
|||||||
podInfoOnMount *bool
|
podInfoOnMount *bool
|
||||||
deployClusterRegistrar bool
|
deployClusterRegistrar bool
|
||||||
expectPodInfo bool
|
expectPodInfo bool
|
||||||
|
expectEphemeral bool
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
name: "should not be passed when podInfoOnMount=nil",
|
name: "should not be passed when podInfoOnMount=nil",
|
||||||
podInfoOnMount: nil,
|
podInfoOnMount: nil,
|
||||||
deployClusterRegistrar: true,
|
deployClusterRegistrar: true,
|
||||||
expectPodInfo: false,
|
expectPodInfo: false,
|
||||||
|
expectEphemeral: false,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "should be passed when podInfoOnMount=true",
|
name: "should be passed when podInfoOnMount=true",
|
||||||
podInfoOnMount: &podInfoTrue,
|
podInfoOnMount: &podInfoTrue,
|
||||||
deployClusterRegistrar: true,
|
deployClusterRegistrar: true,
|
||||||
expectPodInfo: true,
|
expectPodInfo: true,
|
||||||
|
expectEphemeral: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// TODO(pohly): remove the feature tag when moving to beta
|
||||||
|
name: "contain ephemeral=true when using inline volume [Feature:CSIInlineVolume]",
|
||||||
|
podInfoOnMount: &podInfoTrue,
|
||||||
|
deployClusterRegistrar: true,
|
||||||
|
expectPodInfo: true,
|
||||||
|
expectEphemeral: true,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "should not be passed when podInfoOnMount=false",
|
name: "should not be passed when podInfoOnMount=false",
|
||||||
podInfoOnMount: &podInfoFalse,
|
podInfoOnMount: &podInfoFalse,
|
||||||
deployClusterRegistrar: true,
|
deployClusterRegistrar: true,
|
||||||
expectPodInfo: false,
|
expectPodInfo: false,
|
||||||
|
expectEphemeral: false,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "should not be passed when CSIDriver does not exist",
|
name: "should not be passed when CSIDriver does not exist",
|
||||||
deployClusterRegistrar: false,
|
deployClusterRegistrar: false,
|
||||||
expectPodInfo: false,
|
expectPodInfo: false,
|
||||||
|
expectEphemeral: false,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
for _, t := range tests {
|
for _, t := range tests {
|
||||||
@ -332,17 +353,27 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
|
|||||||
|
|
||||||
defer cleanup()
|
defer cleanup()
|
||||||
|
|
||||||
_, _, pod := createPod()
|
_, _, pod := createPod(test.expectEphemeral)
|
||||||
if pod == nil {
|
if pod == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
|
err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
|
||||||
framework.ExpectNoError(err, "Failed to start pod: %v", err)
|
framework.ExpectNoError(err, "Failed to start pod: %v", err)
|
||||||
ginkgo.By("Checking CSI driver logs")
|
|
||||||
|
|
||||||
|
// If we expect an ephemeral volume, the feature has to be enabled.
|
||||||
|
// Otherwise need to check if we expect pod info, because the content
|
||||||
|
// of that depends on whether the feature is enabled or not.
|
||||||
|
csiInlineVolumesEnabled := test.expectEphemeral
|
||||||
|
if test.expectPodInfo {
|
||||||
|
ginkgo.By("checking for CSIInlineVolumes feature")
|
||||||
|
csiInlineVolumesEnabled, err = testsuites.CSIInlineVolumesEnabled(m.cs, f.Namespace.Name)
|
||||||
|
framework.ExpectNoError(err, "failed to test for CSIInlineVolumes")
|
||||||
|
}
|
||||||
|
|
||||||
|
ginkgo.By("Checking CSI driver logs")
|
||||||
// The driver is deployed as a statefulset with stable pod names
|
// The driver is deployed as a statefulset with stable pod names
|
||||||
driverPodName := "csi-mockplugin-0"
|
driverPodName := "csi-mockplugin-0"
|
||||||
err = checkPodInfo(m.cs, f.Namespace.Name, driverPodName, "mock", pod, test.expectPodInfo)
|
err = checkPodInfo(m.cs, f.Namespace.Name, driverPodName, "mock", pod, test.expectPodInfo, test.expectEphemeral, csiInlineVolumesEnabled)
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@ -364,19 +395,19 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
|
|||||||
|
|
||||||
gomega.Expect(csiNodeAttachLimit).To(gomega.BeNumerically("==", 2))
|
gomega.Expect(csiNodeAttachLimit).To(gomega.BeNumerically("==", 2))
|
||||||
|
|
||||||
_, _, pod1 := createPod()
|
_, _, pod1 := createPod(false)
|
||||||
gomega.Expect(pod1).NotTo(gomega.BeNil(), "while creating first pod")
|
gomega.Expect(pod1).NotTo(gomega.BeNil(), "while creating first pod")
|
||||||
|
|
||||||
err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod1.Name, pod1.Namespace)
|
err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod1.Name, pod1.Namespace)
|
||||||
framework.ExpectNoError(err, "Failed to start pod1: %v", err)
|
framework.ExpectNoError(err, "Failed to start pod1: %v", err)
|
||||||
|
|
||||||
_, _, pod2 := createPod()
|
_, _, pod2 := createPod(false)
|
||||||
gomega.Expect(pod2).NotTo(gomega.BeNil(), "while creating second pod")
|
gomega.Expect(pod2).NotTo(gomega.BeNil(), "while creating second pod")
|
||||||
|
|
||||||
err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod2.Name, pod2.Namespace)
|
err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod2.Name, pod2.Namespace)
|
||||||
framework.ExpectNoError(err, "Failed to start pod2: %v", err)
|
framework.ExpectNoError(err, "Failed to start pod2: %v", err)
|
||||||
|
|
||||||
_, _, pod3 := createPod()
|
_, _, pod3 := createPod(false)
|
||||||
gomega.Expect(pod3).NotTo(gomega.BeNil(), "while creating third pod")
|
gomega.Expect(pod3).NotTo(gomega.BeNil(), "while creating third pod")
|
||||||
err = waitForMaxVolumeCondition(pod3, m.cs)
|
err = waitForMaxVolumeCondition(pod3, m.cs)
|
||||||
framework.ExpectNoError(err, "while waiting for max volume condition on pod : %+v", pod3)
|
framework.ExpectNoError(err, "while waiting for max volume condition on pod : %+v", pod3)
|
||||||
@ -429,7 +460,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
|
|||||||
defer cleanup()
|
defer cleanup()
|
||||||
|
|
||||||
ns := f.Namespace.Name
|
ns := f.Namespace.Name
|
||||||
sc, pvc, pod := createPod()
|
sc, pvc, pod := createPod(false)
|
||||||
gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing")
|
gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing")
|
||||||
|
|
||||||
gomega.Expect(*sc.AllowVolumeExpansion).To(gomega.BeTrue(), "failed creating sc with allowed expansion")
|
gomega.Expect(*sc.AllowVolumeExpansion).To(gomega.BeTrue(), "failed creating sc with allowed expansion")
|
||||||
@ -520,7 +551,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
|
|||||||
|
|
||||||
defer cleanup()
|
defer cleanup()
|
||||||
|
|
||||||
sc, pvc, pod := createPod()
|
sc, pvc, pod := createPod(false)
|
||||||
gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing")
|
gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing")
|
||||||
|
|
||||||
gomega.Expect(*sc.AllowVolumeExpansion).To(gomega.BeTrue(), "failed creating sc with allowed expansion")
|
gomega.Expect(*sc.AllowVolumeExpansion).To(gomega.BeTrue(), "failed creating sc with allowed expansion")
|
||||||
@ -614,51 +645,41 @@ func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node f
|
|||||||
_, err = framework.WaitForPVClaimBoundPhase(cs, pvcClaims, framework.ClaimProvisionTimeout)
|
_, err = framework.WaitForPVClaimBoundPhase(cs, pvcClaims, framework.ClaimProvisionTimeout)
|
||||||
framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
|
framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
|
||||||
|
|
||||||
pod := &v1.Pod{
|
pod, err := startPausePodWithClaim(cs, claim, node, ns)
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
GenerateName: "pvc-volume-tester-",
|
|
||||||
},
|
|
||||||
Spec: v1.PodSpec{
|
|
||||||
Containers: []v1.Container{
|
|
||||||
{
|
|
||||||
Name: "volume-tester",
|
|
||||||
Image: imageutils.GetE2EImage(imageutils.Pause),
|
|
||||||
VolumeMounts: []v1.VolumeMount{
|
|
||||||
{
|
|
||||||
Name: "my-volume",
|
|
||||||
MountPath: "/mnt/test",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
RestartPolicy: v1.RestartPolicyNever,
|
|
||||||
Volumes: []v1.Volume{
|
|
||||||
{
|
|
||||||
Name: "my-volume",
|
|
||||||
VolumeSource: v1.VolumeSource{
|
|
||||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
|
||||||
ClaimName: claim.Name,
|
|
||||||
ReadOnly: false,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
if node.Name != "" {
|
|
||||||
pod.Spec.NodeName = node.Name
|
|
||||||
}
|
|
||||||
if len(node.Selector) != 0 {
|
|
||||||
pod.Spec.NodeSelector = node.Selector
|
|
||||||
}
|
|
||||||
|
|
||||||
pod, err = cs.CoreV1().Pods(ns).Create(pod)
|
|
||||||
framework.ExpectNoError(err, "Failed to create pod: %v", err)
|
framework.ExpectNoError(err, "Failed to create pod: %v", err)
|
||||||
return class, claim, pod
|
return class, claim, pod
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func startPausePodInline(cs clientset.Interface, t testsuites.StorageClassTest, node framework.NodeSelection, ns string) *v1.Pod {
|
||||||
|
pod, err := startPausePodWithInlineVolume(cs,
|
||||||
|
&v1.CSIVolumeSource{
|
||||||
|
Driver: t.Provisioner,
|
||||||
|
},
|
||||||
|
node, ns)
|
||||||
|
framework.ExpectNoError(err, "Failed to create pod: %v", err)
|
||||||
|
return pod
|
||||||
|
}
|
||||||
|
|
||||||
func startPausePodWithClaim(cs clientset.Interface, pvc *v1.PersistentVolumeClaim, node framework.NodeSelection, ns string) (*v1.Pod, error) {
|
func startPausePodWithClaim(cs clientset.Interface, pvc *v1.PersistentVolumeClaim, node framework.NodeSelection, ns string) (*v1.Pod, error) {
|
||||||
|
return startPausePodWithVolumeSource(cs,
|
||||||
|
v1.VolumeSource{
|
||||||
|
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||||
|
ClaimName: pvc.Name,
|
||||||
|
ReadOnly: false,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
node, ns)
|
||||||
|
}
|
||||||
|
|
||||||
|
func startPausePodWithInlineVolume(cs clientset.Interface, inlineVolume *v1.CSIVolumeSource, node framework.NodeSelection, ns string) (*v1.Pod, error) {
|
||||||
|
return startPausePodWithVolumeSource(cs,
|
||||||
|
v1.VolumeSource{
|
||||||
|
CSI: inlineVolume,
|
||||||
|
},
|
||||||
|
node, ns)
|
||||||
|
}
|
||||||
|
|
||||||
|
func startPausePodWithVolumeSource(cs clientset.Interface, volumeSource v1.VolumeSource, node framework.NodeSelection, ns string) (*v1.Pod, error) {
|
||||||
pod := &v1.Pod{
|
pod := &v1.Pod{
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
GenerateName: "pvc-volume-tester-",
|
GenerateName: "pvc-volume-tester-",
|
||||||
@ -679,13 +700,8 @@ func startPausePodWithClaim(cs clientset.Interface, pvc *v1.PersistentVolumeClai
|
|||||||
RestartPolicy: v1.RestartPolicyNever,
|
RestartPolicy: v1.RestartPolicyNever,
|
||||||
Volumes: []v1.Volume{
|
Volumes: []v1.Volume{
|
||||||
{
|
{
|
||||||
Name: "my-volume",
|
Name: "my-volume",
|
||||||
VolumeSource: v1.VolumeSource{
|
VolumeSource: volumeSource,
|
||||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
|
||||||
ClaimName: pvc.Name,
|
|
||||||
ReadOnly: false,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -702,13 +718,18 @@ func startPausePodWithClaim(cs clientset.Interface, pvc *v1.PersistentVolumeClai
|
|||||||
}
|
}
|
||||||
|
|
||||||
// checkPodInfo tests that NodePublish was called with expected volume_context
|
// checkPodInfo tests that NodePublish was called with expected volume_context
|
||||||
func checkPodInfo(cs clientset.Interface, namespace, driverPodName, driverContainerName string, pod *v1.Pod, expectPodInfo bool) error {
|
func checkPodInfo(cs clientset.Interface, namespace, driverPodName, driverContainerName string, pod *v1.Pod, expectPodInfo, ephemeralVolume, csiInlineVolumesEnabled bool) error {
|
||||||
expectedAttributes := map[string]string{
|
expectedAttributes := map[string]string{
|
||||||
"csi.storage.k8s.io/pod.name": pod.Name,
|
"csi.storage.k8s.io/pod.name": pod.Name,
|
||||||
"csi.storage.k8s.io/pod.namespace": namespace,
|
"csi.storage.k8s.io/pod.namespace": namespace,
|
||||||
"csi.storage.k8s.io/pod.uid": string(pod.UID),
|
"csi.storage.k8s.io/pod.uid": string(pod.UID),
|
||||||
"csi.storage.k8s.io/serviceAccount.name": "default",
|
"csi.storage.k8s.io/serviceAccount.name": "default",
|
||||||
}
|
}
|
||||||
|
if csiInlineVolumesEnabled {
|
||||||
|
// This is only passed in 1.15 when the CSIInlineVolume feature gate is set.
|
||||||
|
expectedAttributes["csi.storage.k8s.io/ephemeral"] = strconv.FormatBool(ephemeralVolume)
|
||||||
|
}
|
||||||
|
|
||||||
// Load logs of driver pod
|
// Load logs of driver pod
|
||||||
log, err := e2epod.GetPodLogs(cs, namespace, driverPodName, driverContainerName)
|
log, err := e2epod.GetPodLogs(cs, namespace, driverPodName, driverContainerName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
29
test/e2e/storage/external/external.go
vendored
29
test/e2e/storage/external/external.go
vendored
@ -40,6 +40,7 @@ import (
|
|||||||
|
|
||||||
// List of testSuites to be executed for each external driver.
|
// List of testSuites to be executed for each external driver.
|
||||||
var csiTestSuites = []func() testsuites.TestSuite{
|
var csiTestSuites = []func() testsuites.TestSuite{
|
||||||
|
testsuites.InitEphemeralTestSuite,
|
||||||
testsuites.InitMultiVolumeTestSuite,
|
testsuites.InitMultiVolumeTestSuite,
|
||||||
testsuites.InitProvisioningTestSuite,
|
testsuites.InitProvisioningTestSuite,
|
||||||
testsuites.InitSnapshottableTestSuite,
|
testsuites.InitSnapshottableTestSuite,
|
||||||
@ -128,6 +129,9 @@ var _ testsuites.DynamicPVTestDriver = &driverDefinition{}
|
|||||||
// Same for snapshotting.
|
// Same for snapshotting.
|
||||||
var _ testsuites.SnapshottableTestDriver = &driverDefinition{}
|
var _ testsuites.SnapshottableTestDriver = &driverDefinition{}
|
||||||
|
|
||||||
|
// And for ephemeral volumes.
|
||||||
|
var _ testsuites.EphemeralTestDriver = &driverDefinition{}
|
||||||
|
|
||||||
// runtime.DecodeInto needs a runtime.Object but doesn't do any
|
// runtime.DecodeInto needs a runtime.Object but doesn't do any
|
||||||
// deserialization of it and therefore none of the methods below need
|
// deserialization of it and therefore none of the methods below need
|
||||||
// an implementation.
|
// an implementation.
|
||||||
@ -144,9 +148,6 @@ type driverDefinition struct {
|
|||||||
// the default file system are enabled.
|
// the default file system are enabled.
|
||||||
DriverInfo testsuites.DriverInfo
|
DriverInfo testsuites.DriverInfo
|
||||||
|
|
||||||
// ShortName is used to create unique names for test cases and test resources.
|
|
||||||
ShortName string
|
|
||||||
|
|
||||||
// StorageClass must be set to enable dynamic provisioning tests.
|
// StorageClass must be set to enable dynamic provisioning tests.
|
||||||
// The default is to not run those tests.
|
// The default is to not run those tests.
|
||||||
StorageClass struct {
|
StorageClass struct {
|
||||||
@ -177,6 +178,15 @@ type driverDefinition struct {
|
|||||||
// TODO (?): load from file
|
// TODO (?): load from file
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// InlineVolumeAttributes defines one or more set of attributes for
|
||||||
|
// use as inline ephemeral volumes. At least one set of attributes
|
||||||
|
// has to be defined to enable testing of inline ephemeral volumes.
|
||||||
|
// If a test needs more volumes than defined, some of the defined
|
||||||
|
// volumes will be used multiple times.
|
||||||
|
//
|
||||||
|
// DriverInfo.Name is used as name of the driver in the inline volume.
|
||||||
|
InlineVolumeAttributes []map[string]string
|
||||||
|
|
||||||
// ClaimSize defines the desired size of dynamically
|
// ClaimSize defines the desired size of dynamically
|
||||||
// provisioned volumes. Default is "5GiB".
|
// provisioned volumes. Default is "5GiB".
|
||||||
ClaimSize string
|
ClaimSize string
|
||||||
@ -209,6 +219,8 @@ func (d *driverDefinition) SkipUnsupportedTest(pattern testpatterns.TestPattern)
|
|||||||
if d.StorageClass.FromName || d.StorageClass.FromFile != "" {
|
if d.StorageClass.FromName || d.StorageClass.FromFile != "" {
|
||||||
supported = true
|
supported = true
|
||||||
}
|
}
|
||||||
|
case testpatterns.CSIInlineVolume:
|
||||||
|
supported = len(d.InlineVolumeAttributes) != 0
|
||||||
}
|
}
|
||||||
if !supported {
|
if !supported {
|
||||||
framework.Skipf("Driver %q does not support volume type %q - skipping", d.DriverInfo.Name, pattern.VolType)
|
framework.Skipf("Driver %q does not support volume type %q - skipping", d.DriverInfo.Name, pattern.VolType)
|
||||||
@ -281,6 +293,17 @@ func (d *driverDefinition) GetClaimSize() string {
|
|||||||
return d.ClaimSize
|
return d.ClaimSize
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *driverDefinition) GetVolumeAttributes(config *testsuites.PerTestConfig, volumeNumber int) map[string]string {
|
||||||
|
if len(d.InlineVolumeAttributes) == 0 {
|
||||||
|
framework.Skipf("%s does not have any InlineVolumeAttributes defined", d.DriverInfo.Name)
|
||||||
|
}
|
||||||
|
return d.InlineVolumeAttributes[volumeNumber%len(d.InlineVolumeAttributes)]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *driverDefinition) GetCSIDriverName(config *testsuites.PerTestConfig) string {
|
||||||
|
return d.DriverInfo.Name
|
||||||
|
}
|
||||||
|
|
||||||
func (d *driverDefinition) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
|
func (d *driverDefinition) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
|
||||||
config := &testsuites.PerTestConfig{
|
config := &testsuites.PerTestConfig{
|
||||||
Driver: d,
|
Driver: d,
|
||||||
|
1
test/e2e/storage/external/external_test.go
vendored
1
test/e2e/storage/external/external_test.go
vendored
@ -33,7 +33,6 @@ func TestDriverParameter(t *testing.T) {
|
|||||||
"", // Default fsType
|
"", // Default fsType
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
ShortName: "foo",
|
|
||||||
ClaimSize: "5Gi",
|
ClaimSize: "5Gi",
|
||||||
}
|
}
|
||||||
testcases := []struct {
|
testcases := []struct {
|
||||||
|
@ -44,6 +44,8 @@ var (
|
|||||||
PreprovisionedPV TestVolType = "PreprovisionedPV"
|
PreprovisionedPV TestVolType = "PreprovisionedPV"
|
||||||
// DynamicPV represents a volume type for dynamic provisioned Persistent Volume
|
// DynamicPV represents a volume type for dynamic provisioned Persistent Volume
|
||||||
DynamicPV TestVolType = "DynamicPV"
|
DynamicPV TestVolType = "DynamicPV"
|
||||||
|
// CSIInlineVolume represents a volume type that is defined inline and provided by a CSI driver.
|
||||||
|
CSIInlineVolume TestVolType = "CSIInlineVolume"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TestSnapshotType represents a snapshot type to be tested in a TestSuite
|
// TestSnapshotType represents a snapshot type to be tested in a TestSuite
|
||||||
|
@ -5,6 +5,7 @@ go_library(
|
|||||||
srcs = [
|
srcs = [
|
||||||
"base.go",
|
"base.go",
|
||||||
"driveroperations.go",
|
"driveroperations.go",
|
||||||
|
"ephemeral.go",
|
||||||
"multivolume.go",
|
"multivolume.go",
|
||||||
"provisioning.go",
|
"provisioning.go",
|
||||||
"snapshottable.go",
|
"snapshottable.go",
|
||||||
|
@ -133,6 +133,8 @@ func skipUnsupportedTest(driver TestDriver, pattern testpatterns.TestPattern) {
|
|||||||
_, isSupported = driver.(PreprovisionedPVTestDriver)
|
_, isSupported = driver.(PreprovisionedPVTestDriver)
|
||||||
case testpatterns.DynamicPV:
|
case testpatterns.DynamicPV:
|
||||||
_, isSupported = driver.(DynamicPVTestDriver)
|
_, isSupported = driver.(DynamicPVTestDriver)
|
||||||
|
case testpatterns.CSIInlineVolume:
|
||||||
|
_, isSupported = driver.(EphemeralTestDriver)
|
||||||
default:
|
default:
|
||||||
isSupported = false
|
isSupported = false
|
||||||
}
|
}
|
||||||
|
285
test/e2e/storage/testsuites/ephemeral.go
Normal file
285
test/e2e/storage/testsuites/ephemeral.go
Normal file
@ -0,0 +1,285 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2019 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package testsuites
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo"
|
||||||
|
"github.com/onsi/gomega"
|
||||||
|
|
||||||
|
v1 "k8s.io/api/core/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/api/errors"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
|
"k8s.io/kubernetes/test/e2e/framework"
|
||||||
|
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||||
|
"k8s.io/kubernetes/test/e2e/framework/volume"
|
||||||
|
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ephemeralTestSuite struct {
|
||||||
|
tsInfo TestSuiteInfo
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ TestSuite = &ephemeralTestSuite{}
|
||||||
|
|
||||||
|
// InitEphemeralTestSuite returns ephemeralTestSuite that implements TestSuite interface
|
||||||
|
func InitEphemeralTestSuite() TestSuite {
|
||||||
|
return &ephemeralTestSuite{
|
||||||
|
tsInfo: TestSuiteInfo{
|
||||||
|
name: "ephemeral [Feature:CSIInlineVolume]",
|
||||||
|
testPatterns: []testpatterns.TestPattern{
|
||||||
|
{
|
||||||
|
Name: "inline ephemeral CSI volume",
|
||||||
|
VolType: testpatterns.CSIInlineVolume,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *ephemeralTestSuite) getTestSuiteInfo() TestSuiteInfo {
|
||||||
|
return p.tsInfo
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *ephemeralTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
|
||||||
|
type local struct {
|
||||||
|
config *PerTestConfig
|
||||||
|
testCleanup func()
|
||||||
|
|
||||||
|
testCase *EphemeralTest
|
||||||
|
}
|
||||||
|
var (
|
||||||
|
dInfo = driver.GetDriverInfo()
|
||||||
|
eDriver EphemeralTestDriver
|
||||||
|
l local
|
||||||
|
)
|
||||||
|
|
||||||
|
ginkgo.BeforeEach(func() {
|
||||||
|
ok := false
|
||||||
|
eDriver, ok = driver.(EphemeralTestDriver)
|
||||||
|
if !ok {
|
||||||
|
framework.Skipf("Driver %s doesn't support ephemeral inline volumes -- skipping", dInfo.Name)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
// This intentionally comes after checking the preconditions because it
|
||||||
|
// registers its own BeforeEach which creates the namespace. Beware that it
|
||||||
|
// also registers an AfterEach which renders f unusable. Any code using
|
||||||
|
// f must run inside an It or Context callback.
|
||||||
|
f := framework.NewDefaultFramework("ephemeral")
|
||||||
|
|
||||||
|
init := func() {
|
||||||
|
l = local{}
|
||||||
|
|
||||||
|
// Now do the more expensive test initialization.
|
||||||
|
l.config, l.testCleanup = driver.PrepareTest(f)
|
||||||
|
l.testCase = &EphemeralTest{
|
||||||
|
Client: l.config.Framework.ClientSet,
|
||||||
|
Namespace: f.Namespace.Name,
|
||||||
|
DriverName: eDriver.GetCSIDriverName(l.config),
|
||||||
|
Node: framework.NodeSelection{Name: l.config.ClientNodeName},
|
||||||
|
GetVolumeAttributes: func(volumeNumber int) map[string]string {
|
||||||
|
return eDriver.GetVolumeAttributes(l.config, volumeNumber)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
cleanup := func() {
|
||||||
|
if l.testCleanup != nil {
|
||||||
|
l.testCleanup()
|
||||||
|
l.testCleanup = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ginkgo.It("should create inline ephemeral volume", func() {
|
||||||
|
init()
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
l.testCase.TestEphemeral()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// EphemeralTest represents parameters to be used by tests for inline volumes.
|
||||||
|
// Not all parameters are used by all tests.
|
||||||
|
type EphemeralTest struct {
|
||||||
|
Client clientset.Interface
|
||||||
|
Namespace string
|
||||||
|
DriverName string
|
||||||
|
Node framework.NodeSelection
|
||||||
|
|
||||||
|
// GetVolumeAttributes returns the volume attributes for a
|
||||||
|
// certain inline ephemeral volume, enumerated starting with
|
||||||
|
// #0. Some tests might require more than one volume. They can
|
||||||
|
// all be the same or different, depending what the driver supports
|
||||||
|
// and/or wants to test.
|
||||||
|
GetVolumeAttributes func(volumeNumber int) map[string]string
|
||||||
|
|
||||||
|
// RunningPodCheck is invoked while a pod using an inline volume is running.
|
||||||
|
// It can execute additional checks on the pod and its volume(s). Any data
|
||||||
|
// returned by it is passed to StoppedPodCheck.
|
||||||
|
RunningPodCheck func(pod *v1.Pod) interface{}
|
||||||
|
|
||||||
|
// StoppedPodCheck is invoked after ensuring that the pod is gone.
|
||||||
|
// It is passed the data gather by RunningPodCheck or nil if that
|
||||||
|
// isn't defined and then can do additional checks on the node,
|
||||||
|
// like for example verifying that the ephemeral volume was really
|
||||||
|
// removed. How to do such a check is driver-specific and not
|
||||||
|
// covered by the generic storage test suite.
|
||||||
|
StoppedPodCheck func(nodeName string, runningPodData interface{})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestEphemeral tests pod creation with one ephemeral volume.
|
||||||
|
func (t EphemeralTest) TestEphemeral() {
|
||||||
|
client := t.Client
|
||||||
|
gomega.Expect(client).NotTo(gomega.BeNil(), "EphemeralTest.Client is required")
|
||||||
|
gomega.Expect(t.GetVolumeAttributes).NotTo(gomega.BeNil(), "EphemeralTest.GetVolumeAttributes is required")
|
||||||
|
gomega.Expect(t.DriverName).NotTo(gomega.BeEmpty(), "EphemeralTest.DriverName is required")
|
||||||
|
|
||||||
|
ginkgo.By(fmt.Sprintf("checking the requested inline volume exists in the pod running on node %+v", t.Node))
|
||||||
|
command := "mount | grep /mnt/test"
|
||||||
|
pod := StartInPodWithInlineVolume(client, t.Namespace, "inline-volume-tester", command,
|
||||||
|
v1.CSIVolumeSource{
|
||||||
|
Driver: t.DriverName,
|
||||||
|
VolumeAttributes: t.GetVolumeAttributes(0),
|
||||||
|
},
|
||||||
|
t.Node)
|
||||||
|
defer func() {
|
||||||
|
// pod might be nil now.
|
||||||
|
StopPod(client, pod)
|
||||||
|
}()
|
||||||
|
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace), "waiting for pod with inline volume")
|
||||||
|
runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
|
||||||
|
framework.ExpectNoError(err, "get pod")
|
||||||
|
actualNodeName := runningPod.Spec.NodeName
|
||||||
|
|
||||||
|
// Run the checker of the running pod.
|
||||||
|
var runningPodData interface{}
|
||||||
|
if t.RunningPodCheck != nil {
|
||||||
|
runningPodData = t.RunningPodCheck(pod)
|
||||||
|
}
|
||||||
|
|
||||||
|
StopPod(client, pod)
|
||||||
|
pod = nil // Don't stop twice.
|
||||||
|
|
||||||
|
if t.StoppedPodCheck != nil {
|
||||||
|
t.StoppedPodCheck(actualNodeName, runningPodData)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StartInPodWithInlineVolume starts a command in a pod with given volume mounted to /mnt/test directory.
|
||||||
|
// The caller is responsible for checking the pod and deleting it.
|
||||||
|
func StartInPodWithInlineVolume(c clientset.Interface, ns, podName, command string, csiVolume v1.CSIVolumeSource, node framework.NodeSelection) *v1.Pod {
|
||||||
|
pod := &v1.Pod{
|
||||||
|
TypeMeta: metav1.TypeMeta{
|
||||||
|
Kind: "Pod",
|
||||||
|
APIVersion: "v1",
|
||||||
|
},
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
GenerateName: podName + "-",
|
||||||
|
Labels: map[string]string{
|
||||||
|
"app": podName,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Spec: v1.PodSpec{
|
||||||
|
NodeName: node.Name,
|
||||||
|
NodeSelector: node.Selector,
|
||||||
|
Affinity: node.Affinity,
|
||||||
|
Containers: []v1.Container{
|
||||||
|
{
|
||||||
|
Name: "csi-volume-tester",
|
||||||
|
Image: volume.GetTestImage(framework.BusyBoxImage),
|
||||||
|
Command: volume.GenerateScriptCmd(command),
|
||||||
|
VolumeMounts: []v1.VolumeMount{
|
||||||
|
{
|
||||||
|
Name: "my-volume",
|
||||||
|
MountPath: "/mnt/test",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
RestartPolicy: v1.RestartPolicyNever,
|
||||||
|
Volumes: []v1.Volume{
|
||||||
|
{
|
||||||
|
Name: "my-volume",
|
||||||
|
VolumeSource: v1.VolumeSource{
|
||||||
|
CSI: &csiVolume,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
pod, err := c.CoreV1().Pods(ns).Create(pod)
|
||||||
|
framework.ExpectNoError(err, "failed to create pod")
|
||||||
|
return pod
|
||||||
|
}
|
||||||
|
|
||||||
|
// CSIInlineVolumesEnabled checks whether the running cluster has the CSIInlineVolumes feature gate enabled.
|
||||||
|
// It does that by trying to create a pod that uses that feature.
|
||||||
|
func CSIInlineVolumesEnabled(c clientset.Interface, ns string) (bool, error) {
|
||||||
|
pod := &v1.Pod{
|
||||||
|
TypeMeta: metav1.TypeMeta{
|
||||||
|
Kind: "Pod",
|
||||||
|
APIVersion: "v1",
|
||||||
|
},
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
GenerateName: "csi-inline-volume-",
|
||||||
|
},
|
||||||
|
Spec: v1.PodSpec{
|
||||||
|
Containers: []v1.Container{
|
||||||
|
{
|
||||||
|
Name: "csi-volume-tester",
|
||||||
|
Image: "no-such-registry/no-such-image",
|
||||||
|
VolumeMounts: []v1.VolumeMount{
|
||||||
|
{
|
||||||
|
Name: "my-volume",
|
||||||
|
MountPath: "/mnt/test",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
RestartPolicy: v1.RestartPolicyNever,
|
||||||
|
Volumes: []v1.Volume{
|
||||||
|
{
|
||||||
|
Name: "my-volume",
|
||||||
|
VolumeSource: v1.VolumeSource{
|
||||||
|
CSI: &v1.CSIVolumeSource{
|
||||||
|
Driver: "no-such-driver.example.com",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
pod, err := c.CoreV1().Pods(ns).Create(pod)
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case err == nil:
|
||||||
|
// Pod was created, feature supported.
|
||||||
|
StopPod(c, pod)
|
||||||
|
return true, nil
|
||||||
|
case errors.IsInvalid(err):
|
||||||
|
// "Invalid" because it uses a feature that isn't supported.
|
||||||
|
return false, nil
|
||||||
|
default:
|
||||||
|
// Unexpected error.
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
}
|
@ -101,6 +101,23 @@ type DynamicPVTestDriver interface {
|
|||||||
GetClaimSize() string
|
GetClaimSize() string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// EphemeralTestDriver represents an interface for a TestDriver that supports ephemeral inline volumes.
type EphemeralTestDriver interface {
	TestDriver

	// GetVolumeAttributes returns the volume attributes for a
	// certain inline ephemeral volume, enumerated starting with
	// #0. Some tests might require more than one volume. They can
	// all be the same or different, depending what the driver supports
	// and/or wants to test.
	GetVolumeAttributes(config *PerTestConfig, volumeNumber int) map[string]string

	// GetCSIDriverName returns the name that was used when registering with
	// kubelet. Depending on how the driver was deployed, this can be different
	// from DriverInfo.Name.
	GetCSIDriverName(config *PerTestConfig) string
}
|
||||||
|
|
||||||
// SnapshottableTestDriver represents an interface for a TestDriver that supports DynamicSnapshot
|
// SnapshottableTestDriver represents an interface for a TestDriver that supports DynamicSnapshot
|
||||||
type SnapshottableTestDriver interface {
|
type SnapshottableTestDriver interface {
|
||||||
TestDriver
|
TestDriver
|
||||||
|
@ -48,7 +48,9 @@ spec:
|
|||||||
- mountPath: /registration
|
- mountPath: /registration
|
||||||
name: registration-dir
|
name: registration-dir
|
||||||
- name: mock
|
- name: mock
|
||||||
image: quay.io/k8scsi/mock-driver:v1.1.1
|
image: quay.io/k8scsi/mock-driver:v2.1.0
|
||||||
|
args:
|
||||||
|
- "--permissive-target-path" # because of https://github.com/kubernetes/kubernetes/issues/75535
|
||||||
env:
|
env:
|
||||||
- name: CSI_ENDPOINT
|
- name: CSI_ENDPOINT
|
||||||
value: /csi/csi.sock
|
value: /csi/csi.sock
|
||||||
|
Loading…
Reference in New Issue
Block a user