storage: check CSIDriver.Spec.VolumeLifecycleModes

Using a "normal" CSI driver for an inline ephemeral volume may have
unexpected and potentially harmful effects when the driver gets a
NodePublishVolume call that it isn't expecting. To prevent that mistake,
driver deployments for a driver that supports such volumes must:
- deploy a CSIDriver object for the driver
- set CSIDriver.Spec.VolumeLifecycleModes such that it contains "ephemeral"

The default for that field is "persistent", so existing deployments
continue to work and are automatically protected against incorrect
usage.

For the E2E tests we need a way to specify the driver mode. The
existing cluster-driver-registrar doesn't support that and has also
been deprecated, so we stop using it altogether and instead deploy
and patch a CSIDriver object directly.
This commit is contained in:
Patrick Ohly 2019-07-24 08:16:43 +02:00
parent b60f08e24b
commit 7bbc06fcd4
15 changed files with 522 additions and 310 deletions

View File

@ -342,9 +342,9 @@ func TestAttacherWithCSIDriver(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fakeClient := fakeclient.NewSimpleClientset(
getTestCSIDriver("not-attachable", nil, &bFalse),
getTestCSIDriver("attachable", nil, &bTrue),
getTestCSIDriver("nil", nil, nil),
getTestCSIDriver("not-attachable", nil, &bFalse, nil),
getTestCSIDriver("attachable", nil, &bTrue, nil),
getTestCSIDriver("nil", nil, nil, nil),
)
plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t, fakeClient)
defer os.RemoveAll(tmpDir)
@ -430,9 +430,9 @@ func TestAttacherWaitForVolumeAttachmentWithCSIDriver(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fakeClient := fakeclient.NewSimpleClientset(
getTestCSIDriver("not-attachable", nil, &bFalse),
getTestCSIDriver("attachable", nil, &bTrue),
getTestCSIDriver("nil", nil, nil),
getTestCSIDriver("not-attachable", nil, &bFalse, nil),
getTestCSIDriver("attachable", nil, &bTrue, nil),
getTestCSIDriver("nil", nil, nil, nil),
)
plug, tmpDir := newTestPlugin(t, fakeClient)
defer os.RemoveAll(tmpDir)

View File

@ -29,6 +29,7 @@ import (
"k8s.io/klog"
api "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1beta1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
@ -46,32 +47,32 @@ var (
driverName,
nodeName,
attachmentID,
csiVolumeMode string
volumeLifecycleMode string
}{
"specVolID",
"volumeHandle",
"driverName",
"nodeName",
"attachmentID",
"csiVolumeMode",
"volumeLifecycleMode",
}
)
type csiMountMgr struct {
csiClientGetter
k8s kubernetes.Interface
plugin *csiPlugin
driverName csiDriverName
csiVolumeMode csiVolumeMode
volumeID string
specVolumeID string
readOnly bool
spec *volume.Spec
pod *api.Pod
podUID types.UID
options volume.VolumeOptions
publishContext map[string]string
kubeVolHost volume.KubeletVolumeHost
k8s kubernetes.Interface
plugin *csiPlugin
driverName csiDriverName
volumeLifecycleMode storage.VolumeLifecycleMode
volumeID string
specVolumeID string
readOnly bool
spec *volume.Spec
pod *api.Pod
podUID types.UID
options volume.VolumeOptions
publishContext map[string]string
kubeVolHost volume.KubeletVolumeHost
volume.MetricsProvider
}
@ -145,8 +146,8 @@ func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error
if !utilfeature.DefaultFeatureGate.Enabled(features.CSIInlineVolume) {
return fmt.Errorf("CSIInlineVolume feature required")
}
if c.csiVolumeMode != ephemeralVolumeMode {
return fmt.Errorf("unexpected volume mode: %s", c.csiVolumeMode)
if c.volumeLifecycleMode != storage.VolumeLifecycleEphemeral {
return fmt.Errorf("unexpected volume mode: %s", c.volumeLifecycleMode)
}
if volSrc.FSType != nil {
fsType = *volSrc.FSType
@ -160,8 +161,8 @@ func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error
secretRef = &api.SecretReference{Name: secretName, Namespace: ns}
}
case pvSrc != nil:
if c.csiVolumeMode != persistentVolumeMode {
return fmt.Errorf("unexpected driver mode: %s", c.csiVolumeMode)
if c.volumeLifecycleMode != storage.VolumeLifecyclePersistent {
return fmt.Errorf("unexpected driver mode: %s", c.volumeLifecycleMode)
}
fsType = pvSrc.FSType
@ -319,7 +320,7 @@ func (c *csiMountMgr) podAttributes() (map[string]string, error) {
"csi.storage.k8s.io/serviceAccount.name": c.pod.Spec.ServiceAccountName,
}
if utilfeature.DefaultFeatureGate.Enabled(features.CSIInlineVolume) {
attrs["csi.storage.k8s.io/ephemeral"] = strconv.FormatBool(c.csiVolumeMode == ephemeralVolumeMode)
attrs["csi.storage.k8s.io/ephemeral"] = strconv.FormatBool(c.volumeLifecycleMode == storage.VolumeLifecycleEphemeral)
}
klog.V(4).Infof(log("CSIDriver %q requires pod information", c.driverName))

View File

@ -28,6 +28,7 @@ import (
api "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
storagev1beta1 "k8s.io/api/storage/v1beta1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
@ -151,13 +152,16 @@ func MounterSetUpTests(t *testing.T, podInfoEnabled bool) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
klog.Infof("Starting test %s", test.name)
// Modes must be set if (and only if) CSIInlineVolume is enabled.
var modes []storagev1beta1.VolumeLifecycleMode
if test.csiInlineVolume {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIInlineVolume, true)()
modes = append(modes, storagev1beta1.VolumeLifecyclePersistent)
}
fakeClient := fakeclient.NewSimpleClientset(
getTestCSIDriver("no-info", &noPodMountInfo, nil),
getTestCSIDriver("info", &currentPodInfoMount, nil),
getTestCSIDriver("nil", nil, nil),
getTestCSIDriver("no-info", &noPodMountInfo, nil, modes),
getTestCSIDriver("info", &currentPodInfoMount, nil, modes),
getTestCSIDriver("nil", nil, nil, modes),
)
plug, tmpDir := newTestPlugin(t, fakeClient)
defer os.RemoveAll(tmpDir)
@ -278,16 +282,16 @@ func TestMounterSetUpSimple(t *testing.T) {
testCases := []struct {
name string
podUID types.UID
mode csiVolumeMode
mode storagev1beta1.VolumeLifecycleMode
fsType string
options []string
spec func(string, []string) *volume.Spec
shouldFail bool
}{
{
name: "setup with vol source",
name: "setup with ephemeral source",
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
mode: ephemeralVolumeMode,
mode: storagev1beta1.VolumeLifecycleEphemeral,
fsType: "ext4",
shouldFail: true,
spec: func(fsType string, options []string) *volume.Spec {
@ -299,7 +303,7 @@ func TestMounterSetUpSimple(t *testing.T) {
{
name: "setup with persistent source",
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
mode: persistentVolumeMode,
mode: storagev1beta1.VolumeLifecyclePersistent,
fsType: "zfs",
spec: func(fsType string, options []string) *volume.Spec {
pvSrc := makeTestPV("pv1", 20, testDriver, "vol1")
@ -311,7 +315,7 @@ func TestMounterSetUpSimple(t *testing.T) {
{
name: "setup with persistent source without unspecified fstype and options",
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
mode: persistentVolumeMode,
mode: storagev1beta1.VolumeLifecyclePersistent,
spec: func(fsType string, options []string) *volume.Spec {
return volume.NewSpecFromPersistentVolume(makeTestPV("pv1", 20, testDriver, "vol2"), false)
},
@ -345,8 +349,8 @@ func TestMounterSetUpSimple(t *testing.T) {
csiMounter := mounter.(*csiMountMgr)
csiMounter.csiClient = setupClient(t, true)
if csiMounter.csiVolumeMode != persistentVolumeMode {
t.Fatal("unexpected volume mode: ", csiMounter.csiVolumeMode)
if csiMounter.volumeLifecycleMode != storagev1beta1.VolumeLifecyclePersistent {
t.Fatal("unexpected volume mode: ", csiMounter.volumeLifecycleMode)
}
attachID := getAttachmentName(csiMounter.volumeID, string(csiMounter.driverName), string(plug.host.GetNodeName()))
@ -397,14 +401,10 @@ func TestMounterSetUpSimple(t *testing.T) {
func TestMounterSetUpWithInline(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIInlineVolume, true)()
fakeClient := fakeclient.NewSimpleClientset()
plug, tmpDir := newTestPlugin(t, fakeClient)
defer os.RemoveAll(tmpDir)
testCases := []struct {
name string
podUID types.UID
mode csiVolumeMode
mode storagev1beta1.VolumeLifecycleMode
fsType string
options []string
spec func(string, []string) *volume.Spec
@ -413,7 +413,7 @@ func TestMounterSetUpWithInline(t *testing.T) {
{
name: "setup with vol source",
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
mode: ephemeralVolumeMode,
mode: storagev1beta1.VolumeLifecycleEphemeral,
fsType: "ext4",
spec: func(fsType string, options []string) *volume.Spec {
volSrc := makeTestVol("pv1", testDriver)
@ -424,7 +424,7 @@ func TestMounterSetUpWithInline(t *testing.T) {
{
name: "setup with persistent source",
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
mode: persistentVolumeMode,
mode: storagev1beta1.VolumeLifecyclePersistent,
fsType: "zfs",
spec: func(fsType string, options []string) *volume.Spec {
pvSrc := makeTestPV("pv1", 20, testDriver, "vol1")
@ -436,7 +436,7 @@ func TestMounterSetUpWithInline(t *testing.T) {
{
name: "setup with persistent source without unspecified fstype and options",
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
mode: persistentVolumeMode,
mode: storagev1beta1.VolumeLifecyclePersistent,
spec: func(fsType string, options []string) *volume.Spec {
return volume.NewSpecFromPersistentVolume(makeTestPV("pv1", 20, testDriver, "vol2"), false)
},
@ -449,6 +449,15 @@ func TestMounterSetUpWithInline(t *testing.T) {
}
for _, tc := range testCases {
// The fake driver currently supports all modes.
volumeLifecycleModes := []storagev1beta1.VolumeLifecycleMode{
storagev1beta1.VolumeLifecycleEphemeral,
storagev1beta1.VolumeLifecyclePersistent,
}
driver := getTestCSIDriver(testDriver, nil, nil, volumeLifecycleModes)
fakeClient := fakeclient.NewSimpleClientset(driver)
plug, tmpDir := newTestPlugin(t, fakeClient)
defer os.RemoveAll(tmpDir)
registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t)
t.Run(tc.name, func(t *testing.T) {
mounter, err := plug.NewMounter(
@ -470,15 +479,15 @@ func TestMounterSetUpWithInline(t *testing.T) {
csiMounter := mounter.(*csiMountMgr)
csiMounter.csiClient = setupClient(t, true)
if csiMounter.csiVolumeMode != tc.mode {
t.Fatal("unexpected volume mode: ", csiMounter.csiVolumeMode)
if csiMounter.volumeLifecycleMode != tc.mode {
t.Fatal("unexpected volume mode: ", csiMounter.volumeLifecycleMode)
}
if csiMounter.csiVolumeMode == ephemeralVolumeMode && csiMounter.volumeID != makeVolumeHandle(string(tc.podUID), csiMounter.specVolumeID) {
if csiMounter.volumeLifecycleMode == storagev1beta1.VolumeLifecycleEphemeral && csiMounter.volumeID != makeVolumeHandle(string(tc.podUID), csiMounter.specVolumeID) {
t.Fatal("unexpected generated volumeHandle:", csiMounter.volumeID)
}
if csiMounter.csiVolumeMode == persistentVolumeMode {
if csiMounter.volumeLifecycleMode == storagev1beta1.VolumeLifecyclePersistent {
attachID := getAttachmentName(csiMounter.volumeID, string(csiMounter.driverName), string(plug.host.GetNodeName()))
attachment := makeTestAttachment(attachID, "test-node", csiMounter.spec.Name())
_, err = csiMounter.k8s.StorageV1().VolumeAttachments().Create(attachment)
@ -503,10 +512,10 @@ func TestMounterSetUpWithInline(t *testing.T) {
}
// validate stagingTargetPath
if tc.mode == ephemeralVolumeMode && vol.DeviceMountPath != "" {
if tc.mode == storagev1beta1.VolumeLifecycleEphemeral && vol.DeviceMountPath != "" {
t.Errorf("unexpected devicePathTarget sent to driver: %s", vol.DeviceMountPath)
}
if tc.mode == persistentVolumeMode {
if tc.mode == storagev1beta1.VolumeLifecyclePersistent {
devicePath, err := makeDeviceMountPath(plug, csiMounter.spec)
if err != nil {
t.Fatal(err)

View File

@ -30,6 +30,7 @@ import (
"k8s.io/klog"
api "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1beta1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
@ -67,18 +68,6 @@ type csiPlugin struct {
csiDriverLister storagelisters.CSIDriverLister
}
//TODO (vladimirvivien) add this type to storage api
type driverMode string
const persistentDriverMode driverMode = "persistent"
const ephemeralDriverMode driverMode = "ephemeral"
const combinedDriverMode driverMode = "persistent+ephemeral"
type csiVolumeMode string
const persistentVolumeMode csiVolumeMode = "persistent"
const ephemeralVolumeMode csiVolumeMode = "ephemeral"
// ProbeVolumePlugins returns implemented plugins
func ProbeVolumePlugins() []volume.VolumePlugin {
p := &csiPlugin{
@ -373,15 +362,16 @@ func (p *csiPlugin) NewMounter(
return nil, errors.New(log("volume source not found in volume.Spec"))
}
csiVolumeMode, err := p.getCSIVolumeMode(spec)
volumeLifecycleMode, err := p.getVolumeLifecycleMode(spec)
if err != nil {
return nil, err
}
// TODO(pohly): check CSIDriver.Spec.Mode to ensure that the CSI driver
// supports the current csiVolumeMode.
// In alpha it is assumed that drivers are used correctly without
// the additional sanity check.
// Check CSIDriver.Spec.Mode to ensure that the CSI driver
// supports the current volumeLifecycleMode.
if err := p.supportsVolumeLifecycleMode(driverName, volumeLifecycleMode); err != nil {
return nil, err
}
k8s := p.host.GetKubeClient()
if k8s == nil {
@ -394,17 +384,17 @@ func (p *csiPlugin) NewMounter(
}
mounter := &csiMountMgr{
plugin: p,
k8s: k8s,
spec: spec,
pod: pod,
podUID: pod.UID,
driverName: csiDriverName(driverName),
csiVolumeMode: csiVolumeMode,
volumeID: volumeHandle,
specVolumeID: spec.Name(),
readOnly: readOnly,
kubeVolHost: kvh,
plugin: p,
k8s: k8s,
spec: spec,
pod: pod,
podUID: pod.UID,
driverName: csiDriverName(driverName),
volumeLifecycleMode: volumeLifecycleMode,
volumeID: volumeHandle,
specVolumeID: spec.Name(),
readOnly: readOnly,
kubeVolHost: kvh,
}
mounter.csiClientGetter.driverName = csiDriverName(driverName)
@ -422,11 +412,11 @@ func (p *csiPlugin) NewMounter(
// persist volume info data for teardown
node := string(p.host.GetNodeName())
volData := map[string]string{
volDataKey.specVolID: spec.Name(),
volDataKey.volHandle: volumeHandle,
volDataKey.driverName: driverName,
volDataKey.nodeName: node,
volDataKey.csiVolumeMode: string(csiVolumeMode),
volDataKey.specVolID: spec.Name(),
volDataKey.volHandle: volumeHandle,
volDataKey.driverName: driverName,
volDataKey.nodeName: node,
volDataKey.volumeLifecycleMode: string(volumeLifecycleMode),
}
attachID := getAttachmentName(volumeHandle, driverName, node)
@ -486,11 +476,11 @@ func (p *csiPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.S
var spec *volume.Spec
inlineEnabled := utilfeature.DefaultFeatureGate.Enabled(features.CSIInlineVolume)
// If inlineEnabled is true and mode is ephemeralVolumeMode,
// If inlineEnabled is true and mode is VolumeLifecycleEphemeral,
// use constructVolSourceSpec to construct volume source spec.
// If inlineEnabled is false or mode is persistentVolumeMode,
// If inlineEnabled is false or mode is VolumeLifecyclePersistent,
// use constructPVSourceSpec to construct volume construct pv source spec.
if inlineEnabled && csiVolumeMode(volData[volDataKey.csiVolumeMode]) == ephemeralVolumeMode {
if inlineEnabled && storage.VolumeLifecycleMode(volData[volDataKey.volumeLifecycleMode]) == storage.VolumeLifecycleEphemeral {
spec = p.constructVolSourceSpec(volData[volDataKey.specVolID], volData[volDataKey.driverName])
return spec, nil
}
@ -565,12 +555,12 @@ func (p *csiPlugin) NewDetacher() (volume.Detacher, error) {
func (p *csiPlugin) CanAttach(spec *volume.Spec) (bool, error) {
inlineEnabled := utilfeature.DefaultFeatureGate.Enabled(features.CSIInlineVolume)
if inlineEnabled {
csiVolumeMode, err := p.getCSIVolumeMode(spec)
volumeLifecycleMode, err := p.getVolumeLifecycleMode(spec)
if err != nil {
return false, err
}
if csiVolumeMode == ephemeralVolumeMode {
if volumeLifecycleMode == storage.VolumeLifecycleEphemeral {
klog.V(5).Info(log("plugin.CanAttach = false, ephemeral mode detected for spec %v", spec.Name()))
return false, nil
}
@ -599,12 +589,12 @@ func (p *csiPlugin) CanDeviceMount(spec *volume.Spec) (bool, error) {
return true, nil
}
csiVolumeMode, err := p.getCSIVolumeMode(spec)
volumeLifecycleMode, err := p.getVolumeLifecycleMode(spec)
if err != nil {
return false, err
}
if csiVolumeMode == ephemeralVolumeMode {
if volumeLifecycleMode == storage.VolumeLifecycleEphemeral {
klog.V(5).Info(log("plugin.CanDeviceMount skipped ephemeral mode detected for spec %v", spec.Name()))
return false, nil
}
@ -775,11 +765,67 @@ func (p *csiPlugin) skipAttach(driver string) (bool, error) {
return false, nil
}
// getCSIVolumeMode returns the mode for the specified spec: {persistent|ephemeral}.
// supportsVolumeLifecycleMode checks whether the CSI driver supports a volume
// in the given lifecycle mode (persistent or ephemeral).
// A nil return means the mode is supported; a non-nil error explains why it isn't.
func (p *csiPlugin) supportsVolumeLifecycleMode(driver string, volumeMode storage.VolumeLifecycleMode) error {
	if !utilfeature.DefaultFeatureGate.Enabled(features.CSIInlineVolume) {
		// Feature disabled, therefore only "persistent" volumes are supported.
		if volumeMode != storage.VolumeLifecyclePersistent {
			return fmt.Errorf("CSIInlineVolume feature not enabled, %q volumes not supported", volumeMode)
		}
		return nil
	}

	// Retrieve CSIDriver. It's not an error if that isn't
	// possible (we don't have the lister if CSIDriverRegistry is
	// disabled) or the driver isn't found (CSIDriver is
	// optional), but then only persistent volumes are supported.
	var csiDriver *storage.CSIDriver
	if p.csiDriverLister != nil {
		kletHost, ok := p.host.(volume.KubeletVolumeHost)
		if ok {
			// Block until the informer caches are populated so the
			// lister lookup below sees a current view of CSIDriver
			// objects rather than spuriously reporting "not found".
			kletHost.WaitForCacheSync()
		}

		c, err := p.csiDriverLister.Get(driver)
		if err != nil && !apierrs.IsNotFound(err) {
			// Some internal error.
			return err
		}
		// c stays nil on IsNotFound, which the switch below treats as
		// "no CSIDriver object deployed".
		csiDriver = c
	}

	// The right response depends on whether we have information
	// about the driver and the volume mode.
	switch {
	case csiDriver == nil && volumeMode == storage.VolumeLifecyclePersistent:
		// No information, but that's okay for persistent volumes (and only those).
		return nil
	case csiDriver == nil:
		return fmt.Errorf("volume mode %q not supported by driver %s (no CSIDriver object)", volumeMode, driver)
	case containsVolumeMode(csiDriver.Spec.VolumeLifecycleModes, volumeMode):
		// Explicitly listed.
		return nil
	default:
		return fmt.Errorf("volume mode %q not supported by driver %s (only supports %q)", volumeMode, driver, csiDriver.Spec.VolumeLifecycleModes)
	}
}
// containsVolumeMode reports whether mode appears in the list of
// lifecycle modes declared by a CSIDriver object.
func containsVolumeMode(modes []storage.VolumeLifecycleMode, mode storage.VolumeLifecycleMode) bool {
	for i := range modes {
		if modes[i] == mode {
			return true
		}
	}
	return false
}
// getVolumeLifecycleMode returns the mode for the specified spec: {persistent|ephemeral}.
// 1) If mode cannot be determined, it will default to "persistent".
// 2) If Mode cannot be resolved to either {persistent | ephemeral}, an error is returned
// See https://github.com/kubernetes/enhancements/blob/master/keps/sig-storage/20190122-csi-inline-volumes.md
func (p *csiPlugin) getCSIVolumeMode(spec *volume.Spec) (csiVolumeMode, error) {
func (p *csiPlugin) getVolumeLifecycleMode(spec *volume.Spec) (storage.VolumeLifecycleMode, error) {
// 1) if volume.Spec.Volume.CSI != nil -> mode is ephemeral
// 2) if volume.Spec.PersistentVolume.Spec.CSI != nil -> persistent
volSrc, _, err := getSourceFromSpec(spec)
@ -788,9 +834,9 @@ func (p *csiPlugin) getCSIVolumeMode(spec *volume.Spec) (csiVolumeMode, error) {
}
if volSrc != nil && utilfeature.DefaultFeatureGate.Enabled(features.CSIInlineVolume) {
return ephemeralVolumeMode, nil
return storage.VolumeLifecycleEphemeral, nil
}
return persistentVolumeMode, nil
return storage.VolumeLifecyclePersistent, nil
}
func (p *csiPlugin) getPublishContext(client clientset.Interface, handle, driver, nodeName string) (map[string]string, error) {

View File

@ -25,6 +25,7 @@ import (
"testing"
api "k8s.io/api/core/v1"
storagev1beta1 "k8s.io/api/storage/v1beta1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
@ -178,7 +179,12 @@ func TestPluginGetVolumeNameWithInline(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIInlineVolume, true)()
plug, tmpDir := newTestPlugin(t, nil)
modes := []storagev1beta1.VolumeLifecycleMode{
storagev1beta1.VolumeLifecyclePersistent,
}
driver := getTestCSIDriver(testDriver, nil, nil, modes)
client := fakeclient.NewSimpleClientset(driver)
plug, tmpDir := newTestPlugin(t, client)
defer os.RemoveAll(tmpDir)
testCases := []struct {
name string
@ -403,9 +409,6 @@ func TestPluginConstructVolumeSpecWithInline(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIInlineVolume, true)()
plug, tmpDir := newTestPlugin(t, nil)
defer os.RemoveAll(tmpDir)
testCases := []struct {
name string
originSpec *volume.Spec
@ -413,6 +416,7 @@ func TestPluginConstructVolumeSpecWithInline(t *testing.T) {
volHandle string
podUID types.UID
shouldFail bool
modes []storagev1beta1.VolumeLifecycleMode
}{
{
name: "construct spec1 from persistent spec",
@ -420,6 +424,7 @@ func TestPluginConstructVolumeSpecWithInline(t *testing.T) {
volHandle: "testvol-handle1",
originSpec: volume.NewSpecFromPersistentVolume(makeTestPV("test.vol.id", 20, testDriver, "testvol-handle1"), true),
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
modes: []storagev1beta1.VolumeLifecycleMode{storagev1beta1.VolumeLifecyclePersistent},
},
{
name: "construct spec2 from persistent spec",
@ -427,18 +432,38 @@ func TestPluginConstructVolumeSpecWithInline(t *testing.T) {
volHandle: "handle2",
originSpec: volume.NewSpecFromPersistentVolume(makeTestPV("spec2", 20, testDriver, "handle2"), true),
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
modes: []storagev1beta1.VolumeLifecycleMode{storagev1beta1.VolumeLifecyclePersistent},
},
{
name: "construct spec2 from persistent spec, missing mode",
specVolID: "spec2",
volHandle: "handle2",
originSpec: volume.NewSpecFromPersistentVolume(makeTestPV("spec2", 20, testDriver, "handle2"), true),
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
modes: []storagev1beta1.VolumeLifecycleMode{},
shouldFail: true,
},
{
name: "construct spec from volume spec",
specVolID: "volspec",
originSpec: volume.NewSpecFromVolume(makeTestVol("volspec", testDriver)),
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
modes: []storagev1beta1.VolumeLifecycleMode{storagev1beta1.VolumeLifecycleEphemeral},
},
{
name: "construct spec from volume spec2",
specVolID: "volspec2",
originSpec: volume.NewSpecFromVolume(makeTestVol("volspec2", testDriver)),
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
modes: []storagev1beta1.VolumeLifecycleMode{storagev1beta1.VolumeLifecycleEphemeral},
},
{
name: "construct spec from volume spec2, missing mode",
specVolID: "volspec2",
originSpec: volume.NewSpecFromVolume(makeTestVol("volspec2", testDriver)),
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
modes: []storagev1beta1.VolumeLifecycleMode{},
shouldFail: true,
},
{
name: "missing spec",
@ -451,6 +476,11 @@ func TestPluginConstructVolumeSpecWithInline(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
driver := getTestCSIDriver(testDriver, nil, nil, tc.modes)
client := fakeclient.NewSimpleClientset(driver)
plug, tmpDir := newTestPlugin(t, client)
defer os.RemoveAll(tmpDir)
mounter, err := plug.NewMounter(
tc.originSpec,
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: tc.podUID, Namespace: testns}},
@ -520,27 +550,27 @@ func TestPluginNewMounter(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
tests := []struct {
name string
spec *volume.Spec
podUID types.UID
namespace string
csiVolumeMode csiVolumeMode
shouldFail bool
name string
spec *volume.Spec
podUID types.UID
namespace string
volumeLifecycleMode storagev1beta1.VolumeLifecycleMode
shouldFail bool
}{
{
name: "mounter from persistent volume source",
spec: volume.NewSpecFromPersistentVolume(makeTestPV("test-pv1", 20, testDriver, testVol), true),
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
namespace: "test-ns1",
csiVolumeMode: persistentVolumeMode,
name: "mounter from persistent volume source",
spec: volume.NewSpecFromPersistentVolume(makeTestPV("test-pv1", 20, testDriver, testVol), true),
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
namespace: "test-ns1",
volumeLifecycleMode: storagev1beta1.VolumeLifecyclePersistent,
},
{
name: "mounter from volume source",
spec: volume.NewSpecFromVolume(makeTestVol("test-vol1", testDriver)),
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
namespace: "test-ns2",
csiVolumeMode: ephemeralVolumeMode,
shouldFail: true, // csi inline not enabled
name: "mounter from volume source",
spec: volume.NewSpecFromVolume(makeTestVol("test-vol1", testDriver)),
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
namespace: "test-ns2",
volumeLifecycleMode: storagev1beta1.VolumeLifecycleEphemeral,
shouldFail: true, // csi inline not enabled
},
{
name: "mounter from no spec provided",
@ -589,8 +619,8 @@ func TestPluginNewMounter(t *testing.T) {
if csiClient == nil {
t.Error("mounter csiClient is nil")
}
if csiMounter.csiVolumeMode != test.csiVolumeMode {
t.Error("unexpected driver mode:", csiMounter.csiVolumeMode)
if csiMounter.volumeLifecycleMode != test.volumeLifecycleMode {
t.Error("unexpected driver mode:", csiMounter.volumeLifecycleMode)
}
// ensure data file is created
@ -619,8 +649,8 @@ func TestPluginNewMounter(t *testing.T) {
if data[volDataKey.nodeName] != string(csiMounter.plugin.host.GetNodeName()) {
t.Error("volume data file unexpected nodeName:", data[volDataKey.nodeName])
}
if data[volDataKey.csiVolumeMode] != string(test.csiVolumeMode) {
t.Error("volume data file unexpected csiVolumeMode:", data[volDataKey.csiVolumeMode])
if data[volDataKey.volumeLifecycleMode] != string(test.volumeLifecycleMode) {
t.Error("volume data file unexpected volumeLifecycleMode:", data[volDataKey.volumeLifecycleMode])
}
})
}
@ -629,13 +659,23 @@ func TestPluginNewMounter(t *testing.T) {
func TestPluginNewMounterWithInline(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIInlineVolume, true)()
bothModes := []storagev1beta1.VolumeLifecycleMode{
storagev1beta1.VolumeLifecycleEphemeral,
storagev1beta1.VolumeLifecyclePersistent,
}
persistentMode := []storagev1beta1.VolumeLifecycleMode{
storagev1beta1.VolumeLifecyclePersistent,
}
ephemeralMode := []storagev1beta1.VolumeLifecycleMode{
storagev1beta1.VolumeLifecycleEphemeral,
}
tests := []struct {
name string
spec *volume.Spec
podUID types.UID
namespace string
csiVolumeMode csiVolumeMode
shouldFail bool
name string
spec *volume.Spec
podUID types.UID
namespace string
volumeLifecycleMode storagev1beta1.VolumeLifecycleMode
shouldFail bool
}{
{
name: "mounter with missing spec",
@ -651,97 +691,119 @@ func TestPluginNewMounterWithInline(t *testing.T) {
shouldFail: true,
},
{
name: "mounter with persistent volume source",
spec: volume.NewSpecFromPersistentVolume(makeTestPV("test-pv1", 20, testDriver, testVol), true),
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
namespace: "test-ns1",
csiVolumeMode: persistentVolumeMode,
name: "mounter with persistent volume source",
spec: volume.NewSpecFromPersistentVolume(makeTestPV("test-pv1", 20, testDriver, testVol), true),
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
namespace: "test-ns1",
volumeLifecycleMode: storagev1beta1.VolumeLifecyclePersistent,
},
{
name: "mounter with volume source",
spec: volume.NewSpecFromVolume(makeTestVol("test-vol1", testDriver)),
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
namespace: "test-ns2",
csiVolumeMode: ephemeralVolumeMode,
name: "mounter with volume source",
spec: volume.NewSpecFromVolume(makeTestVol("test-vol1", testDriver)),
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
namespace: "test-ns2",
volumeLifecycleMode: storagev1beta1.VolumeLifecycleEphemeral,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
plug, tmpDir := newTestPlugin(t, nil)
defer os.RemoveAll(tmpDir)
runAll := func(t *testing.T, supported []storagev1beta1.VolumeLifecycleMode) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
driver := getTestCSIDriver(testDriver, nil, nil, supported)
fakeClient := fakeclient.NewSimpleClientset(driver)
plug, tmpDir := newTestPlugin(t, fakeClient)
defer os.RemoveAll(tmpDir)
registerFakePlugin(testDriver, "endpoint", []string{"1.2.0"}, t)
mounter, err := plug.NewMounter(
test.spec,
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: test.podUID, Namespace: test.namespace}},
volume.VolumeOptions{},
)
if test.shouldFail != (err != nil) {
t.Fatal("Unexpected error:", err)
}
if test.shouldFail && err != nil {
t.Log(err)
return
}
registerFakePlugin(testDriver, "endpoint", []string{"1.2.0"}, t)
if mounter == nil {
t.Fatal("failed to create CSI mounter")
}
csiMounter := mounter.(*csiMountMgr)
mounter, err := plug.NewMounter(
test.spec,
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: test.podUID, Namespace: test.namespace}},
volume.VolumeOptions{},
)
// validate mounter fields
if string(csiMounter.driverName) != testDriver {
t.Error("mounter driver name not set")
}
if csiMounter.volumeID == "" {
t.Error("mounter volume id not set")
}
if csiMounter.pod == nil {
t.Error("mounter pod not set")
}
if string(csiMounter.podUID) != string(test.podUID) {
t.Error("mounter podUID not set")
}
csiClient, err := csiMounter.csiClientGetter.Get()
if csiClient == nil {
t.Error("mounter csiClient is nil")
}
if csiMounter.csiVolumeMode != test.csiVolumeMode {
t.Error("unexpected driver mode:", csiMounter.csiVolumeMode)
}
// Some test cases are meant to fail because their input data is broken.
shouldFail := test.shouldFail
// Others fail if the driver does not support the volume mode.
if !containsVolumeMode(supported, test.volumeLifecycleMode) {
shouldFail = true
}
if shouldFail != (err != nil) {
t.Fatal("Unexpected error:", err)
}
if shouldFail && err != nil {
t.Log(err)
return
}
// ensure data file is created
dataDir := path.Dir(mounter.GetPath())
dataFile := filepath.Join(dataDir, volDataFileName)
if _, err := os.Stat(dataFile); err != nil {
if os.IsNotExist(err) {
t.Errorf("data file not created %s", dataFile)
} else {
if mounter == nil {
t.Fatal("failed to create CSI mounter")
}
csiMounter := mounter.(*csiMountMgr)
// validate mounter fields
if string(csiMounter.driverName) != testDriver {
t.Error("mounter driver name not set")
}
if csiMounter.volumeID == "" {
t.Error("mounter volume id not set")
}
if csiMounter.pod == nil {
t.Error("mounter pod not set")
}
if string(csiMounter.podUID) != string(test.podUID) {
t.Error("mounter podUID not set")
}
csiClient, err := csiMounter.csiClientGetter.Get()
if csiClient == nil {
t.Error("mounter csiClient is nil")
}
if csiMounter.volumeLifecycleMode != test.volumeLifecycleMode {
t.Error("unexpected driver mode:", csiMounter.volumeLifecycleMode)
}
// ensure data file is created
dataDir := path.Dir(mounter.GetPath())
dataFile := filepath.Join(dataDir, volDataFileName)
if _, err := os.Stat(dataFile); err != nil {
if os.IsNotExist(err) {
t.Errorf("data file not created %s", dataFile)
} else {
t.Fatal(err)
}
}
data, err := loadVolumeData(dataDir, volDataFileName)
if err != nil {
t.Fatal(err)
}
}
data, err := loadVolumeData(dataDir, volDataFileName)
if err != nil {
t.Fatal(err)
}
if data[volDataKey.specVolID] != csiMounter.spec.Name() {
t.Error("volume data file unexpected specVolID:", data[volDataKey.specVolID])
}
if data[volDataKey.volHandle] != csiMounter.volumeID {
t.Error("volume data file unexpected volHandle:", data[volDataKey.volHandle])
}
if data[volDataKey.driverName] != string(csiMounter.driverName) {
t.Error("volume data file unexpected driverName:", data[volDataKey.driverName])
}
if data[volDataKey.nodeName] != string(csiMounter.plugin.host.GetNodeName()) {
t.Error("volume data file unexpected nodeName:", data[volDataKey.nodeName])
}
if data[volDataKey.csiVolumeMode] != string(csiMounter.csiVolumeMode) {
t.Error("volume data file unexpected csiVolumeMode:", data[volDataKey.csiVolumeMode])
}
})
if data[volDataKey.specVolID] != csiMounter.spec.Name() {
t.Error("volume data file unexpected specVolID:", data[volDataKey.specVolID])
}
if data[volDataKey.volHandle] != csiMounter.volumeID {
t.Error("volume data file unexpected volHandle:", data[volDataKey.volHandle])
}
if data[volDataKey.driverName] != string(csiMounter.driverName) {
t.Error("volume data file unexpected driverName:", data[volDataKey.driverName])
}
if data[volDataKey.nodeName] != string(csiMounter.plugin.host.GetNodeName()) {
t.Error("volume data file unexpected nodeName:", data[volDataKey.nodeName])
}
if data[volDataKey.volumeLifecycleMode] != string(csiMounter.volumeLifecycleMode) {
t.Error("volume data file unexpected volumeLifecycleMode:", data[volDataKey.volumeLifecycleMode])
}
})
}
}
t.Run("both supported", func(t *testing.T) {
runAll(t, bothModes)
})
t.Run("persistent supported", func(t *testing.T) {
runAll(t, persistentMode)
})
t.Run("ephemeral supported", func(t *testing.T) {
runAll(t, ephemeralMode)
})
}
func TestPluginNewUnmounter(t *testing.T) {
@ -871,8 +933,8 @@ func TestPluginCanAttach(t *testing.T) {
}
for _, test := range tests {
csiDriver := getTestCSIDriver(test.driverName, nil, &test.canAttach)
t.Run(test.name, func(t *testing.T) {
csiDriver := getTestCSIDriver(test.driverName, nil, &test.canAttach, nil)
fakeCSIClient := fakeclient.NewSimpleClientset(csiDriver)
plug, tmpDir := newTestPlugin(t, fakeCSIClient)
defer os.RemoveAll(tmpDir)
@ -932,7 +994,7 @@ func TestPluginFindAttachablePlugin(t *testing.T) {
}
defer os.RemoveAll(tmpDir)
client := fakeclient.NewSimpleClientset(getTestCSIDriver(test.driverName, nil, &test.canAttach))
client := fakeclient.NewSimpleClientset(getTestCSIDriver(test.driverName, nil, &test.canAttach, nil))
factory := informers.NewSharedInformerFactory(client, CsiResyncPeriod)
host := volumetest.NewFakeVolumeHostWithCSINodeName(
tmpDir,

View File

@ -27,7 +27,10 @@ import (
api "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
storagebeta1 "k8s.io/api/storage/v1beta1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
@ -45,6 +48,7 @@ import (
// based on operations from the volume manager/reconciler/operation executor
func TestCSI_VolumeAll(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIInlineVolume, true)()
tests := []struct {
name string
specName string
@ -54,6 +58,7 @@ func TestCSI_VolumeAll(t *testing.T) {
podFunc func() *api.Pod
isInline bool
shouldFail bool
driverSpec *storagebeta1.CSIDriverSpec
}{
{
name: "PersistentVolume",
@ -69,7 +74,42 @@ func TestCSI_VolumeAll(t *testing.T) {
},
},
{
name: "ephermeral inline",
name: "PersistentVolume with driver info",
specName: "pv2",
driver: "simple-driver",
volName: "vol2",
specFunc: func(specName, driver, volName string) *volume.Spec {
return volume.NewSpecFromPersistentVolume(makeTestPV(specName, 20, driver, volName), false)
},
podFunc: func() *api.Pod {
podUID := types.UID(fmt.Sprintf("%08X", rand.Uint64()))
return &api.Pod{ObjectMeta: meta.ObjectMeta{UID: podUID, Namespace: testns}}
},
driverSpec: &storagebeta1.CSIDriverSpec{
// Required for the driver to be accepted for the persistent volume.
VolumeLifecycleModes: []storagebeta1.VolumeLifecycleMode{storagebeta1.VolumeLifecyclePersistent},
},
},
{
name: "PersistentVolume with wrong mode in driver info",
specName: "pv2",
driver: "simple-driver",
volName: "vol2",
specFunc: func(specName, driver, volName string) *volume.Spec {
return volume.NewSpecFromPersistentVolume(makeTestPV(specName, 20, driver, volName), false)
},
podFunc: func() *api.Pod {
podUID := types.UID(fmt.Sprintf("%08X", rand.Uint64()))
return &api.Pod{ObjectMeta: meta.ObjectMeta{UID: podUID, Namespace: testns}}
},
driverSpec: &storagebeta1.CSIDriverSpec{
// This will cause the volume to be rejected.
VolumeLifecycleModes: []storagebeta1.VolumeLifecycleMode{storagebeta1.VolumeLifecycleEphemeral},
},
shouldFail: true,
},
{
name: "ephemeral inline supported",
driver: "inline-driver-1",
volName: "test.vol2",
specFunc: func(specName, driver, volName string) *volume.Spec {
@ -80,6 +120,75 @@ func TestCSI_VolumeAll(t *testing.T) {
return &api.Pod{ObjectMeta: meta.ObjectMeta{UID: podUID, Namespace: testns}}
},
isInline: true,
driverSpec: &storagebeta1.CSIDriverSpec{
// Required for the driver to be accepted for the inline volume.
VolumeLifecycleModes: []storagebeta1.VolumeLifecycleMode{storagebeta1.VolumeLifecycleEphemeral},
},
},
{
name: "ephemeral inline also supported",
driver: "inline-driver-1",
volName: "test.vol2",
specFunc: func(specName, driver, volName string) *volume.Spec {
return volume.NewSpecFromVolume(makeTestVol(specName, driver))
},
podFunc: func() *api.Pod {
podUID := types.UID(fmt.Sprintf("%08X", rand.Uint64()))
return &api.Pod{ObjectMeta: meta.ObjectMeta{UID: podUID, Namespace: testns}}
},
isInline: true,
driverSpec: &storagebeta1.CSIDriverSpec{
// Required for the driver to be accepted for the inline volume.
VolumeLifecycleModes: []storagebeta1.VolumeLifecycleMode{storagebeta1.VolumeLifecyclePersistent, storagebeta1.VolumeLifecycleEphemeral},
},
},
{
name: "ephemeral inline without CSIDriver info",
driver: "inline-driver-2",
volName: "test.vol3",
specFunc: func(specName, driver, volName string) *volume.Spec {
return volume.NewSpecFromVolume(makeTestVol(specName, driver))
},
podFunc: func() *api.Pod {
podUID := types.UID(fmt.Sprintf("%08X", rand.Uint64()))
return &api.Pod{ObjectMeta: meta.ObjectMeta{UID: podUID, Namespace: testns}}
},
isInline: true,
},
{
name: "ephemeral inline with driver that has no mode",
driver: "inline-driver-3",
volName: "test.vol4",
specFunc: func(specName, driver, volName string) *volume.Spec {
return volume.NewSpecFromVolume(makeTestVol(specName, driver))
},
podFunc: func() *api.Pod {
podUID := types.UID(fmt.Sprintf("%08X", rand.Uint64()))
return &api.Pod{ObjectMeta: meta.ObjectMeta{UID: podUID, Namespace: testns}}
},
isInline: true,
driverSpec: &storagebeta1.CSIDriverSpec{
// This means the driver *cannot* handle the inline volume because
// the default is "persistent".
VolumeLifecycleModes: nil,
},
},
{
name: "ephemeral inline with driver that has wrong mode",
driver: "inline-driver-3",
volName: "test.vol4",
specFunc: func(specName, driver, volName string) *volume.Spec {
return volume.NewSpecFromVolume(makeTestVol(specName, driver))
},
podFunc: func() *api.Pod {
podUID := types.UID(fmt.Sprintf("%08X", rand.Uint64()))
return &api.Pod{ObjectMeta: meta.ObjectMeta{UID: podUID, Namespace: testns}}
},
isInline: true,
driverSpec: &storagebeta1.CSIDriverSpec{
// This means the driver *cannot* handle the inline volume.
VolumeLifecycleModes: []storagebeta1.VolumeLifecycleMode{storagebeta1.VolumeLifecyclePersistent},
},
},
{
name: "missing spec",
@ -118,10 +227,27 @@ func TestCSI_VolumeAll(t *testing.T) {
t.Fatalf("can't create temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
client := fakeclient.NewSimpleClientset()
var driverInfo *storagebeta1.CSIDriver
objs := []runtime.Object{}
if test.driverSpec != nil {
driverInfo = &storagebeta1.CSIDriver{
ObjectMeta: metav1.ObjectMeta{
Name: test.driver,
},
Spec: *test.driverSpec,
}
objs = append(objs, driverInfo)
}
client := fakeclient.NewSimpleClientset(objs...)
fakeWatcher := watch.NewRaceFreeFake()
factory := informers.NewSharedInformerFactory(client, CsiResyncPeriod)
factory := informers.NewSharedInformerFactory(client, time.Hour /* disable resync */)
csiDriverInformer := factory.Storage().V1beta1().CSIDrivers()
if driverInfo != nil {
csiDriverInformer.Informer().GetStore().Add(driverInfo)
}
factory.Start(wait.NeverStop)
host := volumetest.NewFakeVolumeHostWithCSINodeName(
@ -129,7 +255,7 @@ func TestCSI_VolumeAll(t *testing.T) {
client,
nil,
"csi-node",
factory.Storage().V1beta1().CSIDrivers().Lister(),
csiDriverInformer.Lister(),
)
plugMgr := &volume.VolumePluginMgr{}
@ -253,6 +379,22 @@ func TestCSI_VolumeAll(t *testing.T) {
}
mounter, err := volPlug.NewMounter(volSpec, pod, volume.VolumeOptions{})
if test.isInline && (test.driverSpec == nil || !containsVolumeMode(test.driverSpec.VolumeLifecycleModes, storagebeta1.VolumeLifecycleEphemeral)) {
// This *must* fail because a CSIDriver.Spec.VolumeLifecycleModes entry "ephemeral"
// is required.
if err == nil || mounter != nil {
t.Fatalf("csiTest.VolumeAll volPlugin.NewMounter should have failed for inline volume due to lack of support for inline volumes, got: %+v, %s", mounter, err)
}
return
}
if !test.isInline && test.driverSpec != nil && !containsVolumeMode(test.driverSpec.VolumeLifecycleModes, storagebeta1.VolumeLifecyclePersistent) {
// This *must* fail because a CSIDriver.Spec.VolumeLifecycleModes entry "persistent"
// is required when a driver object is available.
if err == nil || mounter != nil {
t.Fatalf("csiTest.VolumeAll volPlugin.NewMounter should have failed for persistent volume due to lack of support for persistent volumes, got: %+v, %s", mounter, err)
}
return
}
if err != nil || mounter == nil {
t.Fatalf("csiTest.VolumeAll volPlugin.NewMounter is nil or error: %s", err)
}

View File

@ -84,14 +84,15 @@ func makeTestVol(name string, driverName string) *api.Volume {
}
}
func getTestCSIDriver(name string, podInfoMount *bool, attachable *bool) *storagev1beta1.CSIDriver {
func getTestCSIDriver(name string, podInfoMount *bool, attachable *bool, volumeLifecycleModes []storagev1beta1.VolumeLifecycleMode) *storagev1beta1.CSIDriver {
return &storagev1beta1.CSIDriver{
ObjectMeta: meta.ObjectMeta{
Name: name,
},
Spec: storagev1beta1.CSIDriverSpec{
PodInfoOnMount: podInfoMount,
AttachRequired: attachable,
PodInfoOnMount: podInfoMount,
AttachRequired: attachable,
VolumeLifecycleModes: volumeLifecycleModes,
},
}
}

View File

@ -43,6 +43,7 @@ import (
"github.com/onsi/ginkgo"
"k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
storagev1beta1 "k8s.io/api/storage/v1beta1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/test/e2e/framework"
@ -195,7 +196,6 @@ var _ testsuites.DynamicPVTestDriver = &mockCSIDriver{}
// InitMockCSIDriver returns a mockCSIDriver that implements TestDriver interface
func InitMockCSIDriver(driverOpts CSIMockDriverOpts) testsuites.TestDriver {
driverManifests := []string{
"test/e2e/testing-manifests/storage-csi/cluster-driver-registrar/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml",
@ -206,7 +206,7 @@ func InitMockCSIDriver(driverOpts CSIMockDriverOpts) testsuites.TestDriver {
}
if driverOpts.RegisterDriver {
driverManifests = append(driverManifests, "test/e2e/testing-manifests/storage-csi/mock/csi-mock-cluster-driver-registrar.yaml")
driverManifests = append(driverManifests, "test/e2e/testing-manifests/storage-csi/mock/csi-mock-driverinfo.yaml")
}
if !driverOpts.DisableAttach {
@ -288,14 +288,18 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTest
}
o := utils.PatchCSIOptions{
OldDriverName: "csi-mock",
NewDriverName: "csi-mock-" + f.UniqueName,
DriverContainerName: "mock",
DriverContainerArguments: containerArgs,
ProvisionerContainerName: "csi-provisioner",
ClusterRegistrarContainerName: "csi-cluster-driver-registrar",
NodeName: config.ClientNodeName,
PodInfo: m.podInfo,
OldDriverName: "csi-mock",
NewDriverName: "csi-mock-" + f.UniqueName,
DriverContainerName: "mock",
DriverContainerArguments: containerArgs,
ProvisionerContainerName: "csi-provisioner",
NodeName: config.ClientNodeName,
PodInfo: m.podInfo,
CanAttach: &m.attachable,
VolumeLifecycleModes: []storagev1beta1.VolumeLifecycleMode{
storagev1beta1.VolumeLifecyclePersistent,
storagev1beta1.VolumeLifecycleEphemeral,
},
}
cleanup, err := f.CreateFromManifests(func(item interface{}) error {
return utils.PatchCSIDeployment(f, o, item)

View File

@ -114,7 +114,9 @@ type EphemeralTestDriver interface {
// GetCSIDriverName returns the name that was used when registering with
// kubelet. Depending on how the driver was deployed, this can be different
// from DriverInfo.Name.
// from DriverInfo.Name. Starting with Kubernetes 1.16, there must also
// be a CSIDriver object under the same name with a "mode" field that enables
// usage of the driver for ephemeral inline volumes.
GetCSIDriverName(config *PerTestConfig) string
}

View File

@ -18,12 +18,12 @@ package utils
import (
"path"
"strconv"
"strings"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
storagev1beta1 "k8s.io/api/storage/v1beta1"
"k8s.io/kubernetes/test/e2e/framework"
)
@ -94,10 +94,6 @@ func PatchCSIDeployment(f *framework.Framework, o PatchCSIOptions, object interf
// Driver name is expected to be the same
// as the snapshotter here.
container.Args = append(container.Args, "--snapshotter="+o.NewDriverName)
case o.ClusterRegistrarContainerName:
if o.PodInfo != nil {
container.Args = append(container.Args, "--pod-info-mount="+strconv.FormatBool(*o.PodInfo))
}
}
}
}
@ -125,6 +121,17 @@ func PatchCSIDeployment(f *framework.Framework, o PatchCSIOptions, object interf
// as the provisioner name here.
object.Provisioner = o.NewDriverName
}
case *storagev1beta1.CSIDriver:
if o.NewDriverName != "" {
object.Name = o.NewDriverName
}
if o.PodInfo != nil {
object.Spec.PodInfoOnMount = o.PodInfo
}
if o.CanAttach != nil {
object.Spec.AttachRequired = o.CanAttach
}
object.Spec.VolumeLifecycleModes = o.VolumeLifecycleModes
}
return nil
@ -154,12 +161,18 @@ type PatchCSIOptions struct {
// If non-empty, --snapshotter with new name will be appended
// to the argument list.
SnapshotterContainerName string
// The name of the container which has the cluster-driver-registrar
// binary.
ClusterRegistrarContainerName string
// If non-empty, all pods are forced to run on this node.
NodeName string
// If not nil, the argument to pass to the cluster-driver-registrar's
// pod-info-mount argument.
// If not nil, the value to use for the CSIDriver.Spec.PodInfo
// field *if* the driver deploys a CSIDriver object. Ignored
// otherwise.
PodInfo *bool
// If not nil, the value to use for the CSIDriver.Spec.CanAttach
// field *if* the driver deploys a CSIDriver object. Ignored
// otherwise.
CanAttach *bool
// The value to use for the CSIDriver.Spec.VolumeLifecycleModes
// field *if* the driver deploys a CSIDriver object. Ignored
// otherwise.
VolumeLifecycleModes []storagev1beta1.VolumeLifecycleMode
}

View File

@ -1 +0,0 @@
The original file is (or will be) https://github.com/kubernetes-csi/cluster-driver-registrar/blob/master/deploy/kubernetes/rbac.yaml

View File

@ -1,38 +0,0 @@
# This YAML file contains all RBAC objects that are necessary to run
# cluster-driver-registrar.
#
# In production, each CSI driver deployment has to be customized:
# - to avoid conflicts, use non-default namespace and different names
# for non-namespaced entities like the ClusterRole
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-cluster-driver-registrar
# replace with non-default namespace name
namespace: default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cluster-driver-registrar-runner
rules:
- apiGroups: ["storage.k8s.io"]
resources: ["csidrivers"]
verbs: ["create", "delete"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-cluster-driver-registrar-role
subjects:
- kind: ServiceAccount
name: csi-cluster-driver-registrar
# replace with non-default namespace name
namespace: default
roleRef:
kind: ClusterRole
name: cluster-driver-registrar-runner
apiGroup: rbac.authorization.k8s.io

View File

@ -1,36 +0,0 @@
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: csi-mockplugin-cluster-driver-registrar
spec:
selector:
matchLabels:
app: csi-mockplugin-cluster-driver-registrar
replicas: 1
template:
metadata:
labels:
app: csi-mockplugin-cluster-driver-registrar
spec:
serviceAccountName: csi-mock
containers:
- name: csi-cluster-driver-registrar
# TODO: replace with official image
image: quay.io/k8scsi/csi-cluster-driver-registrar:tmp-test-1.14
args:
- --v=5
- --csi-address=$(ADDRESS)
env:
- name: ADDRESS
value: /csi/csi.sock
imagePullPolicy: Always
securityContext:
privileged: true
volumeMounts:
- mountPath: /csi
name: socket-dir
volumes:
- hostPath:
path: /var/lib/kubelet/plugins/csi-mock
type: DirectoryOrCreate
name: socket-dir

View File

@ -18,7 +18,6 @@ spec:
# TODO: replace with official 1.4.0 release when ready
image: quay.io/k8scsi/csi-provisioner:v1.4.0-rc1
args:
- "--provisioner=csi-hostpath"
- "--csi-address=$(ADDRESS)"
- "--connection-timeout=15s"
env:
@ -51,6 +50,7 @@ spec:
- name: mock
image: quay.io/k8scsi/mock-driver:v2.1.0
args:
- "--name=mock.storage.k8s.io"
- "--permissive-target-path" # because of https://github.com/kubernetes/kubernetes/issues/75535
env:
- name: CSI_ENDPOINT

View File

@ -0,0 +1,7 @@
apiVersion: storage.k8s.io/v1beta1
kind: CSIDriver
metadata:
name: mock.storage.k8s.io
# Intentionally no spec. All values in the
# spec will be inserted dynamically by PatchCSIDeployment()
# in test/e2e/storage/utils/deployment.go.