storage: check CSIDriver.Spec.VolumeLifecycleModes
Using a "normal" CSI driver for an inline ephemeral volume may have unexpected and potentially harmful effects when the driver gets a NodePublishVolume call that it isn't expecting. To prevent that mistake, driver deployments for a driver that supports such volumes must: - deploy a CSIDriver object for the driver - set CSIDriver.Spec.VolumeLifecycleModes such that it contains "ephemeral" The default for that field is "persistent", so existing deployments continue to work and are automatically protected against incorrect usage. For the E2E tests we need a way to specify the driver mode. The existing cluster-driver-registrar doesn't support that and also was deprecated, so we stop using it altogether and instead deploy and patch a CSIDriver object.
This commit is contained in:
parent b60f08e24b
commit 7bbc06fcd4
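For illustration, the opt-in described above amounts to creating a CSIDriver object whose spec lists the allowed lifecycle modes. The following is a minimal sketch using the storage v1beta1 API that this change relies on; the driver name and the decision to list both modes are assumptions for the example, not part of the commit:

package main

import (
	"fmt"

	storagev1beta1 "k8s.io/api/storage/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleCSIDriver returns a CSIDriver object for a hypothetical driver that
// explicitly opts into inline ephemeral volumes in addition to persistent
// volumes. Leaving VolumeLifecycleModes unset keeps the default ("persistent"
// only), which is why existing deployments stay protected against unexpected
// NodePublishVolume calls for inline volumes.
func exampleCSIDriver(name string) *storagev1beta1.CSIDriver {
	return &storagev1beta1.CSIDriver{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: storagev1beta1.CSIDriverSpec{
			VolumeLifecycleModes: []storagev1beta1.VolumeLifecycleMode{
				storagev1beta1.VolumeLifecyclePersistent,
				storagev1beta1.VolumeLifecycleEphemeral,
			},
		},
	}
}

func main() {
	// "example.csi.k8s.io" is a placeholder driver name.
	driver := exampleCSIDriver("example.csi.k8s.io")
	fmt.Println(driver.Name, driver.Spec.VolumeLifecycleModes)
}

A real deployment would typically apply the equivalent manifest together with the driver's other objects; the E2E tests in this change achieve the same by deploying and patching a CSIDriver object.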
@@ -342,9 +342,9 @@ func TestAttacherWithCSIDriver(t *testing.T) {
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
 fakeClient := fakeclient.NewSimpleClientset(
-getTestCSIDriver("not-attachable", nil, &bFalse),
-getTestCSIDriver("attachable", nil, &bTrue),
-getTestCSIDriver("nil", nil, nil),
+getTestCSIDriver("not-attachable", nil, &bFalse, nil),
+getTestCSIDriver("attachable", nil, &bTrue, nil),
+getTestCSIDriver("nil", nil, nil, nil),
 )
 plug, fakeWatcher, tmpDir, _ := newTestWatchPlugin(t, fakeClient)
 defer os.RemoveAll(tmpDir)
@@ -430,9 +430,9 @@ func TestAttacherWaitForVolumeAttachmentWithCSIDriver(t *testing.T) {
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
 fakeClient := fakeclient.NewSimpleClientset(
-getTestCSIDriver("not-attachable", nil, &bFalse),
-getTestCSIDriver("attachable", nil, &bTrue),
-getTestCSIDriver("nil", nil, nil),
+getTestCSIDriver("not-attachable", nil, &bFalse, nil),
+getTestCSIDriver("attachable", nil, &bTrue, nil),
+getTestCSIDriver("nil", nil, nil, nil),
 )
 plug, tmpDir := newTestPlugin(t, fakeClient)
 defer os.RemoveAll(tmpDir)
@@ -29,6 +29,7 @@ import (
 "k8s.io/klog"
 
 api "k8s.io/api/core/v1"
+storage "k8s.io/api/storage/v1beta1"
 apierrs "k8s.io/apimachinery/pkg/api/errors"
 "k8s.io/apimachinery/pkg/types"
 utilfeature "k8s.io/apiserver/pkg/util/feature"
@@ -46,14 +47,14 @@ var (
 driverName,
 nodeName,
 attachmentID,
-csiVolumeMode string
+volumeLifecycleMode string
 }{
 "specVolID",
 "volumeHandle",
 "driverName",
 "nodeName",
 "attachmentID",
-"csiVolumeMode",
+"volumeLifecycleMode",
 }
 )
 
@@ -62,7 +63,7 @@ type csiMountMgr struct {
 k8s kubernetes.Interface
 plugin *csiPlugin
 driverName csiDriverName
-csiVolumeMode csiVolumeMode
+volumeLifecycleMode storage.VolumeLifecycleMode
 volumeID string
 specVolumeID string
 readOnly bool
@@ -145,8 +146,8 @@ func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error
 if !utilfeature.DefaultFeatureGate.Enabled(features.CSIInlineVolume) {
 return fmt.Errorf("CSIInlineVolume feature required")
 }
-if c.csiVolumeMode != ephemeralVolumeMode {
-return fmt.Errorf("unexpected volume mode: %s", c.csiVolumeMode)
+if c.volumeLifecycleMode != storage.VolumeLifecycleEphemeral {
+return fmt.Errorf("unexpected volume mode: %s", c.volumeLifecycleMode)
 }
 if volSrc.FSType != nil {
 fsType = *volSrc.FSType
@@ -160,8 +161,8 @@ func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error
 secretRef = &api.SecretReference{Name: secretName, Namespace: ns}
 }
 case pvSrc != nil:
-if c.csiVolumeMode != persistentVolumeMode {
-return fmt.Errorf("unexpected driver mode: %s", c.csiVolumeMode)
+if c.volumeLifecycleMode != storage.VolumeLifecyclePersistent {
+return fmt.Errorf("unexpected driver mode: %s", c.volumeLifecycleMode)
 }
 
 fsType = pvSrc.FSType
@@ -319,7 +320,7 @@ func (c *csiMountMgr) podAttributes() (map[string]string, error) {
 "csi.storage.k8s.io/serviceAccount.name": c.pod.Spec.ServiceAccountName,
 }
 if utilfeature.DefaultFeatureGate.Enabled(features.CSIInlineVolume) {
-attrs["csi.storage.k8s.io/ephemeral"] = strconv.FormatBool(c.csiVolumeMode == ephemeralVolumeMode)
+attrs["csi.storage.k8s.io/ephemeral"] = strconv.FormatBool(c.volumeLifecycleMode == storage.VolumeLifecycleEphemeral)
 }
 
 klog.V(4).Infof(log("CSIDriver %q requires pod information", c.driverName))
@@ -28,6 +28,7 @@ import (
 
 api "k8s.io/api/core/v1"
 storage "k8s.io/api/storage/v1"
+storagev1beta1 "k8s.io/api/storage/v1beta1"
 meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
 utilfeature "k8s.io/apiserver/pkg/util/feature"
@@ -151,13 +152,16 @@ func MounterSetUpTests(t *testing.T, podInfoEnabled bool) {
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
 klog.Infof("Starting test %s", test.name)
+// Modes must be set if (and only if) CSIInlineVolume is enabled.
+var modes []storagev1beta1.VolumeLifecycleMode
 if test.csiInlineVolume {
 defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIInlineVolume, true)()
+modes = append(modes, storagev1beta1.VolumeLifecyclePersistent)
 }
 fakeClient := fakeclient.NewSimpleClientset(
-getTestCSIDriver("no-info", &noPodMountInfo, nil),
-getTestCSIDriver("info", &currentPodInfoMount, nil),
-getTestCSIDriver("nil", nil, nil),
+getTestCSIDriver("no-info", &noPodMountInfo, nil, modes),
+getTestCSIDriver("info", &currentPodInfoMount, nil, modes),
+getTestCSIDriver("nil", nil, nil, modes),
 )
 plug, tmpDir := newTestPlugin(t, fakeClient)
 defer os.RemoveAll(tmpDir)
@@ -278,16 +282,16 @@ func TestMounterSetUpSimple(t *testing.T) {
 testCases := []struct {
 name string
 podUID types.UID
-mode csiVolumeMode
+mode storagev1beta1.VolumeLifecycleMode
 fsType string
 options []string
 spec func(string, []string) *volume.Spec
 shouldFail bool
 }{
 {
-name: "setup with vol source",
+name: "setup with ephemeral source",
 podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
-mode: ephemeralVolumeMode,
+mode: storagev1beta1.VolumeLifecycleEphemeral,
 fsType: "ext4",
 shouldFail: true,
 spec: func(fsType string, options []string) *volume.Spec {
@@ -299,7 +303,7 @@ func TestMounterSetUpSimple(t *testing.T) {
 {
 name: "setup with persistent source",
 podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
-mode: persistentVolumeMode,
+mode: storagev1beta1.VolumeLifecyclePersistent,
 fsType: "zfs",
 spec: func(fsType string, options []string) *volume.Spec {
 pvSrc := makeTestPV("pv1", 20, testDriver, "vol1")
@@ -311,7 +315,7 @@ func TestMounterSetUpSimple(t *testing.T) {
 {
 name: "setup with persistent source without unspecified fstype and options",
 podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
-mode: persistentVolumeMode,
+mode: storagev1beta1.VolumeLifecyclePersistent,
 spec: func(fsType string, options []string) *volume.Spec {
 return volume.NewSpecFromPersistentVolume(makeTestPV("pv1", 20, testDriver, "vol2"), false)
 },
@@ -345,8 +349,8 @@ func TestMounterSetUpSimple(t *testing.T) {
 csiMounter := mounter.(*csiMountMgr)
 csiMounter.csiClient = setupClient(t, true)
 
-if csiMounter.csiVolumeMode != persistentVolumeMode {
-t.Fatal("unexpected volume mode: ", csiMounter.csiVolumeMode)
+if csiMounter.volumeLifecycleMode != storagev1beta1.VolumeLifecyclePersistent {
+t.Fatal("unexpected volume mode: ", csiMounter.volumeLifecycleMode)
 }
 
 attachID := getAttachmentName(csiMounter.volumeID, string(csiMounter.driverName), string(plug.host.GetNodeName()))
@@ -397,14 +401,10 @@ func TestMounterSetUpSimple(t *testing.T) {
 func TestMounterSetUpWithInline(t *testing.T) {
 defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIInlineVolume, true)()
 
-fakeClient := fakeclient.NewSimpleClientset()
-plug, tmpDir := newTestPlugin(t, fakeClient)
-defer os.RemoveAll(tmpDir)
-
 testCases := []struct {
 name string
 podUID types.UID
-mode csiVolumeMode
+mode storagev1beta1.VolumeLifecycleMode
 fsType string
 options []string
 spec func(string, []string) *volume.Spec
@@ -413,7 +413,7 @@ func TestMounterSetUpWithInline(t *testing.T) {
 {
 name: "setup with vol source",
 podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
-mode: ephemeralVolumeMode,
+mode: storagev1beta1.VolumeLifecycleEphemeral,
 fsType: "ext4",
 spec: func(fsType string, options []string) *volume.Spec {
 volSrc := makeTestVol("pv1", testDriver)
@@ -424,7 +424,7 @@ func TestMounterSetUpWithInline(t *testing.T) {
 {
 name: "setup with persistent source",
 podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
-mode: persistentVolumeMode,
+mode: storagev1beta1.VolumeLifecyclePersistent,
 fsType: "zfs",
 spec: func(fsType string, options []string) *volume.Spec {
 pvSrc := makeTestPV("pv1", 20, testDriver, "vol1")
@@ -436,7 +436,7 @@ func TestMounterSetUpWithInline(t *testing.T) {
 {
 name: "setup with persistent source without unspecified fstype and options",
 podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
-mode: persistentVolumeMode,
+mode: storagev1beta1.VolumeLifecyclePersistent,
 spec: func(fsType string, options []string) *volume.Spec {
 return volume.NewSpecFromPersistentVolume(makeTestPV("pv1", 20, testDriver, "vol2"), false)
 },
@@ -449,6 +449,15 @@ func TestMounterSetUpWithInline(t *testing.T) {
 }
 
 for _, tc := range testCases {
+// The fake driver currently supports all modes.
+volumeLifecycleModes := []storagev1beta1.VolumeLifecycleMode{
+storagev1beta1.VolumeLifecycleEphemeral,
+storagev1beta1.VolumeLifecyclePersistent,
+}
+driver := getTestCSIDriver(testDriver, nil, nil, volumeLifecycleModes)
+fakeClient := fakeclient.NewSimpleClientset(driver)
+plug, tmpDir := newTestPlugin(t, fakeClient)
+defer os.RemoveAll(tmpDir)
 registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t)
 t.Run(tc.name, func(t *testing.T) {
 mounter, err := plug.NewMounter(
@@ -470,15 +479,15 @@ func TestMounterSetUpWithInline(t *testing.T) {
 csiMounter := mounter.(*csiMountMgr)
 csiMounter.csiClient = setupClient(t, true)
 
-if csiMounter.csiVolumeMode != tc.mode {
-t.Fatal("unexpected volume mode: ", csiMounter.csiVolumeMode)
+if csiMounter.volumeLifecycleMode != tc.mode {
+t.Fatal("unexpected volume mode: ", csiMounter.volumeLifecycleMode)
 }
 
-if csiMounter.csiVolumeMode == ephemeralVolumeMode && csiMounter.volumeID != makeVolumeHandle(string(tc.podUID), csiMounter.specVolumeID) {
+if csiMounter.volumeLifecycleMode == storagev1beta1.VolumeLifecycleEphemeral && csiMounter.volumeID != makeVolumeHandle(string(tc.podUID), csiMounter.specVolumeID) {
 t.Fatal("unexpected generated volumeHandle:", csiMounter.volumeID)
 }
 
-if csiMounter.csiVolumeMode == persistentVolumeMode {
+if csiMounter.volumeLifecycleMode == storagev1beta1.VolumeLifecyclePersistent {
 attachID := getAttachmentName(csiMounter.volumeID, string(csiMounter.driverName), string(plug.host.GetNodeName()))
 attachment := makeTestAttachment(attachID, "test-node", csiMounter.spec.Name())
 _, err = csiMounter.k8s.StorageV1().VolumeAttachments().Create(attachment)
@@ -503,10 +512,10 @@ func TestMounterSetUpWithInline(t *testing.T) {
 }
 
 // validate stagingTargetPath
-if tc.mode == ephemeralVolumeMode && vol.DeviceMountPath != "" {
+if tc.mode == storagev1beta1.VolumeLifecycleEphemeral && vol.DeviceMountPath != "" {
 t.Errorf("unexpected devicePathTarget sent to driver: %s", vol.DeviceMountPath)
 }
-if tc.mode == persistentVolumeMode {
+if tc.mode == storagev1beta1.VolumeLifecyclePersistent {
 devicePath, err := makeDeviceMountPath(plug, csiMounter.spec)
 if err != nil {
 t.Fatal(err)
@@ -30,6 +30,7 @@ import (
 "k8s.io/klog"
 
 api "k8s.io/api/core/v1"
+storage "k8s.io/api/storage/v1beta1"
 apierrs "k8s.io/apimachinery/pkg/api/errors"
 meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
@@ -67,18 +68,6 @@ type csiPlugin struct {
 csiDriverLister storagelisters.CSIDriverLister
 }
 
-//TODO (vladimirvivien) add this type to storage api
-type driverMode string
-
-const persistentDriverMode driverMode = "persistent"
-const ephemeralDriverMode driverMode = "ephemeral"
-const combinedDriverMode driverMode = "persistent+ephemeral"
-
-type csiVolumeMode string
-
-const persistentVolumeMode csiVolumeMode = "persistent"
-const ephemeralVolumeMode csiVolumeMode = "ephemeral"
-
 // ProbeVolumePlugins returns implemented plugins
 func ProbeVolumePlugins() []volume.VolumePlugin {
 p := &csiPlugin{
@@ -373,15 +362,16 @@ func (p *csiPlugin) NewMounter(
 return nil, errors.New(log("volume source not found in volume.Spec"))
 }
 
-csiVolumeMode, err := p.getCSIVolumeMode(spec)
+volumeLifecycleMode, err := p.getVolumeLifecycleMode(spec)
 if err != nil {
 return nil, err
 }
 
-// TODO(pohly): check CSIDriver.Spec.Mode to ensure that the CSI driver
-// supports the current csiVolumeMode.
-// In alpha it is assumed that drivers are used correctly without
-// the additional sanity check.
+// Check CSIDriver.Spec.Mode to ensure that the CSI driver
+// supports the current volumeLifecycleMode.
+if err := p.supportsVolumeLifecycleMode(driverName, volumeLifecycleMode); err != nil {
+return nil, err
+}
 
 k8s := p.host.GetKubeClient()
 if k8s == nil {
@@ -400,7 +390,7 @@ func (p *csiPlugin) NewMounter(
 pod: pod,
 podUID: pod.UID,
 driverName: csiDriverName(driverName),
-csiVolumeMode: csiVolumeMode,
+volumeLifecycleMode: volumeLifecycleMode,
 volumeID: volumeHandle,
 specVolumeID: spec.Name(),
 readOnly: readOnly,
@@ -426,7 +416,7 @@ func (p *csiPlugin) NewMounter(
 volDataKey.volHandle: volumeHandle,
 volDataKey.driverName: driverName,
 volDataKey.nodeName: node,
-volDataKey.csiVolumeMode: string(csiVolumeMode),
+volDataKey.volumeLifecycleMode: string(volumeLifecycleMode),
 }
 
 attachID := getAttachmentName(volumeHandle, driverName, node)
@@ -486,11 +476,11 @@ func (p *csiPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.S
 var spec *volume.Spec
 inlineEnabled := utilfeature.DefaultFeatureGate.Enabled(features.CSIInlineVolume)
 
-// If inlineEnabled is true and mode is ephemeralVolumeMode,
+// If inlineEnabled is true and mode is VolumeLifecycleEphemeral,
 // use constructVolSourceSpec to construct volume source spec.
-// If inlineEnabled is false or mode is persistentVolumeMode,
+// If inlineEnabled is false or mode is VolumeLifecyclePersistent,
 // use constructPVSourceSpec to construct volume construct pv source spec.
-if inlineEnabled && csiVolumeMode(volData[volDataKey.csiVolumeMode]) == ephemeralVolumeMode {
+if inlineEnabled && storage.VolumeLifecycleMode(volData[volDataKey.volumeLifecycleMode]) == storage.VolumeLifecycleEphemeral {
 spec = p.constructVolSourceSpec(volData[volDataKey.specVolID], volData[volDataKey.driverName])
 return spec, nil
 }
@@ -565,12 +555,12 @@ func (p *csiPlugin) NewDetacher() (volume.Detacher, error) {
 func (p *csiPlugin) CanAttach(spec *volume.Spec) (bool, error) {
 inlineEnabled := utilfeature.DefaultFeatureGate.Enabled(features.CSIInlineVolume)
 if inlineEnabled {
-csiVolumeMode, err := p.getCSIVolumeMode(spec)
+volumeLifecycleMode, err := p.getVolumeLifecycleMode(spec)
 if err != nil {
 return false, err
 }
 
-if csiVolumeMode == ephemeralVolumeMode {
+if volumeLifecycleMode == storage.VolumeLifecycleEphemeral {
 klog.V(5).Info(log("plugin.CanAttach = false, ephemeral mode detected for spec %v", spec.Name()))
 return false, nil
 }
@@ -599,12 +589,12 @@ func (p *csiPlugin) CanDeviceMount(spec *volume.Spec) (bool, error) {
 return true, nil
 }
 
-csiVolumeMode, err := p.getCSIVolumeMode(spec)
+volumeLifecycleMode, err := p.getVolumeLifecycleMode(spec)
 if err != nil {
 return false, err
 }
 
-if csiVolumeMode == ephemeralVolumeMode {
+if volumeLifecycleMode == storage.VolumeLifecycleEphemeral {
 klog.V(5).Info(log("plugin.CanDeviceMount skipped ephemeral mode detected for spec %v", spec.Name()))
 return false, nil
 }
@@ -775,11 +765,67 @@ func (p *csiPlugin) skipAttach(driver string) (bool, error) {
 return false, nil
 }
 
-// getCSIVolumeMode returns the mode for the specified spec: {persistent|ephemeral}.
+// supportsVolumeMode checks whether the CSI driver supports a volume in the given mode.
+// An error indicates that it isn't supported and explains why.
+func (p *csiPlugin) supportsVolumeLifecycleMode(driver string, volumeMode storage.VolumeLifecycleMode) error {
+if !utilfeature.DefaultFeatureGate.Enabled(features.CSIInlineVolume) {
+// Feature disabled, therefore only "persistent" volumes are supported.
+if volumeMode != storage.VolumeLifecyclePersistent {
+return fmt.Errorf("CSIInlineVolume feature not enabled, %q volumes not supported", volumeMode)
+}
+return nil
+}
+
+// Retrieve CSIDriver. It's not an error if that isn't
+// possible (we don't have the lister if CSIDriverRegistry is
+// disabled) or the driver isn't found (CSIDriver is
+// optional), but then only persistent volumes are supported.
+var csiDriver *storage.CSIDriver
+if p.csiDriverLister != nil {
+kletHost, ok := p.host.(volume.KubeletVolumeHost)
+if ok {
+kletHost.WaitForCacheSync()
+}
+
+c, err := p.csiDriverLister.Get(driver)
+if err != nil && !apierrs.IsNotFound(err) {
+// Some internal error.
+return err
+}
+csiDriver = c
+}
+
+// The right response depends on whether we have information
+// about the driver and the volume mode.
+switch {
+case csiDriver == nil && volumeMode == storage.VolumeLifecyclePersistent:
+// No information, but that's okay for persistent volumes (and only those).
+return nil
+case csiDriver == nil:
+return fmt.Errorf("volume mode %q not supported by driver %s (no CSIDriver object)", volumeMode, driver)
+case containsVolumeMode(csiDriver.Spec.VolumeLifecycleModes, volumeMode):
+// Explicitly listed.
+return nil
+default:
+return fmt.Errorf("volume mode %q not supported by driver %s (only supports %q)", volumeMode, driver, csiDriver.Spec.VolumeLifecycleModes)
+}
+}
+
+// containsVolumeMode checks whether the given volume mode is listed.
+func containsVolumeMode(modes []storage.VolumeLifecycleMode, mode storage.VolumeLifecycleMode) bool {
+for _, m := range modes {
+if m == mode {
+return true
+}
+}
+return false
+}
+
+// getVolumeLifecycleMode returns the mode for the specified spec: {persistent|ephemeral}.
 // 1) If mode cannot be determined, it will default to "persistent".
 // 2) If Mode cannot be resolved to either {persistent | ephemeral}, an error is returned
 // See https://github.com/kubernetes/enhancements/blob/master/keps/sig-storage/20190122-csi-inline-volumes.md
-func (p *csiPlugin) getCSIVolumeMode(spec *volume.Spec) (csiVolumeMode, error) {
+func (p *csiPlugin) getVolumeLifecycleMode(spec *volume.Spec) (storage.VolumeLifecycleMode, error) {
 // 1) if volume.Spec.Volume.CSI != nil -> mode is ephemeral
 // 2) if volume.Spec.PersistentVolume.Spec.CSI != nil -> persistent
 volSrc, _, err := getSourceFromSpec(spec)
@@ -788,9 +834,9 @@ func (p *csiPlugin) getCSIVolumeMode(spec *volume.Spec) {
 }
 
 if volSrc != nil && utilfeature.DefaultFeatureGate.Enabled(features.CSIInlineVolume) {
-return ephemeralVolumeMode, nil
+return storage.VolumeLifecycleEphemeral, nil
 }
-return persistentVolumeMode, nil
+return storage.VolumeLifecyclePersistent, nil
 }
 
 func (p *csiPlugin) getPublishContext(client clientset.Interface, handle, driver, nodeName string) (map[string]string, error) {
@@ -25,6 +25,7 @@ import (
 "testing"
 
 api "k8s.io/api/core/v1"
+storagev1beta1 "k8s.io/api/storage/v1beta1"
 meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
 "k8s.io/apimachinery/pkg/util/wait"
@@ -178,7 +179,12 @@ func TestPluginGetVolumeNameWithInline(t *testing.T) {
 defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
 defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIInlineVolume, true)()
 
-plug, tmpDir := newTestPlugin(t, nil)
+modes := []storagev1beta1.VolumeLifecycleMode{
+storagev1beta1.VolumeLifecyclePersistent,
+}
+driver := getTestCSIDriver(testDriver, nil, nil, modes)
+client := fakeclient.NewSimpleClientset(driver)
+plug, tmpDir := newTestPlugin(t, client)
 defer os.RemoveAll(tmpDir)
 testCases := []struct {
 name string
@@ -403,9 +409,6 @@ func TestPluginConstructVolumeSpecWithInline(t *testing.T) {
 defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
 defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIInlineVolume, true)()
 
-plug, tmpDir := newTestPlugin(t, nil)
-defer os.RemoveAll(tmpDir)
-
 testCases := []struct {
 name string
 originSpec *volume.Spec
@@ -413,6 +416,7 @@ func TestPluginConstructVolumeSpecWithInline(t *testing.T) {
 volHandle string
 podUID types.UID
 shouldFail bool
+modes []storagev1beta1.VolumeLifecycleMode
 }{
 {
 name: "construct spec1 from persistent spec",
@@ -420,6 +424,7 @@ func TestPluginConstructVolumeSpecWithInline(t *testing.T) {
 volHandle: "testvol-handle1",
 originSpec: volume.NewSpecFromPersistentVolume(makeTestPV("test.vol.id", 20, testDriver, "testvol-handle1"), true),
 podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
+modes: []storagev1beta1.VolumeLifecycleMode{storagev1beta1.VolumeLifecyclePersistent},
 },
 {
 name: "construct spec2 from persistent spec",
@@ -427,18 +432,38 @@ func TestPluginConstructVolumeSpecWithInline(t *testing.T) {
 volHandle: "handle2",
 originSpec: volume.NewSpecFromPersistentVolume(makeTestPV("spec2", 20, testDriver, "handle2"), true),
 podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
+modes: []storagev1beta1.VolumeLifecycleMode{storagev1beta1.VolumeLifecyclePersistent},
+},
+{
+name: "construct spec2 from persistent spec, missing mode",
+specVolID: "spec2",
+volHandle: "handle2",
+originSpec: volume.NewSpecFromPersistentVolume(makeTestPV("spec2", 20, testDriver, "handle2"), true),
+podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
+modes: []storagev1beta1.VolumeLifecycleMode{},
+shouldFail: true,
 },
 {
 name: "construct spec from volume spec",
 specVolID: "volspec",
 originSpec: volume.NewSpecFromVolume(makeTestVol("volspec", testDriver)),
 podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
+modes: []storagev1beta1.VolumeLifecycleMode{storagev1beta1.VolumeLifecycleEphemeral},
 },
 {
 name: "construct spec from volume spec2",
 specVolID: "volspec2",
 originSpec: volume.NewSpecFromVolume(makeTestVol("volspec2", testDriver)),
 podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
+modes: []storagev1beta1.VolumeLifecycleMode{storagev1beta1.VolumeLifecycleEphemeral},
+},
+{
+name: "construct spec from volume spec2, missing mode",
+specVolID: "volspec2",
+originSpec: volume.NewSpecFromVolume(makeTestVol("volspec2", testDriver)),
+podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
+modes: []storagev1beta1.VolumeLifecycleMode{},
+shouldFail: true,
 },
 {
 name: "missing spec",
@@ -451,6 +476,11 @@ func TestPluginConstructVolumeSpecWithInline(t *testing.T) {
 
 for _, tc := range testCases {
 t.Run(tc.name, func(t *testing.T) {
+driver := getTestCSIDriver(testDriver, nil, nil, tc.modes)
+client := fakeclient.NewSimpleClientset(driver)
+plug, tmpDir := newTestPlugin(t, client)
+defer os.RemoveAll(tmpDir)
+
 mounter, err := plug.NewMounter(
 tc.originSpec,
 &api.Pod{ObjectMeta: meta.ObjectMeta{UID: tc.podUID, Namespace: testns}},
@@ -524,7 +554,7 @@ func TestPluginNewMounter(t *testing.T) {
 spec *volume.Spec
 podUID types.UID
 namespace string
-csiVolumeMode csiVolumeMode
+volumeLifecycleMode storagev1beta1.VolumeLifecycleMode
 shouldFail bool
 }{
 {
@@ -532,14 +562,14 @@ func TestPluginNewMounter(t *testing.T) {
 spec: volume.NewSpecFromPersistentVolume(makeTestPV("test-pv1", 20, testDriver, testVol), true),
 podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
 namespace: "test-ns1",
-csiVolumeMode: persistentVolumeMode,
+volumeLifecycleMode: storagev1beta1.VolumeLifecyclePersistent,
 },
 {
 name: "mounter from volume source",
 spec: volume.NewSpecFromVolume(makeTestVol("test-vol1", testDriver)),
 podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
 namespace: "test-ns2",
-csiVolumeMode: ephemeralVolumeMode,
+volumeLifecycleMode: storagev1beta1.VolumeLifecycleEphemeral,
 shouldFail: true, // csi inline not enabled
 },
 {
@@ -589,8 +619,8 @@ func TestPluginNewMounter(t *testing.T) {
 if csiClient == nil {
 t.Error("mounter csiClient is nil")
 }
-if csiMounter.csiVolumeMode != test.csiVolumeMode {
-t.Error("unexpected driver mode:", csiMounter.csiVolumeMode)
+if csiMounter.volumeLifecycleMode != test.volumeLifecycleMode {
+t.Error("unexpected driver mode:", csiMounter.volumeLifecycleMode)
 }
 
 // ensure data file is created
@@ -619,8 +649,8 @@ func TestPluginNewMounter(t *testing.T) {
 if data[volDataKey.nodeName] != string(csiMounter.plugin.host.GetNodeName()) {
 t.Error("volume data file unexpected nodeName:", data[volDataKey.nodeName])
 }
-if data[volDataKey.csiVolumeMode] != string(test.csiVolumeMode) {
-t.Error("volume data file unexpected csiVolumeMode:", data[volDataKey.csiVolumeMode])
+if data[volDataKey.volumeLifecycleMode] != string(test.volumeLifecycleMode) {
+t.Error("volume data file unexpected volumeLifecycleMode:", data[volDataKey.volumeLifecycleMode])
 }
 })
 }
@@ -629,12 +659,22 @@ func TestPluginNewMounter(t *testing.T) {
 func TestPluginNewMounterWithInline(t *testing.T) {
 defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIBlockVolume, true)()
 defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIInlineVolume, true)()
+bothModes := []storagev1beta1.VolumeLifecycleMode{
+storagev1beta1.VolumeLifecycleEphemeral,
+storagev1beta1.VolumeLifecyclePersistent,
+}
+persistentMode := []storagev1beta1.VolumeLifecycleMode{
+storagev1beta1.VolumeLifecyclePersistent,
+}
+ephemeralMode := []storagev1beta1.VolumeLifecycleMode{
+storagev1beta1.VolumeLifecycleEphemeral,
+}
 tests := []struct {
 name string
 spec *volume.Spec
 podUID types.UID
 namespace string
-csiVolumeMode csiVolumeMode
+volumeLifecycleMode storagev1beta1.VolumeLifecycleMode
 shouldFail bool
 }{
 {
@@ -655,32 +695,43 @@ func TestPluginNewMounterWithInline(t *testing.T) {
 spec: volume.NewSpecFromPersistentVolume(makeTestPV("test-pv1", 20, testDriver, testVol), true),
 podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
 namespace: "test-ns1",
-csiVolumeMode: persistentVolumeMode,
+volumeLifecycleMode: storagev1beta1.VolumeLifecyclePersistent,
 },
 {
 name: "mounter with volume source",
 spec: volume.NewSpecFromVolume(makeTestVol("test-vol1", testDriver)),
 podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
 namespace: "test-ns2",
-csiVolumeMode: ephemeralVolumeMode,
+volumeLifecycleMode: storagev1beta1.VolumeLifecycleEphemeral,
 },
 }
 
+runAll := func(t *testing.T, supported []storagev1beta1.VolumeLifecycleMode) {
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
-plug, tmpDir := newTestPlugin(t, nil)
+driver := getTestCSIDriver(testDriver, nil, nil, supported)
+fakeClient := fakeclient.NewSimpleClientset(driver)
+plug, tmpDir := newTestPlugin(t, fakeClient)
 defer os.RemoveAll(tmpDir)
 
 registerFakePlugin(testDriver, "endpoint", []string{"1.2.0"}, t)
 
 mounter, err := plug.NewMounter(
 test.spec,
 &api.Pod{ObjectMeta: meta.ObjectMeta{UID: test.podUID, Namespace: test.namespace}},
 volume.VolumeOptions{},
 )
-if test.shouldFail != (err != nil) {
+// Some test cases are meant to fail because their input data is broken.
+shouldFail := test.shouldFail
+// Others fail if the driver does not support the volume mode.
+if !containsVolumeMode(supported, test.volumeLifecycleMode) {
+shouldFail = true
+}
+if shouldFail != (err != nil) {
 t.Fatal("Unexpected error:", err)
 }
-if test.shouldFail && err != nil {
+if shouldFail && err != nil {
 t.Log(err)
 return
 }
@@ -707,8 +758,8 @@ func TestPluginNewMounterWithInline(t *testing.T) {
 if csiClient == nil {
 t.Error("mounter csiClient is nil")
 }
-if csiMounter.csiVolumeMode != test.csiVolumeMode {
-t.Error("unexpected driver mode:", csiMounter.csiVolumeMode)
+if csiMounter.volumeLifecycleMode != test.volumeLifecycleMode {
+t.Error("unexpected driver mode:", csiMounter.volumeLifecycleMode)
 }
 
 // ensure data file is created
@@ -737,11 +788,22 @@ func TestPluginNewMounterWithInline(t *testing.T) {
 if data[volDataKey.nodeName] != string(csiMounter.plugin.host.GetNodeName()) {
 t.Error("volume data file unexpected nodeName:", data[volDataKey.nodeName])
 }
-if data[volDataKey.csiVolumeMode] != string(csiMounter.csiVolumeMode) {
-t.Error("volume data file unexpected csiVolumeMode:", data[volDataKey.csiVolumeMode])
+if data[volDataKey.volumeLifecycleMode] != string(csiMounter.volumeLifecycleMode) {
+t.Error("volume data file unexpected volumeLifecycleMode:", data[volDataKey.volumeLifecycleMode])
 }
 })
 }
+}
+
+t.Run("both supported", func(t *testing.T) {
+runAll(t, bothModes)
+})
+t.Run("persistent supported", func(t *testing.T) {
+runAll(t, persistentMode)
+})
+t.Run("ephemeral supported", func(t *testing.T) {
+runAll(t, ephemeralMode)
+})
 }
 
 func TestPluginNewUnmounter(t *testing.T) {
@@ -871,8 +933,8 @@ func TestPluginCanAttach(t *testing.T) {
 }
 
 for _, test := range tests {
-csiDriver := getTestCSIDriver(test.driverName, nil, &test.canAttach)
 t.Run(test.name, func(t *testing.T) {
+csiDriver := getTestCSIDriver(test.driverName, nil, &test.canAttach, nil)
 fakeCSIClient := fakeclient.NewSimpleClientset(csiDriver)
 plug, tmpDir := newTestPlugin(t, fakeCSIClient)
 defer os.RemoveAll(tmpDir)
@@ -932,7 +994,7 @@ func TestPluginFindAttachablePlugin(t *testing.T) {
 }
 defer os.RemoveAll(tmpDir)
 
-client := fakeclient.NewSimpleClientset(getTestCSIDriver(test.driverName, nil, &test.canAttach))
+client := fakeclient.NewSimpleClientset(getTestCSIDriver(test.driverName, nil, &test.canAttach, nil))
 factory := informers.NewSharedInformerFactory(client, CsiResyncPeriod)
 host := volumetest.NewFakeVolumeHostWithCSINodeName(
 tmpDir,
@ -27,7 +27,10 @@ import (
|
|||||||
|
|
||||||
api "k8s.io/api/core/v1"
|
api "k8s.io/api/core/v1"
|
||||||
storage "k8s.io/api/storage/v1"
|
storage "k8s.io/api/storage/v1"
|
||||||
|
storagebeta1 "k8s.io/api/storage/v1beta1"
|
||||||
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
"k8s.io/apimachinery/pkg/types"
|
"k8s.io/apimachinery/pkg/types"
|
||||||
"k8s.io/apimachinery/pkg/util/wait"
|
"k8s.io/apimachinery/pkg/util/wait"
|
||||||
"k8s.io/apimachinery/pkg/watch"
|
"k8s.io/apimachinery/pkg/watch"
|
||||||
@ -45,6 +48,7 @@ import (
|
|||||||
// based on operations from the volume manager/reconciler/operation executor
|
// based on operations from the volume manager/reconciler/operation executor
|
||||||
func TestCSI_VolumeAll(t *testing.T) {
|
func TestCSI_VolumeAll(t *testing.T) {
|
||||||
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIInlineVolume, true)()
|
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIInlineVolume, true)()
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
specName string
|
specName string
|
||||||
@ -54,6 +58,7 @@ func TestCSI_VolumeAll(t *testing.T) {
|
|||||||
podFunc func() *api.Pod
|
podFunc func() *api.Pod
|
||||||
isInline bool
|
isInline bool
|
||||||
shouldFail bool
|
shouldFail bool
|
||||||
|
driverSpec *storagebeta1.CSIDriverSpec
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
name: "PersistentVolume",
|
name: "PersistentVolume",
|
||||||
@ -69,7 +74,42 @@ func TestCSI_VolumeAll(t *testing.T) {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "ephermeral inline",
|
name: "PersistentVolume with driver info",
|
||||||
|
specName: "pv2",
|
||||||
|
driver: "simple-driver",
|
||||||
|
volName: "vol2",
|
||||||
|
specFunc: func(specName, driver, volName string) *volume.Spec {
|
||||||
|
return volume.NewSpecFromPersistentVolume(makeTestPV(specName, 20, driver, volName), false)
|
||||||
|
},
|
||||||
|
podFunc: func() *api.Pod {
|
||||||
|
podUID := types.UID(fmt.Sprintf("%08X", rand.Uint64()))
|
||||||
|
return &api.Pod{ObjectMeta: meta.ObjectMeta{UID: podUID, Namespace: testns}}
|
||||||
|
},
|
||||||
|
driverSpec: &storagebeta1.CSIDriverSpec{
|
||||||
|
// Required for the driver to be accepted for the persistent volume.
|
||||||
|
VolumeLifecycleModes: []storagebeta1.VolumeLifecycleMode{storagebeta1.VolumeLifecyclePersistent},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "PersistentVolume with wrong mode in driver info",
|
||||||
|
specName: "pv2",
|
||||||
|
driver: "simple-driver",
|
||||||
|
volName: "vol2",
|
||||||
|
specFunc: func(specName, driver, volName string) *volume.Spec {
|
||||||
|
return volume.NewSpecFromPersistentVolume(makeTestPV(specName, 20, driver, volName), false)
|
||||||
|
},
|
||||||
|
podFunc: func() *api.Pod {
|
||||||
|
podUID := types.UID(fmt.Sprintf("%08X", rand.Uint64()))
|
||||||
|
return &api.Pod{ObjectMeta: meta.ObjectMeta{UID: podUID, Namespace: testns}}
|
||||||
|
},
|
||||||
|
driverSpec: &storagebeta1.CSIDriverSpec{
|
||||||
|
// This will cause the volume to be rejected.
|
||||||
|
VolumeLifecycleModes: []storagebeta1.VolumeLifecycleMode{storagebeta1.VolumeLifecycleEphemeral},
|
||||||
|
},
|
||||||
|
shouldFail: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "ephemeral inline supported",
|
||||||
driver: "inline-driver-1",
|
driver: "inline-driver-1",
|
||||||
volName: "test.vol2",
|
volName: "test.vol2",
|
||||||
specFunc: func(specName, driver, volName string) *volume.Spec {
|
specFunc: func(specName, driver, volName string) *volume.Spec {
|
||||||
@ -80,6 +120,75 @@ func TestCSI_VolumeAll(t *testing.T) {
|
|||||||
return &api.Pod{ObjectMeta: meta.ObjectMeta{UID: podUID, Namespace: testns}}
|
return &api.Pod{ObjectMeta: meta.ObjectMeta{UID: podUID, Namespace: testns}}
|
||||||
},
|
},
|
||||||
isInline: true,
|
isInline: true,
|
||||||
|
driverSpec: &storagebeta1.CSIDriverSpec{
|
||||||
|
// Required for the driver to be accepted for the inline volume.
|
||||||
|
VolumeLifecycleModes: []storagebeta1.VolumeLifecycleMode{storagebeta1.VolumeLifecycleEphemeral},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "ephemeral inline also supported",
|
||||||
|
driver: "inline-driver-1",
|
||||||
|
volName: "test.vol2",
|
||||||
|
specFunc: func(specName, driver, volName string) *volume.Spec {
|
||||||
|
return volume.NewSpecFromVolume(makeTestVol(specName, driver))
|
||||||
|
},
|
||||||
|
podFunc: func() *api.Pod {
|
||||||
|
podUID := types.UID(fmt.Sprintf("%08X", rand.Uint64()))
|
||||||
|
return &api.Pod{ObjectMeta: meta.ObjectMeta{UID: podUID, Namespace: testns}}
|
||||||
|
},
|
||||||
|
isInline: true,
|
||||||
|
driverSpec: &storagebeta1.CSIDriverSpec{
|
||||||
|
// Required for the driver to be accepted for the inline volume.
|
||||||
|
VolumeLifecycleModes: []storagebeta1.VolumeLifecycleMode{storagebeta1.VolumeLifecyclePersistent, storagebeta1.VolumeLifecycleEphemeral},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "ephemeral inline without CSIDriver info",
|
||||||
|
driver: "inline-driver-2",
|
||||||
|
volName: "test.vol3",
|
||||||
|
specFunc: func(specName, driver, volName string) *volume.Spec {
|
||||||
|
return volume.NewSpecFromVolume(makeTestVol(specName, driver))
|
||||||
|
},
|
||||||
|
podFunc: func() *api.Pod {
|
||||||
|
podUID := types.UID(fmt.Sprintf("%08X", rand.Uint64()))
|
||||||
|
return &api.Pod{ObjectMeta: meta.ObjectMeta{UID: podUID, Namespace: testns}}
|
||||||
|
},
|
||||||
|
isInline: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "ephemeral inline with driver that has no mode",
|
||||||
|
driver: "inline-driver-3",
|
||||||
|
volName: "test.vol4",
|
||||||
|
specFunc: func(specName, driver, volName string) *volume.Spec {
|
||||||
|
return volume.NewSpecFromVolume(makeTestVol(specName, driver))
|
||||||
|
},
|
||||||
|
podFunc: func() *api.Pod {
|
||||||
|
podUID := types.UID(fmt.Sprintf("%08X", rand.Uint64()))
|
||||||
|
return &api.Pod{ObjectMeta: meta.ObjectMeta{UID: podUID, Namespace: testns}}
|
||||||
|
},
|
||||||
|
isInline: true,
|
||||||
|
driverSpec: &storagebeta1.CSIDriverSpec{
|
||||||
|
// This means the driver *cannot* handle the inline volume because
|
||||||
|
// the default is "persistent".
|
||||||
|
VolumeLifecycleModes: nil,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "ephemeral inline with driver that has wrong mode",
|
||||||
|
driver: "inline-driver-3",
|
||||||
|
volName: "test.vol4",
|
||||||
|
specFunc: func(specName, driver, volName string) *volume.Spec {
|
||||||
|
return volume.NewSpecFromVolume(makeTestVol(specName, driver))
|
||||||
|
},
|
||||||
|
podFunc: func() *api.Pod {
|
||||||
|
podUID := types.UID(fmt.Sprintf("%08X", rand.Uint64()))
|
||||||
|
return &api.Pod{ObjectMeta: meta.ObjectMeta{UID: podUID, Namespace: testns}}
|
||||||
|
},
|
||||||
|
isInline: true,
|
||||||
|
driverSpec: &storagebeta1.CSIDriverSpec{
|
||||||
|
// This means the driver *cannot* handle the inline volume.
|
||||||
|
VolumeLifecycleModes: []storagebeta1.VolumeLifecycleMode{storagebeta1.VolumeLifecyclePersistent},
|
||||||
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "missing spec",
|
name: "missing spec",
|
||||||
@@ -118,10 +227,27 @@ func TestCSI_VolumeAll(t *testing.T) {
 				t.Fatalf("can't create temp dir: %v", err)
 			}
 			defer os.RemoveAll(tmpDir)
-			client := fakeclient.NewSimpleClientset()
+			var driverInfo *storagebeta1.CSIDriver
+			objs := []runtime.Object{}
+			if test.driverSpec != nil {
+				driverInfo = &storagebeta1.CSIDriver{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: test.driver,
+					},
+					Spec: *test.driverSpec,
+				}
+				objs = append(objs, driverInfo)
+			}
+
+			client := fakeclient.NewSimpleClientset(objs...)
 			fakeWatcher := watch.NewRaceFreeFake()
 
-			factory := informers.NewSharedInformerFactory(client, CsiResyncPeriod)
+			factory := informers.NewSharedInformerFactory(client, time.Hour /* disable resync */)
+			csiDriverInformer := factory.Storage().V1beta1().CSIDrivers()
+			if driverInfo != nil {
+				csiDriverInformer.Informer().GetStore().Add(driverInfo)
+			}
 			factory.Start(wait.NeverStop)
 
 			host := volumetest.NewFakeVolumeHostWithCSINodeName(
@@ -129,7 +255,7 @@ func TestCSI_VolumeAll(t *testing.T) {
 				client,
 				nil,
 				"csi-node",
-				factory.Storage().V1beta1().CSIDrivers().Lister(),
+				csiDriverInformer.Lister(),
 			)
 
 			plugMgr := &volume.VolumePluginMgr{}
@@ -253,6 +379,22 @@ func TestCSI_VolumeAll(t *testing.T) {
 			}
 
 			mounter, err := volPlug.NewMounter(volSpec, pod, volume.VolumeOptions{})
+			if test.isInline && (test.driverSpec == nil || !containsVolumeMode(test.driverSpec.VolumeLifecycleModes, storagebeta1.VolumeLifecycleEphemeral)) {
+				// This *must* fail because a CSIDriver.Spec.VolumeLifecycleModes entry "ephemeral"
+				// is required.
+				if err == nil || mounter != nil {
+					t.Fatalf("csiTest.VolumeAll volPlugin.NewMounter should have failed for inline volume due to lack of support for inline volumes, got: %+v, %s", mounter, err)
+				}
+				return
+			}
+			if !test.isInline && test.driverSpec != nil && !containsVolumeMode(test.driverSpec.VolumeLifecycleModes, storagebeta1.VolumeLifecyclePersistent) {
+				// This *must* fail because a CSIDriver.Spec.VolumeLifecycleModes entry "persistent"
+				// is required when a driver object is available.
+				if err == nil || mounter != nil {
+					t.Fatalf("csiTest.VolumeAll volPlugin.NewMounter should have failed for persistent volume due to lack of support for persistent volumes, got: %+v, %s", mounter, err)
+				}
+				return
+			}
 			if err != nil || mounter == nil {
 				t.Fatalf("csiTest.VolumeAll volPlugin.NewMounter is nil or error: %s", err)
 			}
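The new checks rely on a containsVolumeMode helper that is not shown in this excerpt. A minimal sketch of such a helper, assuming only the signature implied by the call sites above, could look like this:

```go
// Sketch only, not taken from the patch: reports whether the given lifecycle
// mode appears in the list taken from CSIDriver.Spec.VolumeLifecycleModes.
func containsVolumeMode(modes []storagebeta1.VolumeLifecycleMode, mode storagebeta1.VolumeLifecycleMode) bool {
	for _, m := range modes {
		if m == mode {
			return true
		}
	}
	return false
}
```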
@@ -84,7 +84,7 @@ func makeTestVol(name string, driverName string) *api.Volume {
 	}
 }
 
-func getTestCSIDriver(name string, podInfoMount *bool, attachable *bool) *storagev1beta1.CSIDriver {
+func getTestCSIDriver(name string, podInfoMount *bool, attachable *bool, volumeLifecycleModes []storagev1beta1.VolumeLifecycleMode) *storagev1beta1.CSIDriver {
 	return &storagev1beta1.CSIDriver{
 		ObjectMeta: meta.ObjectMeta{
 			Name: name,
@@ -92,6 +92,7 @@ func getTestCSIDriver(name string, podInfoMount *bool, attachable *bool) *storag
 		Spec: storagev1beta1.CSIDriverSpec{
 			PodInfoOnMount: podInfoMount,
 			AttachRequired: attachable,
+			VolumeLifecycleModes: volumeLifecycleModes,
 		},
 	}
 }
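Callers of the extended helper pass the lifecycle modes that the fake CSIDriver object should advertise. The following fragment is illustrative only; the driver names and the attachable flag are invented for the example and do not come from the patch:

```go
attachable := false

// Passing nil leaves VolumeLifecycleModes unset, which defaults to
// "persistent", so this fixture behaves like an existing driver.
persistentOnly := getTestCSIDriver("example-persistent", nil, &attachable, nil)

// This fixture explicitly opts in to ephemeral inline volumes as well.
inlineCapable := getTestCSIDriver("example-inline", nil, &attachable, []storagev1beta1.VolumeLifecycleMode{
	storagev1beta1.VolumeLifecyclePersistent,
	storagev1beta1.VolumeLifecycleEphemeral,
})

client := fakeclient.NewSimpleClientset(persistentOnly, inlineCapable)
```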
@@ -43,6 +43,7 @@ import (
 	"github.com/onsi/ginkgo"
 	"k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
+	storagev1beta1 "k8s.io/api/storage/v1beta1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/kubernetes/test/e2e/framework"
@@ -195,7 +196,6 @@ var _ testsuites.DynamicPVTestDriver = &mockCSIDriver{}
 // InitMockCSIDriver returns a mockCSIDriver that implements TestDriver interface
 func InitMockCSIDriver(driverOpts CSIMockDriverOpts) testsuites.TestDriver {
 	driverManifests := []string{
-		"test/e2e/testing-manifests/storage-csi/cluster-driver-registrar/rbac.yaml",
 		"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
 		"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
 		"test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml",
@@ -206,7 +206,7 @@ func InitMockCSIDriver(driverOpts CSIMockDriverOpts) testsuites.TestDriver {
 	}
 
 	if driverOpts.RegisterDriver {
-		driverManifests = append(driverManifests, "test/e2e/testing-manifests/storage-csi/mock/csi-mock-cluster-driver-registrar.yaml")
+		driverManifests = append(driverManifests, "test/e2e/testing-manifests/storage-csi/mock/csi-mock-driverinfo.yaml")
 	}
 
 	if !driverOpts.DisableAttach {
@@ -293,9 +293,13 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTest
 		DriverContainerName: "mock",
 		DriverContainerArguments: containerArgs,
 		ProvisionerContainerName: "csi-provisioner",
-		ClusterRegistrarContainerName: "csi-cluster-driver-registrar",
 		NodeName: config.ClientNodeName,
 		PodInfo: m.podInfo,
+		CanAttach: &m.attachable,
+		VolumeLifecycleModes: []storagev1beta1.VolumeLifecycleMode{
+			storagev1beta1.VolumeLifecyclePersistent,
+			storagev1beta1.VolumeLifecycleEphemeral,
+		},
 	}
 	cleanup, err := f.CreateFromManifests(func(item interface{}) error {
 		return utils.PatchCSIDeployment(f, o, item)
@@ -114,7 +114,9 @@ type EphemeralTestDriver interface {
 
 	// GetCSIDriverName returns the name that was used when registering with
 	// kubelet. Depending on how the driver was deployed, this can be different
-	// from DriverInfo.Name.
+	// from DriverInfo.Name. Starting with Kubernetes 1.16, there must also
+	// be a CSIDriver object under the same name with a "mode" field that enables
+	// usage of the driver for ephemeral inline volumes.
 	GetCSIDriverName(config *PerTestConfig) string
 }
 
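Outside the E2E framework, the CSIDriver object that this comment requires could be constructed (or shipped as a YAML manifest) roughly as follows. This is a sketch; the driver name is a placeholder and not part of the patch:

```go
// Sketch: "hostpath.csi.example.com" is a placeholder driver name.
csiDriver := &storagev1beta1.CSIDriver{
	ObjectMeta: metav1.ObjectMeta{
		// Must match the name under which the driver registers with kubelet.
		Name: "hostpath.csi.example.com",
	},
	Spec: storagev1beta1.CSIDriverSpec{
		// Listing both modes keeps normal PV support working while also
		// allowing the driver to be used for ephemeral inline volumes.
		VolumeLifecycleModes: []storagev1beta1.VolumeLifecycleMode{
			storagev1beta1.VolumeLifecyclePersistent,
			storagev1beta1.VolumeLifecycleEphemeral,
		},
	},
}
```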
@@ -18,12 +18,12 @@ package utils
 
 import (
 	"path"
-	"strconv"
 	"strings"
 
 	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
+	storagev1beta1 "k8s.io/api/storage/v1beta1"
 	"k8s.io/kubernetes/test/e2e/framework"
 )
 
@@ -94,10 +94,6 @@ func PatchCSIDeployment(f *framework.Framework, o PatchCSIOptions, object interf
 				// Driver name is expected to be the same
 				// as the snapshotter here.
 				container.Args = append(container.Args, "--snapshotter="+o.NewDriverName)
-			case o.ClusterRegistrarContainerName:
-				if o.PodInfo != nil {
-					container.Args = append(container.Args, "--pod-info-mount="+strconv.FormatBool(*o.PodInfo))
-				}
 			}
 		}
 	}
@@ -125,6 +121,17 @@ func PatchCSIDeployment(f *framework.Framework, o PatchCSIOptions, object interf
 			// as the provisioner name here.
 			object.Provisioner = o.NewDriverName
 		}
+	case *storagev1beta1.CSIDriver:
+		if o.NewDriverName != "" {
+			object.Name = o.NewDriverName
+		}
+		if o.PodInfo != nil {
+			object.Spec.PodInfoOnMount = o.PodInfo
+		}
+		if o.CanAttach != nil {
+			object.Spec.AttachRequired = o.CanAttach
+		}
+		object.Spec.VolumeLifecycleModes = o.VolumeLifecycleModes
 	}
 
 	return nil
@@ -154,12 +161,18 @@ type PatchCSIOptions struct {
 	// If non-empty, --snapshotter with new name will be appended
 	// to the argument list.
 	SnapshotterContainerName string
-	// The name of the container which has the cluster-driver-registrar
-	// binary.
-	ClusterRegistrarContainerName string
 	// If non-empty, all pods are forced to run on this node.
 	NodeName string
-	// If not nil, the argument to pass to the cluster-driver-registrar's
-	// pod-info-mount argument.
+	// If not nil, the value to use for the CSIDriver.Spec.PodInfo
+	// field *if* the driver deploys a CSIDriver object. Ignored
+	// otherwise.
 	PodInfo *bool
+	// If not nil, the value to use for the CSIDriver.Spec.CanAttach
+	// field *if* the driver deploys a CSIDriver object. Ignored
+	// otherwise.
+	CanAttach *bool
+	// The value to use for the CSIDriver.Spec.VolumeLifecycleModes
+	// field *if* the driver deploys a CSIDriver object. Ignored
+	// otherwise.
+	VolumeLifecycleModes []storagev1beta1.VolumeLifecycleMode
 }
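Taken together, a driver deployment that wants its CSIDriver object patched for inline-volume tests fills in the new fields much like the mock driver above. A hedged sketch with placeholder values, not taken from the patch:

```go
// Illustrative fragment; names and values are placeholders. PatchCSIDeployment()
// copies these options into any CSIDriver object found among the driver's
// deployment manifests.
podInfo := true
attachRequired := false
o := utils.PatchCSIOptions{
	NewDriverName:            "csi-example-e2e", // hypothetical renamed driver
	DriverContainerName:      "example-driver",
	ProvisionerContainerName: "csi-provisioner",
	NodeName:                 "node-1",
	PodInfo:                  &podInfo,
	CanAttach:                &attachRequired,
	VolumeLifecycleModes: []storagev1beta1.VolumeLifecycleMode{
		storagev1beta1.VolumeLifecyclePersistent,
		storagev1beta1.VolumeLifecycleEphemeral,
	},
}
```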
@@ -1 +0,0 @@
-The original file is (or will be) https://github.com/kubernetes-csi/cluster-driver-registrar/blob/master/deploy/kubernetes/rbac.yaml
@@ -1,38 +0,0 @@
-# This YAML file contains all RBAC objects that are necessary to run
-# cluster-driver-registrar.
-#
-# In production, each CSI driver deployment has to be customized:
-# - to avoid conflicts, use non-default namespace and different names
-#   for non-namespaced entities like the ClusterRole
-
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: csi-cluster-driver-registrar
-  # replace with non-default namespace name
-  namespace: default
-
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: cluster-driver-registrar-runner
-rules:
-  - apiGroups: ["storage.k8s.io"]
-    resources: ["csidrivers"]
-    verbs: ["create", "delete"]
-
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: csi-cluster-driver-registrar-role
-subjects:
-  - kind: ServiceAccount
-    name: csi-cluster-driver-registrar
-    # replace with non-default namespace name
-    namespace: default
-roleRef:
-  kind: ClusterRole
-  name: cluster-driver-registrar-runner
-  apiGroup: rbac.authorization.k8s.io
@@ -1,36 +0,0 @@
-kind: StatefulSet
-apiVersion: apps/v1
-metadata:
-  name: csi-mockplugin-cluster-driver-registrar
-spec:
-  selector:
-    matchLabels:
-      app: csi-mockplugin-cluster-driver-registrar
-  replicas: 1
-  template:
-    metadata:
-      labels:
-        app: csi-mockplugin-cluster-driver-registrar
-    spec:
-      serviceAccountName: csi-mock
-      containers:
-        - name: csi-cluster-driver-registrar
-          # TODO: replace with official image
-          image: quay.io/k8scsi/csi-cluster-driver-registrar:tmp-test-1.14
-          args:
-            - --v=5
-            - --csi-address=$(ADDRESS)
-          env:
-            - name: ADDRESS
-              value: /csi/csi.sock
-          imagePullPolicy: Always
-          securityContext:
-            privileged: true
-          volumeMounts:
-            - mountPath: /csi
-              name: socket-dir
-      volumes:
-        - hostPath:
-            path: /var/lib/kubelet/plugins/csi-mock
-            type: DirectoryOrCreate
-          name: socket-dir
@@ -18,7 +18,6 @@ spec:
           # TODO: replace with official 1.4.0 release when ready
           image: quay.io/k8scsi/csi-provisioner:v1.4.0-rc1
           args:
-            - "--provisioner=csi-hostpath"
             - "--csi-address=$(ADDRESS)"
             - "--connection-timeout=15s"
           env:
@@ -51,6 +50,7 @@ spec:
         - name: mock
           image: quay.io/k8scsi/mock-driver:v2.1.0
           args:
+            - "--name=mock.storage.k8s.io"
            - "--permissive-target-path" # because of https://github.com/kubernetes/kubernetes/issues/75535
           env:
             - name: CSI_ENDPOINT
@@ -0,0 +1,7 @@
+apiVersion: storage.k8s.io/v1beta1
+kind: CSIDriver
+metadata:
+  name: mock.storage.k8s.io
+# Intentionally no spec. All values in the
+# spec will be inserted dynamically by PatchCSIDeployment()
+# in test/e2e/storage/utils/deployment.go.