Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-21 10:51:29 +00:00

commit a74e16fc81

Merge pull request #113306 from jsafrane/csidriver-setup

Move CSIDriver checks from NewMounter to SetUpAt
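
The checks moved into SetUpAt return volumetypes.NewTransientOperationFailure, so a missing or not-yet-synced CSIDriver object now surfaces as a retryable mount failure instead of failing mounter construction outright. Below is a minimal sketch of that retry semantic, not taken from this commit; transientError is a hypothetical stand-in for the real TransientOperationFailure type in pkg/volume/util/types:

package main

import (
	"errors"
	"fmt"
)

// transientError is a hypothetical stand-in for
// volumetypes.TransientOperationFailure.
type transientError struct{ msg string }

func (e *transientError) Error() string { return e.msg }

// setUpAt simulates a mount attempt that fails transiently until the
// CSIDriver object becomes visible (e.g. once the informer cache syncs).
func setUpAt(attempt int) error {
	if attempt < 2 {
		return &transientError{"CSIDriver not found yet"}
	}
	return nil
}

func main() {
	for attempt := 0; ; attempt++ {
		err := setUpAt(attempt)
		var te *transientError
		if errors.As(err, &te) {
			fmt.Println("transient failure, retrying:", te.msg)
			continue // kubelet re-queues the mount and tries again
		}
		if err != nil {
			fmt.Println("permanent failure:", err)
			return
		}
		fmt.Println("mount succeeded on attempt", attempt)
		return
	}
}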
pkg/volume/csi/csi_mounter.go

@@ -67,7 +67,6 @@ type csiMountMgr struct {
 	plugin              *csiPlugin
 	driverName          csiDriverName
 	volumeLifecycleMode storage.VolumeLifecycleMode
-	fsGroupPolicy       storage.FSGroupPolicy
 	volumeID            string
 	specVolumeID        string
 	readOnly            bool
@@ -107,8 +106,8 @@ func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error
 	csi, err := c.csiClientGetter.Get()
 	if err != nil {
 		return volumetypes.NewTransientOperationFailure(log("mounter.SetUpAt failed to get CSI client: %v", err))
 	}

 	ctx, cancel := createCSIOperationContext(c.spec, csiTimeout)
 	defer cancel()
@@ -117,6 +116,17 @@ func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error
 		return errors.New(log("mounter.SetupAt failed to get CSI persistent source: %v", err))
 	}

+	// Check CSIDriver.Spec.Mode to ensure that the CSI driver
+	// supports the current volumeLifecycleMode.
+	if err := c.supportsVolumeLifecycleMode(); err != nil {
+		return volumetypes.NewTransientOperationFailure(log("mounter.SetupAt failed to check volume lifecycle mode: %s", err))
+	}
+
+	fsGroupPolicy, err := c.getFSGroupPolicy()
+	if err != nil {
+		return volumetypes.NewTransientOperationFailure(log("mounter.SetupAt failed to check fsGroup policy: %s", err))
+	}
+
 	driverName := c.driverName
 	volumeHandle := c.volumeID
 	readOnly := c.readOnly
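For reference, a sketch of the CSIDriver API fields the two new SetUpAt checks consult: supportsVolumeLifecycleMode reads Spec.VolumeLifecycleModes, and getFSGroupPolicy reads Spec.FSGroupPolicy. The driver name and field values below are illustrative only:

package main

import (
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Hypothetical driver object; real clusters create this via a manifest.
	fileMode := storagev1.FileFSGroupPolicy
	driver := &storagev1.CSIDriver{
		ObjectMeta: metav1.ObjectMeta{Name: "hostpath.csi.k8s.io"},
		Spec: storagev1.CSIDriverSpec{
			// Modes checked by supportsVolumeLifecycleMode.
			VolumeLifecycleModes: []storagev1.VolumeLifecycleMode{
				storagev1.VolumeLifecyclePersistent,
				storagev1.VolumeLifecycleEphemeral,
			},
			// Policy returned by getFSGroupPolicy.
			FSGroupPolicy: &fileMode,
		},
	}
	fmt.Printf("%s: modes=%v fsGroupPolicy=%s\n",
		driver.Name, driver.Spec.VolumeLifecycleModes, *driver.Spec.FSGroupPolicy)
}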
@@ -317,7 +327,7 @@ func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error
 		}
 	}

-	if !driverSupportsCSIVolumeMountGroup && c.supportsFSGroup(fsType, mounterArgs.FsGroup, c.fsGroupPolicy) {
+	if !driverSupportsCSIVolumeMountGroup && c.supportsFSGroup(fsType, mounterArgs.FsGroup, fsGroupPolicy) {
 		// Driver doesn't support applying FSGroup. Kubelet must apply it instead.

 		// fullPluginName helps to distinguish different driver from csi plugin
@@ -464,6 +474,79 @@ func (c *csiMountMgr) supportsFSGroup(fsType string, fsGroup *int64, driverPolic
 	return false
 }

+// getFSGroupPolicy returns if the CSI driver supports a volume in the given mode.
+// An error indicates that it isn't supported and explains why.
+func (c *csiMountMgr) getFSGroupPolicy() (storage.FSGroupPolicy, error) {
+	// Retrieve CSIDriver. It's not an error if that isn't
+	// possible (we don't have the lister if CSIDriverRegistry is
+	// disabled) or the driver isn't found (CSIDriver is
+	// optional)
+	var csiDriver *storage.CSIDriver
+	driver := string(c.driverName)
+	if c.plugin.csiDriverLister != nil {
+		c, err := c.plugin.getCSIDriver(driver)
+		if err != nil && !apierrors.IsNotFound(err) {
+			// Some internal error.
+			return storage.ReadWriteOnceWithFSTypeFSGroupPolicy, err
+		}
+		csiDriver = c
+	}
+
+	// If the csiDriver isn't defined, return the default behavior
+	if csiDriver == nil {
+		return storage.ReadWriteOnceWithFSTypeFSGroupPolicy, nil
+	}
+	// If the csiDriver exists but the fsGroupPolicy isn't defined, return an error
+	if csiDriver.Spec.FSGroupPolicy == nil || *csiDriver.Spec.FSGroupPolicy == "" {
+		return storage.ReadWriteOnceWithFSTypeFSGroupPolicy, errors.New(log("expected valid fsGroupPolicy, received nil value or empty string"))
+	}
+	return *csiDriver.Spec.FSGroupPolicy, nil
+}
+
+// supportsVolumeMode checks whether the CSI driver supports a volume in the given mode.
+// An error indicates that it isn't supported and explains why.
+func (c *csiMountMgr) supportsVolumeLifecycleMode() error {
+	// Retrieve CSIDriver. It's not an error if that isn't
+	// possible (we don't have the lister if CSIDriverRegistry is
+	// disabled) or the driver isn't found (CSIDriver is
+	// optional), but then only persistent volumes are supported.
+	var csiDriver *storage.CSIDriver
+	driver := string(c.driverName)
+	if c.plugin.csiDriverLister != nil {
+		c, err := c.plugin.getCSIDriver(driver)
+		if err != nil && !apierrors.IsNotFound(err) {
+			// Some internal error.
+			return err
+		}
+		csiDriver = c
+	}
+
+	// The right response depends on whether we have information
+	// about the driver and the volume mode.
+	switch {
+	case csiDriver == nil && c.volumeLifecycleMode == storage.VolumeLifecyclePersistent:
+		// No information, but that's okay for persistent volumes (and only those).
+		return nil
+	case csiDriver == nil:
+		return fmt.Errorf("volume mode %q not supported by driver %s (no CSIDriver object)", c.volumeLifecycleMode, driver)
+	case containsVolumeMode(csiDriver.Spec.VolumeLifecycleModes, c.volumeLifecycleMode):
+		// Explicitly listed.
+		return nil
+	default:
+		return fmt.Errorf("volume mode %q not supported by driver %s (only supports %q)", c.volumeLifecycleMode, driver, csiDriver.Spec.VolumeLifecycleModes)
+	}
+}
+
+// containsVolumeMode checks whether the given volume mode is listed.
+func containsVolumeMode(modes []storage.VolumeLifecycleMode, mode storage.VolumeLifecycleMode) bool {
+	for _, m := range modes {
+		if m == mode {
+			return true
+		}
+	}
+	return false
+}
+
 // isDirMounted returns the !notMounted result from IsLikelyNotMountPoint check
 func isDirMounted(plug *csiPlugin, dir string) (bool, error) {
 	mounter := plug.host.GetMounter(plug.GetPluginName())
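A tiny usage sketch of the containsVolumeMode helper added above; the helper body is copied from the hunk, and the mode values are illustrative:

package main

import (
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
)

// containsVolumeMode checks whether the given volume mode is listed.
func containsVolumeMode(modes []storagev1.VolumeLifecycleMode, mode storagev1.VolumeLifecycleMode) bool {
	for _, m := range modes {
		if m == mode {
			return true
		}
	}
	return false
}

func main() {
	modes := []storagev1.VolumeLifecycleMode{storagev1.VolumeLifecyclePersistent}
	fmt.Println(containsVolumeMode(modes, storagev1.VolumeLifecyclePersistent)) // true
	fmt.Println(containsVolumeMode(modes, storagev1.VolumeLifecycleEphemeral))  // false
}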
pkg/volume/csi/csi_mounter_test.go

@@ -30,7 +30,6 @@ import (

 	"github.com/google/go-cmp/cmp"
 	"github.com/stretchr/testify/assert"

 	authenticationv1 "k8s.io/api/authentication/v1"
 	corev1 "k8s.io/api/core/v1"
 	storage "k8s.io/api/storage/v1"
@@ -361,20 +360,21 @@ func TestMounterSetUpSimple(t *testing.T) {
 	defer os.RemoveAll(tmpDir)

 	testCases := []struct {
-		name       string
-		podUID     types.UID
-		mode       storage.VolumeLifecycleMode
-		fsType     string
-		options    []string
-		spec       func(string, []string) *volume.Spec
-		shouldFail bool
+		name                 string
+		podUID               types.UID
+		mode                 storage.VolumeLifecycleMode
+		fsType               string
+		options              []string
+		spec                 func(string, []string) *volume.Spec
+		newMounterShouldFail bool
+		setupShouldFail      bool
 	}{
 		{
-			name:       "setup with ephemeral source",
-			podUID:     types.UID(fmt.Sprintf("%08X", rand.Uint64())),
-			mode:       storage.VolumeLifecycleEphemeral,
-			fsType:     "ext4",
-			shouldFail: true,
+			name:            "setup with ephemeral source",
+			podUID:          types.UID(fmt.Sprintf("%08X", rand.Uint64())),
+			mode:            storage.VolumeLifecycleEphemeral,
+			fsType:          "ext4",
+			setupShouldFail: true,
 			spec: func(fsType string, options []string) *volume.Spec {
 				volSrc := makeTestVol("pv1", testDriver)
 				volSrc.CSI.FSType = &fsType
@@ -402,9 +402,9 @@ func TestMounterSetUpSimple(t *testing.T) {
 			},
 		},
 		{
-			name:       "setup with missing spec",
-			shouldFail: true,
-			spec:       func(fsType string, options []string) *volume.Spec { return nil },
+			name:                 "setup with missing spec",
+			newMounterShouldFail: true,
+			spec:                 func(fsType string, options []string) *volume.Spec { return nil },
 		},
 	}

@@ -416,11 +416,11 @@ func TestMounterSetUpSimple(t *testing.T) {
 			&corev1.Pod{ObjectMeta: meta.ObjectMeta{UID: tc.podUID, Namespace: testns}},
 			volume.VolumeOptions{},
 		)
-		if tc.shouldFail && err != nil {
+		if tc.newMounterShouldFail && err != nil {
 			t.Log(err)
 			return
 		}
-		if !tc.shouldFail && err != nil {
+		if !tc.newMounterShouldFail && err != nil {
 			t.Fatal("unexpected error:", err)
 		}
 		if mounter == nil {
@@ -430,7 +430,7 @@ func TestMounterSetUpSimple(t *testing.T) {
 		csiMounter := mounter.(*csiMountMgr)
 		csiMounter.csiClient = setupClient(t, true)

-		if csiMounter.volumeLifecycleMode != storage.VolumeLifecyclePersistent {
+		if csiMounter.volumeLifecycleMode != tc.mode {
 			t.Fatal("unexpected volume mode: ", csiMounter.volumeLifecycleMode)
 		}

@@ -442,8 +442,13 @@ func TestMounterSetUpSimple(t *testing.T) {
 		}

 		// Mounter.SetUp()
-		if err := csiMounter.SetUp(volume.MounterArgs{}); err != nil {
-			t.Fatalf("mounter.Setup failed: %v", err)
+		err = csiMounter.SetUp(volume.MounterArgs{})
+		if tc.setupShouldFail && err != nil {
+			t.Log(err)
+			return
+		}
+		if !tc.setupShouldFail && err != nil {
+			t.Fatal("unexpected error:", err)
 		}

 		// ensure call went all the way
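The table now records separately whether a case should fail in NewMounter or in SetUp. A standalone sketch of that bookkeeping pattern; the names here are hypothetical, not from the repository:

package main

import (
	"errors"
	"fmt"
)

type testCase struct {
	name                 string
	newMounterShouldFail bool
	setupShouldFail      bool
}

// check compares the observed outcome of one stage against the expectation
// recorded in the table and reports any mismatch.
func check(stage string, shouldFail bool, err error) error {
	if shouldFail && err == nil {
		return fmt.Errorf("%s: expected failure, got none", stage)
	}
	if !shouldFail && err != nil {
		return fmt.Errorf("%s: unexpected error: %w", stage, err)
	}
	return nil
}

func main() {
	tc := testCase{name: "setup with ephemeral source", setupShouldFail: true}
	// NewMounter succeeds and SetUp fails: exactly what this case expects.
	if err := check("NewMounter", tc.newMounterShouldFail, nil); err != nil {
		fmt.Println(err)
		return
	}
	if err := check("SetUp", tc.setupShouldFail, errors.New("volume mode not supported")); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("expectations met for:", tc.name)
}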
@@ -969,9 +974,6 @@ func TestMounterSetUpWithFSGroup(t *testing.T) {
 		}

 		csiMounter := mounter.(*csiMountMgr)
-		if tc.driverFSGroupPolicy {
-			csiMounter.fsGroupPolicy = tc.supportMode
-		}
 		csiMounter.csiClient = setupClientWithVolumeMountGroup(t, true /* stageUnstageSet */, tc.driverSupportsVolumeMountGroup)

 		attachID := getAttachmentName(csiMounter.volumeID, string(csiMounter.driverName), string(plug.host.GetNodeName()))
@@ -1219,7 +1221,6 @@ func Test_csiMountMgr_supportsFSGroup(t *testing.T) {
 		plugin              *csiPlugin
 		driverName          csiDriverName
 		volumeLifecycleMode storage.VolumeLifecycleMode
-		fsGroupPolicy       storage.FSGroupPolicy
 		volumeID            string
 		specVolumeID        string
 		readOnly            bool
@@ -1345,7 +1346,6 @@ func Test_csiMountMgr_supportsFSGroup(t *testing.T) {
 				plugin:              tt.fields.plugin,
 				driverName:          tt.fields.driverName,
 				volumeLifecycleMode: tt.fields.volumeLifecycleMode,
-				fsGroupPolicy:       tt.fields.fsGroupPolicy,
 				volumeID:            tt.fields.volumeID,
 				specVolumeID:        tt.fields.specVolumeID,
 				readOnly:            tt.fields.readOnly,
@@ -1363,3 +1363,64 @@ func Test_csiMountMgr_supportsFSGroup(t *testing.T) {
 		})
 	}
 }
+
+func TestMounterGetFSGroupPolicy(t *testing.T) {
+	defaultPolicy := storage.ReadWriteOnceWithFSTypeFSGroupPolicy
+	testCases := []struct {
+		name                  string
+		defined               bool
+		expectedFSGroupPolicy storage.FSGroupPolicy
+	}{
+		{
+			name:                  "no FSGroupPolicy defined, expect default",
+			defined:               false,
+			expectedFSGroupPolicy: storage.ReadWriteOnceWithFSTypeFSGroupPolicy,
+		},
+		{
+			name:                  "File FSGroupPolicy defined, expect File",
+			defined:               true,
+			expectedFSGroupPolicy: storage.FileFSGroupPolicy,
+		},
+		{
+			name:                  "None FSGroupPolicy defined, expected None",
+			defined:               true,
+			expectedFSGroupPolicy: storage.NoneFSGroupPolicy,
+		},
+	}
+	for _, tc := range testCases {
+		t.Logf("testing: %s", tc.name)
+		// Define the driver and set the FSGroupPolicy
+		driver := getTestCSIDriver(testDriver, nil, nil, nil)
+		if tc.defined {
+			driver.Spec.FSGroupPolicy = &tc.expectedFSGroupPolicy
+		} else {
+			driver.Spec.FSGroupPolicy = &defaultPolicy
+		}
+
+		// Create the client and register the resources
+		fakeClient := fakeclient.NewSimpleClientset(driver)
+		plug, tmpDir := newTestPlugin(t, fakeClient)
+		defer os.RemoveAll(tmpDir)
+		registerFakePlugin(testDriver, "endpoint", []string{"1.3.0"}, t)
+
+		mounter, err := plug.NewMounter(
+			volume.NewSpecFromPersistentVolume(makeTestPV("test.vol.id", 20, testDriver, "testvol-handle1"), true),
+			&corev1.Pod{ObjectMeta: meta.ObjectMeta{UID: "1", Namespace: testns}},
+			volume.VolumeOptions{},
+		)
+		if err != nil {
+			t.Fatalf("Error creating a new mounter: %s", err)
+		}
+
+		csiMounter := mounter.(*csiMountMgr)
+
+		// Check to see if we can obtain the CSIDriver, along with examining its FSGroupPolicy
+		fsGroup, err := csiMounter.getFSGroupPolicy()
+		if err != nil {
+			t.Fatalf("Error attempting to obtain FSGroupPolicy: %v", err)
+		}
+		if fsGroup != *driver.Spec.FSGroupPolicy {
+			t.Fatalf("FSGroupPolicy doesn't match expected value: %v, %v", fsGroup, tc.expectedFSGroupPolicy)
+		}
+	}
+}
pkg/volume/csi/csi_plugin.go

@@ -388,17 +388,6 @@ func (p *csiPlugin) NewMounter(
 		return nil, err
 	}

-	// Check CSIDriver.Spec.Mode to ensure that the CSI driver
-	// supports the current volumeLifecycleMode.
-	if err := p.supportsVolumeLifecycleMode(driverName, volumeLifecycleMode); err != nil {
-		return nil, err
-	}
-
-	fsGroupPolicy, err := p.getFSGroupPolicy(driverName)
-	if err != nil {
-		return nil, err
-	}
-
 	k8s := p.host.GetKubeClient()
 	if k8s == nil {
 		return nil, errors.New(log("failed to get a kubernetes client"))
@@ -417,7 +406,6 @@ func (p *csiPlugin) NewMounter(
 		podUID:              pod.UID,
 		driverName:          csiDriverName(driverName),
 		volumeLifecycleMode: volumeLifecycleMode,
-		fsGroupPolicy:       fsGroupPolicy,
 		volumeID:            volumeHandle,
 		specVolumeID:        spec.Name(),
 		readOnly:            readOnly,
@@ -780,49 +768,6 @@ func (p *csiPlugin) getCSIDriver(driver string) (*storage.CSIDriver, error) {
 	return csiDriver, err
 }

-// supportsVolumeMode checks whether the CSI driver supports a volume in the given mode.
-// An error indicates that it isn't supported and explains why.
-func (p *csiPlugin) supportsVolumeLifecycleMode(driver string, volumeMode storage.VolumeLifecycleMode) error {
-	// Retrieve CSIDriver. It's not an error if that isn't
-	// possible (we don't have the lister if CSIDriverRegistry is
-	// disabled) or the driver isn't found (CSIDriver is
-	// optional), but then only persistent volumes are supported.
-	var csiDriver *storage.CSIDriver
-	if p.csiDriverLister != nil {
-		c, err := p.getCSIDriver(driver)
-		if err != nil && !apierrors.IsNotFound(err) {
-			// Some internal error.
-			return err
-		}
-		csiDriver = c
-	}
-
-	// The right response depends on whether we have information
-	// about the driver and the volume mode.
-	switch {
-	case csiDriver == nil && volumeMode == storage.VolumeLifecyclePersistent:
-		// No information, but that's okay for persistent volumes (and only those).
-		return nil
-	case csiDriver == nil:
-		return fmt.Errorf("volume mode %q not supported by driver %s (no CSIDriver object)", volumeMode, driver)
-	case containsVolumeMode(csiDriver.Spec.VolumeLifecycleModes, volumeMode):
-		// Explicitly listed.
-		return nil
-	default:
-		return fmt.Errorf("volume mode %q not supported by driver %s (only supports %q)", volumeMode, driver, csiDriver.Spec.VolumeLifecycleModes)
-	}
-}
-
-// containsVolumeMode checks whether the given volume mode is listed.
-func containsVolumeMode(modes []storage.VolumeLifecycleMode, mode storage.VolumeLifecycleMode) bool {
-	for _, m := range modes {
-		if m == mode {
-			return true
-		}
-	}
-	return false
-}
-
 // getVolumeLifecycleMode returns the mode for the specified spec: {persistent|ephemeral}.
 // 1) If mode cannot be determined, it will default to "persistent".
 // 2) If Mode cannot be resolved to either {persistent | ephemeral}, an error is returned
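Both the removed plugin-level helpers and their csiMountMgr replacements treat a missing CSIDriver object as acceptable rather than fatal, via apierrors.IsNotFound. A self-contained sketch of that pattern against a fake clientset (the driver name is illustrative):

package main

import (
	"context"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	fakeclient "k8s.io/client-go/kubernetes/fake"
)

func main() {
	// No CSIDriver objects registered, so Get returns a NotFound error.
	client := fakeclient.NewSimpleClientset()
	_, err := client.StorageV1().CSIDrivers().Get(context.TODO(), "hostpath.csi.k8s.io", metav1.GetOptions{})
	switch {
	case apierrors.IsNotFound(err):
		// CSIDriver is optional: fall back to the defaults
		// (persistent-only lifecycle, ReadWriteOnceWithFSType policy).
		fmt.Println("CSIDriver missing, using defaults")
	case err != nil:
		fmt.Println("internal error:", err)
	default:
		fmt.Println("CSIDriver found")
	}
}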
@@ -841,34 +786,6 @@ func (p *csiPlugin) getVolumeLifecycleMode(spec *volume.Spec) (storage.VolumeLif
 	return storage.VolumeLifecyclePersistent, nil
 }

-// getFSGroupPolicy returns if the CSI driver supports a volume in the given mode.
-// An error indicates that it isn't supported and explains why.
-func (p *csiPlugin) getFSGroupPolicy(driver string) (storage.FSGroupPolicy, error) {
-	// Retrieve CSIDriver. It's not an error if that isn't
-	// possible (we don't have the lister if CSIDriverRegistry is
-	// disabled) or the driver isn't found (CSIDriver is
-	// optional)
-	var csiDriver *storage.CSIDriver
-	if p.csiDriverLister != nil {
-		c, err := p.getCSIDriver(driver)
-		if err != nil && !apierrors.IsNotFound(err) {
-			// Some internal error.
-			return storage.ReadWriteOnceWithFSTypeFSGroupPolicy, err
-		}
-		csiDriver = c
-	}
-
-	// If the csiDriver isn't defined, return the default behavior
-	if csiDriver == nil {
-		return storage.ReadWriteOnceWithFSTypeFSGroupPolicy, nil
-	}
-	// If the csiDriver exists but the fsGroupPolicy isn't defined, return an error
-	if csiDriver.Spec.FSGroupPolicy == nil || *csiDriver.Spec.FSGroupPolicy == "" {
-		return storage.ReadWriteOnceWithFSTypeFSGroupPolicy, errors.New(log("expected valid fsGroupPolicy, received nil value or empty string"))
-	}
-	return *csiDriver.Spec.FSGroupPolicy, nil
-}
-
 func (p *csiPlugin) getPublishContext(client clientset.Interface, handle, driver, nodeName string) (map[string]string, error) {
 	skip, err := p.skipAttach(driver)
 	if err != nil {
pkg/volume/csi/csi_plugin_test.go

@@ -159,57 +159,6 @@ func TestPluginGetPluginName(t *testing.T) {
 	}
 }

-func TestPluginGetFSGroupPolicy(t *testing.T) {
-	defaultPolicy := storage.ReadWriteOnceWithFSTypeFSGroupPolicy
-	testCases := []struct {
-		name                  string
-		defined               bool
-		expectedFSGroupPolicy storage.FSGroupPolicy
-	}{
-		{
-			name:                  "no FSGroupPolicy defined, expect default",
-			defined:               false,
-			expectedFSGroupPolicy: storage.ReadWriteOnceWithFSTypeFSGroupPolicy,
-		},
-		{
-			name:                  "File FSGroupPolicy defined, expect File",
-			defined:               true,
-			expectedFSGroupPolicy: storage.FileFSGroupPolicy,
-		},
-		{
-			name:                  "None FSGroupPolicy defined, expected None",
-			defined:               true,
-			expectedFSGroupPolicy: storage.NoneFSGroupPolicy,
-		},
-	}
-	for _, tc := range testCases {
-		t.Logf("testing: %s", tc.name)
-		// Define the driver and set the FSGroupPolicy
-		driver := getTestCSIDriver(testDriver, nil, nil, nil)
-		if tc.defined {
-			driver.Spec.FSGroupPolicy = &tc.expectedFSGroupPolicy
-		} else {
-			driver.Spec.FSGroupPolicy = &defaultPolicy
-		}
-
-		// Create the client and register the resources
-		fakeClient := fakeclient.NewSimpleClientset(driver)
-		plug, tmpDir := newTestPlugin(t, fakeClient)
-		defer os.RemoveAll(tmpDir)
-		registerFakePlugin(testDriver, "endpoint", []string{"1.3.0"}, t)
-
-		// Check to see if we can obtain the CSIDriver, along with examining its FSGroupPolicy
-		fsGroup, err := plug.getFSGroupPolicy(testDriver)
-		if err != nil {
-			t.Fatalf("Error attempting to obtain FSGroupPolicy: %v", err)
-		}
-		if fsGroup != *driver.Spec.FSGroupPolicy {
-			t.Fatalf("FSGroupPolicy doesn't match expected value: %v, %v", fsGroup, tc.expectedFSGroupPolicy)
-		}
-	}
-
-}
-
 func TestPluginGetVolumeName(t *testing.T) {
 	plug, tmpDir := newTestPlugin(t, nil)
 	defer os.RemoveAll(tmpDir)
@@ -372,7 +321,6 @@ func TestPluginConstructVolumeSpec(t *testing.T) {
 		specVolID  string
 		volHandle  string
 		podUID     types.UID
-		shouldFail bool
 	}{
 		{
 			name: "construct spec1 from original persistent spec",
@@ -388,13 +336,6 @@ func TestPluginConstructVolumeSpec(t *testing.T) {
 			originSpec: volume.NewSpecFromPersistentVolume(makeTestPV("spec2", 20, testDriver, "handle2"), true),
 			podUID:     types.UID(fmt.Sprintf("%08X", rand.Uint64())),
 		},
-		{
-			name:       "construct spec from original volume spec",
-			specVolID:  "volspec",
-			originSpec: volume.NewSpecFromVolume(makeTestVol("spec2", testDriver)),
-			podUID:     types.UID(fmt.Sprintf("%08X", rand.Uint64())),
-			shouldFail: true, // csi inline off
-		},
 	}

 	registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t)
@@ -406,11 +347,7 @@ func TestPluginConstructVolumeSpec(t *testing.T) {
 			&api.Pod{ObjectMeta: meta.ObjectMeta{UID: tc.podUID, Namespace: testns}},
 			volume.VolumeOptions{},
 		)
-		if tc.shouldFail && err != nil {
-			t.Log(err)
-			return
-		}
-		if !tc.shouldFail && err != nil {
+		if err != nil {
 			t.Fatal(err)
 		}
 		if mounter == nil {
@@ -628,7 +565,7 @@ func TestPluginNewMounter(t *testing.T) {
 			podUID:              types.UID(fmt.Sprintf("%08X", rand.Uint64())),
 			namespace:           "test-ns2",
 			volumeLifecycleMode: storage.VolumeLifecycleEphemeral,
-			shouldFail:          true, // csi inline not enabled
+			shouldFail:          false, // NewMounter works with disabled inline volumes
 		},
 		{
 			name: "mounter from no spec provided",
@@ -753,10 +690,6 @@ func TestPluginNewMounterWithInline(t *testing.T) {

 			// Some test cases are meant to fail because their input data is broken.
 			shouldFail := test.shouldFail
-			// Others fail if the driver does not support the volume mode.
-			if !containsVolumeMode(supported, test.volumeLifecycleMode) {
-				shouldFail = true
-			}
 			if shouldFail != (err != nil) {
 				t.Fatal("Unexpected error:", err)
 			}
pkg/volume/csi/csi_test.go

@@ -43,16 +43,16 @@ func TestCSI_VolumeAll(t *testing.T) {
 	defaultFSGroupPolicy := storage.ReadWriteOnceWithFSTypeFSGroupPolicy

 	tests := []struct {
-		name         string
-		specName     string
-		driver       string
-		volName      string
-		specFunc     func(specName, driver, volName string) *volume.Spec
-		podFunc      func() *api.Pod
-		isInline     bool
-		shouldFail   bool
-		driverSpec   *storage.CSIDriverSpec
-		watchTimeout time.Duration
+		name                 string
+		specName             string
+		driver               string
+		volName              string
+		specFunc             func(specName, driver, volName string) *volume.Spec
+		podFunc              func() *api.Pod
+		isInline             bool
+		findPluginShouldFail bool
+		driverSpec           *storage.CSIDriverSpec
+		watchTimeout         time.Duration
 	}{
 		{
 			name: "PersistentVolume",
@@ -102,7 +102,6 @@ func TestCSI_VolumeAll(t *testing.T) {
 				VolumeLifecycleModes: []storage.VolumeLifecycleMode{storage.VolumeLifecycleEphemeral},
 				FSGroupPolicy:        &defaultFSGroupPolicy,
 			},
-			shouldFail: true,
 		},
 		{
 			name: "ephemeral inline supported",
@@ -169,6 +168,7 @@ func TestCSI_VolumeAll(t *testing.T) {
 				// This means the driver *cannot* handle the inline volume because
 				// the default is "persistent".
 				VolumeLifecycleModes: nil,
+				FSGroupPolicy:        &defaultFSGroupPolicy,
 			},
 		},
 		{
@@ -186,6 +186,7 @@ func TestCSI_VolumeAll(t *testing.T) {
 			driverSpec: &storage.CSIDriverSpec{
 				// This means the driver *cannot* handle the inline volume.
 				VolumeLifecycleModes: []storage.VolumeLifecycleMode{storage.VolumeLifecyclePersistent},
+				FSGroupPolicy:        &defaultFSGroupPolicy,
 			},
 		},
 		{
@@ -200,7 +201,7 @@ func TestCSI_VolumeAll(t *testing.T) {
 				podUID := types.UID(fmt.Sprintf("%08X", rand.Uint64()))
 				return &api.Pod{ObjectMeta: meta.ObjectMeta{UID: podUID, Namespace: testns}}
 			},
-			shouldFail: true,
+			findPluginShouldFail: true,
 		},
 		{
 			name: "incomplete spec",
@@ -214,7 +215,7 @@ func TestCSI_VolumeAll(t *testing.T) {
 				podUID := types.UID(fmt.Sprintf("%08X", rand.Uint64()))
 				return &api.Pod{ObjectMeta: meta.ObjectMeta{UID: podUID, Namespace: testns}}
 			},
-			shouldFail: true,
+			findPluginShouldFail: true,
 		},
 	}

@@ -277,7 +278,7 @@ func TestCSI_VolumeAll(t *testing.T) {
 			t.Log("csiTest.VolumeAll Attaching volume...")
 			attachPlug, err := attachDetachPlugMgr.FindAttachablePluginBySpec(volSpec)
 			if err != nil {
-				if !test.shouldFail {
+				if !test.findPluginShouldFail {
 					t.Fatalf("csiTest.VolumeAll PluginManager.FindAttachablePluginBySpec failed: %v", err)
 				} else {
 					t.Log("csiTest.VolumeAll failed: ", err)
@@ -398,22 +399,6 @@ func TestCSI_VolumeAll(t *testing.T) {
 			}

 			mounter, err := volPlug.NewMounter(volSpec, pod, volume.VolumeOptions{})
-			if test.isInline && (test.driverSpec == nil || !containsVolumeMode(test.driverSpec.VolumeLifecycleModes, storage.VolumeLifecycleEphemeral)) {
-				// This *must* fail because a CSIDriver.Spec.VolumeLifecycleModes entry "ephemeral"
-				// is required.
-				if err == nil || mounter != nil {
-					t.Fatalf("csiTest.VolumeAll volPlugin.NewMounter should have failed for inline volume due to lack of support for inline volumes, got: %+v, %s", mounter, err)
-				}
-				return
-			}
-			if !test.isInline && test.driverSpec != nil && !containsVolumeMode(test.driverSpec.VolumeLifecycleModes, storage.VolumeLifecyclePersistent) {
-				// This *must* fail because a CSIDriver.Spec.VolumeLifecycleModes entry "persistent"
-				// is required when a driver object is available.
-				if err == nil || mounter != nil {
-					t.Fatalf("csiTest.VolumeAll volPlugin.NewMounter should have failed for persistent volume due to lack of support for persistent volumes, got: %+v, %s", mounter, err)
-				}
-				return
-			}
 			if err != nil || mounter == nil {
 				t.Fatalf("csiTest.VolumeAll volPlugin.NewMounter is nil or error: %s", err)
 			}
@@ -427,7 +412,24 @@ func TestCSI_VolumeAll(t *testing.T) {
 			csiMounter.csiClient = csiClient
 			var mounterArgs volume.MounterArgs
 			mounterArgs.FsGroup = fsGroup
-			if err := csiMounter.SetUp(mounterArgs); err != nil {
+			err = csiMounter.SetUp(mounterArgs)
+			if test.isInline && (test.driverSpec == nil || !containsVolumeMode(test.driverSpec.VolumeLifecycleModes, storage.VolumeLifecycleEphemeral)) {
+				// This *must* fail because a CSIDriver.Spec.VolumeLifecycleModes entry "ephemeral"
+				// is required.
+				if err == nil {
+					t.Fatalf("csiTest.VolumeAll volPlugin.NewMounter should have failed for inline volume due to lack of support for inline volumes, got: %+v, %s", mounter, err)
+				}
+				return
+			}
+			if !test.isInline && test.driverSpec != nil && !containsVolumeMode(test.driverSpec.VolumeLifecycleModes, storage.VolumeLifecyclePersistent) {
+				// This *must* fail because a CSIDriver.Spec.VolumeLifecycleModes entry "persistent"
+				// is required when a driver object is available.
+				if err == nil {
+					t.Fatalf("csiTest.VolumeAll volPlugin.NewMounter should have failed for persistent volume due to lack of support for persistent volumes, got: %+v, %s", mounter, err)
+				}
+				return
+			}
+			if err != nil {
 				t.Fatalf("csiTest.VolumeAll mounter.Setup(fsGroup) failed: %s", err)
 			}
 			t.Log("csiTest.VolumeAll mounter.Setup(fsGroup) done OK")
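Net effect illustrated by this test: NewMounter no longer fails for a volume mode the driver does not support; SetUp does. A minimal sketch of the resulting two-phase flow (all names hypothetical, plugin lookup elided):

package main

import "fmt"

// mounter models the post-commit behavior: construction never consults the
// CSIDriver object, mounting does.
type mounter struct{ driverRegistered bool }

func newMounter(driverRegistered bool) *mounter {
	// No CSIDriver checks here any more, so this cannot fail on a
	// missing or mismatched driver.
	return &mounter{driverRegistered: driverRegistered}
}

func (m *mounter) setUpAt() error {
	// Lifecycle-mode and fsGroupPolicy checks happen here instead.
	if !m.driverRegistered {
		return fmt.Errorf("transient: CSIDriver object not found")
	}
	return nil
}

func main() {
	m := newMounter(false) // succeeds even though the driver is missing
	if err := m.setUpAt(); err != nil {
		fmt.Println("mount deferred:", err) // kubelet retries the mount later
	}
}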