From 04183005e4816259244f5f94a2f42805ad232287 Mon Sep 17 00:00:00 2001
From: Jan Safranek
Date: Mon, 24 Oct 2022 14:48:07 +0200
Subject: [PATCH 1/2] Move lifecycle checks from NewMounter to SetUpAt

The checks need to get the CSIDriver object from the API server, and
the API server may not be available when NewMounter is called during
volume reconstruction.
---
 pkg/volume/csi/csi_mounter.go      | 50 ++++++++++++++++++++++++++++
 pkg/volume/csi/csi_mounter_test.go | 46 +++++++++++----------
 pkg/volume/csi/csi_plugin.go       | 49 -----------------------
 pkg/volume/csi/csi_plugin_test.go  | 20 +---------
 pkg/volume/csi/csi_test.go         | 64 +++++++++++++++---------------
 5 files changed, 111 insertions(+), 118 deletions(-)

diff --git a/pkg/volume/csi/csi_mounter.go b/pkg/volume/csi/csi_mounter.go
index 6c67b6ac86b..bf268ee1e7b 100644
--- a/pkg/volume/csi/csi_mounter.go
+++ b/pkg/volume/csi/csi_mounter.go
@@ -117,6 +117,12 @@ func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error
 		return errors.New(log("mounter.SetupAt failed to get CSI persistent source: %v", err))
 	}
 
+	// Check CSIDriver.Spec.VolumeLifecycleModes to ensure that the CSI driver
+	// supports the current volumeLifecycleMode.
+	if err := c.supportsVolumeLifecycleMode(); err != nil {
+		return volumetypes.NewTransientOperationFailure(log("mounter.SetupAt failed to check volume lifecycle mode: %s", err))
+	}
+
 	driverName := c.driverName
 	volumeHandle := c.volumeID
 	readOnly := c.readOnly
@@ -435,6 +441,50 @@ func (c *csiMountMgr) supportsFSGroup(fsType string, fsGroup *int64, driverPolic
 	return false
 }
 
+// supportsVolumeLifecycleMode checks whether the CSI driver supports a volume in the given lifecycle mode.
+// An error indicates that it isn't supported and explains why.
+func (c *csiMountMgr) supportsVolumeLifecycleMode() error {
+	// Retrieve CSIDriver. It's not an error if that isn't
+	// possible (we don't have the lister if CSIDriverRegistry is
+	// disabled) or the driver isn't found (CSIDriver is
+	// optional), but then only persistent volumes are supported.
+	var csiDriver *storage.CSIDriver
+	driver := string(c.driverName)
+	if c.plugin.csiDriverLister != nil {
+		c, err := c.plugin.getCSIDriver(driver)
+		if err != nil && !apierrors.IsNotFound(err) {
+			// Some internal error.
+			return err
+		}
+		csiDriver = c
+	}
+
+	// The right response depends on whether we have information
+	// about the driver and the volume mode.
+	switch {
+	case csiDriver == nil && c.volumeLifecycleMode == storage.VolumeLifecyclePersistent:
+		// No information, but that's okay for persistent volumes (and only those).
+		return nil
+	case csiDriver == nil:
+		return fmt.Errorf("volume mode %q not supported by driver %s (no CSIDriver object)", c.volumeLifecycleMode, driver)
+	case containsVolumeMode(csiDriver.Spec.VolumeLifecycleModes, c.volumeLifecycleMode):
+		// Explicitly listed.
+		return nil
+	default:
+		return fmt.Errorf("volume mode %q not supported by driver %s (only supports %q)", c.volumeLifecycleMode, driver, csiDriver.Spec.VolumeLifecycleModes)
+	}
+}
+
+// containsVolumeMode checks whether the given volume mode is listed.
+func containsVolumeMode(modes []storage.VolumeLifecycleMode, mode storage.VolumeLifecycleMode) bool { + for _, m := range modes { + if m == mode { + return true + } + } + return false +} + // isDirMounted returns the !notMounted result from IsLikelyNotMountPoint check func isDirMounted(plug *csiPlugin, dir string) (bool, error) { mounter := plug.host.GetMounter(plug.GetPluginName()) diff --git a/pkg/volume/csi/csi_mounter_test.go b/pkg/volume/csi/csi_mounter_test.go index c260302ae0b..acb97bf9b9a 100644 --- a/pkg/volume/csi/csi_mounter_test.go +++ b/pkg/volume/csi/csi_mounter_test.go @@ -311,20 +311,21 @@ func TestMounterSetUpSimple(t *testing.T) { defer os.RemoveAll(tmpDir) testCases := []struct { - name string - podUID types.UID - mode storage.VolumeLifecycleMode - fsType string - options []string - spec func(string, []string) *volume.Spec - shouldFail bool + name string + podUID types.UID + mode storage.VolumeLifecycleMode + fsType string + options []string + spec func(string, []string) *volume.Spec + newMounterShouldFail bool + setupShouldFail bool }{ { - name: "setup with ephemeral source", - podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())), - mode: storage.VolumeLifecycleEphemeral, - fsType: "ext4", - shouldFail: true, + name: "setup with ephemeral source", + podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())), + mode: storage.VolumeLifecycleEphemeral, + fsType: "ext4", + setupShouldFail: true, spec: func(fsType string, options []string) *volume.Spec { volSrc := makeTestVol("pv1", testDriver) volSrc.CSI.FSType = &fsType @@ -352,9 +353,9 @@ func TestMounterSetUpSimple(t *testing.T) { }, }, { - name: "setup with missing spec", - shouldFail: true, - spec: func(fsType string, options []string) *volume.Spec { return nil }, + name: "setup with missing spec", + newMounterShouldFail: true, + spec: func(fsType string, options []string) *volume.Spec { return nil }, }, } @@ -366,11 +367,11 @@ func TestMounterSetUpSimple(t *testing.T) { &corev1.Pod{ObjectMeta: meta.ObjectMeta{UID: tc.podUID, Namespace: testns}}, volume.VolumeOptions{}, ) - if tc.shouldFail && err != nil { + if tc.newMounterShouldFail && err != nil { t.Log(err) return } - if !tc.shouldFail && err != nil { + if !tc.newMounterShouldFail && err != nil { t.Fatal("unexpected error:", err) } if mounter == nil { @@ -380,7 +381,7 @@ func TestMounterSetUpSimple(t *testing.T) { csiMounter := mounter.(*csiMountMgr) csiMounter.csiClient = setupClient(t, true) - if csiMounter.volumeLifecycleMode != storage.VolumeLifecyclePersistent { + if csiMounter.volumeLifecycleMode != tc.mode { t.Fatal("unexpected volume mode: ", csiMounter.volumeLifecycleMode) } @@ -392,8 +393,13 @@ func TestMounterSetUpSimple(t *testing.T) { } // Mounter.SetUp() - if err := csiMounter.SetUp(volume.MounterArgs{}); err != nil { - t.Fatalf("mounter.Setup failed: %v", err) + err = csiMounter.SetUp(volume.MounterArgs{}) + if tc.setupShouldFail && err != nil { + t.Log(err) + return + } + if !tc.setupShouldFail && err != nil { + t.Fatal("unexpected error:", err) } // ensure call went all the way diff --git a/pkg/volume/csi/csi_plugin.go b/pkg/volume/csi/csi_plugin.go index efe48cfb73b..ced4d3de0f9 100644 --- a/pkg/volume/csi/csi_plugin.go +++ b/pkg/volume/csi/csi_plugin.go @@ -388,12 +388,6 @@ func (p *csiPlugin) NewMounter( return nil, err } - // Check CSIDriver.Spec.Mode to ensure that the CSI driver - // supports the current volumeLifecycleMode. 
- if err := p.supportsVolumeLifecycleMode(driverName, volumeLifecycleMode); err != nil { - return nil, err - } - fsGroupPolicy, err := p.getFSGroupPolicy(driverName) if err != nil { return nil, err @@ -822,49 +816,6 @@ func (p *csiPlugin) getCSIDriver(driver string) (*storage.CSIDriver, error) { return csiDriver, err } -// supportsVolumeMode checks whether the CSI driver supports a volume in the given mode. -// An error indicates that it isn't supported and explains why. -func (p *csiPlugin) supportsVolumeLifecycleMode(driver string, volumeMode storage.VolumeLifecycleMode) error { - // Retrieve CSIDriver. It's not an error if that isn't - // possible (we don't have the lister if CSIDriverRegistry is - // disabled) or the driver isn't found (CSIDriver is - // optional), but then only persistent volumes are supported. - var csiDriver *storage.CSIDriver - if p.csiDriverLister != nil { - c, err := p.getCSIDriver(driver) - if err != nil && !apierrors.IsNotFound(err) { - // Some internal error. - return err - } - csiDriver = c - } - - // The right response depends on whether we have information - // about the driver and the volume mode. - switch { - case csiDriver == nil && volumeMode == storage.VolumeLifecyclePersistent: - // No information, but that's okay for persistent volumes (and only those). - return nil - case csiDriver == nil: - return fmt.Errorf("volume mode %q not supported by driver %s (no CSIDriver object)", volumeMode, driver) - case containsVolumeMode(csiDriver.Spec.VolumeLifecycleModes, volumeMode): - // Explicitly listed. - return nil - default: - return fmt.Errorf("volume mode %q not supported by driver %s (only supports %q)", volumeMode, driver, csiDriver.Spec.VolumeLifecycleModes) - } -} - -// containsVolumeMode checks whether the given volume mode is listed. -func containsVolumeMode(modes []storage.VolumeLifecycleMode, mode storage.VolumeLifecycleMode) bool { - for _, m := range modes { - if m == mode { - return true - } - } - return false -} - // getVolumeLifecycleMode returns the mode for the specified spec: {persistent|ephemeral}. // 1) If mode cannot be determined, it will default to "persistent". 
// 2) If Mode cannot be resolved to either {persistent | ephemeral}, an error is returned diff --git a/pkg/volume/csi/csi_plugin_test.go b/pkg/volume/csi/csi_plugin_test.go index fbabaf4b37a..534de164131 100644 --- a/pkg/volume/csi/csi_plugin_test.go +++ b/pkg/volume/csi/csi_plugin_test.go @@ -372,7 +372,6 @@ func TestPluginConstructVolumeSpec(t *testing.T) { specVolID string volHandle string podUID types.UID - shouldFail bool }{ { name: "construct spec1 from original persistent spec", @@ -388,13 +387,6 @@ func TestPluginConstructVolumeSpec(t *testing.T) { originSpec: volume.NewSpecFromPersistentVolume(makeTestPV("spec2", 20, testDriver, "handle2"), true), podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())), }, - { - name: "construct spec from original volume spec", - specVolID: "volspec", - originSpec: volume.NewSpecFromVolume(makeTestVol("spec2", testDriver)), - podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())), - shouldFail: true, // csi inline off - }, } registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t) @@ -406,11 +398,7 @@ func TestPluginConstructVolumeSpec(t *testing.T) { &api.Pod{ObjectMeta: meta.ObjectMeta{UID: tc.podUID, Namespace: testns}}, volume.VolumeOptions{}, ) - if tc.shouldFail && err != nil { - t.Log(err) - return - } - if !tc.shouldFail && err != nil { + if err != nil { t.Fatal(err) } if mounter == nil { @@ -617,7 +605,7 @@ func TestPluginNewMounter(t *testing.T) { podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())), namespace: "test-ns2", volumeLifecycleMode: storage.VolumeLifecycleEphemeral, - shouldFail: true, // csi inline not enabled + shouldFail: false, // NewMounter works with disabled inline volumes }, { name: "mounter from no spec provided", @@ -772,10 +760,6 @@ func TestPluginNewMounterWithInline(t *testing.T) { // Some test cases are meant to fail because their input data is broken. shouldFail := test.shouldFail - // Others fail if the driver does not support the volume mode. - if !containsVolumeMode(supported, test.volumeLifecycleMode) { - shouldFail = true - } if shouldFail != (err != nil) { t.Fatal("Unexpected error:", err) } diff --git a/pkg/volume/csi/csi_test.go b/pkg/volume/csi/csi_test.go index 7aa5e39c46a..55ea7c23822 100644 --- a/pkg/volume/csi/csi_test.go +++ b/pkg/volume/csi/csi_test.go @@ -43,16 +43,16 @@ func TestCSI_VolumeAll(t *testing.T) { defaultFSGroupPolicy := storage.ReadWriteOnceWithFSTypeFSGroupPolicy tests := []struct { - name string - specName string - driver string - volName string - specFunc func(specName, driver, volName string) *volume.Spec - podFunc func() *api.Pod - isInline bool - shouldFail bool - driverSpec *storage.CSIDriverSpec - watchTimeout time.Duration + name string + specName string + driver string + volName string + specFunc func(specName, driver, volName string) *volume.Spec + podFunc func() *api.Pod + isInline bool + findPluginShouldFail bool + driverSpec *storage.CSIDriverSpec + watchTimeout time.Duration }{ { name: "PersistentVolume", @@ -102,7 +102,6 @@ func TestCSI_VolumeAll(t *testing.T) { VolumeLifecycleModes: []storage.VolumeLifecycleMode{storage.VolumeLifecycleEphemeral}, FSGroupPolicy: &defaultFSGroupPolicy, }, - shouldFail: true, }, { name: "ephemeral inline supported", @@ -169,6 +168,7 @@ func TestCSI_VolumeAll(t *testing.T) { // This means the driver *cannot* handle the inline volume because // the default is "persistent". 
 				VolumeLifecycleModes: nil,
+				FSGroupPolicy:        &defaultFSGroupPolicy,
 			},
 		},
 		{
@@ -186,6 +186,7 @@ func TestCSI_VolumeAll(t *testing.T) {
 			driverSpec: &storage.CSIDriverSpec{
 				// This means the driver *cannot* handle the inline volume.
 				VolumeLifecycleModes: []storage.VolumeLifecycleMode{storage.VolumeLifecyclePersistent},
+				FSGroupPolicy:        &defaultFSGroupPolicy,
 			},
 		},
 		{
@@ -200,7 +201,7 @@ func TestCSI_VolumeAll(t *testing.T) {
 				podUID := types.UID(fmt.Sprintf("%08X", rand.Uint64()))
 				return &api.Pod{ObjectMeta: meta.ObjectMeta{UID: podUID, Namespace: testns}}
 			},
-			shouldFail: true,
+			findPluginShouldFail: true,
 		},
 		{
 			name:    "incomplete spec",
@@ -214,7 +215,7 @@ func TestCSI_VolumeAll(t *testing.T) {
 				podUID := types.UID(fmt.Sprintf("%08X", rand.Uint64()))
 				return &api.Pod{ObjectMeta: meta.ObjectMeta{UID: podUID, Namespace: testns}}
 			},
-			shouldFail: true,
+			findPluginShouldFail: true,
 		},
 	}
 
@@ -277,7 +278,7 @@ func TestCSI_VolumeAll(t *testing.T) {
 			t.Log("csiTest.VolumeAll Attaching volume...")
 			attachPlug, err := attachDetachPlugMgr.FindAttachablePluginBySpec(volSpec)
 			if err != nil {
-				if !test.shouldFail {
+				if !test.findPluginShouldFail {
 					t.Fatalf("csiTest.VolumeAll PluginManager.FindAttachablePluginBySpec failed: %v", err)
 				} else {
 					t.Log("csiTest.VolumeAll failed: ", err)
@@ -398,22 +399,6 @@ func TestCSI_VolumeAll(t *testing.T) {
 			}
 
 			mounter, err := volPlug.NewMounter(volSpec, pod, volume.VolumeOptions{})
-			if test.isInline && (test.driverSpec == nil || !containsVolumeMode(test.driverSpec.VolumeLifecycleModes, storage.VolumeLifecycleEphemeral)) {
-				// This *must* fail because a CSIDriver.Spec.VolumeLifecycleModes entry "ephemeral"
-				// is required.
-				if err == nil || mounter != nil {
-					t.Fatalf("csiTest.VolumeAll volPlugin.NewMounter should have failed for inline volume due to lack of support for inline volumes, got: %+v, %s", mounter, err)
-				}
-				return
-			}
-			if !test.isInline && test.driverSpec != nil && !containsVolumeMode(test.driverSpec.VolumeLifecycleModes, storage.VolumeLifecyclePersistent) {
-				// This *must* fail because a CSIDriver.Spec.VolumeLifecycleModes entry "persistent"
-				// is required when a driver object is available.
-				if err == nil || mounter != nil {
-					t.Fatalf("csiTest.VolumeAll volPlugin.NewMounter should have failed for persistent volume due to lack of support for persistent volumes, got: %+v, %s", mounter, err)
-				}
-				return
-			}
 			if err != nil || mounter == nil {
 				t.Fatalf("csiTest.VolumeAll volPlugin.NewMounter is nil or error: %s", err)
 			}
@@ -427,7 +412,24 @@ func TestCSI_VolumeAll(t *testing.T) {
 			csiMounter.csiClient = csiClient
 			var mounterArgs volume.MounterArgs
 			mounterArgs.FsGroup = fsGroup
-			if err := csiMounter.SetUp(mounterArgs); err != nil {
+			err = csiMounter.SetUp(mounterArgs)
+			if test.isInline && (test.driverSpec == nil || !containsVolumeMode(test.driverSpec.VolumeLifecycleModes, storage.VolumeLifecycleEphemeral)) {
+				// This *must* fail because a CSIDriver.Spec.VolumeLifecycleModes entry "ephemeral"
+				// is required.
+				if err == nil {
+					t.Fatalf("csiTest.VolumeAll csiMounter.SetUp should have failed for inline volume due to lack of support for inline volumes, got: %+v, %s", mounter, err)
+				}
+				return
+			}
+			if !test.isInline && test.driverSpec != nil && !containsVolumeMode(test.driverSpec.VolumeLifecycleModes, storage.VolumeLifecyclePersistent) {
+				// This *must* fail because a CSIDriver.Spec.VolumeLifecycleModes entry "persistent"
+				// is required when a driver object is available.
+				if err == nil {
+					t.Fatalf("csiTest.VolumeAll csiMounter.SetUp should have failed for persistent volume due to lack of support for persistent volumes, got: %+v, %s", mounter, err)
+				}
+				return
+			}
+			if err != nil {
 				t.Fatalf("csiTest.VolumeAll mounter.Setup(fsGroup) failed: %s", err)
 			}
 			t.Log("csiTest.VolumeAll mounter.Setup(fsGroup) done OK")

From 483fd45e8e4ff3c17f3639a21038dc7c9c61c132 Mon Sep 17 00:00:00 2001
From: Jan Safranek
Date: Mon, 24 Oct 2022 15:30:12 +0200
Subject: [PATCH 2/2] Move fsGroupPolicy from NewMounter to SetUpAt

getFSGroupPolicy needs to get CSIDriver from the API server, which may
not be available during volume reconstruction at kubelet startup.
---
 pkg/volume/csi/csi_mounter.go      | 37 ++++++++++++++++-
 pkg/volume/csi/csi_mounter_test.go | 67 +++++++++++++++++++++++++++---
 pkg/volume/csi/csi_plugin.go       | 34 ---------------
 pkg/volume/csi/csi_plugin_test.go  | 51 -----------------------
 4 files changed, 96 insertions(+), 93 deletions(-)

diff --git a/pkg/volume/csi/csi_mounter.go b/pkg/volume/csi/csi_mounter.go
index bf268ee1e7b..026b0b60200 100644
--- a/pkg/volume/csi/csi_mounter.go
+++ b/pkg/volume/csi/csi_mounter.go
@@ -67,7 +67,6 @@ type csiMountMgr struct {
 	plugin              *csiPlugin
 	driverName          csiDriverName
 	volumeLifecycleMode storage.VolumeLifecycleMode
-	fsGroupPolicy       storage.FSGroupPolicy
 	volumeID            string
 	specVolumeID        string
 	readOnly            bool
@@ -123,6 +122,11 @@ func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error
 		return volumetypes.NewTransientOperationFailure(log("mounter.SetupAt failed to check volume lifecycle mode: %s", err))
 	}
 
+	fsGroupPolicy, err := c.getFSGroupPolicy()
+	if err != nil {
+		return volumetypes.NewTransientOperationFailure(log("mounter.SetupAt failed to check fsGroup policy: %s", err))
+	}
+
 	driverName := c.driverName
 	volumeHandle := c.volumeID
 	readOnly := c.readOnly
@@ -294,7 +298,7 @@ func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error
 		}
 	}
 
-	if !driverSupportsCSIVolumeMountGroup && c.supportsFSGroup(fsType, mounterArgs.FsGroup, c.fsGroupPolicy) {
+	if !driverSupportsCSIVolumeMountGroup && c.supportsFSGroup(fsType, mounterArgs.FsGroup, fsGroupPolicy) {
 		// Driver doesn't support applying FSGroup. Kubelet must apply it instead.
 
 		// fullPluginName helps to distinguish different driver from csi plugin
@@ -441,6 +445,35 @@ func (c *csiMountMgr) supportsFSGroup(fsType string, fsGroup *int64, driverPolic
 	return false
 }
 
+// getFSGroupPolicy returns the FSGroupPolicy that should be applied to the volume,
+// defaulting to ReadWriteOnceWithFSType when no CSIDriver object is available.
+func (c *csiMountMgr) getFSGroupPolicy() (storage.FSGroupPolicy, error) {
+	// Retrieve CSIDriver. It's not an error if that isn't
+	// possible (we don't have the lister if CSIDriverRegistry is
+	// disabled) or the driver isn't found (CSIDriver is
+	// optional)
+	var csiDriver *storage.CSIDriver
+	driver := string(c.driverName)
+	if c.plugin.csiDriverLister != nil {
+		c, err := c.plugin.getCSIDriver(driver)
+		if err != nil && !apierrors.IsNotFound(err) {
+			// Some internal error.
+			return storage.ReadWriteOnceWithFSTypeFSGroupPolicy, err
+		}
+		csiDriver = c
+	}
+
+	// If the csiDriver isn't defined, return the default behavior
+	if csiDriver == nil {
+		return storage.ReadWriteOnceWithFSTypeFSGroupPolicy, nil
+	}
+	// If the csiDriver exists but the fsGroupPolicy isn't defined, return an error
+	if csiDriver.Spec.FSGroupPolicy == nil || *csiDriver.Spec.FSGroupPolicy == "" {
+		return storage.ReadWriteOnceWithFSTypeFSGroupPolicy, errors.New(log("expected valid fsGroupPolicy, received nil value or empty string"))
+	}
+	return *csiDriver.Spec.FSGroupPolicy, nil
+}
+
 // supportsVolumeLifecycleMode checks whether the CSI driver supports a volume in the given lifecycle mode.
 // An error indicates that it isn't supported and explains why.
 func (c *csiMountMgr) supportsVolumeLifecycleMode() error {
diff --git a/pkg/volume/csi/csi_mounter_test.go b/pkg/volume/csi/csi_mounter_test.go
index acb97bf9b9a..b8bd2669b1e 100644
--- a/pkg/volume/csi/csi_mounter_test.go
+++ b/pkg/volume/csi/csi_mounter_test.go
@@ -30,7 +30,6 @@ import (
 
 	"github.com/google/go-cmp/cmp"
 	"github.com/stretchr/testify/assert"
-
 	authenticationv1 "k8s.io/api/authentication/v1"
 	corev1 "k8s.io/api/core/v1"
 	storage "k8s.io/api/storage/v1"
@@ -895,9 +894,6 @@ func TestMounterSetUpWithFSGroup(t *testing.T) {
 			}
 
 			csiMounter := mounter.(*csiMountMgr)
-			if tc.driverFSGroupPolicy {
-				csiMounter.fsGroupPolicy = tc.supportMode
-			}
 			csiMounter.csiClient = setupClientWithVolumeMountGroup(t, true /* stageUnstageSet */, tc.driverSupportsVolumeMountGroup)
 
 			attachID := getAttachmentName(csiMounter.volumeID, string(csiMounter.driverName), string(plug.host.GetNodeName()))
@@ -1145,7 +1141,6 @@ func Test_csiMountMgr_supportsFSGroup(t *testing.T) {
 		plugin              *csiPlugin
 		driverName          csiDriverName
 		volumeLifecycleMode storage.VolumeLifecycleMode
-		fsGroupPolicy       storage.FSGroupPolicy
 		volumeID            string
 		specVolumeID        string
 		readOnly            bool
@@ -1271,7 +1266,6 @@ func Test_csiMountMgr_supportsFSGroup(t *testing.T) {
 				plugin:              tt.fields.plugin,
 				driverName:          tt.fields.driverName,
 				volumeLifecycleMode: tt.fields.volumeLifecycleMode,
-				fsGroupPolicy:       tt.fields.fsGroupPolicy,
 				volumeID:            tt.fields.volumeID,
 				specVolumeID:        tt.fields.specVolumeID,
 				readOnly:            tt.fields.readOnly,
@@ -1289,3 +1283,64 @@ func Test_csiMountMgr_supportsFSGroup(t *testing.T) {
 		})
 	}
 }
+
+func TestMounterGetFSGroupPolicy(t *testing.T) {
+	defaultPolicy := storage.ReadWriteOnceWithFSTypeFSGroupPolicy
+	testCases := []struct {
+		name                  string
+		defined               bool
+		expectedFSGroupPolicy storage.FSGroupPolicy
+	}{
+		{
+			name:                  "no FSGroupPolicy defined, expect default",
+			defined:               false,
+			expectedFSGroupPolicy: storage.ReadWriteOnceWithFSTypeFSGroupPolicy,
+		},
+		{
+			name:                  "File FSGroupPolicy defined, expect File",
+			defined:               true,
+			expectedFSGroupPolicy: storage.FileFSGroupPolicy,
+		},
+		{
+			name:                  "None FSGroupPolicy defined, expect None",
+			defined:               true,
+			expectedFSGroupPolicy: storage.NoneFSGroupPolicy,
+		},
+	}
+	for _, tc := range testCases {
+		t.Logf("testing: %s", tc.name)
+		// Define the driver and set the FSGroupPolicy
+		driver := getTestCSIDriver(testDriver, nil, nil, nil)
+		if tc.defined {
+			driver.Spec.FSGroupPolicy = &tc.expectedFSGroupPolicy
+		} else {
+			driver.Spec.FSGroupPolicy = &defaultPolicy
+		}
+
+		// Create the client and register the resources
+		fakeClient := fakeclient.NewSimpleClientset(driver)
+		plug, tmpDir := newTestPlugin(t, fakeClient)
+		defer os.RemoveAll(tmpDir)
+		registerFakePlugin(testDriver, "endpoint", []string{"1.3.0"}, t)
+
+		mounter, err := plug.NewMounter(
+			volume.NewSpecFromPersistentVolume(makeTestPV("test.vol.id", 20, testDriver, "testvol-handle1"), true),
+			&corev1.Pod{ObjectMeta: meta.ObjectMeta{UID: "1", Namespace: testns}},
+			volume.VolumeOptions{},
+		)
+		if err != nil {
+			t.Fatalf("Error creating a new mounter: %s", err)
+		}
+
+		csiMounter := mounter.(*csiMountMgr)
+
+		// Check to see if we can obtain the CSIDriver, along with examining its FSGroupPolicy
+		fsGroup, err := csiMounter.getFSGroupPolicy()
+		if err != nil {
+			t.Fatalf("Error attempting to obtain FSGroupPolicy: %v", err)
+		}
+		if fsGroup != *driver.Spec.FSGroupPolicy {
+			t.Fatalf("FSGroupPolicy doesn't match expected value: %v, %v", fsGroup, tc.expectedFSGroupPolicy)
+		}
+	}
+}
diff --git a/pkg/volume/csi/csi_plugin.go b/pkg/volume/csi/csi_plugin.go
index ced4d3de0f9..dfc053ede4c 100644
--- a/pkg/volume/csi/csi_plugin.go
+++ b/pkg/volume/csi/csi_plugin.go
@@ -388,11 +388,6 @@ func (p *csiPlugin) NewMounter(
 		return nil, err
 	}
 
-	fsGroupPolicy, err := p.getFSGroupPolicy(driverName)
-	if err != nil {
-		return nil, err
-	}
-
 	k8s := p.host.GetKubeClient()
 	if k8s == nil {
 		return nil, errors.New(log("failed to get a kubernetes client"))
@@ -411,7 +406,6 @@ func (p *csiPlugin) NewMounter(
 		podUID:              pod.UID,
 		driverName:          csiDriverName(driverName),
 		volumeLifecycleMode: volumeLifecycleMode,
-		fsGroupPolicy:       fsGroupPolicy,
 		volumeID:            volumeHandle,
 		specVolumeID:        spec.Name(),
 		readOnly:            readOnly,
@@ -834,34 +828,6 @@ func (p *csiPlugin) getVolumeLifecycleMode(spec *volume.Spec) (storage.VolumeLif
 	return storage.VolumeLifecyclePersistent, nil
 }
 
-// getFSGroupPolicy returns if the CSI driver supports a volume in the given mode.
-// An error indicates that it isn't supported and explains why.
-func (p *csiPlugin) getFSGroupPolicy(driver string) (storage.FSGroupPolicy, error) {
-	// Retrieve CSIDriver. It's not an error if that isn't
-	// possible (we don't have the lister if CSIDriverRegistry is
-	// disabled) or the driver isn't found (CSIDriver is
-	// optional)
-	var csiDriver *storage.CSIDriver
-	if p.csiDriverLister != nil {
-		c, err := p.getCSIDriver(driver)
-		if err != nil && !apierrors.IsNotFound(err) {
-			// Some internal error.
- return storage.ReadWriteOnceWithFSTypeFSGroupPolicy, err - } - csiDriver = c - } - - // If the csiDriver isn't defined, return the default behavior - if csiDriver == nil { - return storage.ReadWriteOnceWithFSTypeFSGroupPolicy, nil - } - // If the csiDriver exists but the fsGroupPolicy isn't defined, return an error - if csiDriver.Spec.FSGroupPolicy == nil || *csiDriver.Spec.FSGroupPolicy == "" { - return storage.ReadWriteOnceWithFSTypeFSGroupPolicy, errors.New(log("expected valid fsGroupPolicy, received nil value or empty string")) - } - return *csiDriver.Spec.FSGroupPolicy, nil -} - func (p *csiPlugin) getPublishContext(client clientset.Interface, handle, driver, nodeName string) (map[string]string, error) { skip, err := p.skipAttach(driver) if err != nil { diff --git a/pkg/volume/csi/csi_plugin_test.go b/pkg/volume/csi/csi_plugin_test.go index 534de164131..3cc12517103 100644 --- a/pkg/volume/csi/csi_plugin_test.go +++ b/pkg/volume/csi/csi_plugin_test.go @@ -159,57 +159,6 @@ func TestPluginGetPluginName(t *testing.T) { } } -func TestPluginGetFSGroupPolicy(t *testing.T) { - defaultPolicy := storage.ReadWriteOnceWithFSTypeFSGroupPolicy - testCases := []struct { - name string - defined bool - expectedFSGroupPolicy storage.FSGroupPolicy - }{ - { - name: "no FSGroupPolicy defined, expect default", - defined: false, - expectedFSGroupPolicy: storage.ReadWriteOnceWithFSTypeFSGroupPolicy, - }, - { - name: "File FSGroupPolicy defined, expect File", - defined: true, - expectedFSGroupPolicy: storage.FileFSGroupPolicy, - }, - { - name: "None FSGroupPolicy defined, expected None", - defined: true, - expectedFSGroupPolicy: storage.NoneFSGroupPolicy, - }, - } - for _, tc := range testCases { - t.Logf("testing: %s", tc.name) - // Define the driver and set the FSGroupPolicy - driver := getTestCSIDriver(testDriver, nil, nil, nil) - if tc.defined { - driver.Spec.FSGroupPolicy = &tc.expectedFSGroupPolicy - } else { - driver.Spec.FSGroupPolicy = &defaultPolicy - } - - // Create the client and register the resources - fakeClient := fakeclient.NewSimpleClientset(driver) - plug, tmpDir := newTestPlugin(t, fakeClient) - defer os.RemoveAll(tmpDir) - registerFakePlugin(testDriver, "endpoint", []string{"1.3.0"}, t) - - // Check to see if we can obtain the CSIDriver, along with examining its FSGroupPolicy - fsGroup, err := plug.getFSGroupPolicy(testDriver) - if err != nil { - t.Fatalf("Error attempting to obtain FSGroupPolicy: %v", err) - } - if fsGroup != *driver.Spec.FSGroupPolicy { - t.Fatalf("FSGroupPolicy doesn't match expected value: %v, %v", fsGroup, tc.expectedFSGroupPolicy) - } - } - -} - func TestPluginGetVolumeName(t *testing.T) { plug, tmpDir := newTestPlugin(t, nil) defer os.RemoveAll(tmpDir)
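
Note for reviewers: the decision table implemented by supportsVolumeLifecycleMode
is easy to lose in the diff above, and the reason for wrapping its failure in
NewTransientOperationFailure in SetUpAt (so the kubelet retries once the API
server and the CSIDriver object become reachable) is only stated in the commit
message. The following is a minimal, self-contained Go sketch of that logic,
using local stand-ins for the k8s.io/api/storage/v1 types instead of the real
imports; supportsMode and haveCSIDriver are illustrative names, not part of
the patches.

package main

import "fmt"

// VolumeLifecycleMode mirrors storage.VolumeLifecycleMode for this sketch.
type VolumeLifecycleMode string

const (
	VolumeLifecyclePersistent VolumeLifecycleMode = "Persistent"
	VolumeLifecycleEphemeral  VolumeLifecycleMode = "Ephemeral"
)

// supportsMode reproduces the switch in supportsVolumeLifecycleMode: modes
// plays the role of CSIDriver.Spec.VolumeLifecycleModes, and haveCSIDriver is
// false when no CSIDriver object could be retrieved (lister disabled, or the
// optional CSIDriver object does not exist).
func supportsMode(haveCSIDriver bool, modes []VolumeLifecycleMode, mode VolumeLifecycleMode) error {
	switch {
	case !haveCSIDriver && mode == VolumeLifecyclePersistent:
		// No information, but that's okay for persistent volumes (and only those).
		return nil
	case !haveCSIDriver:
		return fmt.Errorf("volume mode %q not supported (no CSIDriver object)", mode)
	default:
		for _, m := range modes { // containsVolumeMode
			if m == mode {
				return nil // explicitly listed
			}
		}
		return fmt.Errorf("volume mode %q not supported (only supports %q)", mode, modes)
	}
}

func main() {
	// No CSIDriver object: persistent volumes pass, ephemeral inline volumes fail.
	fmt.Println(supportsMode(false, nil, VolumeLifecyclePersistent)) // <nil>
	fmt.Println(supportsMode(false, nil, VolumeLifecycleEphemeral))  // error

	// CSIDriver only lists "Persistent": an ephemeral inline volume fails.
	// After these patches, the failure surfaces from SetUpAt as a transient
	// error instead of NewMounter rejecting the volume outright.
	fmt.Println(supportsMode(true,
		[]VolumeLifecycleMode{VolumeLifecyclePersistent},
		VolumeLifecycleEphemeral))
}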