From 04183005e4816259244f5f94a2f42805ad232287 Mon Sep 17 00:00:00 2001
From: Jan Safranek
Date: Mon, 24 Oct 2022 14:48:07 +0200
Subject: [PATCH] Move lifecycle checks from NewMounter to SetUpAt

The checks need to get CSIDriver from the API server and the API server
may not be available when NewMounter is called during volume
reconstruction.
---
 pkg/volume/csi/csi_mounter.go      | 52 +++++++++++++++++++++++-
 pkg/volume/csi/csi_mounter_test.go | 46 +++++++++++----------
 pkg/volume/csi/csi_plugin.go       | 49 -----------------------
 pkg/volume/csi/csi_plugin_test.go  | 20 +---------
 pkg/volume/csi/csi_test.go         | 64 +++++++++++++++---------------
 5 files changed, 112 insertions(+), 119 deletions(-)

diff --git a/pkg/volume/csi/csi_mounter.go b/pkg/volume/csi/csi_mounter.go
index 6c67b6ac86b..bf268ee1e7b 100644
--- a/pkg/volume/csi/csi_mounter.go
+++ b/pkg/volume/csi/csi_mounter.go
@@ -107,8 +107,8 @@ func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error
 	csi, err := c.csiClientGetter.Get()
 	if err != nil {
 		return volumetypes.NewTransientOperationFailure(log("mounter.SetUpAt failed to get CSI client: %v", err))
-
 	}
+
 	ctx, cancel := createCSIOperationContext(c.spec, csiTimeout)
 	defer cancel()
 
@@ -117,6 +117,12 @@ func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error
 		return errors.New(log("mounter.SetupAt failed to get CSI persistent source: %v", err))
 	}
 
+	// Check CSIDriver.Spec.Mode to ensure that the CSI driver
+	// supports the current volumeLifecycleMode.
+	if err := c.supportsVolumeLifecycleMode(); err != nil {
+		return volumetypes.NewTransientOperationFailure(log("mounter.SetupAt failed to check volume lifecycle mode: %s", err))
+	}
+
 	driverName := c.driverName
 	volumeHandle := c.volumeID
 	readOnly := c.readOnly
@@ -435,6 +441,50 @@ func (c *csiMountMgr) supportsFSGroup(fsType string, fsGroup *int64, driverPolic
 	return false
 }
 
+// supportsVolumeLifecycleMode checks whether the CSI driver supports a volume in the given mode.
+// An error indicates that it isn't supported and explains why.
+func (c *csiMountMgr) supportsVolumeLifecycleMode() error {
+	// Retrieve CSIDriver. It's not an error if that isn't
+	// possible (we don't have the lister if CSIDriverRegistry is
+	// disabled) or the driver isn't found (CSIDriver is
+	// optional), but then only persistent volumes are supported.
+	var csiDriver *storage.CSIDriver
+	driver := string(c.driverName)
+	if c.plugin.csiDriverLister != nil {
+		c, err := c.plugin.getCSIDriver(driver)
+		if err != nil && !apierrors.IsNotFound(err) {
+			// Some internal error.
+			return err
+		}
+		csiDriver = c
+	}
+
+	// The right response depends on whether we have information
+	// about the driver and the volume mode.
+	switch {
+	case csiDriver == nil && c.volumeLifecycleMode == storage.VolumeLifecyclePersistent:
+		// No information, but that's okay for persistent volumes (and only those).
+		return nil
+	case csiDriver == nil:
+		return fmt.Errorf("volume mode %q not supported by driver %s (no CSIDriver object)", c.volumeLifecycleMode, driver)
+	case containsVolumeMode(csiDriver.Spec.VolumeLifecycleModes, c.volumeLifecycleMode):
+		// Explicitly listed.
+		return nil
+	default:
+		return fmt.Errorf("volume mode %q not supported by driver %s (only supports %q)", c.volumeLifecycleMode, driver, csiDriver.Spec.VolumeLifecycleModes)
+	}
+}
+
+// containsVolumeMode checks whether the given volume mode is listed.
+func containsVolumeMode(modes []storage.VolumeLifecycleMode, mode storage.VolumeLifecycleMode) bool { + for _, m := range modes { + if m == mode { + return true + } + } + return false +} + // isDirMounted returns the !notMounted result from IsLikelyNotMountPoint check func isDirMounted(plug *csiPlugin, dir string) (bool, error) { mounter := plug.host.GetMounter(plug.GetPluginName()) diff --git a/pkg/volume/csi/csi_mounter_test.go b/pkg/volume/csi/csi_mounter_test.go index c260302ae0b..acb97bf9b9a 100644 --- a/pkg/volume/csi/csi_mounter_test.go +++ b/pkg/volume/csi/csi_mounter_test.go @@ -311,20 +311,21 @@ func TestMounterSetUpSimple(t *testing.T) { defer os.RemoveAll(tmpDir) testCases := []struct { - name string - podUID types.UID - mode storage.VolumeLifecycleMode - fsType string - options []string - spec func(string, []string) *volume.Spec - shouldFail bool + name string + podUID types.UID + mode storage.VolumeLifecycleMode + fsType string + options []string + spec func(string, []string) *volume.Spec + newMounterShouldFail bool + setupShouldFail bool }{ { - name: "setup with ephemeral source", - podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())), - mode: storage.VolumeLifecycleEphemeral, - fsType: "ext4", - shouldFail: true, + name: "setup with ephemeral source", + podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())), + mode: storage.VolumeLifecycleEphemeral, + fsType: "ext4", + setupShouldFail: true, spec: func(fsType string, options []string) *volume.Spec { volSrc := makeTestVol("pv1", testDriver) volSrc.CSI.FSType = &fsType @@ -352,9 +353,9 @@ func TestMounterSetUpSimple(t *testing.T) { }, }, { - name: "setup with missing spec", - shouldFail: true, - spec: func(fsType string, options []string) *volume.Spec { return nil }, + name: "setup with missing spec", + newMounterShouldFail: true, + spec: func(fsType string, options []string) *volume.Spec { return nil }, }, } @@ -366,11 +367,11 @@ func TestMounterSetUpSimple(t *testing.T) { &corev1.Pod{ObjectMeta: meta.ObjectMeta{UID: tc.podUID, Namespace: testns}}, volume.VolumeOptions{}, ) - if tc.shouldFail && err != nil { + if tc.newMounterShouldFail && err != nil { t.Log(err) return } - if !tc.shouldFail && err != nil { + if !tc.newMounterShouldFail && err != nil { t.Fatal("unexpected error:", err) } if mounter == nil { @@ -380,7 +381,7 @@ func TestMounterSetUpSimple(t *testing.T) { csiMounter := mounter.(*csiMountMgr) csiMounter.csiClient = setupClient(t, true) - if csiMounter.volumeLifecycleMode != storage.VolumeLifecyclePersistent { + if csiMounter.volumeLifecycleMode != tc.mode { t.Fatal("unexpected volume mode: ", csiMounter.volumeLifecycleMode) } @@ -392,8 +393,13 @@ func TestMounterSetUpSimple(t *testing.T) { } // Mounter.SetUp() - if err := csiMounter.SetUp(volume.MounterArgs{}); err != nil { - t.Fatalf("mounter.Setup failed: %v", err) + err = csiMounter.SetUp(volume.MounterArgs{}) + if tc.setupShouldFail && err != nil { + t.Log(err) + return + } + if !tc.setupShouldFail && err != nil { + t.Fatal("unexpected error:", err) } // ensure call went all the way diff --git a/pkg/volume/csi/csi_plugin.go b/pkg/volume/csi/csi_plugin.go index efe48cfb73b..ced4d3de0f9 100644 --- a/pkg/volume/csi/csi_plugin.go +++ b/pkg/volume/csi/csi_plugin.go @@ -388,12 +388,6 @@ func (p *csiPlugin) NewMounter( return nil, err } - // Check CSIDriver.Spec.Mode to ensure that the CSI driver - // supports the current volumeLifecycleMode. 
- if err := p.supportsVolumeLifecycleMode(driverName, volumeLifecycleMode); err != nil { - return nil, err - } - fsGroupPolicy, err := p.getFSGroupPolicy(driverName) if err != nil { return nil, err @@ -822,49 +816,6 @@ func (p *csiPlugin) getCSIDriver(driver string) (*storage.CSIDriver, error) { return csiDriver, err } -// supportsVolumeMode checks whether the CSI driver supports a volume in the given mode. -// An error indicates that it isn't supported and explains why. -func (p *csiPlugin) supportsVolumeLifecycleMode(driver string, volumeMode storage.VolumeLifecycleMode) error { - // Retrieve CSIDriver. It's not an error if that isn't - // possible (we don't have the lister if CSIDriverRegistry is - // disabled) or the driver isn't found (CSIDriver is - // optional), but then only persistent volumes are supported. - var csiDriver *storage.CSIDriver - if p.csiDriverLister != nil { - c, err := p.getCSIDriver(driver) - if err != nil && !apierrors.IsNotFound(err) { - // Some internal error. - return err - } - csiDriver = c - } - - // The right response depends on whether we have information - // about the driver and the volume mode. - switch { - case csiDriver == nil && volumeMode == storage.VolumeLifecyclePersistent: - // No information, but that's okay for persistent volumes (and only those). - return nil - case csiDriver == nil: - return fmt.Errorf("volume mode %q not supported by driver %s (no CSIDriver object)", volumeMode, driver) - case containsVolumeMode(csiDriver.Spec.VolumeLifecycleModes, volumeMode): - // Explicitly listed. - return nil - default: - return fmt.Errorf("volume mode %q not supported by driver %s (only supports %q)", volumeMode, driver, csiDriver.Spec.VolumeLifecycleModes) - } -} - -// containsVolumeMode checks whether the given volume mode is listed. -func containsVolumeMode(modes []storage.VolumeLifecycleMode, mode storage.VolumeLifecycleMode) bool { - for _, m := range modes { - if m == mode { - return true - } - } - return false -} - // getVolumeLifecycleMode returns the mode for the specified spec: {persistent|ephemeral}. // 1) If mode cannot be determined, it will default to "persistent". 
// 2) If Mode cannot be resolved to either {persistent | ephemeral}, an error is returned diff --git a/pkg/volume/csi/csi_plugin_test.go b/pkg/volume/csi/csi_plugin_test.go index fbabaf4b37a..534de164131 100644 --- a/pkg/volume/csi/csi_plugin_test.go +++ b/pkg/volume/csi/csi_plugin_test.go @@ -372,7 +372,6 @@ func TestPluginConstructVolumeSpec(t *testing.T) { specVolID string volHandle string podUID types.UID - shouldFail bool }{ { name: "construct spec1 from original persistent spec", @@ -388,13 +387,6 @@ func TestPluginConstructVolumeSpec(t *testing.T) { originSpec: volume.NewSpecFromPersistentVolume(makeTestPV("spec2", 20, testDriver, "handle2"), true), podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())), }, - { - name: "construct spec from original volume spec", - specVolID: "volspec", - originSpec: volume.NewSpecFromVolume(makeTestVol("spec2", testDriver)), - podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())), - shouldFail: true, // csi inline off - }, } registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t) @@ -406,11 +398,7 @@ func TestPluginConstructVolumeSpec(t *testing.T) { &api.Pod{ObjectMeta: meta.ObjectMeta{UID: tc.podUID, Namespace: testns}}, volume.VolumeOptions{}, ) - if tc.shouldFail && err != nil { - t.Log(err) - return - } - if !tc.shouldFail && err != nil { + if err != nil { t.Fatal(err) } if mounter == nil { @@ -617,7 +605,7 @@ func TestPluginNewMounter(t *testing.T) { podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())), namespace: "test-ns2", volumeLifecycleMode: storage.VolumeLifecycleEphemeral, - shouldFail: true, // csi inline not enabled + shouldFail: false, // NewMounter works with disabled inline volumes }, { name: "mounter from no spec provided", @@ -772,10 +760,6 @@ func TestPluginNewMounterWithInline(t *testing.T) { // Some test cases are meant to fail because their input data is broken. shouldFail := test.shouldFail - // Others fail if the driver does not support the volume mode. - if !containsVolumeMode(supported, test.volumeLifecycleMode) { - shouldFail = true - } if shouldFail != (err != nil) { t.Fatal("Unexpected error:", err) } diff --git a/pkg/volume/csi/csi_test.go b/pkg/volume/csi/csi_test.go index 7aa5e39c46a..55ea7c23822 100644 --- a/pkg/volume/csi/csi_test.go +++ b/pkg/volume/csi/csi_test.go @@ -43,16 +43,16 @@ func TestCSI_VolumeAll(t *testing.T) { defaultFSGroupPolicy := storage.ReadWriteOnceWithFSTypeFSGroupPolicy tests := []struct { - name string - specName string - driver string - volName string - specFunc func(specName, driver, volName string) *volume.Spec - podFunc func() *api.Pod - isInline bool - shouldFail bool - driverSpec *storage.CSIDriverSpec - watchTimeout time.Duration + name string + specName string + driver string + volName string + specFunc func(specName, driver, volName string) *volume.Spec + podFunc func() *api.Pod + isInline bool + findPluginShouldFail bool + driverSpec *storage.CSIDriverSpec + watchTimeout time.Duration }{ { name: "PersistentVolume", @@ -102,7 +102,6 @@ func TestCSI_VolumeAll(t *testing.T) { VolumeLifecycleModes: []storage.VolumeLifecycleMode{storage.VolumeLifecycleEphemeral}, FSGroupPolicy: &defaultFSGroupPolicy, }, - shouldFail: true, }, { name: "ephemeral inline supported", @@ -169,6 +168,7 @@ func TestCSI_VolumeAll(t *testing.T) { // This means the driver *cannot* handle the inline volume because // the default is "persistent". 
VolumeLifecycleModes: nil, + FSGroupPolicy: &defaultFSGroupPolicy, }, }, { @@ -186,6 +186,7 @@ func TestCSI_VolumeAll(t *testing.T) { driverSpec: &storage.CSIDriverSpec{ // This means the driver *cannot* handle the inline volume. VolumeLifecycleModes: []storage.VolumeLifecycleMode{storage.VolumeLifecyclePersistent}, + FSGroupPolicy: &defaultFSGroupPolicy, }, }, { @@ -200,7 +201,7 @@ func TestCSI_VolumeAll(t *testing.T) { podUID := types.UID(fmt.Sprintf("%08X", rand.Uint64())) return &api.Pod{ObjectMeta: meta.ObjectMeta{UID: podUID, Namespace: testns}} }, - shouldFail: true, + findPluginShouldFail: true, }, { name: "incomplete spec", @@ -214,7 +215,7 @@ func TestCSI_VolumeAll(t *testing.T) { podUID := types.UID(fmt.Sprintf("%08X", rand.Uint64())) return &api.Pod{ObjectMeta: meta.ObjectMeta{UID: podUID, Namespace: testns}} }, - shouldFail: true, + findPluginShouldFail: true, }, } @@ -277,7 +278,7 @@ func TestCSI_VolumeAll(t *testing.T) { t.Log("csiTest.VolumeAll Attaching volume...") attachPlug, err := attachDetachPlugMgr.FindAttachablePluginBySpec(volSpec) if err != nil { - if !test.shouldFail { + if !test.findPluginShouldFail { t.Fatalf("csiTest.VolumeAll PluginManager.FindAttachablePluginBySpec failed: %v", err) } else { t.Log("csiTest.VolumeAll failed: ", err) @@ -398,22 +399,6 @@ func TestCSI_VolumeAll(t *testing.T) { } mounter, err := volPlug.NewMounter(volSpec, pod, volume.VolumeOptions{}) - if test.isInline && (test.driverSpec == nil || !containsVolumeMode(test.driverSpec.VolumeLifecycleModes, storage.VolumeLifecycleEphemeral)) { - // This *must* fail because a CSIDriver.Spec.VolumeLifecycleModes entry "ephemeral" - // is required. - if err == nil || mounter != nil { - t.Fatalf("csiTest.VolumeAll volPlugin.NewMounter should have failed for inline volume due to lack of support for inline volumes, got: %+v, %s", mounter, err) - } - return - } - if !test.isInline && test.driverSpec != nil && !containsVolumeMode(test.driverSpec.VolumeLifecycleModes, storage.VolumeLifecyclePersistent) { - // This *must* fail because a CSIDriver.Spec.VolumeLifecycleModes entry "persistent" - // is required when a driver object is available. - if err == nil || mounter != nil { - t.Fatalf("csiTest.VolumeAll volPlugin.NewMounter should have failed for persistent volume due to lack of support for persistent volumes, got: %+v, %s", mounter, err) - } - return - } if err != nil || mounter == nil { t.Fatalf("csiTest.VolumeAll volPlugin.NewMounter is nil or error: %s", err) } @@ -427,7 +412,24 @@ func TestCSI_VolumeAll(t *testing.T) { csiMounter.csiClient = csiClient var mounterArgs volume.MounterArgs mounterArgs.FsGroup = fsGroup - if err := csiMounter.SetUp(mounterArgs); err != nil { + err = csiMounter.SetUp(mounterArgs) + if test.isInline && (test.driverSpec == nil || !containsVolumeMode(test.driverSpec.VolumeLifecycleModes, storage.VolumeLifecycleEphemeral)) { + // This *must* fail because a CSIDriver.Spec.VolumeLifecycleModes entry "ephemeral" + // is required. + if err == nil { + t.Fatalf("csiTest.VolumeAll volPlugin.NewMounter should have failed for inline volume due to lack of support for inline volumes, got: %+v, %s", mounter, err) + } + return + } + if !test.isInline && test.driverSpec != nil && !containsVolumeMode(test.driverSpec.VolumeLifecycleModes, storage.VolumeLifecyclePersistent) { + // This *must* fail because a CSIDriver.Spec.VolumeLifecycleModes entry "persistent" + // is required when a driver object is available. 
+ if err == nil { + t.Fatalf("csiTest.VolumeAll volPlugin.NewMounter should have failed for persistent volume due to lack of support for persistent volumes, got: %+v, %s", mounter, err) + } + return + } + if err != nil { t.Fatalf("csiTest.VolumeAll mounter.Setup(fsGroup) failed: %s", err) } t.Log("csiTest.VolumeAll mounter.Setup(fsGroup) done OK")
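
Below, as a supplement to the patch, is a standalone sketch of the decision table that the relocated supportsVolumeLifecycleMode applies once SetUpAt calls it: with no CSIDriver object only persistent volumes are accepted, otherwise the requested mode must appear in Spec.VolumeLifecycleModes. The names used here (volumeLifecycleMode, driverInfo, supportsMode, "hypothetical.csi.example.com") are simplified stand-ins rather than the k8s.io/api/storage/v1 types; only the switch logic mirrors the moved code.

// Standalone sketch of the lifecycle-mode check that this patch moves into
// csiMountMgr.SetUpAt. The types below are simplified stand-ins for the
// k8s.io/api/storage/v1 types used by the real code.
package main

import "fmt"

type volumeLifecycleMode string

const (
	volumeLifecyclePersistent volumeLifecycleMode = "Persistent"
	volumeLifecycleEphemeral  volumeLifecycleMode = "Ephemeral"
)

// driverInfo stands in for a CSIDriver object; a nil *driverInfo plays the
// role of "no CSIDriver object found / lister unavailable".
type driverInfo struct {
	volumeLifecycleModes []volumeLifecycleMode
}

func containsMode(modes []volumeLifecycleMode, mode volumeLifecycleMode) bool {
	for _, m := range modes {
		if m == mode {
			return true
		}
	}
	return false
}

// supportsMode mirrors the switch in csiMountMgr.supportsVolumeLifecycleMode:
// without a CSIDriver object only persistent volumes are accepted; with one,
// the requested mode must appear in Spec.VolumeLifecycleModes.
func supportsMode(driver string, d *driverInfo, mode volumeLifecycleMode) error {
	switch {
	case d == nil && mode == volumeLifecyclePersistent:
		return nil
	case d == nil:
		return fmt.Errorf("volume mode %q not supported by driver %s (no CSIDriver object)", mode, driver)
	case containsMode(d.volumeLifecycleModes, mode):
		return nil
	default:
		return fmt.Errorf("volume mode %q not supported by driver %s (only supports %q)", mode, driver, d.volumeLifecycleModes)
	}
}

func main() {
	// No CSIDriver object: persistent volumes pass, inline ephemeral ones do not.
	fmt.Println(supportsMode("hypothetical.csi.example.com", nil, volumeLifecyclePersistent))
	fmt.Println(supportsMode("hypothetical.csi.example.com", nil, volumeLifecycleEphemeral))

	// CSIDriver that only lists Ephemeral: persistent volumes are rejected.
	d := &driverInfo{volumeLifecycleModes: []volumeLifecycleMode{volumeLifecycleEphemeral}}
	fmt.Println(supportsMode("hypothetical.csi.example.com", d, volumeLifecyclePersistent))
}

Running the sketch prints <nil> for the persistent/no-CSIDriver case and an error for the other two, which is the behavior the new SetUpAt check wraps in a transient operation failure so the kubelet retries once the API server is reachable again.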