Move lifecycle checks from NewMounter to SetUpAt
The checks need to get the CSIDriver object from the API server, and the API server may not be reachable when NewMounter is called during volume reconstruction.
This commit is contained in:
parent 7ad4b04632
commit 04183005e4
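A rough, standalone sketch of the idea behind this change (the names here are simplified stand-ins, not the real kubelet types): running the lifecycle-mode check inside SetUpAt lets a failed CSIDriver lookup surface as a transient error that the caller retries later, instead of failing NewMounter outright while the API server is unreachable during volume reconstruction.

// Standalone sketch only; transientError stands in for volumetypes.TransientOperationFailure.
package main

import (
	"errors"
	"fmt"
)

// transientError marks an operation that is safe to retry as a whole later.
type transientError struct{ msg string }

func (e transientError) Error() string { return e.msg }

// checkLifecycleMode stands in for csiMountMgr.supportsVolumeLifecycleMode:
// it needs the CSIDriver object, which comes from the API server.
func checkLifecycleMode(apiServerUp bool) error {
	if !apiServerUp {
		return errors.New("cannot get CSIDriver: API server unreachable")
	}
	return nil
}

// setUpAt sketches the new placement of the check inside SetUpAt.
func setUpAt(apiServerUp bool) error {
	if err := checkLifecycleMode(apiServerUp); err != nil {
		// Wrapping as transient means the kubelet retries the mount later.
		return transientError{msg: fmt.Sprintf("failed to check volume lifecycle mode: %s", err)}
	}
	return nil // actual NodePublishVolume call elided
}

func main() {
	for _, up := range []bool{false, true} {
		err := setUpAt(up)
		var te transientError
		fmt.Printf("apiServerUp=%v err=%v retryable=%v\n", up, err, errors.As(err, &te))
	}
}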
@@ -107,8 +107,8 @@ func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error
 	csi, err := c.csiClientGetter.Get()
 	if err != nil {
 		return volumetypes.NewTransientOperationFailure(log("mounter.SetUpAt failed to get CSI client: %v", err))
 	}
 
 	ctx, cancel := createCSIOperationContext(c.spec, csiTimeout)
 	defer cancel()
 
@@ -117,6 +117,12 @@ func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error
 		return errors.New(log("mounter.SetupAt failed to get CSI persistent source: %v", err))
 	}
 
+	// Check CSIDriver.Spec.Mode to ensure that the CSI driver
+	// supports the current volumeLifecycleMode.
+	if err := c.supportsVolumeLifecycleMode(); err != nil {
+		return volumetypes.NewTransientOperationFailure(log("mounter.SetupAt failed to check volume lifecycle mode: %s", err))
+	}
+
 	driverName := c.driverName
 	volumeHandle := c.volumeID
 	readOnly := c.readOnly
@@ -435,6 +441,50 @@ func (c *csiMountMgr) supportsFSGroup(fsType string, fsGroup *int64, driverPolic
 	return false
 }
 
+// supportsVolumeLifecycleMode checks whether the CSI driver supports a volume in the given mode.
+// An error indicates that it isn't supported and explains why.
+func (c *csiMountMgr) supportsVolumeLifecycleMode() error {
+	// Retrieve CSIDriver. It's not an error if that isn't
+	// possible (we don't have the lister if CSIDriverRegistry is
+	// disabled) or the driver isn't found (CSIDriver is
+	// optional), but then only persistent volumes are supported.
+	var csiDriver *storage.CSIDriver
+	driver := string(c.driverName)
+	if c.plugin.csiDriverLister != nil {
+		c, err := c.plugin.getCSIDriver(driver)
+		if err != nil && !apierrors.IsNotFound(err) {
+			// Some internal error.
+			return err
+		}
+		csiDriver = c
+	}
+
+	// The right response depends on whether we have information
+	// about the driver and the volume mode.
+	switch {
+	case csiDriver == nil && c.volumeLifecycleMode == storage.VolumeLifecyclePersistent:
+		// No information, but that's okay for persistent volumes (and only those).
+		return nil
+	case csiDriver == nil:
+		return fmt.Errorf("volume mode %q not supported by driver %s (no CSIDriver object)", c.volumeLifecycleMode, driver)
+	case containsVolumeMode(csiDriver.Spec.VolumeLifecycleModes, c.volumeLifecycleMode):
+		// Explicitly listed.
+		return nil
+	default:
+		return fmt.Errorf("volume mode %q not supported by driver %s (only supports %q)", c.volumeLifecycleMode, driver, csiDriver.Spec.VolumeLifecycleModes)
+	}
+}
+
+// containsVolumeMode checks whether the given volume mode is listed.
+func containsVolumeMode(modes []storage.VolumeLifecycleMode, mode storage.VolumeLifecycleMode) bool {
+	for _, m := range modes {
+		if m == mode {
+			return true
+		}
+	}
+	return false
+}
+
 // isDirMounted returns the !notMounted result from IsLikelyNotMountPoint check
 func isDirMounted(plug *csiPlugin, dir string) (bool, error) {
 	mounter := plug.host.GetMounter(plug.GetPluginName())
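For reference, a standalone sketch of the decision table that the new supportsVolumeLifecycleMode implements (the types here are local stand-ins for the storage API types so the snippet compiles on its own): without a CSIDriver object only persistent volumes are allowed; with one, the requested mode must be listed explicitly in Spec.VolumeLifecycleModes.

package main

import "fmt"

// VolumeLifecycleMode is a local stand-in for storage.VolumeLifecycleMode.
type VolumeLifecycleMode string

const (
	VolumeLifecyclePersistent VolumeLifecycleMode = "Persistent"
	VolumeLifecycleEphemeral  VolumeLifecycleMode = "Ephemeral"
)

// containsVolumeMode mirrors the helper added above: true if mode is listed.
func containsVolumeMode(modes []VolumeLifecycleMode, mode VolumeLifecycleMode) bool {
	for _, m := range modes {
		if m == mode {
			return true
		}
	}
	return false
}

// supportsVolumeLifecycleMode reproduces the switch from the hunk above.
// haveCSIDriver is false (and listedModes nil) when no CSIDriver object exists.
func supportsVolumeLifecycleMode(haveCSIDriver bool, listedModes []VolumeLifecycleMode, mode VolumeLifecycleMode) error {
	switch {
	case !haveCSIDriver && mode == VolumeLifecyclePersistent:
		// No information, but that's okay for persistent volumes (and only those).
		return nil
	case !haveCSIDriver:
		return fmt.Errorf("volume mode %q not supported (no CSIDriver object)", mode)
	case containsVolumeMode(listedModes, mode):
		return nil
	default:
		return fmt.Errorf("volume mode %q not supported (only supports %q)", mode, listedModes)
	}
}

func main() {
	// Ephemeral inline volume without a CSIDriver object: rejected.
	fmt.Println(supportsVolumeLifecycleMode(false, nil, VolumeLifecycleEphemeral))
	// Persistent volume without a CSIDriver object: allowed.
	fmt.Println(supportsVolumeLifecycleMode(false, nil, VolumeLifecyclePersistent))
	// Ephemeral volume with a CSIDriver that lists it: allowed.
	fmt.Println(supportsVolumeLifecycleMode(true, []VolumeLifecycleMode{VolumeLifecycleEphemeral}, VolumeLifecycleEphemeral))
}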
@@ -311,20 +311,21 @@ func TestMounterSetUpSimple(t *testing.T) {
 	defer os.RemoveAll(tmpDir)
 
 	testCases := []struct {
-		name       string
-		podUID     types.UID
-		mode       storage.VolumeLifecycleMode
-		fsType     string
-		options    []string
-		spec       func(string, []string) *volume.Spec
-		shouldFail bool
+		name                 string
+		podUID               types.UID
+		mode                 storage.VolumeLifecycleMode
+		fsType               string
+		options              []string
+		spec                 func(string, []string) *volume.Spec
+		newMounterShouldFail bool
+		setupShouldFail      bool
 	}{
 		{
-			name:       "setup with ephemeral source",
-			podUID:     types.UID(fmt.Sprintf("%08X", rand.Uint64())),
-			mode:       storage.VolumeLifecycleEphemeral,
-			fsType:     "ext4",
-			shouldFail: true,
+			name:            "setup with ephemeral source",
+			podUID:          types.UID(fmt.Sprintf("%08X", rand.Uint64())),
+			mode:            storage.VolumeLifecycleEphemeral,
+			fsType:          "ext4",
+			setupShouldFail: true,
 			spec: func(fsType string, options []string) *volume.Spec {
 				volSrc := makeTestVol("pv1", testDriver)
 				volSrc.CSI.FSType = &fsType
@@ -352,9 +353,9 @@ func TestMounterSetUpSimple(t *testing.T) {
 			},
 		},
 		{
-			name:       "setup with missing spec",
-			shouldFail: true,
-			spec:       func(fsType string, options []string) *volume.Spec { return nil },
+			name:                 "setup with missing spec",
+			newMounterShouldFail: true,
+			spec:                 func(fsType string, options []string) *volume.Spec { return nil },
 		},
 	}
 
@@ -366,11 +367,11 @@ func TestMounterSetUpSimple(t *testing.T) {
 				&corev1.Pod{ObjectMeta: meta.ObjectMeta{UID: tc.podUID, Namespace: testns}},
 				volume.VolumeOptions{},
 			)
-			if tc.shouldFail && err != nil {
+			if tc.newMounterShouldFail && err != nil {
 				t.Log(err)
 				return
 			}
-			if !tc.shouldFail && err != nil {
+			if !tc.newMounterShouldFail && err != nil {
 				t.Fatal("unexpected error:", err)
 			}
 			if mounter == nil {
@@ -380,7 +381,7 @@ func TestMounterSetUpSimple(t *testing.T) {
 			csiMounter := mounter.(*csiMountMgr)
 			csiMounter.csiClient = setupClient(t, true)
 
-			if csiMounter.volumeLifecycleMode != storage.VolumeLifecyclePersistent {
+			if csiMounter.volumeLifecycleMode != tc.mode {
 				t.Fatal("unexpected volume mode: ", csiMounter.volumeLifecycleMode)
 			}
 
@@ -392,8 +393,13 @@ func TestMounterSetUpSimple(t *testing.T) {
 			}
 
 			// Mounter.SetUp()
-			if err := csiMounter.SetUp(volume.MounterArgs{}); err != nil {
-				t.Fatalf("mounter.Setup failed: %v", err)
+			err = csiMounter.SetUp(volume.MounterArgs{})
+			if tc.setupShouldFail && err != nil {
+				t.Log(err)
+				return
+			}
+			if !tc.setupShouldFail && err != nil {
+				t.Fatal("unexpected error:", err)
 			}
 
 			// ensure call went all the way
@@ -388,12 +388,6 @@ func (p *csiPlugin) NewMounter(
 		return nil, err
 	}
 
-	// Check CSIDriver.Spec.Mode to ensure that the CSI driver
-	// supports the current volumeLifecycleMode.
-	if err := p.supportsVolumeLifecycleMode(driverName, volumeLifecycleMode); err != nil {
-		return nil, err
-	}
-
 	fsGroupPolicy, err := p.getFSGroupPolicy(driverName)
 	if err != nil {
 		return nil, err
@@ -822,49 +816,6 @@ func (p *csiPlugin) getCSIDriver(driver string) (*storage.CSIDriver, error) {
 	return csiDriver, err
 }
 
-// supportsVolumeMode checks whether the CSI driver supports a volume in the given mode.
-// An error indicates that it isn't supported and explains why.
-func (p *csiPlugin) supportsVolumeLifecycleMode(driver string, volumeMode storage.VolumeLifecycleMode) error {
-	// Retrieve CSIDriver. It's not an error if that isn't
-	// possible (we don't have the lister if CSIDriverRegistry is
-	// disabled) or the driver isn't found (CSIDriver is
-	// optional), but then only persistent volumes are supported.
-	var csiDriver *storage.CSIDriver
-	if p.csiDriverLister != nil {
-		c, err := p.getCSIDriver(driver)
-		if err != nil && !apierrors.IsNotFound(err) {
-			// Some internal error.
-			return err
-		}
-		csiDriver = c
-	}
-
-	// The right response depends on whether we have information
-	// about the driver and the volume mode.
-	switch {
-	case csiDriver == nil && volumeMode == storage.VolumeLifecyclePersistent:
-		// No information, but that's okay for persistent volumes (and only those).
-		return nil
-	case csiDriver == nil:
-		return fmt.Errorf("volume mode %q not supported by driver %s (no CSIDriver object)", volumeMode, driver)
-	case containsVolumeMode(csiDriver.Spec.VolumeLifecycleModes, volumeMode):
-		// Explicitly listed.
-		return nil
-	default:
-		return fmt.Errorf("volume mode %q not supported by driver %s (only supports %q)", volumeMode, driver, csiDriver.Spec.VolumeLifecycleModes)
-	}
-}
-
-// containsVolumeMode checks whether the given volume mode is listed.
-func containsVolumeMode(modes []storage.VolumeLifecycleMode, mode storage.VolumeLifecycleMode) bool {
-	for _, m := range modes {
-		if m == mode {
-			return true
-		}
-	}
-	return false
-}
-
 // getVolumeLifecycleMode returns the mode for the specified spec: {persistent|ephemeral}.
 // 1) If mode cannot be determined, it will default to "persistent".
 // 2) If Mode cannot be resolved to either {persistent | ephemeral}, an error is returned
@@ -372,7 +372,6 @@ func TestPluginConstructVolumeSpec(t *testing.T) {
 		specVolID  string
 		volHandle  string
 		podUID     types.UID
-		shouldFail bool
 	}{
 		{
 			name: "construct spec1 from original persistent spec",
@@ -388,13 +387,6 @@ func TestPluginConstructVolumeSpec(t *testing.T) {
 			originSpec: volume.NewSpecFromPersistentVolume(makeTestPV("spec2", 20, testDriver, "handle2"), true),
 			podUID:     types.UID(fmt.Sprintf("%08X", rand.Uint64())),
 		},
-		{
-			name:       "construct spec from original volume spec",
-			specVolID:  "volspec",
-			originSpec: volume.NewSpecFromVolume(makeTestVol("spec2", testDriver)),
-			podUID:     types.UID(fmt.Sprintf("%08X", rand.Uint64())),
-			shouldFail: true, // csi inline off
-		},
 	}
 
 	registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t)
@@ -406,11 +398,7 @@ func TestPluginConstructVolumeSpec(t *testing.T) {
 			&api.Pod{ObjectMeta: meta.ObjectMeta{UID: tc.podUID, Namespace: testns}},
 			volume.VolumeOptions{},
 		)
-		if tc.shouldFail && err != nil {
-			t.Log(err)
-			return
-		}
-		if !tc.shouldFail && err != nil {
+		if err != nil {
 			t.Fatal(err)
 		}
 		if mounter == nil {
@@ -617,7 +605,7 @@ func TestPluginNewMounter(t *testing.T) {
 			podUID:              types.UID(fmt.Sprintf("%08X", rand.Uint64())),
 			namespace:           "test-ns2",
 			volumeLifecycleMode: storage.VolumeLifecycleEphemeral,
-			shouldFail:          true, // csi inline not enabled
+			shouldFail:          false, // NewMounter works with disabled inline volumes
 		},
 		{
 			name: "mounter from no spec provided",
@@ -772,10 +760,6 @@ func TestPluginNewMounterWithInline(t *testing.T) {
 
 			// Some test cases are meant to fail because their input data is broken.
 			shouldFail := test.shouldFail
-			// Others fail if the driver does not support the volume mode.
-			if !containsVolumeMode(supported, test.volumeLifecycleMode) {
-				shouldFail = true
-			}
 			if shouldFail != (err != nil) {
 				t.Fatal("Unexpected error:", err)
 			}
@@ -43,16 +43,16 @@ func TestCSI_VolumeAll(t *testing.T) {
 	defaultFSGroupPolicy := storage.ReadWriteOnceWithFSTypeFSGroupPolicy
 
 	tests := []struct {
-		name         string
-		specName     string
-		driver       string
-		volName      string
-		specFunc     func(specName, driver, volName string) *volume.Spec
-		podFunc      func() *api.Pod
-		isInline     bool
-		shouldFail   bool
-		driverSpec   *storage.CSIDriverSpec
-		watchTimeout time.Duration
+		name                 string
+		specName             string
+		driver               string
+		volName              string
+		specFunc             func(specName, driver, volName string) *volume.Spec
+		podFunc              func() *api.Pod
+		isInline             bool
+		findPluginShouldFail bool
+		driverSpec           *storage.CSIDriverSpec
+		watchTimeout         time.Duration
 	}{
 		{
 			name: "PersistentVolume",
@@ -102,7 +102,6 @@ func TestCSI_VolumeAll(t *testing.T) {
 				VolumeLifecycleModes: []storage.VolumeLifecycleMode{storage.VolumeLifecycleEphemeral},
 				FSGroupPolicy:        &defaultFSGroupPolicy,
 			},
-			shouldFail: true,
 		},
 		{
 			name: "ephemeral inline supported",
@@ -169,6 +168,7 @@ func TestCSI_VolumeAll(t *testing.T) {
 				// This means the driver *cannot* handle the inline volume because
 				// the default is "persistent".
 				VolumeLifecycleModes: nil,
+				FSGroupPolicy:        &defaultFSGroupPolicy,
 			},
 		},
 		{
@@ -186,6 +186,7 @@ func TestCSI_VolumeAll(t *testing.T) {
 			driverSpec: &storage.CSIDriverSpec{
 				// This means the driver *cannot* handle the inline volume.
 				VolumeLifecycleModes: []storage.VolumeLifecycleMode{storage.VolumeLifecyclePersistent},
+				FSGroupPolicy:        &defaultFSGroupPolicy,
 			},
 		},
 		{
@@ -200,7 +201,7 @@ func TestCSI_VolumeAll(t *testing.T) {
 				podUID := types.UID(fmt.Sprintf("%08X", rand.Uint64()))
 				return &api.Pod{ObjectMeta: meta.ObjectMeta{UID: podUID, Namespace: testns}}
 			},
-			shouldFail: true,
+			findPluginShouldFail: true,
 		},
 		{
 			name: "incomplete spec",
@@ -214,7 +215,7 @@ func TestCSI_VolumeAll(t *testing.T) {
 				podUID := types.UID(fmt.Sprintf("%08X", rand.Uint64()))
 				return &api.Pod{ObjectMeta: meta.ObjectMeta{UID: podUID, Namespace: testns}}
 			},
-			shouldFail: true,
+			findPluginShouldFail: true,
 		},
 	}
 
@@ -277,7 +278,7 @@ func TestCSI_VolumeAll(t *testing.T) {
 			t.Log("csiTest.VolumeAll Attaching volume...")
 			attachPlug, err := attachDetachPlugMgr.FindAttachablePluginBySpec(volSpec)
 			if err != nil {
-				if !test.shouldFail {
+				if !test.findPluginShouldFail {
 					t.Fatalf("csiTest.VolumeAll PluginManager.FindAttachablePluginBySpec failed: %v", err)
 				} else {
 					t.Log("csiTest.VolumeAll failed: ", err)
@@ -398,22 +399,6 @@ func TestCSI_VolumeAll(t *testing.T) {
 			}
 
 			mounter, err := volPlug.NewMounter(volSpec, pod, volume.VolumeOptions{})
-			if test.isInline && (test.driverSpec == nil || !containsVolumeMode(test.driverSpec.VolumeLifecycleModes, storage.VolumeLifecycleEphemeral)) {
-				// This *must* fail because a CSIDriver.Spec.VolumeLifecycleModes entry "ephemeral"
-				// is required.
-				if err == nil || mounter != nil {
-					t.Fatalf("csiTest.VolumeAll volPlugin.NewMounter should have failed for inline volume due to lack of support for inline volumes, got: %+v, %s", mounter, err)
-				}
-				return
-			}
-			if !test.isInline && test.driverSpec != nil && !containsVolumeMode(test.driverSpec.VolumeLifecycleModes, storage.VolumeLifecyclePersistent) {
-				// This *must* fail because a CSIDriver.Spec.VolumeLifecycleModes entry "persistent"
-				// is required when a driver object is available.
-				if err == nil || mounter != nil {
-					t.Fatalf("csiTest.VolumeAll volPlugin.NewMounter should have failed for persistent volume due to lack of support for persistent volumes, got: %+v, %s", mounter, err)
-				}
-				return
-			}
 			if err != nil || mounter == nil {
 				t.Fatalf("csiTest.VolumeAll volPlugin.NewMounter is nil or error: %s", err)
 			}
@@ -427,7 +412,24 @@ func TestCSI_VolumeAll(t *testing.T) {
 			csiMounter.csiClient = csiClient
 			var mounterArgs volume.MounterArgs
 			mounterArgs.FsGroup = fsGroup
-			if err := csiMounter.SetUp(mounterArgs); err != nil {
+			err = csiMounter.SetUp(mounterArgs)
+			if test.isInline && (test.driverSpec == nil || !containsVolumeMode(test.driverSpec.VolumeLifecycleModes, storage.VolumeLifecycleEphemeral)) {
+				// This *must* fail because a CSIDriver.Spec.VolumeLifecycleModes entry "ephemeral"
+				// is required.
+				if err == nil {
+					t.Fatalf("csiTest.VolumeAll volPlugin.NewMounter should have failed for inline volume due to lack of support for inline volumes, got: %+v, %s", mounter, err)
+				}
+				return
+			}
+			if !test.isInline && test.driverSpec != nil && !containsVolumeMode(test.driverSpec.VolumeLifecycleModes, storage.VolumeLifecyclePersistent) {
+				// This *must* fail because a CSIDriver.Spec.VolumeLifecycleModes entry "persistent"
+				// is required when a driver object is available.
+				if err == nil {
+					t.Fatalf("csiTest.VolumeAll volPlugin.NewMounter should have failed for persistent volume due to lack of support for persistent volumes, got: %+v, %s", mounter, err)
+				}
+				return
+			}
+			if err != nil {
 				t.Fatalf("csiTest.VolumeAll mounter.Setup(fsGroup) failed: %s", err)
 			}
 			t.Log("csiTest.VolumeAll mounter.Setup(fsGroup) done OK")