Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-28 14:07:14 +00:00)

Merge pull request #103244 from verult/fsgroup-to-csi
Delegate applying FSGroup to CSI driver through NodeStageVolume and NodePublishVolume

This change is contained in commit 15222a599f.
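In short: with the new DelegateFSGroupToCSIDriver feature gate enabled, kubelet asks the CSI driver (via NodeGetCapabilities) whether it advertises the VOLUME_MOUNT_GROUP node capability; if it does, the pod's fsGroup is handed to the driver in NodeStageVolume and NodePublishVolume instead of kubelet recursively applying ownership itself. The sketch below is an illustrative, self-contained condensation of that decision logic; decideVolumeMountGroup is a hypothetical helper written for this summary, not a function from the PR.

package main

import (
	"fmt"
	"strconv"
)

// decideVolumeMountGroup mirrors the gating added in csiAttacher.MountDevice and
// csiMountMgr.SetUpAt below: the fsGroup is forwarded only when the feature gate
// is on AND the driver reports VOLUME_MOUNT_GROUP. (Illustrative helper only.)
func decideVolumeMountGroup(gateEnabled, driverSupportsVolumeMountGroup bool, fsGroup *int64) string {
	if !gateEnabled || !driverSupportsVolumeMountGroup || fsGroup == nil {
		// Empty string: the driver receives no VolumeMountGroup and kubelet
		// falls back to its own SetVolumeOwnership path.
		return ""
	}
	// CSI carries the GID as a decimal string in VolumeCapability_MountVolume.VolumeMountGroup.
	return strconv.FormatInt(*fsGroup, 10)
}

func main() {
	gid := int64(3000)
	fmt.Println(decideVolumeMountGroup(true, true, &gid))  // "3000"
	fmt.Println(decideVolumeMountGroup(true, false, &gid)) // ""
}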
@@ -544,7 +544,7 @@ func (attacher *testPluginAttacher) GetDeviceMountPath(spec *volume.Spec) (strin
 	return "", nil
 }
 
-func (attacher *testPluginAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
+func (attacher *testPluginAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string, _ volume.DeviceMounterArgs) error {
 	attacher.pluginLock.Lock()
 	defer attacher.pluginLock.Unlock()
 	if spec == nil {
@@ -369,6 +369,13 @@ const (
 	// a volume in a Pod.
 	ConfigurableFSGroupPolicy featuregate.Feature = "ConfigurableFSGroupPolicy"
 
+	// owner: @gnufied, @verult
+	// alpha: v1.22
+	// If supported by the CSI driver, delegates the role of applying FSGroup to
+	// the driver by passing FSGroup through the NodeStageVolume and
+	// NodePublishVolume calls.
+	DelegateFSGroupToCSIDriver featuregate.Feature = "DelegateFSGroupToCSIDriver"
+
 	// owner: @RobertKrawitz, @derekwaynecarr
 	// beta: v1.15
 	// GA: v1.20
@@ -860,6 +867,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
 	PodSecurity: {Default: false, PreRelease: featuregate.Alpha},
 	ReadWriteOncePod: {Default: false, PreRelease: featuregate.Alpha},
 	CSRDuration: {Default: true, PreRelease: featuregate.Beta},
+	DelegateFSGroupToCSIDriver: {Default: false, PreRelease: featuregate.Alpha},
 
 	// inherited features from generic apiserver, relisted here to get a conflict if it is changed
 	// unintentionally on either side:
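Because the gate registers as Alpha with Default: false, the delegation path stays dormant until an operator opts in via the standard component flag, e.g. --feature-gates=DelegateFSGroupToCSIDriver=true on the kubelet.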
@@ -206,7 +206,7 @@ func (attacher *awsElasticBlockStoreAttacher) GetDeviceMountPath(
 }
 
 // FIXME: this method can be further pruned.
-func (attacher *awsElasticBlockStoreAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
+func (attacher *awsElasticBlockStoreAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string, _ volume.DeviceMounterArgs) error {
 	mounter := attacher.host.GetMounter(awsElasticBlockStorePluginName)
 	notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
 	if err != nil {
@@ -202,7 +202,7 @@ func (a *azureDiskAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error
 	return makeGlobalPDPath(a.plugin.host, volumeSource.DataDiskURI, isManagedDisk)
 }
 
-func (a *azureDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
+func (a *azureDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string, _ volume.DeviceMounterArgs) error {
 	mounter := a.plugin.host.GetMounter(azureDataDiskPluginName)
 	notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
 
@@ -268,7 +268,7 @@ func (attacher *cinderDiskAttacher) GetDeviceMountPath(
 }
 
 // FIXME: this method can be further pruned.
-func (attacher *cinderDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
+func (attacher *cinderDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string, _ volume.DeviceMounterArgs) error {
 	mounter := attacher.host.GetMounter(cinderVolumePluginName)
 	notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
 	if err != nil {
@@ -27,6 +27,7 @@ import (
 	"time"
 
 	"k8s.io/apimachinery/pkg/util/clock"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/klog/v2"
 
 	v1 "k8s.io/api/core/v1"
@@ -38,6 +39,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/client-go/kubernetes"
+	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/volume"
 	volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
 )
@@ -264,7 +266,7 @@ func (c *csiAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
 	return deviceMountPath, nil
 }
 
-func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
+func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string, deviceMounterArgs volume.DeviceMounterArgs) error {
 	klog.V(4).Infof(log("attacher.MountDevice(%s, %s)", devicePath, deviceMountPath))
 
 	if deviceMountPath == "" {
@@ -365,6 +367,19 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
 		mountOptions = spec.PersistentVolume.Spec.MountOptions
 	}
 
+	var nodeStageFSGroupArg *int64
+	if utilfeature.DefaultFeatureGate.Enabled(features.DelegateFSGroupToCSIDriver) {
+		driverSupportsCSIVolumeMountGroup, err := csi.NodeSupportsVolumeMountGroup(ctx)
+		if err != nil {
+			return volumetypes.NewTransientOperationFailure(log("attacher.MountDevice failed to determine if the node service has VOLUME_MOUNT_GROUP capability: %v", err))
+		}
+
+		if driverSupportsCSIVolumeMountGroup {
+			klog.V(3).Infof("Driver %s supports applying FSGroup (has VOLUME_MOUNT_GROUP node capability). Delegating FSGroup application to the driver through NodeStageVolume.", csiSource.Driver)
+			nodeStageFSGroupArg = deviceMounterArgs.FsGroup
+		}
+	}
+
 	fsType := csiSource.FSType
 	err = csi.NodeStageVolume(ctx,
 		csiSource.VolumeHandle,
@@ -374,7 +389,8 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
 		accessMode,
 		nodeStageSecrets,
 		csiSource.VolumeAttributes,
-		mountOptions)
+		mountOptions,
+		nodeStageFSGroupArg)
 
 	if err != nil {
 		return err
@@ -1068,22 +1068,29 @@ func TestAttacherGetDeviceMountPath(t *testing.T) {
 }
 
 func TestAttacherMountDevice(t *testing.T) {
+	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DelegateFSGroupToCSIDriver, true)()
+
 	pvName := "test-pv"
+	var testFSGroup int64 = 3000
 	nonFinalError := volumetypes.NewUncertainProgressError("")
 	transientError := volumetypes.NewTransientOperationFailure("")
 
 	testCases := []struct {
 		testName string
 		volName string
 		devicePath string
 		deviceMountPath string
 		stageUnstageSet bool
-		shouldFail bool
-		createAttachment bool
-		populateDeviceMountPath bool
-		exitError error
-		spec *volume.Spec
-		watchTimeout time.Duration
+		fsGroup *int64
+		expectedVolumeMountGroup string
+		delegateFSGroupFeatureGate bool
+		driverSupportsVolumeMountGroup bool
+		shouldFail bool
+		createAttachment bool
+		populateDeviceMountPath bool
+		exitError error
+		spec *volume.Spec
+		watchTimeout time.Duration
 	}{
 		{
 			testName: "normal PV",
@@ -1184,6 +1191,57 @@ func TestAttacherMountDevice(t *testing.T) {
 			shouldFail: true,
 			spec: volume.NewSpecFromPersistentVolume(makeTestPV(pvName, 10, testDriver, "test-vol1"), true),
 		},
+		{
+			testName: "fsgroup provided, DelegateFSGroupToCSIDriver feature enabled, driver supports volume mount group; expect fsgroup to be passed to NodeStageVolume",
+			volName: "test-vol1",
+			devicePath: "path1",
+			deviceMountPath: "path2",
+			fsGroup: &testFSGroup,
+			delegateFSGroupFeatureGate: true,
+			driverSupportsVolumeMountGroup: true,
+			expectedVolumeMountGroup: "3000",
+			stageUnstageSet: true,
+			createAttachment: true,
+			spec: volume.NewSpecFromPersistentVolume(makeTestPV(pvName, 10, testDriver, "test-vol1"), false),
+		},
+		{
+			testName: "fsgroup not provided, DelegateFSGroupToCSIDriver feature enabled, driver supports volume mount group; expect fsgroup not to be passed to NodeStageVolume",
+			volName: "test-vol1",
+			devicePath: "path1",
+			deviceMountPath: "path2",
+			delegateFSGroupFeatureGate: true,
+			driverSupportsVolumeMountGroup: true,
+			expectedVolumeMountGroup: "",
+			stageUnstageSet: true,
+			createAttachment: true,
+			spec: volume.NewSpecFromPersistentVolume(makeTestPV(pvName, 10, testDriver, "test-vol1"), false),
+		},
+		{
+			testName: "fsgroup provided, DelegateFSGroupToCSIDriver feature enabled, driver does not support volume mount group; expect fsgroup not to be passed to NodeStageVolume",
+			volName: "test-vol1",
+			devicePath: "path1",
+			deviceMountPath: "path2",
+			fsGroup: &testFSGroup,
+			delegateFSGroupFeatureGate: true,
+			driverSupportsVolumeMountGroup: false,
+			expectedVolumeMountGroup: "",
+			stageUnstageSet: true,
+			createAttachment: true,
+			spec: volume.NewSpecFromPersistentVolume(makeTestPV(pvName, 10, testDriver, "test-vol1"), false),
+		},
+		{
+			testName: "fsgroup provided, DelegateFSGroupToCSIDriver feature disabled, driver supports volume mount group; expect fsgroup not to be passed to NodeStageVolume",
+			volName: "test-vol1",
+			devicePath: "path1",
+			deviceMountPath: "path2",
+			fsGroup: &testFSGroup,
+			delegateFSGroupFeatureGate: false,
+			driverSupportsVolumeMountGroup: true,
+			expectedVolumeMountGroup: "",
+			stageUnstageSet: true,
+			createAttachment: true,
+			spec: volume.NewSpecFromPersistentVolume(makeTestPV(pvName, 10, testDriver, "test-vol1"), false),
+		},
 	}
 
 	for _, tc := range testCases {
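The four added cases walk the feature-gate × driver-capability matrix; only the combination with the gate enabled and VOLUME_MOUNT_GROUP supported expects a non-empty VolumeMountGroup ("3000") to reach NodeStageVolume.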
@@ -1198,6 +1256,8 @@ func TestAttacherMountDevice(t *testing.T) {
 		t.Run(tc.testName, func(t *testing.T) {
 			t.Logf("Running test case: %s", tc.testName)
 
+			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DelegateFSGroupToCSIDriver, tc.delegateFSGroupFeatureGate)()
+
 			// Setup
 			// Create a new attacher
 			fakeClient := fakeclient.NewSimpleClientset()
@@ -1209,7 +1269,7 @@ func TestAttacherMountDevice(t *testing.T) {
 				t.Fatalf("failed to create new attacher: %v", err0)
 			}
 			csiAttacher := getCsiAttacherFromVolumeAttacher(attacher, tc.watchTimeout)
-			csiAttacher.csiClient = setupClient(t, tc.stageUnstageSet)
+			csiAttacher.csiClient = setupClientWithVolumeMountGroup(t, tc.stageUnstageSet, tc.driverSupportsVolumeMountGroup)
 
 			if tc.deviceMountPath != "" {
 				tc.deviceMountPath = filepath.Join(tmpDir, tc.deviceMountPath)
@@ -1247,7 +1307,11 @@ func TestAttacherMountDevice(t *testing.T) {
 			}
 
 			// Run
-			err := csiAttacher.MountDevice(tc.spec, tc.devicePath, tc.deviceMountPath)
+			err := csiAttacher.MountDevice(
+				tc.spec,
+				tc.devicePath,
+				tc.deviceMountPath,
+				volume.DeviceMounterArgs{FsGroup: tc.fsGroup})
 
 			// Verify
 			if err != nil {
@@ -1302,6 +1366,9 @@ func TestAttacherMountDevice(t *testing.T) {
 				if !reflect.DeepEqual(vol.MountFlags, tc.spec.PersistentVolume.Spec.MountOptions) {
 					t.Errorf("expected mount options: %v, got: %v", tc.spec.PersistentVolume.Spec.MountOptions, vol.MountFlags)
 				}
+				if vol.VolumeMountGroup != tc.expectedVolumeMountGroup {
+					t.Errorf("expected volume mount group %q, got: %q", tc.expectedVolumeMountGroup, vol.VolumeMountGroup)
+				}
 			}
 
 			// Verify the deviceMountPath was created by the plugin
@@ -1321,16 +1388,20 @@ func TestAttacherMountDevice(t *testing.T) {
 
 func TestAttacherMountDeviceWithInline(t *testing.T) {
 	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIInlineVolume, true)()
+	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DelegateFSGroupToCSIDriver, true)()
 	pvName := "test-pv"
+	var testFSGroup int64 = 3000
 	testCases := []struct {
 		testName string
 		volName string
 		devicePath string
 		deviceMountPath string
-		stageUnstageSet bool
-		shouldFail bool
-		spec *volume.Spec
-		watchTimeout time.Duration
+		fsGroup *int64
+		expectedVolumeMountGroup string
+		stageUnstageSet bool
+		shouldFail bool
+		spec *volume.Spec
+		watchTimeout time.Duration
 	}{
 		{
 			testName: "normal PV",
@@ -1390,6 +1461,16 @@ func TestAttacherMountDeviceWithInline(t *testing.T) {
 			deviceMountPath: "path2",
 			shouldFail: true,
 		},
+		{
+			testName: "fsgroup set",
+			volName: "test-vol1",
+			devicePath: "path1",
+			deviceMountPath: "path2",
+			fsGroup: &testFSGroup,
+			expectedVolumeMountGroup: "3000",
+			stageUnstageSet: true,
+			spec: volume.NewSpecFromPersistentVolume(makeTestPV(pvName, 10, testDriver, "test-vol1"), false),
+		},
 	}
 
 	for _, tc := range testCases {
@@ -1410,7 +1491,7 @@ func TestAttacherMountDeviceWithInline(t *testing.T) {
 			t.Fatalf("failed to create new attacher: %v", err0)
 		}
 		csiAttacher := getCsiAttacherFromVolumeAttacher(attacher, tc.watchTimeout)
-		csiAttacher.csiClient = setupClient(t, tc.stageUnstageSet)
+		csiAttacher.csiClient = setupClientWithVolumeMountGroup(t, tc.stageUnstageSet, true /* volumeMountGroup */)
 
 		if tc.deviceMountPath != "" {
 			tc.deviceMountPath = filepath.Join(tmpDir, tc.deviceMountPath)
@@ -1435,7 +1516,11 @@ func TestAttacherMountDeviceWithInline(t *testing.T) {
 		}()
 
 		// Run
-		err = csiAttacher.MountDevice(tc.spec, tc.devicePath, tc.deviceMountPath)
+		err = csiAttacher.MountDevice(
+			tc.spec,
+			tc.devicePath,
+			tc.deviceMountPath,
+			volume.DeviceMounterArgs{FsGroup: tc.fsGroup})
 
 		// Verify
 		if err != nil {
@@ -1467,6 +1552,9 @@ func TestAttacherMountDeviceWithInline(t *testing.T) {
 			if vol.Path != tc.deviceMountPath {
 				t.Errorf("expected mount path: %s. got: %s", tc.deviceMountPath, vol.Path)
 			}
+			if vol.VolumeMountGroup != tc.expectedVolumeMountGroup {
+				t.Errorf("expected volume mount group %q, got: %q", tc.expectedVolumeMountGroup, vol.VolumeMountGroup)
+			}
 		}
 
 		wg.Wait()
@@ -193,7 +193,8 @@ func (m *csiBlockMapper) stageVolumeForBlock(
 		accessMode,
 		nodeStageSecrets,
 		csiSource.VolumeAttributes,
-		nil /* MountOptions */)
+		nil, /* MountOptions */
+		nil /* fsGroup */)
 
 	if err != nil {
 		return "", err
@@ -265,7 +266,8 @@ func (m *csiBlockMapper) publishVolumeForBlock(
 		volAttribs,
 		nodePublishSecrets,
 		fsTypeBlockName,
-		[]string{},
+		[]string{}, /* mountOptions */
+		nil, /* fsGroup */
 	)
 
 	if err != nil {
@@ -22,6 +22,7 @@ import (
 	"fmt"
 	"io"
 	"net"
+	"strconv"
 	"sync"
 
 	csipbv1 "github.com/container-storage-interface/spec/lib/go/csi"
@@ -43,6 +44,10 @@ type csiClient interface {
 		maxVolumePerNode int64,
 		accessibleTopology map[string]string,
 		err error)
+
+	// The caller is responsible for checking whether the driver supports
+	// applying FSGroup by calling NodeSupportsVolumeMountGroup().
+	// If the driver does not, fsGroup must be set to nil.
 	NodePublishVolume(
 		ctx context.Context,
 		volumeid string,
@@ -55,13 +60,19 @@ type csiClient interface {
 		secrets map[string]string,
 		fsType string,
 		mountOptions []string,
+		fsGroup *int64,
 	) error
+
 	NodeExpandVolume(ctx context.Context, rsOpts csiResizeOptions) (resource.Quantity, error)
 	NodeUnpublishVolume(
 		ctx context.Context,
 		volID string,
 		targetPath string,
 	) error
+
+	// The caller is responsible for checking whether the driver supports
+	// applying FSGroup by calling NodeSupportsVolumeMountGroup().
+	// If the driver does not, fsGroup must be set to nil.
 	NodeStageVolume(ctx context.Context,
 		volID string,
 		publishVolumeInfo map[string]string,
@@ -71,6 +82,7 @@ type csiClient interface {
 		secrets map[string]string,
 		volumeContext map[string]string,
 		mountOptions []string,
+		fsGroup *int64,
 	) error
 
 	NodeGetVolumeStats(
@@ -83,6 +95,7 @@ type csiClient interface {
 	NodeSupportsNodeExpand(ctx context.Context) (bool, error)
 	NodeSupportsVolumeStats(ctx context.Context) (bool, error)
 	NodeSupportsSingleNodeMultiWriterAccessMode(ctx context.Context) (bool, error)
+	NodeSupportsVolumeMountGroup(ctx context.Context) (bool, error)
 }
 
 // Strongly typed address
@@ -209,6 +222,7 @@ func (c *csiDriverClient) NodePublishVolume(
 	secrets map[string]string,
 	fsType string,
 	mountOptions []string,
+	fsGroup *int64,
 ) error {
 	klog.V(4).Info(log("calling NodePublishVolume rpc [volid=%s,target_path=%s]", volID, targetPath))
 	if volID == "" {
@@ -255,11 +269,15 @@ func (c *csiDriverClient) NodePublishVolume(
 			Block: &csipbv1.VolumeCapability_BlockVolume{},
 		}
 	} else {
+		mountVolume := &csipbv1.VolumeCapability_MountVolume{
+			FsType: fsType,
+			MountFlags: mountOptions,
+		}
+		if fsGroup != nil {
+			mountVolume.VolumeMountGroup = strconv.FormatInt(*fsGroup, 10 /* base */)
+		}
 		req.VolumeCapability.AccessType = &csipbv1.VolumeCapability_Mount{
-			Mount: &csipbv1.VolumeCapability_MountVolume{
-				FsType: fsType,
-				MountFlags: mountOptions,
-			},
+			Mount: mountVolume,
 		}
 	}
 
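Note the type change at the CSI boundary: fsGroup is an *int64 inside kubelet, but VolumeMountGroup is a string field on the CSI MountVolume capability, hence strconv.FormatInt(*fsGroup, 10); an fsGroup of 3000 becomes the "3000" the tests below assert.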
@@ -371,6 +389,7 @@ func (c *csiDriverClient) NodeStageVolume(ctx context.Context,
 	secrets map[string]string,
 	volumeContext map[string]string,
 	mountOptions []string,
+	fsGroup *int64,
 ) error {
 	klog.V(4).Info(log("calling NodeStageVolume rpc [volid=%s,staging_target_path=%s]", volID, stagingTargetPath))
 	if volID == "" {
@@ -412,11 +431,15 @@ func (c *csiDriverClient) NodeStageVolume(ctx context.Context,
 			Block: &csipbv1.VolumeCapability_BlockVolume{},
 		}
 	} else {
+		mountVolume := &csipbv1.VolumeCapability_MountVolume{
+			FsType: fsType,
+			MountFlags: mountOptions,
+		}
+		if fsGroup != nil {
+			mountVolume.VolumeMountGroup = strconv.FormatInt(*fsGroup, 10 /* base */)
+		}
 		req.VolumeCapability.AccessType = &csipbv1.VolumeCapability_Mount{
-			Mount: &csipbv1.VolumeCapability_MountVolume{
-				FsType: fsType,
-				MountFlags: mountOptions,
-			},
+			Mount: mountVolume,
 		}
 	}
 
@@ -454,12 +477,10 @@ func (c *csiDriverClient) NodeUnstageVolume(ctx context.Context, volID, stagingT
 }
 
 func (c *csiDriverClient) NodeSupportsNodeExpand(ctx context.Context) (bool, error) {
-	klog.V(4).Info(log("calling NodeGetCapabilities rpc to determine if Node has EXPAND_VOLUME capability"))
 	return c.nodeSupportsCapability(ctx, csipbv1.NodeServiceCapability_RPC_EXPAND_VOLUME)
 }
 
 func (c *csiDriverClient) NodeSupportsStageUnstage(ctx context.Context) (bool, error) {
-	klog.V(4).Info(log("calling NodeGetCapabilities rpc to determine if NodeSupportsStageUnstage"))
 	return c.nodeSupportsCapability(ctx, csipbv1.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME)
 }
 
@@ -553,12 +574,10 @@ func (c *csiClientGetter) Get() (csiClient, error) {
 }
 
 func (c *csiDriverClient) NodeSupportsVolumeStats(ctx context.Context) (bool, error) {
-	klog.V(5).Info(log("calling NodeGetCapabilities rpc to determine if NodeSupportsVolumeStats"))
 	return c.nodeSupportsCapability(ctx, csipbv1.NodeServiceCapability_RPC_GET_VOLUME_STATS)
 }
 
 func (c *csiDriverClient) NodeSupportsSingleNodeMultiWriterAccessMode(ctx context.Context) (bool, error) {
-	klog.V(4).Info(log("calling NodeGetCapabilities rpc to determine if NodeSupportsSingleNodeMultiWriterAccessMode"))
 	return c.nodeSupportsCapability(ctx, csipbv1.NodeServiceCapability_RPC_SINGLE_NODE_MULTI_WRITER)
 }
 
@@ -637,11 +656,15 @@ func (c *csiDriverClient) NodeGetVolumeStats(ctx context.Context, volID string,
 }
 
 func (c *csiDriverClient) nodeSupportsVolumeCondition(ctx context.Context) (bool, error) {
-	klog.V(5).Info(log("calling NodeGetCapabilities rpc to determine if nodeSupportsVolumeCondition"))
 	return c.nodeSupportsCapability(ctx, csipbv1.NodeServiceCapability_RPC_VOLUME_CONDITION)
 }
 
+func (c *csiDriverClient) NodeSupportsVolumeMountGroup(ctx context.Context) (bool, error) {
+	return c.nodeSupportsCapability(ctx, csipbv1.NodeServiceCapability_RPC_VOLUME_MOUNT_GROUP)
+}
+
 func (c *csiDriverClient) nodeSupportsCapability(ctx context.Context, capabilityType csipbv1.NodeServiceCapability_RPC_Type) (bool, error) {
+	klog.V(4).Info(log("calling NodeGetCapabilities rpc to determine if the node service has %s capability", capabilityType))
 	capabilities, err := c.nodeGetCapabilities(ctx)
 	if err != nil {
 		return false, err
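NodeSupportsVolumeMountGroup rides on the shared nodeSupportsCapability helper, which is also why the per-method klog lines above are deleted: the helper now logs the NodeGetCapabilities call once, parameterized by the capability being probed.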
@@ -23,6 +23,7 @@ import (
 	"os"
 	"path/filepath"
 	"reflect"
+	"strconv"
 	"testing"
 
 	csipbv1 "github.com/container-storage-interface/spec/lib/go/csi"
@@ -72,6 +73,13 @@ func newFakeCsiDriverClientWithVolumeStatsAndCondition(t *testing.T, volumeStats
 	}
 }
 
+func newFakeCsiDriverClientWithVolumeMountGroup(t *testing.T, stagingCapable, volumeMountGroupSet bool) *fakeCsiDriverClient {
+	return &fakeCsiDriverClient{
+		t: t,
+		nodeClient: fake.NewNodeClientWithVolumeMountGroup(stagingCapable, volumeMountGroupSet),
+	}
+}
+
 func (c *fakeCsiDriverClient) NodeGetInfo(ctx context.Context) (
 	nodeID string,
 	maxVolumePerNode int64,
@@ -152,6 +160,7 @@ func (c *fakeCsiDriverClient) NodePublishVolume(
 	secrets map[string]string,
 	fsType string,
 	mountOptions []string,
+	fsGroup *int64,
 ) error {
 	c.t.Log("calling fake.NodePublishVolume...")
 	req := &csipbv1.NodePublishVolumeRequest{
@@ -174,11 +183,15 @@ func (c *fakeCsiDriverClient) NodePublishVolume(
 			Block: &csipbv1.VolumeCapability_BlockVolume{},
 		}
 	} else {
+		mountVolume := &csipbv1.VolumeCapability_MountVolume{
+			FsType: fsType,
+			MountFlags: mountOptions,
+		}
+		if fsGroup != nil {
+			mountVolume.VolumeMountGroup = strconv.FormatInt(*fsGroup, 10 /* base */)
+		}
 		req.VolumeCapability.AccessType = &csipbv1.VolumeCapability_Mount{
-			Mount: &csipbv1.VolumeCapability_MountVolume{
-				FsType: fsType,
-				MountFlags: mountOptions,
-			},
+			Mount: mountVolume,
 		}
 	}
 
@@ -209,6 +222,7 @@ func (c *fakeCsiDriverClient) NodeStageVolume(ctx context.Context,
 	secrets map[string]string,
 	volumeContext map[string]string,
 	mountOptions []string,
+	fsGroup *int64,
 ) error {
 	c.t.Log("calling fake.NodeStageVolume...")
 	req := &csipbv1.NodeStageVolumeRequest{
@@ -228,11 +242,15 @@ func (c *fakeCsiDriverClient) NodeStageVolume(ctx context.Context,
 			Block: &csipbv1.VolumeCapability_BlockVolume{},
 		}
 	} else {
+		mountVolume := &csipbv1.VolumeCapability_MountVolume{
+			FsType: fsType,
+			MountFlags: mountOptions,
+		}
+		if fsGroup != nil {
+			mountVolume.VolumeMountGroup = strconv.FormatInt(*fsGroup, 10 /* base */)
+		}
 		req.VolumeCapability.AccessType = &csipbv1.VolumeCapability_Mount{
-			Mount: &csipbv1.VolumeCapability_MountVolume{
-				FsType: fsType,
-				MountFlags: mountOptions,
-			},
+			Mount: mountVolume,
 		}
 	}
 
@@ -263,6 +281,28 @@ func (c *fakeCsiDriverClient) NodeSupportsStageUnstage(ctx context.Context) (boo
 	return c.nodeSupportsCapability(ctx, csipbv1.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME)
 }
 
+func (c *fakeCsiDriverClient) NodeSupportsVolumeMountGroup(ctx context.Context) (bool, error) {
+	c.t.Log("calling fake.NodeGetCapabilities for NodeSupportsVolumeMountGroup...")
+	req := &csipbv1.NodeGetCapabilitiesRequest{}
+	resp, err := c.nodeClient.NodeGetCapabilities(ctx, req)
+	if err != nil {
+		return false, err
+	}
+
+	capabilities := resp.GetCapabilities()
+
+	volumeMountGroupSet := false
+	if capabilities == nil {
+		return false, nil
+	}
+	for _, capability := range capabilities {
+		if capability.GetRpc().GetType() == csipbv1.NodeServiceCapability_RPC_VOLUME_MOUNT_GROUP {
+			volumeMountGroupSet = true
+		}
+	}
+	return volumeMountGroupSet, nil
+}
+
 func (c *fakeCsiDriverClient) NodeExpandVolume(ctx context.Context, opts csiResizeOptions) (resource.Quantity, error) {
 	c.t.Log("calling fake.NodeExpandVolume")
 	req := &csipbv1.NodeExpandVolumeRequest{
@@ -345,6 +385,10 @@ func setupClientWithVolumeStats(t *testing.T, volumeStatsSet bool) csiClient {
 	return newFakeCsiDriverClientWithVolumeStats(t, volumeStatsSet)
 }
 
+func setupClientWithVolumeMountGroup(t *testing.T, stageUnstageSet bool, volumeMountGroupSet bool) csiClient {
+	return newFakeCsiDriverClientWithVolumeMountGroup(t, stageUnstageSet, volumeMountGroupSet)
+}
+
 func checkErr(t *testing.T, expectedAnError bool, actualError error) {
 	t.Helper()
 
@@ -423,6 +467,8 @@ func TestClientNodeGetInfo(t *testing.T) {
 }
 
 func TestClientNodePublishVolume(t *testing.T) {
+	var testFSGroup int64 = 3000
+
 	tmpDir, err := utiltesting.MkTmpdir("csi-test")
 	if err != nil {
 		t.Fatalf("can't create temp dir: %v", err)
|
|||||||
testPath := filepath.Join(tmpDir, "path")
|
testPath := filepath.Join(tmpDir, "path")
|
||||||
|
|
||||||
testCases := []struct {
|
testCases := []struct {
|
||||||
name string
|
name string
|
||||||
volID string
|
volID string
|
||||||
targetPath string
|
targetPath string
|
||||||
fsType string
|
fsType string
|
||||||
mustFail bool
|
fsGroup *int64
|
||||||
err error
|
expectedVolumeMountGroup string
|
||||||
|
mustFail bool
|
||||||
|
err error
|
||||||
}{
|
}{
|
||||||
{name: "test ok", volID: "vol-test", targetPath: testPath},
|
{name: "test ok", volID: "vol-test", targetPath: testPath},
|
||||||
{name: "missing volID", targetPath: testPath, mustFail: true},
|
{name: "missing volID", targetPath: testPath, mustFail: true},
|
||||||
{name: "missing target path", volID: "vol-test", mustFail: true},
|
{name: "missing target path", volID: "vol-test", mustFail: true},
|
||||||
{name: "bad fs", volID: "vol-test", targetPath: testPath, fsType: "badfs", mustFail: true},
|
{name: "bad fs", volID: "vol-test", targetPath: testPath, fsType: "badfs", mustFail: true},
|
||||||
{name: "grpc error", volID: "vol-test", targetPath: testPath, mustFail: true, err: errors.New("grpc error")},
|
{name: "grpc error", volID: "vol-test", targetPath: testPath, mustFail: true, err: errors.New("grpc error")},
|
||||||
|
{name: "fsgroup", volID: "vol-test", targetPath: testPath, fsGroup: &testFSGroup, expectedVolumeMountGroup: "3000"},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tc := range testCases {
|
for _, tc := range testCases {
|
||||||
t.Logf("test case: %s", tc.name)
|
t.Logf("test case: %s", tc.name)
|
||||||
|
|
||||||
|
nodeClient := fake.NewNodeClient(false /* stagingCapable */)
|
||||||
|
nodeClient.SetNextError(tc.err)
|
||||||
fakeCloser := fake.NewCloser(t)
|
fakeCloser := fake.NewCloser(t)
|
||||||
client := &csiDriverClient{
|
client := &csiDriverClient{
|
||||||
driverName: "Fake Driver Name",
|
driverName: "Fake Driver Name",
|
||||||
nodeV1ClientCreator: func(addr csiAddr, m *MetricsManager) (csipbv1.NodeClient, io.Closer, error) {
|
nodeV1ClientCreator: func(addr csiAddr, m *MetricsManager) (csipbv1.NodeClient, io.Closer, error) {
|
||||||
nodeClient := fake.NewNodeClient(false /* stagingCapable */)
|
|
||||||
nodeClient.SetNextError(tc.err)
|
|
||||||
return nodeClient, fakeCloser, nil
|
return nodeClient, fakeCloser, nil
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@@ -469,9 +519,15 @@ func TestClientNodePublishVolume(t *testing.T) {
 			map[string]string{},
 			tc.fsType,
 			[]string{},
+			tc.fsGroup,
 		)
 		checkErr(t, tc.mustFail, err)
+
+		volumeMountGroup := nodeClient.GetNodePublishedVolumes()[tc.volID].VolumeMountGroup
+		if volumeMountGroup != tc.expectedVolumeMountGroup {
+			t.Errorf("Expected VolumeMountGroup in NodePublishVolumeRequest to be %q, got: %q", tc.expectedVolumeMountGroup, volumeMountGroup)
+		}
 
 		if !tc.mustFail {
 			fakeCloser.Check()
 		}
@@ -521,6 +577,8 @@ func TestClientNodeUnpublishVolume(t *testing.T) {
 }
 
 func TestClientNodeStageVolume(t *testing.T) {
+	var testFSGroup int64 = 3000
+
 	tmpDir, err := utiltesting.MkTmpdir("csi-test")
 	if err != nil {
 		t.Fatalf("can't create temp dir: %v", err)
@@ -529,30 +587,34 @@ func TestClientNodeStageVolume(t *testing.T) {
 	testPath := filepath.Join(tmpDir, "/test/path")
 
 	testCases := []struct {
 		name string
 		volID string
 		stagingTargetPath string
 		fsType string
 		secrets map[string]string
 		mountOptions []string
-		mustFail bool
-		err error
+		fsGroup *int64
+		expectedVolumeMountGroup string
+		mustFail bool
+		err error
 	}{
 		{name: "test ok", volID: "vol-test", stagingTargetPath: testPath, fsType: "ext4", mountOptions: []string{"unvalidated"}},
 		{name: "missing volID", stagingTargetPath: testPath, mustFail: true},
 		{name: "missing target path", volID: "vol-test", mustFail: true},
 		{name: "bad fs", volID: "vol-test", stagingTargetPath: testPath, fsType: "badfs", mustFail: true},
 		{name: "grpc error", volID: "vol-test", stagingTargetPath: testPath, mustFail: true, err: errors.New("grpc error")},
+		{name: "fsgroup", volID: "vol-test", stagingTargetPath: testPath, fsGroup: &testFSGroup, expectedVolumeMountGroup: "3000"},
 	}
 
 	for _, tc := range testCases {
 		t.Logf("Running test case: %s", tc.name)
+
+		nodeClient := fake.NewNodeClientWithVolumeMountGroup(true /* stagingCapable */, true /* volumeMountGroupCapable */)
+		nodeClient.SetNextError(tc.err)
 		fakeCloser := fake.NewCloser(t)
 		client := &csiDriverClient{
 			driverName: "Fake Driver Name",
 			nodeV1ClientCreator: func(addr csiAddr, m *MetricsManager) (csipbv1.NodeClient, io.Closer, error) {
-				nodeClient := fake.NewNodeClient(false /* stagingCapable */)
-				nodeClient.SetNextError(tc.err)
 				return nodeClient, fakeCloser, nil
 			},
 		}
||||||
@ -567,9 +629,15 @@ func TestClientNodeStageVolume(t *testing.T) {
|
|||||||
tc.secrets,
|
tc.secrets,
|
||||||
map[string]string{"attr0": "val0"},
|
map[string]string{"attr0": "val0"},
|
||||||
tc.mountOptions,
|
tc.mountOptions,
|
||||||
|
tc.fsGroup,
|
||||||
)
|
)
|
||||||
checkErr(t, tc.mustFail, err)
|
checkErr(t, tc.mustFail, err)
|
||||||
|
|
||||||
|
volumeMountGroup := nodeClient.GetNodeStagedVolumes()[tc.volID].VolumeMountGroup
|
||||||
|
if volumeMountGroup != tc.expectedVolumeMountGroup {
|
||||||
|
t.Errorf("expected VolumeMountGroup parameter in NodePublishVolumeRequest to be %q, got: %q", tc.expectedVolumeMountGroup, volumeMountGroup)
|
||||||
|
}
|
||||||
|
|
||||||
if !tc.mustFail {
|
if !tc.mustFail {
|
||||||
fakeCloser.Check()
|
fakeCloser.Check()
|
||||||
}
|
}
|
||||||
@@ -621,6 +689,81 @@ func TestClientNodeUnstageVolume(t *testing.T) {
 	}
 }
 
+func TestClientNodeSupportsStageUnstage(t *testing.T) {
+	testClientNodeSupportsCapabilities(t,
+		func(client *csiDriverClient) (bool, error) {
+			return client.NodeSupportsStageUnstage(context.Background())
+		},
+		func(stagingCapable bool) *fake.NodeClient {
+			// Creates a staging-capable client
+			return fake.NewNodeClient(stagingCapable)
+		})
+}
+
+func TestClientNodeSupportsNodeExpand(t *testing.T) {
+	testClientNodeSupportsCapabilities(t,
+		func(client *csiDriverClient) (bool, error) {
+			return client.NodeSupportsNodeExpand(context.Background())
+		},
+		func(expansionCapable bool) *fake.NodeClient {
+			return fake.NewNodeClientWithExpansion(false /* stageCapable */, expansionCapable)
+		})
+}
+
+func TestClientNodeSupportsVolumeStats(t *testing.T) {
+	testClientNodeSupportsCapabilities(t,
+		func(client *csiDriverClient) (bool, error) {
+			return client.NodeSupportsVolumeStats(context.Background())
+		},
+		func(volumeStatsCapable bool) *fake.NodeClient {
+			return fake.NewNodeClientWithVolumeStats(volumeStatsCapable)
+		})
+}
+
+func TestClientNodeSupportsVolumeMountGroup(t *testing.T) {
+	testClientNodeSupportsCapabilities(t,
+		func(client *csiDriverClient) (bool, error) {
+			return client.NodeSupportsVolumeMountGroup(context.Background())
+		},
+		func(volumeMountGroupCapable bool) *fake.NodeClient {
+			return fake.NewNodeClientWithVolumeMountGroup(false /* stagingCapable */, volumeMountGroupCapable)
+		})
+}
+
+func testClientNodeSupportsCapabilities(
+	t *testing.T,
+	capabilityMethodToTest func(*csiDriverClient) (bool, error),
+	nodeClientGenerator func(bool) *fake.NodeClient) {
+
+	testCases := []struct {
+		name string
+		capable bool
+	}{
+		{name: "positive", capable: true},
+		{name: "negative", capable: false},
+	}
+
+	for _, tc := range testCases {
+		t.Logf("Running test case: %s", tc.name)
+		fakeCloser := fake.NewCloser(t)
+		client := &csiDriverClient{
+			driverName: "Fake Driver Name",
+			nodeV1ClientCreator: func(addr csiAddr, m *MetricsManager) (csipbv1.NodeClient, io.Closer, error) {
+				nodeClient := nodeClientGenerator(tc.capable)
+				return nodeClient, fakeCloser, nil
+			},
+		}
+
+		got, _ := capabilityMethodToTest(client)
+
+		if got != tc.capable {
+			t.Errorf("Expected capability support to be %v, got: %v", tc.capable, got)
+		}
+
+		fakeCloser.Check()
+	}
+}
+
 func TestNodeExpandVolume(t *testing.T) {
 	testCases := []struct {
 		name string
|
@ -235,6 +235,20 @@ func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error
|
|||||||
}
|
}
|
||||||
volAttribs = mergeMap(volAttribs, serviceAccountTokenAttrs)
|
volAttribs = mergeMap(volAttribs, serviceAccountTokenAttrs)
|
||||||
|
|
||||||
|
driverSupportsCSIVolumeMountGroup := false
|
||||||
|
var nodePublishFSGroupArg *int64
|
||||||
|
if utilfeature.DefaultFeatureGate.Enabled(features.DelegateFSGroupToCSIDriver) {
|
||||||
|
driverSupportsCSIVolumeMountGroup, err = csi.NodeSupportsVolumeMountGroup(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return volumetypes.NewTransientOperationFailure(log("mounter.SetUpAt failed to determine if the node service has VOLUME_MOUNT_GROUP capability: %v", err))
|
||||||
|
}
|
||||||
|
|
||||||
|
if driverSupportsCSIVolumeMountGroup {
|
||||||
|
klog.V(3).Infof("Driver %s supports applying FSGroup (has VOLUME_MOUNT_GROUP node capability). Delegating FSGroup application to the driver through NodePublishVolume.", c.driverName)
|
||||||
|
nodePublishFSGroupArg = mounterArgs.FsGroup
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
err = csi.NodePublishVolume(
|
err = csi.NodePublishVolume(
|
||||||
ctx,
|
ctx,
|
||||||
volumeHandle,
|
volumeHandle,
|
||||||
@@ -247,6 +261,7 @@ func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error
 		nodePublishSecrets,
 		fsType,
 		mountOptions,
+		nodePublishFSGroupArg,
 	)
 
 	if err != nil {
@@ -264,7 +279,9 @@ func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error
 		klog.V(2).Info(log("error checking for SELinux support: %s", err))
 	}
 
-	if c.supportsFSGroup(fsType, mounterArgs.FsGroup, c.fsGroupPolicy) {
+	if !driverSupportsCSIVolumeMountGroup && c.supportsFSGroup(fsType, mounterArgs.FsGroup, c.fsGroupPolicy) {
+		// Driver doesn't support applying FSGroup. Kubelet must apply it instead.
+
 		// fullPluginName helps to distinguish different driver from csi plugin
 		err := volume.SetVolumeOwnership(c, mounterArgs.FsGroup, mounterArgs.FSGroupChangePolicy, util.FSGroupCompleteHook(c.plugin, c.spec))
 		if err != nil {
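This is the publish-side counterpart of the MountDevice change: SetUpAt forwards mounterArgs.FsGroup through NodePublishVolume when delegation applies, and the new !driverSupportsCSIVolumeMountGroup guard keeps kubelet's recursive SetVolumeOwnership fallback in play only for drivers without the capability.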
@@ -649,14 +649,17 @@ func TestMounterSetUpWithFSGroup(t *testing.T) {
 	defer os.RemoveAll(tmpDir)
 
 	testCases := []struct {
 		name string
 		accessModes []api.PersistentVolumeAccessMode
 		readOnly bool
 		fsType string
 		setFsGroup bool
 		fsGroup int64
 		driverFSGroupPolicy bool
 		supportMode storage.FSGroupPolicy
+		delegateFSGroupFeatureGate bool
+		driverSupportsVolumeMountGroup bool
+		expectedFSGroupInNodePublish string
 	}{
 		{
 			name: "default fstype, with no fsgroup (should not apply fsgroup)",
@@ -785,12 +788,48 @@ func TestMounterSetUpWithFSGroup(t *testing.T) {
 			driverFSGroupPolicy: true,
 			supportMode: storage.FileFSGroupPolicy,
 		},
+		{
+			name: "fsgroup provided, DelegateFSGroupToCSIDriver feature enabled, driver supports volume mount group; expect fsgroup to be passed to NodePublishVolume",
+			fsType: "ext4",
+			setFsGroup: true,
+			fsGroup: 3000,
+			delegateFSGroupFeatureGate: true,
+			driverSupportsVolumeMountGroup: true,
+			expectedFSGroupInNodePublish: "3000",
+		},
+		{
+			name: "fsgroup not provided, DelegateFSGroupToCSIDriver feature enabled, driver supports volume mount group; expect fsgroup not to be passed to NodePublishVolume",
+			fsType: "ext4",
+			setFsGroup: false,
+			delegateFSGroupFeatureGate: true,
+			driverSupportsVolumeMountGroup: true,
+			expectedFSGroupInNodePublish: "",
+		},
+		{
+			name: "fsgroup provided, DelegateFSGroupToCSIDriver feature enabled, driver does not support volume mount group; expect fsgroup not to be passed to NodePublishVolume",
+			fsType: "ext4",
+			setFsGroup: true,
+			fsGroup: 3000,
+			delegateFSGroupFeatureGate: true,
+			driverSupportsVolumeMountGroup: false,
+			expectedFSGroupInNodePublish: "",
+		},
+		{
+			name: "fsgroup provided, DelegateFSGroupToCSIDriver feature disabled, driver supports volume mount group; expect fsgroup not to be passed to NodePublishVolume",
+			fsType: "ext4",
+			setFsGroup: true,
+			fsGroup: 3000,
+			delegateFSGroupFeatureGate: false,
+			driverSupportsVolumeMountGroup: true,
+			expectedFSGroupInNodePublish: "",
+		},
 	}
 
 	for i, tc := range testCases {
 		t.Logf("Running test %s", tc.name)
 
 		defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIVolumeFSGroupPolicy, tc.driverFSGroupPolicy)()
+		defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DelegateFSGroupToCSIDriver, tc.delegateFSGroupFeatureGate)()
 
 		volName := fmt.Sprintf("test-vol-%d", i)
 		registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t)
@@ -821,7 +860,7 @@ func TestMounterSetUpWithFSGroup(t *testing.T) {
 		if tc.driverFSGroupPolicy {
 			csiMounter.fsGroupPolicy = tc.supportMode
 		}
-		csiMounter.csiClient = setupClient(t, true)
+		csiMounter.csiClient = setupClientWithVolumeMountGroup(t, true /* stageUnstageSet */, tc.driverSupportsVolumeMountGroup)
 
 		attachID := getAttachmentName(csiMounter.volumeID, string(csiMounter.driverName), string(plug.host.GetNodeName()))
 		attachment := makeTestAttachment(attachID, "test-node", pvName)
@@ -854,6 +893,9 @@ func TestMounterSetUpWithFSGroup(t *testing.T) {
 		if pubs[csiMounter.volumeID].Path != csiMounter.GetPath() {
 			t.Error("csi server may not have received NodePublishVolume call")
 		}
+		if pubs[csiMounter.volumeID].VolumeMountGroup != tc.expectedFSGroupInNodePublish {
+			t.Errorf("expected VolumeMountGroup parameter in NodePublishVolumeRequest to be %q, got: %q", tc.expectedFSGroupInNodePublish, pubs[csiMounter.volumeID].VolumeMountGroup)
+		}
 	}
 }
 
@@ -400,7 +400,7 @@ func TestCSI_VolumeAll(t *testing.T) {
 			if err != nil {
 				t.Fatalf("csiTest.VolumeAll deviceMounter.GetdeviceMountPath failed %s", err)
 			}
-			if err := csiDevMounter.MountDevice(volSpec, devicePath, devMountPath); err != nil {
+			if err := csiDevMounter.MountDevice(volSpec, devicePath, devMountPath, volume.DeviceMounterArgs{}); err != nil {
 				t.Fatalf("csiTest.VolumeAll deviceMounter.MountDevice failed: %v", err)
 			}
 			t.Log("csiTest.VolumeAll device mounted at path:", devMountPath)
@@ -69,12 +69,13 @@ func (f *IdentityClient) Probe(ctx context.Context, in *csipb.ProbeRequest, opts
 }
 
 type CSIVolume struct {
 	VolumeHandle string
 	VolumeContext map[string]string
 	Path string
 	DeviceMountPath string
 	FSType string
 	MountFlags []string
+	VolumeMountGroup string
 }
 
 // NodeClient returns CSI node client
@@ -86,6 +87,7 @@ type NodeClient struct {
 	volumeStatsSet bool
 	volumeConditionSet bool
 	singleNodeMultiWriterSet bool
+	volumeMountGroupSet bool
 	nodeGetInfoResp *csipb.NodeGetInfoResponse
 	nodeVolumeStatsResp *csipb.NodeGetVolumeStatsResponse
 	FakeNodeExpansionRequest *csipb.NodeExpandVolumeRequest
@@ -134,6 +136,15 @@ func NewNodeClientWithSingleNodeMultiWriter(singleNodeMultiWriterSet bool) *Node
 	}
 }

+func NewNodeClientWithVolumeMountGroup(stageUnstageSet, volumeMountGroupSet bool) *NodeClient {
+	return &NodeClient{
+		nodePublishedVolumes: make(map[string]CSIVolume),
+		nodeStagedVolumes:    make(map[string]CSIVolume),
+		stageUnstageSet:      stageUnstageSet,
+		volumeMountGroupSet:  volumeMountGroupSet,
+	}
+}
+
 // SetNextError injects next expected error
 func (f *NodeClient) SetNextError(err error) {
 	f.nextErr = err
@@ -217,6 +228,7 @@ func (f *NodeClient) NodePublishVolume(ctx context.Context, req *csipb.NodePubli
 	if req.GetVolumeCapability().GetMount() != nil {
 		publishedVolume.FSType = req.GetVolumeCapability().GetMount().FsType
 		publishedVolume.MountFlags = req.GetVolumeCapability().GetMount().MountFlags
+		publishedVolume.VolumeMountGroup = req.GetVolumeCapability().GetMount().VolumeMountGroup
 	}
 	f.nodePublishedVolumes[req.GetVolumeId()] = publishedVolume
 	return &csipb.NodePublishVolumeResponse{}, nil
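The fake above records VolumeMountGroup off the mount flavor of the volume capability; on the wire, that is where the field travels. A minimal sketch of the request shape, using the standard CSI spec types; the volume ID, target path, and GID are illustrative values only:

package main

import (
	"fmt"

	csipb "github.com/container-storage-interface/spec/lib/go/csi"
)

func main() {
	// VolumeMountGroup rides inside VolumeCapability_MountVolume.
	req := &csipb.NodePublishVolumeRequest{
		VolumeId:   "test-vol-0",
		TargetPath: "/var/lib/kubelet/pods/uid/volumes/kubernetes.io~csi/pv/mount",
		VolumeCapability: &csipb.VolumeCapability{
			AccessType: &csipb.VolumeCapability_Mount{
				Mount: &csipb.VolumeCapability_MountVolume{
					FsType:           "ext4",
					VolumeMountGroup: "3000", // fsGroup, rendered as a string
				},
			},
		},
	}
	fmt.Println(req.GetVolumeCapability().GetMount().GetVolumeMountGroup()) // "3000"
}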
@@ -268,6 +280,7 @@ func (f *NodeClient) NodeStageVolume(ctx context.Context, req *csipb.NodeStageVo
 	if mounted != nil {
 		fsType = mounted.GetFsType()
 		csiVol.MountFlags = mounted.GetMountFlags()
+		csiVol.VolumeMountGroup = mounted.VolumeMountGroup
 	}
 	if !strings.Contains(fsTypes, fsType) {
 		return nil, errors.New("invalid fstype")
@@ -385,6 +398,16 @@ func (f *NodeClient) NodeGetCapabilities(ctx context.Context, in *csipb.NodeGetC
 			},
 		})
 	}
+
+	if f.volumeMountGroupSet {
+		resp.Capabilities = append(resp.Capabilities, &csipb.NodeServiceCapability{
+			Type: &csipb.NodeServiceCapability_Rpc{
+				Rpc: &csipb.NodeServiceCapability_RPC{
+					Type: csipb.NodeServiceCapability_RPC_VOLUME_MOUNT_GROUP,
+				},
+			},
+		})
+	}
 	return resp, nil
 }

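Together with NewNodeClientWithVolumeMountGroup from the earlier hunk, this makes the new capability observable to tests. A sketch of how a test might assert it, assuming the fake package's in-tree import path (k8s.io/kubernetes/pkg/volume/csi/fake); the test name is illustrative:

package csi_test

import (
	"context"
	"testing"

	csipb "github.com/container-storage-interface/spec/lib/go/csi"
	"k8s.io/kubernetes/pkg/volume/csi/fake"
)

// TestVolumeMountGroupAdvertised checks that the fake driver reports the
// VOLUME_MOUNT_GROUP node capability when constructed with it enabled.
func TestVolumeMountGroupAdvertised(t *testing.T) {
	client := fake.NewNodeClientWithVolumeMountGroup(true /* stageUnstageSet */, true /* volumeMountGroupSet */)
	resp, err := client.NodeGetCapabilities(context.Background(), &csipb.NodeGetCapabilitiesRequest{})
	if err != nil {
		t.Fatal(err)
	}
	for _, c := range resp.GetCapabilities() {
		if c.GetRpc().GetType() == csipb.NodeServiceCapability_RPC_VOLUME_MOUNT_GROUP {
			return // capability found
		}
	}
	t.Error("expected VOLUME_MOUNT_GROUP to be advertised")
}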
@@ -94,7 +94,7 @@ func (attacher *fcAttacher) GetDeviceMountPath(
 	return attacher.manager.MakeGlobalPDName(*mounter.fcDisk), nil
 }

-func (attacher *fcAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
+func (attacher *fcAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string, _ volume.DeviceMounterArgs) error {
 	mounter := attacher.host.GetMounter(fcPluginName)
 	notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
 	if err != nil {
@@ -19,7 +19,7 @@ package flexvolume
 import (
 	"time"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/volume"
@@ -70,7 +70,7 @@ func (a *flexVolumeAttacher) GetDeviceMountPath(spec *volume.Spec) (string, erro
 }

 // MountDevice is part of the volume.Attacher interface
-func (a *flexVolumeAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
+func (a *flexVolumeAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string, _ volume.DeviceMounterArgs) error {
 	// Mount only once.
 	alreadyMounted, err := prepareForMount(a.plugin.host.GetMounter(a.plugin.GetPluginName()), deviceMountPath)
 	if err != nil {
@@ -20,7 +20,7 @@ import (
 	"testing"
 	"time"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/kubernetes/pkg/volume"
 	"k8s.io/kubernetes/test/utils/harness"
 )
@@ -70,7 +70,7 @@ func TestMountDevice(tt *testing.T) {
 	)

 	a, _ := plugin.NewAttacher()
-	a.MountDevice(spec, "/dev/sdx", rootDir+"/mount-dir")
+	a.MountDevice(spec, "/dev/sdx", rootDir+"/mount-dir", volume.DeviceMounterArgs{})
 }

 func TestIsVolumeAttached(tt *testing.T) {
@@ -288,7 +288,7 @@ func (attacher *gcePersistentDiskAttacher) GetDeviceMountPath(
 	return makeGlobalPDName(attacher.host, volumeSource.PDName), nil
 }

-func (attacher *gcePersistentDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
+func (attacher *gcePersistentDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string, _ volume.DeviceMounterArgs) error {
 	// Only mount the PD globally once.
 	mounter := attacher.host.GetMounter(gcePersistentDiskPluginName)
 	notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
@@ -98,7 +98,7 @@ func (attacher *iscsiAttacher) GetDeviceMountPath(
 	return attacher.manager.MakeGlobalPDName(*mounter.iscsiDisk), nil
 }

-func (attacher *iscsiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
+func (attacher *iscsiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string, _ volume.DeviceMounterArgs) error {
 	mounter := attacher.host.GetMounter(iscsiPluginName)
 	notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
 	if err != nil {
@@ -355,7 +355,7 @@ func (dm *deviceMounter) mountLocalBlockDevice(spec *volume.Spec, devicePath str
 	return nil
 }

-func (dm *deviceMounter) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
+func (dm *deviceMounter) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string, _ volume.DeviceMounterArgs) error {
 	if spec.PersistentVolume.Spec.Local == nil || len(spec.PersistentVolume.Spec.Local.Path) == 0 {
 		return fmt.Errorf("local volume source is nil or local path is not set")
 	}
@@ -231,7 +231,7 @@ func TestBlockDeviceGlobalPathAndMountDevice(t *testing.T) {

 	fmt.Println("expected global path is:", expectedGlobalPath)

-	err = dm.MountDevice(pvSpec, tmpBlockDir, expectedGlobalPath)
+	err = dm.MountDevice(pvSpec, tmpBlockDir, expectedGlobalPath, volume.DeviceMounterArgs{})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -276,7 +276,7 @@ func TestFSGlobalPathAndMountDevice(t *testing.T) {
 	}

 	// Actually, we will do nothing if the local path is FS type
-	err = dm.MountDevice(pvSpec, tmpFSDir, expectedGlobalPath)
+	err = dm.MountDevice(pvSpec, tmpFSDir, expectedGlobalPath, volume.DeviceMounterArgs{})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -146,7 +146,7 @@ func (attacher *rbdAttacher) GetDeviceMountPath(spec *volume.Spec) (string, erro
 // MountDevice implements Attacher.MountDevice. It is called by the kubelet to
 // mount device at the given mount path.
 // This method is idempotent, callers are responsible for retrying on failure.
-func (attacher *rbdAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
+func (attacher *rbdAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string, _ volume.DeviceMounterArgs) error {
 	klog.V(4).Infof("rbd: mouting device %s to %s", devicePath, deviceMountPath)
 	notMnt, err := attacher.mounter.IsLikelyNotMountPoint(deviceMountPath)
 	if err != nil {
@@ -281,7 +281,7 @@ func doTestPlugin(t *testing.T, c *testcase) {
 	if deviceMountPath != c.expectedDeviceMountPath {
 		t.Errorf("Unexpected mount path, expected %q, not: %q", c.expectedDeviceMountPath, deviceMountPath)
 	}
-	err = attacher.MountDevice(c.spec, devicePath, deviceMountPath)
+	err = attacher.MountDevice(c.spec, devicePath, deviceMountPath, volume.DeviceMounterArgs{})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -1046,7 +1046,7 @@ func (fv *FakeVolume) mountDeviceInternal(spec *Spec, devicePath string, deviceM
 	return nil
 }

-func (fv *FakeVolume) MountDevice(spec *Spec, devicePath string, deviceMountPath string) error {
+func (fv *FakeVolume) MountDevice(spec *Spec, devicePath string, deviceMountPath string, _ volume.DeviceMounterArgs) error {
 	return fv.mountDeviceInternal(spec, devicePath, deviceMountPath)
 }

@@ -616,7 +616,9 @@ func (og *operationGenerator) GenerateMountVolumeFunc(
 		err = volumeDeviceMounter.MountDevice(
 			volumeToMount.VolumeSpec,
 			devicePath,
-			deviceMountPath)
+			deviceMountPath,
+			volume.DeviceMounterArgs{FsGroup: fsGroup},
+		)
 		if err != nil {
 			og.checkForFailedMount(volumeToMount, err)
 			og.markDeviceErrorState(volumeToMount, devicePath, deviceMountPath, err, actualStateOfWorld)
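DeviceMounterArgs.FsGroup is a *int64 while CSI's VolumeMountGroup is a string, so the CSI plugin has to render the GID before staging or publishing. A minimal sketch of that conversion; fsGroupToVolumeMountGroup is a hypothetical helper name, not the in-tree function:

package main

import (
	"fmt"
	"strconv"
)

// fsGroupToVolumeMountGroup returns "" when no fsGroup was supplied, which
// leaves VolumeMountGroup unset on the wire.
func fsGroupToVolumeMountGroup(fsGroup *int64) string {
	if fsGroup == nil {
		return ""
	}
	return strconv.FormatInt(*fsGroup, 10)
}

func main() {
	gid := int64(3000)
	fmt.Println(fsGroupToVolumeMountGroup(&gid)) // "3000"
	fmt.Println(fsGroupToVolumeMountGroup(nil))  // ""
}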
@@ -271,6 +271,11 @@ type Attacher interface {
 	WaitForAttach(spec *Spec, devicePath string, pod *v1.Pod, timeout time.Duration) (string, error)
 }

+// DeviceMounterArgs provides auxiliary, optional arguments to DeviceMounter.
+type DeviceMounterArgs struct {
+	FsGroup *int64
+}
+
 // DeviceMounter can mount a block volume to a global path.
 type DeviceMounter interface {
 	// GetDeviceMountPath returns a path where the device should
@@ -285,7 +290,7 @@ type DeviceMounter interface {
 	// - TransientOperationFailure
 	// - UncertainProgressError
 	// - Error of any other type should be considered a final error
-	MountDevice(spec *Spec, devicePath string, deviceMountPath string) error
+	MountDevice(spec *Spec, devicePath string, deviceMountPath string, deviceMounterArgs DeviceMounterArgs) error
 }

 type BulkVolumeVerifier interface {
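The signature change above is source-breaking for every DeviceMounter implementation, which is why each in-tree attacher in this commit gains a discarded final argument. A sketch of the minimum an implementation must now do; the types are redeclared locally and the spec parameter is omitted so the snippet stands alone, and this is not an in-tree plugin:

package main

import "fmt"

// DeviceMounterArgs mirrors the struct added in the hunk above.
type DeviceMounterArgs struct {
	FsGroup *int64
}

// noopDeviceMounter is an illustrative implementation only.
type noopDeviceMounter struct{}

// MountDevice accepts the new args and, like the in-tree attachers touched by
// this commit, may simply ignore FsGroup when it has no use for it.
func (m noopDeviceMounter) MountDevice(devicePath, deviceMountPath string, args DeviceMounterArgs) error {
	if args.FsGroup != nil {
		fmt.Printf("would apply fsGroup %d while mounting %s at %s\n", *args.FsGroup, devicePath, deviceMountPath)
	}
	return nil
}

func main() {
	gid := int64(3000)
	_ = noopDeviceMounter{}.MountDevice("/dev/sdx", "/mnt/global", DeviceMounterArgs{FsGroup: &gid})
}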
@@ -208,7 +208,7 @@ func (plugin *vsphereVolumePlugin) GetDeviceMountRefs(deviceMountPath string) ([
 }

 // MountDevice mounts device to global mount point.
-func (attacher *vsphereVMDKAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
+func (attacher *vsphereVMDKAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string, _ volume.DeviceMounterArgs) error {
 	klog.Infof("vsphere MountDevice mount %s to %s", devicePath, deviceMountPath)
 	mounter := attacher.host.GetMounter(vsphereVolumePluginName)
 	notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)