Pass FsGroup to NodePublishVolume

Author: Cheng Xing, 2021-06-23 16:12:29 -07:00
Parent: 65db13a3a5
Commit: ae5668edef
6 changed files with 192 additions and 33 deletions
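In short: before publishing a mount volume, the kubelet now asks the driver whether it can apply a group itself via NodeSupportsVolumeMountGroup(); only then is the pod's fsGroup forwarded as the new final argument of NodePublishVolume, where it is serialized into the CSI VolumeMountGroup field. The sketch below condenses that contract under the DelegateFSGroupToCSIDriver feature gate shown later in this diff; it is a minimal illustration, not the kubelet code itself, and the helper name is mine.

package main

import (
	"fmt"
	"strconv"
)

// delegatedVolumeMountGroup summarizes the rule added by this commit: fsGroup is
// handed to the driver only when the feature gate is on and the driver reports
// the VOLUME_MOUNT_GROUP capability; otherwise it stays unset and the kubelet
// applies ownership itself. A non-nil fsGroup becomes a base-10 string, which is
// the form the CSI VolumeMountGroup field carries.
func delegatedVolumeMountGroup(gateEnabled, driverSupportsMountGroup bool, fsGroup *int64) string {
	if !gateEnabled || !driverSupportsMountGroup || fsGroup == nil {
		return "" // field left empty; kubelet falls back to SetVolumeOwnership
	}
	return strconv.FormatInt(*fsGroup, 10)
}

func main() {
	gid := int64(3000)
	fmt.Println(delegatedVolumeMountGroup(true, true, &gid))  // "3000"
	fmt.Println(delegatedVolumeMountGroup(true, false, &gid)) // "" (driver lacks the capability)
}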

View File

@@ -265,7 +265,8 @@ func (m *csiBlockMapper) publishVolumeForBlock(
volAttribs,
nodePublishSecrets,
fsTypeBlockName,
[]string{},
[]string{}, /* mountOptions */
nil, /* fsGroup */
)
if err != nil {

View File

@@ -22,6 +22,7 @@ import (
"fmt"
"io"
"net"
"strconv"
"sync"
csipbv1 "github.com/container-storage-interface/spec/lib/go/csi"
@@ -43,6 +44,10 @@ type csiClient interface {
maxVolumePerNode int64,
accessibleTopology map[string]string,
err error)
// The caller is responsible for checking whether the driver supports
// applying FSGroup by calling NodeSupportsVolumeMountGroup().
// If the driver does not, fsGroup must be set to nil.
NodePublishVolume(
ctx context.Context,
volumeid string,
@@ -55,7 +60,9 @@ type csiClient interface {
secrets map[string]string,
fsType string,
mountOptions []string,
fsGroup *int64,
) error
NodeExpandVolume(ctx context.Context, rsOpts csiResizeOptions) (resource.Quantity, error)
NodeUnpublishVolume(
ctx context.Context,
@@ -83,6 +90,7 @@ type csiClient interface {
NodeSupportsNodeExpand(ctx context.Context) (bool, error)
NodeSupportsVolumeStats(ctx context.Context) (bool, error)
NodeSupportsSingleNodeMultiWriterAccessMode(ctx context.Context) (bool, error)
NodeSupportsVolumeMountGroup(ctx context.Context) (bool, error)
}
// Strongly typed address
@@ -209,6 +217,7 @@ func (c *csiDriverClient) NodePublishVolume(
secrets map[string]string,
fsType string,
mountOptions []string,
fsGroup *int64,
) error {
klog.V(4).Info(log("calling NodePublishVolume rpc [volid=%s,target_path=%s]", volID, targetPath))
if volID == "" {
@@ -255,11 +264,15 @@ func (c *csiDriverClient) NodePublishVolume(
Block: &csipbv1.VolumeCapability_BlockVolume{},
}
} else {
mountVolume := &csipbv1.VolumeCapability_MountVolume{
FsType: fsType,
MountFlags: mountOptions,
}
if fsGroup != nil {
mountVolume.VolumeMountGroup = strconv.FormatInt(*fsGroup, 10 /* base */)
}
req.VolumeCapability.AccessType = &csipbv1.VolumeCapability_Mount{
Mount: &csipbv1.VolumeCapability_MountVolume{
FsType: fsType,
MountFlags: mountOptions,
},
Mount: mountVolume,
}
}
@@ -636,6 +649,10 @@ func (c *csiDriverClient) nodeSupportsVolumeCondition(ctx context.Context) (bool
return c.nodeSupportsCapability(ctx, csipbv1.NodeServiceCapability_RPC_VOLUME_CONDITION)
}
func (c *csiDriverClient) NodeSupportsVolumeMountGroup(ctx context.Context) (bool, error) {
return c.nodeSupportsCapability(ctx, csipbv1.NodeServiceCapability_RPC_VOLUME_MOUNT_GROUP)
}
func (c *csiDriverClient) nodeSupportsCapability(ctx context.Context, capabilityType csipbv1.NodeServiceCapability_RPC_Type) (bool, error) {
klog.V(4).Info(log("calling NodeGetCapabilities rpc to determine if the node service has %s capability", capabilityType))
capabilities, err := c.nodeGetCapabilities(ctx)

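For context on the receiving end: a node plugin that advertises VOLUME_MOUNT_GROUP reads the delegated group back out of the mount capability of the publish request. The fragment below is a hypothetical driver-side sketch (the helper name volumeMountGroupFrom is mine, and it assumes a CSI spec version that includes the field); this commit itself only changes the kubelet side.

package main

import (
	"fmt"

	csipbv1 "github.com/container-storage-interface/spec/lib/go/csi"
)

// volumeMountGroupFrom shows how a CSI node plugin that advertises
// VOLUME_MOUNT_GROUP would read the delegated group out of a publish request.
func volumeMountGroupFrom(req *csipbv1.NodePublishVolumeRequest) string {
	if mnt := req.GetVolumeCapability().GetMount(); mnt != nil {
		return mnt.GetVolumeMountGroup() // e.g. "3000"; empty when the kubelet did not delegate
	}
	return "" // block volumes carry no mount capability and therefore no mount group
}

func main() {
	req := &csipbv1.NodePublishVolumeRequest{
		VolumeCapability: &csipbv1.VolumeCapability{
			AccessType: &csipbv1.VolumeCapability_Mount{
				Mount: &csipbv1.VolumeCapability_MountVolume{VolumeMountGroup: "3000"},
			},
		},
	}
	fmt.Println(volumeMountGroupFrom(req)) // 3000
}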
View File

@@ -23,6 +23,7 @@ import (
"os"
"path/filepath"
"reflect"
"strconv"
"testing"
csipbv1 "github.com/container-storage-interface/spec/lib/go/csi"
@@ -72,6 +73,13 @@ func newFakeCsiDriverClientWithVolumeStatsAndCondition(t *testing.T, volumeStats
}
}
func newFakeCsiDriverClientWithVolumeMountGroup(t *testing.T, stagingCapable, volumeMountGroupSet bool) *fakeCsiDriverClient {
return &fakeCsiDriverClient{
t: t,
nodeClient: fake.NewNodeClientWithVolumeMountGroup(stagingCapable, volumeMountGroupSet),
}
}
func (c *fakeCsiDriverClient) NodeGetInfo(ctx context.Context) (
nodeID string,
maxVolumePerNode int64,
@@ -152,6 +160,7 @@ func (c *fakeCsiDriverClient) NodePublishVolume(
secrets map[string]string,
fsType string,
mountOptions []string,
fsGroup *int64,
) error {
c.t.Log("calling fake.NodePublishVolume...")
req := &csipbv1.NodePublishVolumeRequest{
@@ -174,11 +183,15 @@ func (c *fakeCsiDriverClient) NodePublishVolume(
Block: &csipbv1.VolumeCapability_BlockVolume{},
}
} else {
mountVolume := &csipbv1.VolumeCapability_MountVolume{
FsType: fsType,
MountFlags: mountOptions,
}
if fsGroup != nil {
mountVolume.VolumeMountGroup = strconv.FormatInt(*fsGroup, 10 /* base */)
}
req.VolumeCapability.AccessType = &csipbv1.VolumeCapability_Mount{
Mount: &csipbv1.VolumeCapability_MountVolume{
FsType: fsType,
MountFlags: mountOptions,
},
Mount: mountVolume,
}
}
@@ -263,6 +276,28 @@ func (c *fakeCsiDriverClient) NodeSupportsStageUnstage(ctx context.Context) (boo
return c.nodeSupportsCapability(ctx, csipbv1.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME)
}
func (c *fakeCsiDriverClient) NodeSupportsVolumeMountGroup(ctx context.Context) (bool, error) {
c.t.Log("calling fake.NodeGetCapabilities for NodeSupportsVolumeMountGroup...")
req := &csipbv1.NodeGetCapabilitiesRequest{}
resp, err := c.nodeClient.NodeGetCapabilities(ctx, req)
if err != nil {
return false, err
}
capabilities := resp.GetCapabilities()
volumeMountGroupSet := false
if capabilities == nil {
return false, nil
}
for _, capability := range capabilities {
if capability.GetRpc().GetType() == csipbv1.NodeServiceCapability_RPC_VOLUME_MOUNT_GROUP {
volumeMountGroupSet = true
}
}
return volumeMountGroupSet, nil
}
func (c *fakeCsiDriverClient) NodeExpandVolume(ctx context.Context, opts csiResizeOptions) (resource.Quantity, error) {
c.t.Log("calling fake.NodeExpandVolume")
req := &csipbv1.NodeExpandVolumeRequest{
@@ -345,6 +380,10 @@ func setupClientWithVolumeStats(t *testing.T, volumeStatsSet bool) csiClient {
return newFakeCsiDriverClientWithVolumeStats(t, volumeStatsSet)
}
func setupClientWithVolumeMountGroup(t *testing.T, stageUnstageSet bool, volumeMountGroupSet bool) csiClient {
return newFakeCsiDriverClientWithVolumeMountGroup(t, stageUnstageSet, volumeMountGroupSet)
}
func checkErr(t *testing.T, expectedAnError bool, actualError error) {
t.Helper()
@@ -423,6 +462,8 @@ func TestClientNodeGetInfo(t *testing.T) {
}
func TestClientNodePublishVolume(t *testing.T) {
var testFSGroup int64 = 3000
tmpDir, err := utiltesting.MkTmpdir("csi-test")
if err != nil {
t.Fatalf("can't create temp dir: %v", err)
@@ -431,28 +472,32 @@ func TestClientNodePublishVolume(t *testing.T) {
testPath := filepath.Join(tmpDir, "path")
testCases := []struct {
name string
volID string
targetPath string
fsType string
mustFail bool
err error
name string
volID string
targetPath string
fsType string
fsGroup *int64
expectedVolumeMountGroup string
mustFail bool
err error
}{
{name: "test ok", volID: "vol-test", targetPath: testPath},
{name: "missing volID", targetPath: testPath, mustFail: true},
{name: "missing target path", volID: "vol-test", mustFail: true},
{name: "bad fs", volID: "vol-test", targetPath: testPath, fsType: "badfs", mustFail: true},
{name: "grpc error", volID: "vol-test", targetPath: testPath, mustFail: true, err: errors.New("grpc error")},
{name: "fsgroup", volID: "vol-test", targetPath: testPath, fsGroup: &testFSGroup, expectedVolumeMountGroup: "3000"},
}
for _, tc := range testCases {
t.Logf("test case: %s", tc.name)
nodeClient := fake.NewNodeClient(false /* stagingCapable */)
nodeClient.SetNextError(tc.err)
fakeCloser := fake.NewCloser(t)
client := &csiDriverClient{
driverName: "Fake Driver Name",
nodeV1ClientCreator: func(addr csiAddr, m *MetricsManager) (csipbv1.NodeClient, io.Closer, error) {
nodeClient := fake.NewNodeClient(false /* stagingCapable */)
nodeClient.SetNextError(tc.err)
return nodeClient, fakeCloser, nil
},
}
@@ -469,9 +514,15 @@ func TestClientNodePublishVolume(t *testing.T) {
map[string]string{},
tc.fsType,
[]string{},
tc.fsGroup,
)
checkErr(t, tc.mustFail, err)
volumeMountGroup := nodeClient.GetNodePublishedVolumes()[tc.volID].VolumeMountGroup
if volumeMountGroup != tc.expectedVolumeMountGroup {
t.Errorf("Expected VolumeMountGroup in NodePublishVolumeRequest to be %q, got: %q", tc.expectedVolumeMountGroup, volumeMountGroup)
}
if !tc.mustFail {
fakeCloser.Check()
}
@@ -652,6 +703,16 @@ func TestClientNodeSupportsVolumeStats(t *testing.T) {
})
}
func TestClientNodeSupportsVolumeMountGroup(t *testing.T) {
testClientNodeSupportsCapabilities(t,
func(client *csiDriverClient) (bool, error) {
return client.NodeSupportsVolumeMountGroup(context.Background())
},
func(volumeMountGroupCapable bool) *fake.NodeClient {
return fake.NewNodeClientWithVolumeMountGroup(false /* stagingCapable */, volumeMountGroupCapable)
})
}
func testClientNodeSupportsCapabilities(
t *testing.T,
capabilityMethodToTest func(*csiDriverClient) (bool, error),

View File

@@ -235,6 +235,19 @@ func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error
}
volAttribs = mergeMap(volAttribs, serviceAccountTokenAttrs)
driverSupportsCSIVolumeMountGroup := false
var nodePublishFSGroupArg *int64
if utilfeature.DefaultFeatureGate.Enabled(features.DelegateFSGroupToCSIDriver) {
driverSupportsCSIVolumeMountGroup, err = csi.NodeSupportsVolumeMountGroup(ctx)
if err != nil {
return volumetypes.NewTransientOperationFailure(log("mounter.SetUpAt failed to determine if the node service has VOLUME_MOUNT_GROUP capability: %v", err))
}
if driverSupportsCSIVolumeMountGroup {
nodePublishFSGroupArg = mounterArgs.FsGroup
}
}
err = csi.NodePublishVolume(
ctx,
volumeHandle,
@@ -247,6 +260,7 @@ func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error
nodePublishSecrets,
fsType,
mountOptions,
nodePublishFSGroupArg,
)
if err != nil {
@@ -264,7 +278,9 @@ func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error
klog.V(2).Info(log("error checking for SELinux support: %s", err))
}
if c.supportsFSGroup(fsType, mounterArgs.FsGroup, c.fsGroupPolicy) {
if !driverSupportsCSIVolumeMountGroup && c.supportsFSGroup(fsType, mounterArgs.FsGroup, c.fsGroupPolicy) {
// Driver doesn't support applying FSGroup. Kubelet must apply it instead.
// fullPluginName helps to distinguish different driver from csi plugin
err := volume.SetVolumeOwnership(c, mounterArgs.FsGroup, mounterArgs.FSGroupChangePolicy, util.FSGroupCompleteHook(c.plugin, c.spec))
if err != nil {

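The !driverSupportsCSIVolumeMountGroup guard above is what keeps ownership from being applied twice. When the driver cannot apply the group, the existing volume.SetVolumeOwnership path still runs on the kubelet. The sketch below is a simplified, hypothetical stand-in for what that fallback amounts to (a recursive chgrp plus the setgid bit on directories); the real helper also adjusts file modes and honors the FSGroupChangePolicy.

package main

import (
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
)

// applyFSGroupFallback approximates the kubelet-side fallback: walk the volume,
// set the group owner to fsGroup, and add the setgid bit on directories so new
// files inherit the group. Illustration only, not volume.SetVolumeOwnership.
func applyFSGroupFallback(root string, fsGroup int64) error {
	return filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if err := os.Lchown(path, -1, int(fsGroup)); err != nil {
			return err
		}
		if d.IsDir() {
			info, err := d.Info()
			if err != nil {
				return err
			}
			return os.Chmod(path, info.Mode().Perm()|0o070|os.ModeSetgid)
		}
		return nil
	})
}

func main() {
	if len(os.Args) < 2 {
		fmt.Println("usage: applyfsgroup <volume-path>")
		return
	}
	if err := applyFSGroupFallback(os.Args[1], 3000 /* example GID */); err != nil {
		fmt.Println(err)
	}
}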
View File

@@ -649,14 +649,17 @@ func TestMounterSetUpWithFSGroup(t *testing.T) {
defer os.RemoveAll(tmpDir)
testCases := []struct {
name string
accessModes []api.PersistentVolumeAccessMode
readOnly bool
fsType string
setFsGroup bool
fsGroup int64
driverFSGroupPolicy bool
supportMode storage.FSGroupPolicy
name string
accessModes []api.PersistentVolumeAccessMode
readOnly bool
fsType string
setFsGroup bool
fsGroup int64
driverFSGroupPolicy bool
supportMode storage.FSGroupPolicy
delegateFSGroupFeatureGate bool
driverSupportsVolumeMountGroup bool
expectedFSGroupInNodePublish string
}{
{
name: "default fstype, with no fsgroup (should not apply fsgroup)",
@@ -785,12 +788,48 @@ func TestMounterSetUpWithFSGroup(t *testing.T) {
driverFSGroupPolicy: true,
supportMode: storage.FileFSGroupPolicy,
},
{
name: "fsgroup provided, DelegateFSGroupToCSIDriver feature enabled, driver supports volume mount group; expect fsgroup to be passed to NodePublishVolume",
fsType: "ext4",
setFsGroup: true,
fsGroup: 3000,
delegateFSGroupFeatureGate: true,
driverSupportsVolumeMountGroup: true,
expectedFSGroupInNodePublish: "3000",
},
{
name: "fsgroup not provided, DelegateFSGroupToCSIDriver feature enabled, driver supports volume mount group; expect fsgroup not to be passed to NodePublishVolume",
fsType: "ext4",
setFsGroup: false,
delegateFSGroupFeatureGate: true,
driverSupportsVolumeMountGroup: true,
expectedFSGroupInNodePublish: "",
},
{
name: "fsgroup provided, DelegateFSGroupToCSIDriver feature enabled, driver does not support volume mount group; expect fsgroup not to be passed to NodePublishVolume",
fsType: "ext4",
setFsGroup: true,
fsGroup: 3000,
delegateFSGroupFeatureGate: true,
driverSupportsVolumeMountGroup: false,
expectedFSGroupInNodePublish: "",
},
{
name: "fsgroup provided, DelegateFSGroupToCSIDriver feature disabled, driver supports volume mount group; expect fsgroup not to be passed to NodePublishVolume",
fsType: "ext4",
setFsGroup: true,
fsGroup: 3000,
delegateFSGroupFeatureGate: false,
driverSupportsVolumeMountGroup: true,
expectedFSGroupInNodePublish: "",
},
}
for i, tc := range testCases {
t.Logf("Running test %s", tc.name)
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIVolumeFSGroupPolicy, tc.driverFSGroupPolicy)()
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DelegateFSGroupToCSIDriver, tc.delegateFSGroupFeatureGate)()
volName := fmt.Sprintf("test-vol-%d", i)
registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t)
@@ -821,7 +860,7 @@ func TestMounterSetUpWithFSGroup(t *testing.T) {
if tc.driverFSGroupPolicy {
csiMounter.fsGroupPolicy = tc.supportMode
}
csiMounter.csiClient = setupClient(t, true)
csiMounter.csiClient = setupClientWithVolumeMountGroup(t, true /* stageUnstageSet */, tc.driverSupportsVolumeMountGroup)
attachID := getAttachmentName(csiMounter.volumeID, string(csiMounter.driverName), string(plug.host.GetNodeName()))
attachment := makeTestAttachment(attachID, "test-node", pvName)
@@ -854,6 +893,9 @@ func TestMounterSetUpWithFSGroup(t *testing.T) {
if pubs[csiMounter.volumeID].Path != csiMounter.GetPath() {
t.Error("csi server may not have received NodePublishVolume call")
}
if pubs[csiMounter.volumeID].VolumeMountGroup != tc.expectedFSGroupInNodePublish {
t.Errorf("expected VolumeMountGroup parameter in NodePublishVolumeRequest to be %q, got: %q", tc.expectedFSGroupInNodePublish, pubs[csiMounter.volumeID].VolumeMountGroup)
}
}
}

View File

@@ -69,12 +69,13 @@ func (f *IdentityClient) Probe(ctx context.Context, in *csipb.ProbeRequest, opts
}
type CSIVolume struct {
VolumeHandle string
VolumeContext map[string]string
Path string
DeviceMountPath string
FSType string
MountFlags []string
VolumeHandle string
VolumeContext map[string]string
Path string
DeviceMountPath string
FSType string
MountFlags []string
VolumeMountGroup string
}
// NodeClient returns CSI node client
@@ -86,6 +87,7 @@ type NodeClient struct {
volumeStatsSet bool
volumeConditionSet bool
singleNodeMultiWriterSet bool
volumeMountGroupSet bool
nodeGetInfoResp *csipb.NodeGetInfoResponse
nodeVolumeStatsResp *csipb.NodeGetVolumeStatsResponse
FakeNodeExpansionRequest *csipb.NodeExpandVolumeRequest
@@ -134,6 +136,15 @@ func NewNodeClientWithSingleNodeMultiWriter(singleNodeMultiWriterSet bool) *Node
}
}
func NewNodeClientWithVolumeMountGroup(stageUnstageSet, volumeMountGroupSet bool) *NodeClient {
return &NodeClient{
nodePublishedVolumes: make(map[string]CSIVolume),
nodeStagedVolumes: make(map[string]CSIVolume),
stageUnstageSet: stageUnstageSet,
volumeMountGroupSet: volumeMountGroupSet,
}
}
// SetNextError injects next expected error
func (f *NodeClient) SetNextError(err error) {
f.nextErr = err
@@ -217,6 +228,7 @@ func (f *NodeClient) NodePublishVolume(ctx context.Context, req *csipb.NodePubli
if req.GetVolumeCapability().GetMount() != nil {
publishedVolume.FSType = req.GetVolumeCapability().GetMount().FsType
publishedVolume.MountFlags = req.GetVolumeCapability().GetMount().MountFlags
publishedVolume.VolumeMountGroup = req.GetVolumeCapability().GetMount().VolumeMountGroup
}
f.nodePublishedVolumes[req.GetVolumeId()] = publishedVolume
return &csipb.NodePublishVolumeResponse{}, nil
@@ -385,6 +397,16 @@ func (f *NodeClient) NodeGetCapabilities(ctx context.Context, in *csipb.NodeGetC
},
})
}
if f.volumeMountGroupSet {
resp.Capabilities = append(resp.Capabilities, &csipb.NodeServiceCapability{
Type: &csipb.NodeServiceCapability_Rpc{
Rpc: &csipb.NodeServiceCapability_RPC{
Type: csipb.NodeServiceCapability_RPC_VOLUME_MOUNT_GROUP,
},
},
})
}
return resp, nil
}
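A brief note on how the fake ties the pieces above together: constructed with volumeMountGroupSet=true it advertises VOLUME_MOUNT_GROUP through NodeGetCapabilities, which is what csiDriverClient.NodeSupportsVolumeMountGroup keys on, and any group carried in a publish request is recorded on the stored CSIVolume. The test below is a sketch of that usage; the test name is mine and the fake's import path is assumed to sit next to the csi package.

package csi

import (
	"context"
	"testing"

	csipbv1 "github.com/container-storage-interface/spec/lib/go/csi"
	"k8s.io/kubernetes/pkg/volume/csi/fake" // assumed import path of the fake package shown above
)

// TestFakeAdvertisesVolumeMountGroup checks that the fake, when constructed with
// volumeMountGroupSet=true, reports the VOLUME_MOUNT_GROUP node capability.
func TestFakeAdvertisesVolumeMountGroup(t *testing.T) {
	nodeClient := fake.NewNodeClientWithVolumeMountGroup(true /* stagingCapable */, true /* volumeMountGroupSet */)
	resp, err := nodeClient.NodeGetCapabilities(context.Background(), &csipbv1.NodeGetCapabilitiesRequest{})
	if err != nil {
		t.Fatal(err)
	}
	found := false
	for _, c := range resp.GetCapabilities() {
		if c.GetRpc().GetType() == csipbv1.NodeServiceCapability_RPC_VOLUME_MOUNT_GROUP {
			found = true
		}
	}
	if !found {
		t.Error("expected the fake to advertise VOLUME_MOUNT_GROUP")
	}
}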