diff --git a/pkg/apis/storage/validation/validation.go b/pkg/apis/storage/validation/validation.go index e8510c152a4..81d74b188b7 100644 --- a/pkg/apis/storage/validation/validation.go +++ b/pkg/apis/storage/validation/validation.go @@ -393,10 +393,13 @@ func validateCSINodeDriver(driver storage.CSINodeDriver, driverNamesInSpecs sets return allErrs } +// ValidateCSIDriverName checks that a name is appropriate for a +// CSIDriver object. +var ValidateCSIDriverName = apimachineryvalidation.NameIsDNSSubdomain + // ValidateCSIDriver validates a CSIDriver. func ValidateCSIDriver(csiDriver *storage.CSIDriver) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, apivalidation.ValidateCSIDriverName(csiDriver.Name, field.NewPath("name"))...) + allErrs := apivalidation.ValidateObjectMeta(&csiDriver.ObjectMeta, false, ValidateCSIDriverName, field.NewPath("metadata")) allErrs = append(allErrs, validateCSIDriverSpec(&csiDriver.Spec, field.NewPath("spec"))...) return allErrs @@ -404,7 +407,7 @@ func ValidateCSIDriver(csiDriver *storage.CSIDriver) field.ErrorList { // ValidateCSIDriverUpdate validates a CSIDriver. 
func ValidateCSIDriverUpdate(new, old *storage.CSIDriver) field.ErrorList { - allErrs := ValidateCSIDriver(new) + allErrs := apivalidation.ValidateObjectMetaUpdate(&new.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata")) // Spec is read-only // If this ever relaxes in the future, make sure to increment the Generation number in PrepareForUpdate diff --git a/pkg/apis/storage/validation/validation_test.go b/pkg/apis/storage/validation/validation_test.go index 10c071f2240..c0e80641422 100644 --- a/pkg/apis/storage/validation/validation_test.go +++ b/pkg/apis/storage/validation/validation_test.go @@ -1870,8 +1870,11 @@ func TestCSIDriverValidationUpdate(t *testing.T) { podInfoOnMount := true notPodInfoOnMount := false notRequiresRepublish := false + resourceVersion := "1" + invalidFSGroupPolicy := storage.ReadWriteOnceWithFSTypeFSGroupPolicy + invalidFSGroupPolicy = "invalid-mode" old := storage.CSIDriver{ - ObjectMeta: metav1.ObjectMeta{Name: driverName}, + ObjectMeta: metav1.ObjectMeta{Name: driverName, ResourceVersion: resourceVersion}, Spec: storage.CSIDriverSpec{ AttachRequired: &attachNotRequired, PodInfoOnMount: ¬PodInfoOnMount, @@ -1883,11 +1886,27 @@ func TestCSIDriverValidationUpdate(t *testing.T) { }, } - // Currently there is only one success case: exactly the same - // as the existing object. 
- successCases := []storage.CSIDriver{old} + // Currently we compare the object against itself + // and ensure updates succeed + successCases := []storage.CSIDriver{ + old, + // An invalid FSGroupPolicy should still pass + { + ObjectMeta: metav1.ObjectMeta{Name: driverName, ResourceVersion: resourceVersion}, + Spec: storage.CSIDriverSpec{ + AttachRequired: &attachNotRequired, + PodInfoOnMount: ¬PodInfoOnMount, + VolumeLifecycleModes: []storage.VolumeLifecycleMode{ + storage.VolumeLifecycleEphemeral, + storage.VolumeLifecyclePersistent, + }, + FSGroupPolicy: &invalidFSGroupPolicy, + }, + }, + } for _, csiDriver := range successCases { - if errs := ValidateCSIDriverUpdate(&csiDriver, &old); len(errs) != 0 { + newDriver := csiDriver.DeepCopy() + if errs := ValidateCSIDriverUpdate(&csiDriver, newDriver); len(errs) != 0 { t.Errorf("expected success for %+v: %v", csiDriver, errs) } } @@ -1963,6 +1982,21 @@ func TestCSIDriverValidationUpdate(t *testing.T) { } }, }, + { + name: "FSGroupPolicy invalidated", + modify: func(new *storage.CSIDriver) { + invalidFSGroupPolicy := storage.ReadWriteOnceWithFSTypeFSGroupPolicy + invalidFSGroupPolicy = "invalid" + new.Spec.FSGroupPolicy = &invalidFSGroupPolicy + }, + }, + { + name: "FSGroupPolicy changed", + modify: func(new *storage.CSIDriver) { + fileFSGroupPolicy := storage.FileFSGroupPolicy + new.Spec.FSGroupPolicy = &fileFSGroupPolicy + }, + }, } for _, test := range errorCases { diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index ac898f7da72..41bf65ddbc4 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -432,6 +432,7 @@ const ( // owner: @huffmanca // alpha: v1.19 + // beta: v1.20 // // Determines if a CSI Driver supports applying fsGroup. 
CSIVolumeFSGroupPolicy featuregate.Feature = "CSIVolumeFSGroupPolicy" @@ -770,7 +771,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS CSIStorageCapacity: {Default: false, PreRelease: featuregate.Alpha}, CSIServiceAccountToken: {Default: false, PreRelease: featuregate.Alpha}, GenericEphemeralVolume: {Default: false, PreRelease: featuregate.Alpha}, - CSIVolumeFSGroupPolicy: {Default: false, PreRelease: featuregate.Alpha}, + CSIVolumeFSGroupPolicy: {Default: true, PreRelease: featuregate.Beta}, RuntimeClass: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.23 NodeLease: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, SCTPSupport: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.22 diff --git a/pkg/registry/storage/csidriver/strategy.go b/pkg/registry/storage/csidriver/strategy.go index 8cf3f06144d..ec600ac4109 100644 --- a/pkg/registry/storage/csidriver/strategy.go +++ b/pkg/registry/storage/csidriver/strategy.go @@ -113,8 +113,7 @@ func (csiDriverStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime. func (csiDriverStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList { newCSIDriverObj := obj.(*storage.CSIDriver) oldCSIDriverObj := old.(*storage.CSIDriver) - errorList := validation.ValidateCSIDriver(newCSIDriverObj) - return append(errorList, validation.ValidateCSIDriverUpdate(newCSIDriverObj, oldCSIDriverObj)...) 
+ return validation.ValidateCSIDriverUpdate(newCSIDriverObj, oldCSIDriverObj) } func (csiDriverStrategy) AllowUnconditionalUpdate() bool { diff --git a/pkg/volume/csi/csi_test.go b/pkg/volume/csi/csi_test.go index 993a7b35b90..125079eddd4 100644 --- a/pkg/volume/csi/csi_test.go +++ b/pkg/volume/csi/csi_test.go @@ -27,6 +27,7 @@ import ( api "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" + storagev1 "k8s.io/api/storage/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -47,17 +48,19 @@ import ( // based on operations from the volume manager/reconciler/operation executor func TestCSI_VolumeAll(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIInlineVolume, true)() + defaultFSGroupPolicy := storagev1.ReadWriteOnceWithFSTypeFSGroupPolicy tests := []struct { - name string - specName string - driver string - volName string - specFunc func(specName, driver, volName string) *volume.Spec - podFunc func() *api.Pod - isInline bool - shouldFail bool - driverSpec *storage.CSIDriverSpec + name string + specName string + driver string + volName string + specFunc func(specName, driver, volName string) *volume.Spec + podFunc func() *api.Pod + isInline bool + shouldFail bool + disableFSGroupPolicyFeatureGate bool + driverSpec *storage.CSIDriverSpec }{ { name: "PersistentVolume", @@ -87,6 +90,26 @@ func TestCSI_VolumeAll(t *testing.T) { driverSpec: &storage.CSIDriverSpec{ // Required for the driver to be accepted for the persistent volume. 
VolumeLifecycleModes: []storage.VolumeLifecycleMode{storage.VolumeLifecyclePersistent}, + FSGroupPolicy: &defaultFSGroupPolicy, + }, + }, + { + name: "PersistentVolume with driver info and FSGroup disabled", + specName: "pv2", + driver: "simple-driver", + volName: "vol2", + specFunc: func(specName, driver, volName string) *volume.Spec { + return volume.NewSpecFromPersistentVolume(makeTestPV(specName, 20, driver, volName), false) + }, + podFunc: func() *api.Pod { + podUID := types.UID(fmt.Sprintf("%08X", rand.Uint64())) + return &api.Pod{ObjectMeta: meta.ObjectMeta{UID: podUID, Namespace: testns}} + }, + disableFSGroupPolicyFeatureGate: true, + driverSpec: &storage.CSIDriverSpec{ + // Required for the driver to be accepted for the persistent volume. + VolumeLifecycleModes: []storage.VolumeLifecycleMode{storage.VolumeLifecyclePersistent}, + FSGroupPolicy: &defaultFSGroupPolicy, }, }, { @@ -104,6 +127,7 @@ func TestCSI_VolumeAll(t *testing.T) { driverSpec: &storage.CSIDriverSpec{ // This will cause the volume to be rejected. VolumeLifecycleModes: []storage.VolumeLifecycleMode{storage.VolumeLifecycleEphemeral}, + FSGroupPolicy: &defaultFSGroupPolicy, }, shouldFail: true, }, @@ -122,6 +146,7 @@ func TestCSI_VolumeAll(t *testing.T) { driverSpec: &storage.CSIDriverSpec{ // Required for the driver to be accepted for the inline volume. VolumeLifecycleModes: []storage.VolumeLifecycleMode{storage.VolumeLifecycleEphemeral}, + FSGroupPolicy: &defaultFSGroupPolicy, }, }, { @@ -139,6 +164,7 @@ func TestCSI_VolumeAll(t *testing.T) { driverSpec: &storage.CSIDriverSpec{ // Required for the driver to be accepted for the inline volume. 
VolumeLifecycleModes: []storage.VolumeLifecycleMode{storage.VolumeLifecyclePersistent, storage.VolumeLifecycleEphemeral}, + FSGroupPolicy: &defaultFSGroupPolicy, }, }, { @@ -221,6 +247,8 @@ func TestCSI_VolumeAll(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIVolumeFSGroupPolicy, !test.disableFSGroupPolicyFeatureGate)() + tmpDir, err := utiltesting.MkTmpdir("csi-test") if err != nil { t.Fatalf("can't create temp dir: %v", err) diff --git a/pkg/volume/csi/csi_util_test.go b/pkg/volume/csi/csi_util_test.go index 3df12babcde..f4d3582b004 100644 --- a/pkg/volume/csi/csi_util_test.go +++ b/pkg/volume/csi/csi_util_test.go @@ -85,6 +85,7 @@ func makeTestVol(name string, driverName string) *api.Volume { } func getTestCSIDriver(name string, podInfoMount *bool, attachable *bool, volumeLifecycleModes []storagev1.VolumeLifecycleMode) *storagev1.CSIDriver { + defaultFSGroupPolicy := storagev1.ReadWriteOnceWithFSTypeFSGroupPolicy return &storagev1.CSIDriver{ ObjectMeta: meta.ObjectMeta{ Name: name, @@ -93,6 +94,7 @@ func getTestCSIDriver(name string, podInfoMount *bool, attachable *bool, volumeL PodInfoOnMount: podInfoMount, AttachRequired: attachable, VolumeLifecycleModes: volumeLifecycleModes, + FSGroupPolicy: &defaultFSGroupPolicy, }, } } diff --git a/test/e2e/storage/csi_mock_volume.go b/test/e2e/storage/csi_mock_volume.go index 92a196b8673..c8d5e769894 100644 --- a/test/e2e/storage/csi_mock_volume.go +++ b/test/e2e/storage/csi_mock_volume.go @@ -22,6 +22,7 @@ import ( "encoding/json" "errors" "fmt" + "math/rand" "strconv" "strings" "time" @@ -49,6 +50,7 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" + e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/storage/drivers" 
"k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/testsuites" @@ -114,6 +116,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { javascriptHooks map[string]string tokenRequests []storagev1.TokenRequest requiresRepublish *bool + fsGroupPolicy *storagev1.FSGroupPolicy } type mockDriverSetup struct { @@ -155,6 +158,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { JavascriptHooks: tp.javascriptHooks, TokenRequests: tp.tokenRequests, RequiresRepublish: tp.requiresRepublish, + FSGroupPolicy: tp.fsGroupPolicy, } // this just disable resizing on driver, keeping resizing on SC enabled. @@ -229,6 +233,39 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { return pod, err } + createPodWithFSGroup := func(fsGroup *int64) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) { + ginkgo.By("Creating pod with fsGroup") + nodeSelection := m.config.ClientNodeSelection + var sc *storagev1.StorageClass + if dDriver, ok := m.driver.(testsuites.DynamicPVTestDriver); ok { + sc = dDriver.GetDynamicProvisionStorageClass(m.config, "") + } + scTest := testsuites.StorageClassTest{ + Name: m.driver.GetDriverInfo().Name, + Provisioner: sc.Provisioner, + Parameters: sc.Parameters, + ClaimSize: "1Gi", + ExpectedSize: "1Gi", + DelayBinding: m.tp.lateBinding, + AllowVolumeExpansion: m.tp.enableResizing, + } + + class, claim, pod := startBusyBoxPod(f.ClientSet, scTest, nodeSelection, m.tp.scName, f.Namespace.Name, fsGroup) + + if class != nil { + m.sc[class.Name] = class + } + if claim != nil { + m.pvcs = append(m.pvcs, claim) + } + + if pod != nil { + m.pods = append(m.pods, pod) + } + + return class, claim, pod + } + cleanup := func() { cs := f.ClientSet var errs []error @@ -1369,6 +1406,89 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { }) } }) + // These tests *only* work on a cluster which has the CSIVolumeFSGroupPolicy feature enabled. 
+ ginkgo.Context("CSI FSGroupPolicy [LinuxOnly]", func() { + tests := []struct { + name string + fsGroupPolicy storagev1.FSGroupPolicy + modified bool + }{ + { + name: "should modify fsGroup if fsGroupPolicy=default", + fsGroupPolicy: storagev1.ReadWriteOnceWithFSTypeFSGroupPolicy, + modified: true, + }, + { + name: "should modify fsGroup if fsGroupPolicy=File", + fsGroupPolicy: storagev1.FileFSGroupPolicy, + modified: true, + }, + { + name: "should not modify fsGroup if fsGroupPolicy=None", + fsGroupPolicy: storagev1.NoneFSGroupPolicy, + modified: false, + }, + } + for _, t := range tests { + test := t + ginkgo.It(test.name, func() { + if framework.NodeOSDistroIs("windows") { + e2eskipper.Skipf("FSGroupPolicy is only applied on linux nodes -- skipping") + } + init(testParameters{ + disableAttach: true, + registerDriver: true, + fsGroupPolicy: &test.fsGroupPolicy, + }) + defer cleanup() + + // kube-scheduler may need some time before it gets the CSIDriver object. + // Without them, scheduling doesn't run as expected by the test. 
+ syncDelay := 5 * time.Second + time.Sleep(syncDelay) + + fsGroupVal := int64(rand.Int63n(20000) + 1024) + fsGroup := &fsGroupVal + + _, _, pod := createPodWithFSGroup(fsGroup) /* persistent volume */ + + mountPath := pod.Spec.Containers[0].VolumeMounts[0].MountPath + dirName := mountPath + "/" + f.UniqueName + fileName := dirName + "/" + f.UniqueName + + err := e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace) + framework.ExpectNoError(err, "failed to start pod") + + // Create the subdirectory to ensure that fsGroup propagates + createDirectory := fmt.Sprintf("mkdir %s", dirName) + _, _, err = utils.PodExec(f, pod, createDirectory) + framework.ExpectNoError(err, "failed: creating the directory: %s", err) + + // Inject the contents onto the mount + createFile := fmt.Sprintf("echo '%s' > '%s'; sync", "filecontents", fileName) + _, _, err = utils.PodExec(f, pod, createFile) + framework.ExpectNoError(err, "failed: writing the contents: %s", err) + + // Delete the created file. This step is mandatory, as the mock driver + // won't clean up the contents automatically. + defer func() { + delete := fmt.Sprintf("rm -fr %s", dirName) + _, _, err = utils.PodExec(f, pod, delete) + framework.ExpectNoError(err, "failed: deleting the directory: %s", err) + }() + + // Ensure that the fsGroup matches what we expect + if test.modified { + utils.VerifyFSGroupInPod(f, fileName, strconv.FormatInt(*fsGroup, 10), pod) + } else { + utils.VerifyFSGroupInPod(f, fileName, "root", pod) + } + + // The created resources will be removed by the cleanup() function, + // so no need to delete anything here. + }) + } + }) }) // A lot of this code was copied from e2e/framework. 
It would be nicer @@ -1505,7 +1625,7 @@ func checkCSINodeForLimits(nodeName string, driverName string, cs clientset.Inte return attachLimit, nil } -func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, scName, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) { +func createClaim(cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, scName, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim) { class := newStorageClass(t, ns, "") if scName != "" { class.Name = scName @@ -1530,9 +1650,21 @@ func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node e _, err = e2epv.WaitForPVClaimBoundPhase(cs, pvcClaims, framework.ClaimProvisionTimeout) framework.ExpectNoError(err, "Failed waiting for PVC to be bound: %v", err) } + return class, claim +} + +func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, scName, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) { + class, claim := createClaim(cs, t, node, scName, ns) pod, err := startPausePodWithClaim(cs, claim, node, ns) - framework.ExpectNoError(err, "Failed to create pod: %v", err) + framework.ExpectNoError(err, "Failed to create pause pod: %v", err) + return class, claim, pod +} + +func startBusyBoxPod(cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, scName, ns string, fsGroup *int64) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) { + class, claim := createClaim(cs, t, node, scName, ns) + pod, err := startBusyBoxPodWithClaim(cs, claim, node, ns, fsGroup) + framework.ExpectNoError(err, "Failed to create busybox pod: %v", err) return class, claim, pod } @@ -1557,6 +1689,17 @@ func startPausePodWithClaim(cs clientset.Interface, pvc *v1.PersistentVolumeClai node, ns) } +func startBusyBoxPodWithClaim(cs clientset.Interface, pvc *v1.PersistentVolumeClaim, node e2epod.NodeSelection, ns 
string, fsGroup *int64) (*v1.Pod, error) { + return startBusyBoxPodWithVolumeSource(cs, + v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: pvc.Name, + ReadOnly: false, + }, + }, + node, ns, fsGroup) +} + func startPausePodWithInlineVolume(cs clientset.Interface, inlineVolume *v1.CSIVolumeSource, node e2epod.NodeSelection, ns string) (*v1.Pod, error) { return startPausePodWithVolumeSource(cs, v1.VolumeSource{ @@ -1596,6 +1739,41 @@ func startPausePodWithVolumeSource(cs clientset.Interface, volumeSource v1.Volum return cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) } +func startBusyBoxPodWithVolumeSource(cs clientset.Interface, volumeSource v1.VolumeSource, node e2epod.NodeSelection, ns string, fsGroup *int64) (*v1.Pod, error) { + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "pvc-volume-tester-", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "volume-tester", + Image: framework.BusyBoxImage, + VolumeMounts: []v1.VolumeMount{ + { + Name: "my-volume", + MountPath: "/mnt/test", + }, + }, + Command: e2evolume.GenerateScriptCmd("while true ; do sleep 2; done"), + }, + }, + SecurityContext: &v1.PodSecurityContext{ + FSGroup: fsGroup, + }, + RestartPolicy: v1.RestartPolicyNever, + Volumes: []v1.Volume{ + { + Name: "my-volume", + VolumeSource: volumeSource, + }, + }, + }, + } + e2epod.SetNodeSelection(&pod.Spec, node) + return cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) +} + // Dummy structure that parses just volume_attributes and error code out of logged CSI call type mockCSICall struct { json string // full log entry diff --git a/test/e2e/storage/drivers/csi.go b/test/e2e/storage/drivers/csi.go index 1c92cee8d6f..03ec35e557c 100644 --- a/test/e2e/storage/drivers/csi.go +++ b/test/e2e/storage/drivers/csi.go @@ -255,6 +255,7 @@ type mockCSIDriver struct { javascriptHooks map[string]string tokenRequests []storagev1.TokenRequest 
requiresRepublish *bool + fsGroupPolicy *storagev1.FSGroupPolicy } // CSIMockDriverOpts defines options used for csi driver @@ -271,6 +272,7 @@ type CSIMockDriverOpts struct { JavascriptHooks map[string]string TokenRequests []storagev1.TokenRequest RequiresRepublish *bool + FSGroupPolicy *storagev1.FSGroupPolicy } var _ testsuites.TestDriver = &mockCSIDriver{} @@ -330,6 +332,7 @@ func InitMockCSIDriver(driverOpts CSIMockDriverOpts) testsuites.TestDriver { javascriptHooks: driverOpts.JavascriptHooks, tokenRequests: driverOpts.TokenRequests, requiresRepublish: driverOpts.RequiresRepublish, + fsGroupPolicy: driverOpts.FSGroupPolicy, } } @@ -433,6 +436,7 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTest }, TokenRequests: m.tokenRequests, RequiresRepublish: m.requiresRepublish, + FSGroupPolicy: m.fsGroupPolicy, } cleanup, err := utils.CreateFromManifests(f, driverNamespace, func(item interface{}) error { return utils.PatchCSIDeployment(f, o, item) diff --git a/test/e2e/storage/utils/deployment.go b/test/e2e/storage/utils/deployment.go index c83b9194da6..4a2622d1f0c 100644 --- a/test/e2e/storage/utils/deployment.go +++ b/test/e2e/storage/utils/deployment.go @@ -139,6 +139,9 @@ func PatchCSIDeployment(f *framework.Framework, o PatchCSIOptions, object interf if o.RequiresRepublish != nil { object.Spec.RequiresRepublish = o.RequiresRepublish } + if o.FSGroupPolicy != nil { + object.Spec.FSGroupPolicy = o.FSGroupPolicy + } } return nil @@ -194,4 +197,8 @@ type PatchCSIOptions struct { // field *if* the driver deploys a CSIDriver object. Ignored // otherwise. RequiresRepublish *bool + // If not nil, the value to use for the CSIDriver.Spec.FSGroupPolicy + // field *if* the driver deploys a CSIDriver object. Ignored + // otherwise. 
+ FSGroupPolicy *storagev1.FSGroupPolicy } diff --git a/test/e2e/storage/utils/utils.go b/test/e2e/storage/utils/utils.go index cd0dfdb00ea..340be22c4c5 100644 --- a/test/e2e/storage/utils/utils.go +++ b/test/e2e/storage/utils/utils.go @@ -94,6 +94,17 @@ func VerifyExecInPodSucceed(f *framework.Framework, pod *v1.Pod, shExec string) } } +// VerifyFSGroupInPod verifies that the passed in filePath contains the expectedFSGroup +func VerifyFSGroupInPod(f *framework.Framework, filePath, expectedFSGroup string, pod *v1.Pod) { + cmd := fmt.Sprintf("ls -l %s", filePath) + stdout, stderr, err := PodExec(f, pod, cmd) + framework.ExpectNoError(err) + framework.Logf("pod %s/%s exec for cmd %s, stdout: %s, stderr: %s", pod.Namespace, pod.Name, cmd, stdout, stderr) + fsGroupResult := strings.Fields(stdout)[3] + framework.ExpectEqual(expectedFSGroup, fsGroupResult, + "Expected fsGroup of %s, got %s", expectedFSGroup, fsGroupResult) +} + // VerifyExecInPodFail verifies shell cmd in target pod fail with certain exit code func VerifyExecInPodFail(f *framework.Framework, pod *v1.Pod, shExec string, exitCode int) { stdout, stderr, err := PodExec(f, pod, shExec) diff --git a/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml b/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml index a5f3c9a7da4..81b2a22dcd4 100644 --- a/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml +++ b/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml @@ -48,10 +48,10 @@ spec: - mountPath: /registration name: registration-dir - name: mock - image: k8s.gcr.io/sig-storage/mock-driver:v3.1.0 + image: k8s.gcr.io/sig-storage/mock-driver:v4.0.2 args: - "--name=mock.storage.k8s.io" - - "--permissive-target-path" # because of https://github.com/kubernetes/kubernetes/issues/75535 + - "-v=3" # enabled the gRPC call logging env: - name: CSI_ENDPOINT value: /csi/csi.sock