Mirror of https://github.com/k3s-io/kubernetes.git
Commit 4d2d063635: Included e2e test for CSIDriver FSGroupPolicy
Parent: 01f70d69b7
@@ -59,6 +59,7 @@ func TestCSI_VolumeAll(t *testing.T) {
        podFunc                         func() *api.Pod
        isInline                        bool
        shouldFail                      bool
        disableFSGroupPolicyFeatureGate bool
        driverSpec                      *storage.CSIDriverSpec
    }{
        {
@@ -92,6 +93,25 @@ func TestCSI_VolumeAll(t *testing.T) {
                FSGroupPolicy: &defaultFSGroupPolicy,
            },
        },
        {
            name:     "PersistentVolume with driver info and FSGroup disabled",
            specName: "pv2",
            driver:   "simple-driver",
            volName:  "vol2",
            specFunc: func(specName, driver, volName string) *volume.Spec {
                return volume.NewSpecFromPersistentVolume(makeTestPV(specName, 20, driver, volName), false)
            },
            podFunc: func() *api.Pod {
                podUID := types.UID(fmt.Sprintf("%08X", rand.Uint64()))
                return &api.Pod{ObjectMeta: meta.ObjectMeta{UID: podUID, Namespace: testns}}
            },
            disableFSGroupPolicyFeatureGate: true,
            driverSpec: &storage.CSIDriverSpec{
                // Required for the driver to be accepted for the persistent volume.
                VolumeLifecycleModes: []storage.VolumeLifecycleMode{storage.VolumeLifecyclePersistent},
                FSGroupPolicy:        &defaultFSGroupPolicy,
            },
        },
        {
            name:     "PersistentVolume with wrong mode in driver info",
            specName: "pv2",
@@ -227,6 +247,8 @@ func TestCSI_VolumeAll(t *testing.T) {

    for _, test := range tests {
        t.Run(test.name, func(t *testing.T) {
            defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIInlineVolume, !test.disableFSGroupPolicyFeatureGate)()

            tmpDir, err := utiltesting.MkTmpdir("csi-test")
            if err != nil {
                t.Fatalf("can't create temp dir: %v", err)
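The new disableFSGroupPolicyFeatureGate field drives a per-case feature-gate toggle: featuregatetesting.SetFeatureGateDuringTest flips the gate and returns a restore function that is immediately deferred, so each sub-test runs with its own gate value and restores the previous one on exit. The sketch below shows the same defer-restore pattern in a self-contained form; the gate map, gate name, and helper are illustrative stand-ins, not the real utilfeature registry.

package example

import "testing"

// featureGates is a stand-in for a process-wide feature-gate registry
// (hypothetical; the real test goes through utilfeature.DefaultFeatureGate).
var featureGates = map[string]bool{
	"CSIVolumeFSGroupPolicy": true,
}

// setGateDuringTest flips a gate for the duration of one sub-test and
// returns a function that restores the previous value, mirroring the
// defer-style helper used in the diff above.
func setGateDuringTest(t *testing.T, name string, enabled bool) func() {
	t.Helper()
	prev := featureGates[name]
	featureGates[name] = enabled
	return func() { featureGates[name] = prev }
}

func TestFSGroupPolicyGate(t *testing.T) {
	tests := []struct {
		name        string
		disableGate bool
	}{
		{name: "gate enabled", disableGate: false},
		{name: "gate disabled", disableGate: true},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			// Restore the gate when this sub-test finishes.
			defer setGateDuringTest(t, "CSIVolumeFSGroupPolicy", !test.disableGate)()

			if featureGates["CSIVolumeFSGroupPolicy"] == test.disableGate {
				t.Fatalf("gate not toggled as expected")
			}
		})
	}
}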
@@ -22,6 +22,7 @@ import (
    "encoding/json"
    "errors"
    "fmt"
    "math/rand"
    "strconv"
    "strings"
    "time"
@@ -49,6 +50,7 @@ import (
    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
    e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
    e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
    "k8s.io/kubernetes/test/e2e/storage/drivers"
    "k8s.io/kubernetes/test/e2e/storage/testpatterns"
    "k8s.io/kubernetes/test/e2e/storage/testsuites"
@@ -114,6 +116,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
        javascriptHooks   map[string]string
        tokenRequests     []storagev1.TokenRequest
        requiresRepublish *bool
        fsGroupPolicy     *storagev1.FSGroupPolicy
    }

    type mockDriverSetup struct {
@@ -155,6 +158,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
            JavascriptHooks:   tp.javascriptHooks,
            TokenRequests:     tp.tokenRequests,
            RequiresRepublish: tp.requiresRepublish,
            FSGroupPolicy:     tp.fsGroupPolicy,
        }

        // this just disables resizing on the driver, keeping resizing on the SC enabled.
@@ -229,6 +233,39 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
        return pod, err
    }

    createPodWithFSGroup := func(fsGroup *int64) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
        ginkgo.By("Creating pod with fsGroup")
        nodeSelection := m.config.ClientNodeSelection
        var sc *storagev1.StorageClass
        if dDriver, ok := m.driver.(testsuites.DynamicPVTestDriver); ok {
            sc = dDriver.GetDynamicProvisionStorageClass(m.config, "")
        }
        scTest := testsuites.StorageClassTest{
            Name:                 m.driver.GetDriverInfo().Name,
            Provisioner:          sc.Provisioner,
            Parameters:           sc.Parameters,
            ClaimSize:            "1Gi",
            ExpectedSize:         "1Gi",
            DelayBinding:         m.tp.lateBinding,
            AllowVolumeExpansion: m.tp.enableResizing,
        }

        class, claim, pod := startBusyBoxPod(f.ClientSet, scTest, nodeSelection, m.tp.scName, f.Namespace.Name, fsGroup)

        if class != nil {
            m.sc[class.Name] = class
        }
        if claim != nil {
            m.pvcs = append(m.pvcs, claim)
        }

        if pod != nil {
            m.pods = append(m.pods, pod)
        }

        return class, claim, pod
    }

    cleanup := func() {
        cs := f.ClientSet
        var errs []error
@@ -1366,6 +1403,86 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
            ginkgo.By("Checking CSI driver logs")
            err = checkPodLogs(m.cs, m.config.DriverNamespace.Name, driverPodName, driverContainerName, pod, false, false, false, test.deployCSIDriverObject && csiServiceAccountTokenEnabled, numNodePublishVolume)
            framework.ExpectNoError(err)

    // These tests *only* work on a cluster which has the CSIVolumeFSGroupPolicy feature enabled.
    ginkgo.Context("CSI FSGroupPolicy [LinuxOnly]", func() {
        tests := []struct {
            name          string
            fsGroupPolicy storagev1.FSGroupPolicy
            modified      bool
        }{
            {
                name:          "should modify fsGroup if fsGroupPolicy=default",
                fsGroupPolicy: storagev1.ReadWriteOnceWithFSTypeFSGroupPolicy,
                modified:      true,
            },
            {
                name:          "should modify fsGroup if fsGroupPolicy=File",
                fsGroupPolicy: storagev1.FileFSGroupPolicy,
                modified:      true,
            },
            {
                name:          "should not modify fsGroup if fsGroupPolicy=None",
                fsGroupPolicy: storagev1.NoneFSGroupPolicy,
                modified:      false,
            },
        }
        for _, t := range tests {
            test := t
            ginkgo.It(test.name, func() {
                if framework.NodeOSDistroIs("windows") {
                    e2eskipper.Skipf("FSGroupPolicy is only applied on linux nodes -- skipping")
                }
                init(testParameters{
                    disableAttach:  true,
                    registerDriver: true,
                    fsGroupPolicy:  &test.fsGroupPolicy,
                })
                defer cleanup()

                // kube-scheduler may need some time before it gets the CSIDriver object.
                // Without it, scheduling doesn't run as expected by the test.
                syncDelay := 5 * time.Second
                time.Sleep(syncDelay)

                fsGroupVal := int64(rand.Int63n(20000) + 1024)
                fsGroup := &fsGroupVal

                _, _, pod := createPodWithFSGroup(fsGroup) /* persistent volume */

                mountPath := pod.Spec.Containers[0].VolumeMounts[0].MountPath
                dirName := mountPath + "/" + f.UniqueName
                fileName := dirName + "/" + f.UniqueName

                err := e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
                framework.ExpectNoError(err, "failed to start pod")

                // Create the subdirectory to ensure that fsGroup propagates
                createDirectory := fmt.Sprintf("mkdir %s", dirName)
                _, _, err = utils.PodExec(f, pod, createDirectory)
                framework.ExpectNoError(err, "failed: creating the directory: %s", err)

                // Inject the contents onto the mount
                createFile := fmt.Sprintf("echo '%s' > '%s'; sync", "filecontents", fileName)
                _, _, err = utils.PodExec(f, pod, createFile)
                framework.ExpectNoError(err, "failed: writing the contents: %s", err)

                // Delete the created file. This step is mandatory, as the mock driver
                // won't clean up the contents automatically.
                defer func() {
                    delete := fmt.Sprintf("rm -fr %s", dirName)
                    _, _, err = utils.PodExec(f, pod, delete)
                    framework.ExpectNoError(err, "failed: deleting the directory: %s", err)
                }()

                // Ensure that the fsGroup matches what we expect
                if test.modified {
                    utils.VerifyFSGroupInPod(f, fileName, strconv.FormatInt(*fsGroup, 10), pod)
                } else {
                    utils.VerifyFSGroupInPod(f, fileName, "root", pod)
                }

                // The created resources will be removed by the cleanup() function,
                // so no need to delete anything here.
            })
        }
    })
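The assertion at the end of each case is about group ownership of files created on the mounted volume: kubelet applies the pod's fsGroup only when the CSIDriver's FSGroupPolicy permits it. Outside a pod, the same check can be made directly against the filesystem. The following self-contained, Linux-only sketch mirrors the create-then-verify flow the test drives through PodExec and VerifyFSGroupInPod; paths and names are illustrative, and the expected group is simply the caller's own GID since no fsGroup is in play here.

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"syscall"
)

// expectGID checks that path is owned by the given group ID, the local
// analogue of what the e2e test asserts inside the pod via `ls -l`.
func expectGID(path string, want uint32) error {
	info, err := os.Stat(path)
	if err != nil {
		return err
	}
	st, ok := info.Sys().(*syscall.Stat_t)
	if !ok {
		return fmt.Errorf("no unix stat available for %s", path)
	}
	if st.Gid != want {
		return fmt.Errorf("expected gid %d on %s, got %d", want, path, st.Gid)
	}
	return nil
}

func main() {
	// Create a directory and a file, the way the test creates
	// <mountPath>/<unique>/<unique>, then verify group ownership.
	dir, err := os.MkdirTemp("", "fsgroup-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	file := filepath.Join(dir, "data")
	if err := os.WriteFile(file, []byte("filecontents\n"), 0o644); err != nil {
		panic(err)
	}
	// Outside a pod with fsGroup, the file typically belongs to our own group.
	if err := expectGID(file, uint32(os.Getgid())); err != nil {
		panic(err)
	}
	fmt.Println("group ownership verified for", file)
}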
@@ -1505,7 +1622,7 @@ func checkCSINodeForLimits(nodeName string, driverName string, cs clientset.Inte
    return attachLimit, nil
}

func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, scName, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
func createClaim(cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, scName, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim) {
    class := newStorageClass(t, ns, "")
    if scName != "" {
        class.Name = scName
@@ -1530,9 +1647,21 @@ func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node e
        _, err = e2epv.WaitForPVClaimBoundPhase(cs, pvcClaims, framework.ClaimProvisionTimeout)
        framework.ExpectNoError(err, "Failed waiting for PVC to be bound: %v", err)
    }
    return class, claim
}

func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, scName, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
    class, claim := createClaim(cs, t, node, scName, ns)

    pod, err := startPausePodWithClaim(cs, claim, node, ns)
    framework.ExpectNoError(err, "Failed to create pod: %v", err)
    framework.ExpectNoError(err, "Failed to create pause pod: %v", err)
    return class, claim, pod
}

func startBusyBoxPod(cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, scName, ns string, fsGroup *int64) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
    class, claim := createClaim(cs, t, node, scName, ns)
    pod, err := startBusyBoxPodWithClaim(cs, claim, node, ns, fsGroup)
    framework.ExpectNoError(err, "Failed to create busybox pod: %v", err)
    return class, claim, pod
}
@@ -1557,6 +1686,17 @@ func startPausePodWithClaim(cs clientset.Interface, pvc *v1.PersistentVolumeClai
        node, ns)
}

func startBusyBoxPodWithClaim(cs clientset.Interface, pvc *v1.PersistentVolumeClaim, node e2epod.NodeSelection, ns string, fsGroup *int64) (*v1.Pod, error) {
    return startBusyBoxPodWithVolumeSource(cs,
        v1.VolumeSource{
            PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
                ClaimName: pvc.Name,
                ReadOnly:  false,
            },
        },
        node, ns, fsGroup)
}

func startPausePodWithInlineVolume(cs clientset.Interface, inlineVolume *v1.CSIVolumeSource, node e2epod.NodeSelection, ns string) (*v1.Pod, error) {
    return startPausePodWithVolumeSource(cs,
        v1.VolumeSource{
@@ -1596,6 +1736,41 @@ func startPausePodWithVolumeSource(cs clientset.Interface, volumeSource v1.Volum
    return cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
}

func startBusyBoxPodWithVolumeSource(cs clientset.Interface, volumeSource v1.VolumeSource, node e2epod.NodeSelection, ns string, fsGroup *int64) (*v1.Pod, error) {
    pod := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            GenerateName: "pvc-volume-tester-",
        },
        Spec: v1.PodSpec{
            Containers: []v1.Container{
                {
                    Name:  "volume-tester",
                    Image: framework.BusyBoxImage,
                    VolumeMounts: []v1.VolumeMount{
                        {
                            Name:      "my-volume",
                            MountPath: "/mnt/test",
                        },
                    },
                    Command: e2evolume.GenerateScriptCmd("while true ; do sleep 2; done"),
                },
            },
            SecurityContext: &v1.PodSecurityContext{
                FSGroup: fsGroup,
            },
            RestartPolicy: v1.RestartPolicyNever,
            Volumes: []v1.Volume{
                {
                    Name:         "my-volume",
                    VolumeSource: volumeSource,
                },
            },
        },
    }
    e2epod.SetNodeSelection(&pod.Spec, node)
    return cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
}

// Dummy structure that parses just volume_attributes and error code out of logged CSI call
type mockCSICall struct {
    json string // full log entry
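For reference, the only part of the busybox pod that influences volume ownership is SecurityContext.FSGroup. A minimal sketch, assuming only the standard k8s.io/api and k8s.io/apimachinery modules, that builds the same kind of spec and prints it as JSON; the image, claim name, and GID are illustrative:

package main

import (
	"encoding/json"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	fsGroup := int64(14051)
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{GenerateName: "fsgroup-demo-"},
		Spec: v1.PodSpec{
			// kubelet changes group ownership/permissions of the volume to this GID
			// on mount, but only if the CSIDriver's fsGroupPolicy allows it.
			SecurityContext: &v1.PodSecurityContext{FSGroup: &fsGroup},
			RestartPolicy:   v1.RestartPolicyNever,
			Containers: []v1.Container{{
				Name:    "tester",
				Image:   "busybox",
				Command: []string{"sh", "-c", "sleep 3600"},
				VolumeMounts: []v1.VolumeMount{{
					Name:      "data",
					MountPath: "/mnt/test",
				}},
			}},
			Volumes: []v1.Volume{{
				Name: "data",
				VolumeSource: v1.VolumeSource{
					PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
						ClaimName: "my-claim",
					},
				},
			}},
		},
	}
	out, _ := json.MarshalIndent(pod, "", "  ")
	fmt.Println(string(out))
}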
@@ -255,6 +255,7 @@ type mockCSIDriver struct {
    javascriptHooks   map[string]string
    tokenRequests     []storagev1.TokenRequest
    requiresRepublish *bool
    fsGroupPolicy     *storagev1.FSGroupPolicy
}

// CSIMockDriverOpts defines options used for csi driver
@@ -271,6 +272,7 @@ type CSIMockDriverOpts struct {
    JavascriptHooks   map[string]string
    TokenRequests     []storagev1.TokenRequest
    RequiresRepublish *bool
    FSGroupPolicy     *storagev1.FSGroupPolicy
}

var _ testsuites.TestDriver = &mockCSIDriver{}
@@ -330,6 +332,7 @@ func InitMockCSIDriver(driverOpts CSIMockDriverOpts) testsuites.TestDriver {
        javascriptHooks:   driverOpts.JavascriptHooks,
        tokenRequests:     driverOpts.TokenRequests,
        requiresRepublish: driverOpts.RequiresRepublish,
        fsGroupPolicy:     driverOpts.FSGroupPolicy,
    }
}
@@ -433,6 +436,7 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTest
        },
        TokenRequests:     m.tokenRequests,
        RequiresRepublish: m.requiresRepublish,
        FSGroupPolicy:     m.fsGroupPolicy,
    }
    cleanup, err := utils.CreateFromManifests(f, driverNamespace, func(item interface{}) error {
        return utils.PatchCSIDeployment(f, o, item)
@@ -138,6 +138,8 @@ func PatchCSIDeployment(f *framework.Framework, o PatchCSIOptions, object interf
        }
        if o.RequiresRepublish != nil {
            object.Spec.RequiresRepublish = o.RequiresRepublish
        }
        if o.FSGroupPolicy != nil {
            object.Spec.FSGroupPolicy = o.FSGroupPolicy
        }

@@ -194,4 +196,8 @@ type PatchCSIOptions struct {
    // field *if* the driver deploys a CSIDriver object. Ignored
    // otherwise.
    RequiresRepublish *bool
    // If not nil, the value to use for the CSIDriver.Spec.FSGroupPolicy
    // field *if* the driver deploys a CSIDriver object. Ignored
    // otherwise.
    FSGroupPolicy *storagev1.FSGroupPolicy
}
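PatchCSIOptions.FSGroupPolicy ends up on the CSIDriver object that the mock driver deploys. For context, a minimal sketch of such an object built directly from the k8s.io/api/storage/v1 types and printed as JSON; the driver name is illustrative, and the policy value is one of ReadWriteOnceWithFSType, File, or None:

package main

import (
	"encoding/json"
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// "File" means kubelet always changes ownership/permissions to match fsGroup.
	fsGroupPolicy := storagev1.FileFSGroupPolicy
	attachRequired := false

	driver := &storagev1.CSIDriver{
		TypeMeta:   metav1.TypeMeta{APIVersion: "storage.k8s.io/v1", Kind: "CSIDriver"},
		ObjectMeta: metav1.ObjectMeta{Name: "example.csi.driver.io"}, // illustrative name
		Spec: storagev1.CSIDriverSpec{
			AttachRequired: &attachRequired,
			FSGroupPolicy:  &fsGroupPolicy,
		},
	}

	out, _ := json.MarshalIndent(driver, "", "  ")
	fmt.Println(string(out))
}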
@@ -94,6 +94,17 @@ func VerifyExecInPodSucceed(f *framework.Framework, pod *v1.Pod, shExec string)
    }
}

// VerifyFSGroupInPod verifies that the passed-in filePath contains the expectedFSGroup
func VerifyFSGroupInPod(f *framework.Framework, filePath, expectedFSGroup string, pod *v1.Pod) {
    cmd := fmt.Sprintf("ls -l %s", filePath)
    stdout, stderr, err := PodExec(f, pod, cmd)
    framework.ExpectNoError(err)
    framework.Logf("pod %s/%s exec for cmd %s, stdout: %s, stderr: %s", pod.Namespace, pod.Name, cmd, stdout, stderr)
    fsGroupResult := strings.Fields(stdout)[3]
    framework.ExpectEqual(expectedFSGroup, fsGroupResult,
        "Expected fsGroup of %s, got %s", expectedFSGroup, fsGroupResult)
}

// VerifyExecInPodFail verifies that a shell cmd in the target pod fails with a certain exit code
func VerifyExecInPodFail(f *framework.Framework, pod *v1.Pod, shExec string, exitCode int) {
    stdout, stderr, err := PodExec(f, pod, shExec)
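VerifyFSGroupInPod relies on field index 3 of `ls -l` output being the group column. A self-contained sketch of that parsing step, with a bounds check added and a sample line standing in for the pod's stdout:

package main

import (
	"fmt"
	"strings"
)

// groupOf extracts the group column from a single `ls -l` line, the same
// field (index 3) that VerifyFSGroupInPod reads from the pod's stdout.
func groupOf(lsLine string) (string, error) {
	fields := strings.Fields(lsLine)
	if len(fields) < 4 {
		return "", fmt.Errorf("unexpected ls -l output: %q", lsLine)
	}
	return fields[3], nil
}

func main() {
	// Illustrative line; inside the test the group is the numeric fsGroup applied by kubelet.
	line := "-rw-r--r-- 1 root 14051 13 Jan  1 00:00 /mnt/test/e2e-unique/e2e-unique"
	group, err := groupOf(line)
	if err != nil {
		panic(err)
	}
	fmt.Println("group:", group) // prints "14051"
}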