Add mock CSI driver test for SELinux mount

Jan Safranek 2022-11-07 17:45:22 +01:00
parent 802979c295
commit d6c36736d5
5 changed files with 204 additions and 5 deletions
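The new test registers the mock CSI driver with SELinuxMount enabled and checks that, for a ReadWriteOncePod volume used by a pod that sets pod-level SELinuxOptions, the pod's label shows up as a context= option in the mount flags of NodeStageVolume and NodePublishVolume. A minimal, illustrative sketch of that label-to-option translation (not part of the commit; the literal values are the ones the test uses):

// Hypothetical sketch: format an SELinux label the way the test expects it
// to appear in mount flags (compare seLinuxMountOption in the diff below).
package main

import "fmt"

func main() {
	user, role, fileType, level := "system_u", "object_r", "container_file_t", "s0:c0,c1"
	opt := fmt.Sprintf("context=%q", user+":"+role+":"+fileType+":"+level)
	fmt.Println(opt) // context="system_u:object_r:container_file_t:s0:c0,c1"
}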


@@ -112,6 +112,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
tokenRequests []storagev1.TokenRequest
requiresRepublish *bool
fsGroupPolicy *storagev1.FSGroupPolicy
enableSELinuxMount *bool
}
type mockDriverSetup struct {
@@ -155,6 +156,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
TokenRequests: tp.tokenRequests,
RequiresRepublish: tp.requiresRepublish,
FSGroupPolicy: tp.fsGroupPolicy,
EnableSELinuxMount: tp.enableSELinuxMount,
}
// At the moment, only tests which need hooks are
@@ -270,7 +272,6 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
DelayBinding: m.tp.lateBinding,
AllowVolumeExpansion: m.tp.enableResizing,
}
class, claim, pod := startBusyBoxPod(f.ClientSet, scTest, nodeSelection, m.tp.scName, f.Namespace.Name, fsGroup)
if class != nil {
@@ -287,6 +288,38 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
return class, claim, pod
}
createPodWithSELinux := func(accessModes []v1.PersistentVolumeAccessMode, mountOptions []string, seLinuxOpts *v1.SELinuxOptions) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
ginkgo.By("Creating pod with SELinux context")
nodeSelection := m.config.ClientNodeSelection
sc := m.driver.GetDynamicProvisionStorageClass(m.config, "")
scTest := testsuites.StorageClassTest{
Name: m.driver.GetDriverInfo().Name,
Provisioner: sc.Provisioner,
Parameters: sc.Parameters,
ClaimSize: "1Gi",
ExpectedSize: "1Gi",
DelayBinding: m.tp.lateBinding,
AllowVolumeExpansion: m.tp.enableResizing,
MountOptions: mountOptions,
}
class, claim := createClaim(f.ClientSet, scTest, nodeSelection, m.tp.scName, f.Namespace.Name, accessModes)
pod, err := startPausePodWithSELinuxOptions(f.ClientSet, claim, nodeSelection, f.Namespace.Name, seLinuxOpts)
framework.ExpectNoError(err, "Failed to create pause pod with SELinux context %s: %v", seLinuxOpts, err)
if class != nil {
m.sc[class.Name] = class
}
if claim != nil {
m.pvcs = append(m.pvcs, claim)
}
if pod != nil {
m.pods = append(m.pods, pod)
}
return class, claim, pod
}
cleanup := func() {
cs := f.ClientSet
var errs []error
@@ -1978,6 +2011,94 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
})
}
})
ginkgo.Context("SELinuxMount [LinuxOnly][Feature:SELinuxMountReadWriteOncePod]", func() {
// Make sure all options are set so system specific defaults are not used.
seLinuxOpts := v1.SELinuxOptions{
User: "system_u",
Role: "object_r",
Type: "container_file_t",
Level: "s0:c0,c1",
}
seLinuxMountOption := "context=\"system_u:object_r:container_file_t:s0:c0,c1\""
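// This is the option the test expects kubelet to put into NodeStage/NodePublish mount_flags when the feature applies.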
tests := []struct {
name string
seLinuxEnabled bool
seLinuxSetInPod bool
mountOptions []string
volumeMode v1.PersistentVolumeAccessMode
expectedMountOptions []string
}{
{
name: "should pass SELinux mount option for RWOP volume and Pod with SELinux context set",
seLinuxEnabled: true,
seLinuxSetInPod: true,
volumeMode: v1.ReadWriteOncePod,
expectedMountOptions: []string{seLinuxMountOption},
},
{
name: "should add SELinux mount option to existing mount options",
seLinuxEnabled: true,
seLinuxSetInPod: true,
mountOptions: []string{"noexec", "noatime"},
volumeMode: v1.ReadWriteOncePod,
expectedMountOptions: []string{"noexec", "noatime", seLinuxMountOption},
},
{
name: "should not pass SELinux mount option for RWO volume",
seLinuxEnabled: true,
seLinuxSetInPod: true,
volumeMode: v1.ReadWriteOnce,
expectedMountOptions: nil,
},
{
name: "should not pass SELinux mount option for Pod without SELinux context",
seLinuxEnabled: true,
seLinuxSetInPod: false,
volumeMode: v1.ReadWriteOncePod,
expectedMountOptions: nil,
},
{
name: "should not pass SELinux mount option for CSI driver that does not support SELinux mount",
seLinuxEnabled: false,
seLinuxSetInPod: true,
volumeMode: v1.ReadWriteOncePod,
expectedMountOptions: nil,
},
}
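// Only the full combination above (driver supports SELinux mount, pod sets an SELinux context, volume is ReadWriteOncePod) should yield the context mount option; every other row must leave the mount options unchanged.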
for _, t := range tests {
t := t
ginkgo.It(t.name, func() {
if framework.NodeOSDistroIs("windows") {
e2eskipper.Skipf("SELinuxMount is only applied on linux nodes -- skipping")
}
var nodeStageMountOpts, nodePublishMountOpts []string
init(testParameters{
disableAttach: true,
registerDriver: true,
enableSELinuxMount: &t.seLinuxEnabled,
hooks: createSELinuxMountPreHook(&nodeStageMountOpts, &nodePublishMountOpts),
})
defer cleanup()
accessModes := []v1.PersistentVolumeAccessMode{t.volumeMode}
var podSELinuxOpts *v1.SELinuxOptions
if t.seLinuxSetInPod {
// Make sure all options are set so system specific defaults are not used.
podSELinuxOpts = &seLinuxOpts
}
_, _, pod := createPodWithSELinux(accessModes, t.mountOptions, podSELinuxOpts)
err := e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
framework.ExpectNoError(err, "failed to start pod")
framework.ExpectEqual(nodeStageMountOpts, t.expectedMountOptions, "Expect NodeStageVolumeRequest.VolumeCapability.MountVolume.MountFlags to equal %q; got: %q", t.expectedMountOptions, nodeStageMountOpts)
framework.ExpectEqual(nodePublishMountOpts, t.expectedMountOptions, "Expect NodePublishVolumeRequest.VolumeCapability.MountVolume.MountFlags to equal %q; got: %q", t.expectedMountOptions, nodePublishMountOpts)
})
}
})
})
func deleteSnapshot(cs clientset.Interface, config *storageframework.PerTestConfig, snapshot *unstructured.Unstructured) {
@@ -2122,12 +2243,13 @@ func createSC(cs clientset.Interface, t testsuites.StorageClassTest, scName, ns
return class
}
func createClaim(cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, scName, ns string, accessModes []v1.PersistentVolumeAccessMode) (*storagev1.StorageClass, *v1.PersistentVolumeClaim) {
class := createSC(cs, t, scName, ns)
claim := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: t.ClaimSize,
StorageClassName: &(class.Name),
VolumeMode: &t.VolumeMode,
AccessModes: accessModes,
}, ns)
claim, err := cs.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), claim, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create claim: %v", err)
@@ -2141,7 +2263,7 @@ func createClaim(cs clientset.Interface, t testsuites.StorageClassTest, node e2e
}
func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, scName, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
class, claim := createClaim(cs, t, node, scName, ns, nil)
pod, err := startPausePodWithClaim(cs, claim, node, ns)
framework.ExpectNoError(err, "Failed to create pause pod: %v", err)
@@ -2149,7 +2271,7 @@ func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node e
}
func startBusyBoxPod(cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, scName, ns string, fsGroup *int64) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
class, claim := createClaim(cs, t, node, scName, ns, nil)
pod, err := startBusyBoxPodWithClaim(cs, claim, node, ns, fsGroup)
framework.ExpectNoError(err, "Failed to create busybox pod: %v", err)
return class, claim, pod
@@ -2276,6 +2398,45 @@ func startBusyBoxPodWithVolumeSource(cs clientset.Interface, volumeSource v1.Vol
return cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
}
func startPausePodWithSELinuxOptions(cs clientset.Interface, pvc *v1.PersistentVolumeClaim, node e2epod.NodeSelection, ns string, seLinuxOpts *v1.SELinuxOptions) (*v1.Pod, error) {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-volume-tester-",
},
Spec: v1.PodSpec{
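// Pod-level SELinux options; the test asserts that kubelet derives the volume's context= mount flag from these.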
SecurityContext: &v1.PodSecurityContext{
SELinuxOptions: seLinuxOpts,
},
Containers: []v1.Container{
{
Name: "volume-tester",
Image: imageutils.GetE2EImage(imageutils.Pause),
VolumeMounts: []v1.VolumeMount{
{
Name: "my-volume",
MountPath: "/mnt/test",
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: "my-volume",
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: pvc.Name,
ReadOnly: false,
},
},
},
},
},
}
e2epod.SetNodeSelection(&pod.Spec, node)
return cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
}
// checkPodLogs tests that NodePublish was called with expected volume_context and (for ephemeral inline volumes)
// has the matching NodeUnpublish
func checkPodLogs(getCalls func() ([]drivers.MockCSICall, error), pod *v1.Pod, expectPodInfo, ephemeralVolume, csiInlineVolumesEnabled, csiServiceAccountTokenEnabled bool, expectedNumNodePublish int) error {
@@ -2501,6 +2662,30 @@ func createFSGroupRequestPreHook(nodeStageFsGroup, nodePublishFsGroup *string) *
}
}
// createSELinuxMountPreHook creates a hook that records the mountOptions passed in
// through NodeStageVolume and NodePublishVolume calls.
func createSELinuxMountPreHook(nodeStageMountOpts, nodePublishMountOpts *[]string) *drivers.Hooks {
return &drivers.Hooks{
Pre: func(ctx context.Context, fullMethod string, request interface{}) (reply interface{}, err error) {
nodeStageRequest, ok := request.(*csipbv1.NodeStageVolumeRequest)
if ok {
mountVolume := nodeStageRequest.GetVolumeCapability().GetMount()
if mountVolume != nil {
*nodeStageMountOpts = mountVolume.MountFlags
}
}
nodePublishRequest, ok := request.(*csipbv1.NodePublishVolumeRequest)
if ok {
mountVolume := nodePublishRequest.GetVolumeCapability().GetMount()
if mountVolume != nil {
*nodePublishMountOpts = mountVolume.MountFlags
}
}
return nil, nil
},
}
}
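For orientation, a hedged sketch of how this pre-hook records options when handed a stage request, assuming the csipbv1 bindings this file already imports and the drivers.Hooks.Pre signature shown above (illustrative only, not part of the commit):

// Hypothetical usage of createSELinuxMountPreHook.
var nodeStageMountOpts, nodePublishMountOpts []string
hooks := createSELinuxMountPreHook(&nodeStageMountOpts, &nodePublishMountOpts)
req := &csipbv1.NodeStageVolumeRequest{
	VolumeCapability: &csipbv1.VolumeCapability{
		AccessType: &csipbv1.VolumeCapability_Mount{
			Mount: &csipbv1.VolumeCapability_MountVolume{
				MountFlags: []string{`context="system_u:object_r:container_file_t:s0:c0,c1"`},
			},
		},
	},
}
_, _ = hooks.Pre(context.TODO(), "/csi.v1.Node/NodeStageVolume", req)
// nodeStageMountOpts now holds the MountFlags above; a NodePublishVolumeRequest
// would be recorded into nodePublishMountOpts the same way.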
type snapshotMetricsTestConfig struct {
// expected values
metricName string


@@ -309,6 +309,7 @@ type mockCSIDriver struct {
embedded bool
calls MockCSICalls
embeddedCSIDriver *mockdriver.CSIDriver
enableSELinuxMount *bool
// Additional values set during PrepareTest
clientSet clientset.Interface
@@ -355,6 +356,7 @@ type CSIMockDriverOpts struct {
TokenRequests []storagev1.TokenRequest
RequiresRepublish *bool
FSGroupPolicy *storagev1.FSGroupPolicy
EnableSELinuxMount *bool
// Embedded defines whether the CSI mock driver runs
// inside the cluster (false, the default) or just a proxy
@@ -507,6 +509,7 @@ func InitMockCSIDriver(driverOpts CSIMockDriverOpts) MockCSITestDriver {
requiresRepublish: driverOpts.RequiresRepublish,
fsGroupPolicy: driverOpts.FSGroupPolicy,
enableVolumeMountGroup: driverOpts.EnableVolumeMountGroup,
enableSELinuxMount: driverOpts.EnableSELinuxMount,
embedded: driverOpts.Embedded,
hooks: driverOpts.Hooks,
}
@@ -657,6 +660,7 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) *storageframework.Pe
TokenRequests: m.tokenRequests,
RequiresRepublish: m.requiresRepublish,
FSGroupPolicy: m.fsGroupPolicy,
SELinuxMount: m.enableSELinuxMount,
}
cleanup, err := utils.CreateFromManifests(f, m.driverNamespace, func(item interface{}) error {
if err := utils.PatchCSIDeployment(config.Framework, o, item); err != nil {


@@ -66,6 +66,7 @@ type StorageClassTest struct {
VolumeMode v1.PersistentVolumeMode
AllowVolumeExpansion bool
NodeSelection e2epod.NodeSelection
MountOptions []string
}
type provisioningTestSuite struct {


@@ -152,6 +152,9 @@ func PatchCSIDeployment(f *e2eframework.Framework, o PatchCSIOptions, object int
if o.FSGroupPolicy != nil {
object.Spec.FSGroupPolicy = o.FSGroupPolicy
}
if o.SELinuxMount != nil {
object.Spec.SELinuxMount = o.SELinuxMount
}
}
return nil
@@ -211,4 +214,8 @@ type PatchCSIOptions struct {
// field *if* the driver deploys a CSIDriver object. Ignored
// otherwise.
FSGroupPolicy *storagev1.FSGroupPolicy
// If not nil, the value to use for the CSIDriver.Spec.SELinuxMount
// field *if* the driver deploys a CSIDriver object. Ignored
// otherwise.
SELinuxMount *bool
}


@@ -808,7 +808,7 @@ func newStorageClass(t testsuites.StorageClassTest, ns string, prefix string) *s
}
}
sc := getStorageClass(pluginName, t.Parameters, &bindingMode, t.MountOptions, ns, prefix)
if t.AllowVolumeExpansion {
sc.AllowVolumeExpansion = &t.AllowVolumeExpansion
}
@@ -819,6 +819,7 @@ func getStorageClass(
provisioner string,
parameters map[string]string,
bindingMode *storagev1.VolumeBindingMode,
mountOptions []string,
ns string,
prefix string,
) *storagev1.StorageClass {
@@ -837,6 +838,7 @@ func getStorageClass(
Provisioner: provisioner,
Parameters: parameters,
VolumeBindingMode: bindingMode,
MountOptions: mountOptions,
}
}