Add multi-pod tests with SELinux mounts

Check that a volume is fully unmounted (Unstaged) before kubelet starts a
pod with the same volume, but with a different SELinux context.
Jan Safranek 2022-11-09 14:17:34 +01:00
parent 1edbb8cf1a
commit e1f62a9bc5
2 changed files with 180 additions and 47 deletions
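
For orientation, the context= mount option asserted by these tests is just the pod's SELinuxOptions joined into an SELinux label and quoted. A minimal, self-contained Go sketch of that mapping (not part of this commit; the formatSELinuxMountOption helper is hypothetical):

package main

import "fmt"

// formatSELinuxMountOption renders SELinux user, role, type and level into the
// quoted context= mount option the tests expect kubelet to pass to the CSI
// driver (compare seLinuxMountOption1 / seLinuxMountOption2 below).
func formatSELinuxMountOption(user, role, typ, level string) string {
	return fmt.Sprintf("context=%q", fmt.Sprintf("%s:%s:%s:%s", user, role, typ, level))
}

func main() {
	// Prints: context="system_u:object_r:container_file_t:s0:c0,c1"
	fmt.Println(formatSELinuxMountOption("system_u", "object_r", "container_file_t", "s0:c0,c1"))
}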

@@ -664,6 +664,11 @@ func startPausePodWithSELinuxOptions(cs clientset.Interface, pvc *v1.PersistentV
},
},
}
if node.Name != "" {
// Force schedule the pod to skip scheduler RWOP checks
framework.Logf("Forcing node name %s", node.Name)
pod.Spec.NodeName = node.Name
}
e2epod.SetNodeSelection(&pod.Spec, node)
return cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
}
@@ -821,11 +826,12 @@ func compareCSICalls(ctx context.Context, trackedCalls []string, expectedCallSeq
// createSELinuxMountPreHook creates a hook that records the mountOptions passed in
// through NodeStageVolume and NodePublishVolume calls and counts the NodeStage,
// NodeUnstage, NodePublish and NodeUnpublish calls it observes.
func createSELinuxMountPreHook(nodeStageMountOpts, nodePublishMountOpts *[]string) *drivers.Hooks {
func createSELinuxMountPreHook(nodeStageMountOpts, nodePublishMountOpts *[]string, stageCalls, unstageCalls, publishCalls, unpublishCalls *atomic.Int32) *drivers.Hooks {
return &drivers.Hooks{
Pre: func(ctx context.Context, fullMethod string, request interface{}) (reply interface{}, err error) {
nodeStageRequest, ok := request.(*csipbv1.NodeStageVolumeRequest)
if ok {
stageCalls.Add(1)
mountVolume := nodeStageRequest.GetVolumeCapability().GetMount()
if mountVolume != nil {
*nodeStageMountOpts = mountVolume.MountFlags
@@ -833,11 +839,20 @@ func createSELinuxMountPreHook(nodeStageMountOpts, nodePublishMountOpts *[]strin
}
nodePublishRequest, ok := request.(*csipbv1.NodePublishVolumeRequest)
if ok {
publishCalls.Add(1)
mountVolume := nodePublishRequest.GetVolumeCapability().GetMount()
if mountVolume != nil {
*nodePublishMountOpts = mountVolume.MountFlags
}
}
_, ok = request.(*csipbv1.NodeUnstageVolumeRequest)
if ok {
unstageCalls.Add(1)
}
_, ok = request.(*csipbv1.NodeUnpublishVolumeRequest)
if ok {
unpublishCalls.Add(1)
}
return nil, nil
},
}
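
As a standalone illustration of the call-counting pattern added above, here is a hedged sketch with a local Hooks stand-in (assumed to mirror the Pre signature of the mock driver's drivers.Hooks) and placeholder request types; a type switch is used instead of the repeated type assertions in the real hook:

package main

import (
	"context"
	"fmt"
	"sync/atomic"
)

// Hooks is a local stand-in so the sketch compiles on its own; the real hook
// type lives in the mock driver's drivers package.
type Hooks struct {
	Pre func(ctx context.Context, fullMethod string, request interface{}) (interface{}, error)
}

// Placeholder request types standing in for the csipbv1 protobuf messages.
type NodeStageVolumeRequest struct{}
type NodeUnstageVolumeRequest struct{}

// countingHook counts how many NodeStage / NodeUnstage calls the driver saw.
func countingHook(stageCalls, unstageCalls *atomic.Int32) *Hooks {
	return &Hooks{
		Pre: func(ctx context.Context, fullMethod string, request interface{}) (interface{}, error) {
			switch request.(type) {
			case *NodeStageVolumeRequest:
				stageCalls.Add(1)
			case *NodeUnstageVolumeRequest:
				unstageCalls.Add(1)
			}
			return nil, nil
		},
	}
}

func main() {
	var stageCalls, unstageCalls atomic.Int32
	h := countingHook(&stageCalls, &unstageCalls)
	// The test resets these counters with Store(0) between the first and the second pod.
	h.Pre(context.Background(), "/csi.v1.Node/NodeStageVolume", &NodeStageVolumeRequest{})
	fmt.Println("stage:", stageCalls.Load(), "unstage:", unstageCalls.Load())
}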

@@ -18,10 +18,17 @@ package csi_mock
import (
"context"
"fmt"
"sync/atomic"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/test/e2e/framework"
e2eevents "k8s.io/kubernetes/test/e2e/framework/events"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -35,57 +42,92 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount", func() {
ginkgo.Context("SELinuxMount [LinuxOnly][Feature:SELinux][Feature:SELinuxMountReadWriteOncePod]", func() {
// Make sure all options are set so system specific defaults are not used.
seLinuxOpts := v1.SELinuxOptions{
seLinuxOpts1 := v1.SELinuxOptions{
User: "system_u",
Role: "object_r",
Type: "container_file_t",
Level: "s0:c0,c1",
}
seLinuxMountOption := "context=\"system_u:object_r:container_file_t:s0:c0,c1\""
seLinuxMountOption1 := "context=\"system_u:object_r:container_file_t:s0:c0,c1\""
seLinuxOpts2 := v1.SELinuxOptions{
User: "system_u",
Role: "object_r",
Type: "container_file_t",
Level: "s0:c98,c99",
}
seLinuxMountOption2 := "context=\"system_u:object_r:container_file_t:s0:c98,c99\""
tests := []struct {
name string
seLinuxEnabled bool
seLinuxSetInPod bool
mountOptions []string
volumeMode v1.PersistentVolumeAccessMode
expectedMountOptions []string
name string
csiDriverSELinuxEnabled bool
firstPodSELinuxOpts *v1.SELinuxOptions
startSecondPod bool
secondPodSELinuxOpts *v1.SELinuxOptions
mountOptions []string
volumeMode v1.PersistentVolumeAccessMode
expectedFirstMountOptions []string
expectedSecondMountOptions []string
expectedUnstage bool
}{
// Start just a single pod and check its volume is mounted correctly
{
name: "should pass SELinux mount option for RWOP volume and Pod with SELinux context set",
seLinuxEnabled: true,
seLinuxSetInPod: true,
volumeMode: v1.ReadWriteOncePod,
expectedMountOptions: []string{seLinuxMountOption},
name: "should pass SELinux mount option for RWOP volume and Pod with SELinux context set",
csiDriverSELinuxEnabled: true,
firstPodSELinuxOpts: &seLinuxOpts1,
volumeMode: v1.ReadWriteOncePod,
expectedFirstMountOptions: []string{seLinuxMountOption1},
},
{
name: "should add SELinux mount option to existing mount options",
seLinuxEnabled: true,
seLinuxSetInPod: true,
mountOptions: []string{"noexec", "noatime"},
volumeMode: v1.ReadWriteOncePod,
expectedMountOptions: []string{"noexec", "noatime", seLinuxMountOption},
name: "should add SELinux mount option to existing mount options",
csiDriverSELinuxEnabled: true,
firstPodSELinuxOpts: &seLinuxOpts1,
mountOptions: []string{"noexec", "noatime"},
volumeMode: v1.ReadWriteOncePod,
expectedFirstMountOptions: []string{"noexec", "noatime", seLinuxMountOption1},
},
{
name: "should not pass SELinux mount option for RWO volume",
seLinuxEnabled: true,
seLinuxSetInPod: true,
volumeMode: v1.ReadWriteOnce,
expectedMountOptions: nil,
name: "should not pass SELinux mount option for RWO volume",
csiDriverSELinuxEnabled: true,
firstPodSELinuxOpts: &seLinuxOpts1,
volumeMode: v1.ReadWriteOnce,
expectedFirstMountOptions: nil,
},
{
name: "should not pass SELinux mount option for Pod without SELinux context",
seLinuxEnabled: true,
seLinuxSetInPod: false,
volumeMode: v1.ReadWriteOncePod,
expectedMountOptions: nil,
name: "should not pass SELinux mount option for Pod without SELinux context",
csiDriverSELinuxEnabled: true,
firstPodSELinuxOpts: nil,
volumeMode: v1.ReadWriteOncePod,
expectedFirstMountOptions: nil,
},
{
name: "should not pass SELinux mount option for CSI driver that does not support SELinux mount",
seLinuxEnabled: false,
seLinuxSetInPod: true,
volumeMode: v1.ReadWriteOncePod,
expectedMountOptions: nil,
name: "should not pass SELinux mount option for CSI driver that does not support SELinux mount",
csiDriverSELinuxEnabled: false,
firstPodSELinuxOpts: &seLinuxOpts1,
volumeMode: v1.ReadWriteOncePod,
expectedFirstMountOptions: nil,
},
// Start two pods in a sequence and check their volume is / is not unmounted in between
{
name: "should not unstage volume when starting a second pod with the same SELinux context",
csiDriverSELinuxEnabled: true,
firstPodSELinuxOpts: &seLinuxOpts1,
startSecondPod: true,
secondPodSELinuxOpts: &seLinuxOpts1,
volumeMode: v1.ReadWriteOncePod,
expectedFirstMountOptions: []string{seLinuxMountOption1},
expectedSecondMountOptions: []string{seLinuxMountOption1},
expectedUnstage: false,
},
{
name: "should unstage volume when starting a second pod with different SELinux context",
csiDriverSELinuxEnabled: true,
firstPodSELinuxOpts: &seLinuxOpts1,
startSecondPod: true,
secondPodSELinuxOpts: &seLinuxOpts2,
volumeMode: v1.ReadWriteOncePod,
expectedFirstMountOptions: []string{seLinuxMountOption1},
expectedSecondMountOptions: []string{seLinuxMountOption2},
expectedUnstage: true,
},
}
for _, t := range tests {
@@ -95,27 +137,103 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount", func() {
e2eskipper.Skipf("SELinuxMount is only applied on linux nodes -- skipping")
}
var nodeStageMountOpts, nodePublishMountOpts []string
var unstageCalls, stageCalls, unpublishCalls, publishCalls atomic.Int32
m.init(ctx, testParameters{
disableAttach: true,
registerDriver: true,
enableSELinuxMount: &t.seLinuxEnabled,
hooks: createSELinuxMountPreHook(&nodeStageMountOpts, &nodePublishMountOpts),
enableSELinuxMount: &t.csiDriverSELinuxEnabled,
hooks: createSELinuxMountPreHook(&nodeStageMountOpts, &nodePublishMountOpts, &stageCalls, &unstageCalls, &publishCalls, &unpublishCalls),
})
ginkgo.DeferCleanup(m.cleanup)
defer m.cleanup(ctx)
// Act
ginkgo.By("Starting the initial pod")
accessModes := []v1.PersistentVolumeAccessMode{t.volumeMode}
var podSELinuxOpts *v1.SELinuxOptions
if t.seLinuxSetInPod {
// Make sure all options are set so system specific defaults are not used.
podSELinuxOpts = &seLinuxOpts
_, claim, pod := m.createPodWithSELinux(ctx, accessModes, t.mountOptions, t.firstPodSELinuxOpts)
err := e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace)
framework.ExpectNoError(err, "failed to start the initial pod")
// Assert
ginkgo.By("Checking the initial pod mount options")
framework.ExpectEqual(nodeStageMountOpts, t.expectedFirstMountOptions, "Expect NodeStageVolumeRequest.VolumeCapability.MountVolume.MountFlags to equal %q; got: %q", t.expectedFirstMountOptions, nodeStageMountOpts)
framework.ExpectEqual(nodePublishMountOpts, t.expectedFirstMountOptions, "Expect NodePublishVolumeRequest.VolumeCapability.MountVolume.MountFlags to equal %q; got: %q", t.expectedFirstMountOptions, nodePublishMountOpts)
ginkgo.By("Checking the CSI driver calls for the initial pod")
gomega.Expect(unstageCalls.Load()).To(gomega.BeNumerically("==", 0), "NodeUnstage was unexpectedly called for the initial pod")
gomega.Expect(unpublishCalls.Load()).To(gomega.BeNumerically("==", 0), "NodeUnpublish was unexpectedly called for the initial pod")
gomega.Expect(stageCalls.Load()).To(gomega.BeNumerically(">", 0), "NodeStage was not called for the initial pod")
gomega.Expect(publishCalls.Load()).To(gomega.BeNumerically(">", 0), "NodePublish was not called for the initial pod")
if !t.startSecondPod {
return
}
_, _, pod := m.createPodWithSELinux(ctx, accessModes, t.mountOptions, podSELinuxOpts)
err := e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace)
framework.ExpectNoError(err, "failed to start pod")
// Arrange 2nd part of the test
ginkgo.By("Starting the second pod to check if a volume used by the initial pod is / is not unmounted based on SELinux context")
framework.ExpectEqual(nodeStageMountOpts, t.expectedMountOptions, "Expect NodeStageVolumeRequest.VolumeCapability.MountVolume. to equal %q; got: %q", t.expectedMountOptions, nodeStageMountOpts)
framework.ExpectEqual(nodePublishMountOpts, t.expectedMountOptions, "Expect NodePublishVolumeRequest.VolumeCapability.MountVolume.VolumeMountGroup to equal %q; got: %q", t.expectedMountOptions, nodeStageMountOpts)
// Skip the scheduler; it would block scheduling the second pod with a ReadWriteOncePod PV.
pod, err = m.cs.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get the initial pod")
nodeSelection := e2epod.NodeSelection{Name: pod.Spec.NodeName}
pod2, err := startPausePodWithSELinuxOptions(f.ClientSet, claim, nodeSelection, f.Namespace.Name, t.secondPodSELinuxOpts)
framework.ExpectNoError(err, "Failed to create second pod with SELinux context %s: %v", t.secondPodSELinuxOpts, err)
m.pods = append(m.pods, pod2)
// Delete the initial pod only after kubelet processes the second pod and adds its volumes to
// DesiredStateOfWorld.
// In this state, any volume NodeUnpublish / NodeUnstage must be caused by the SELinux context
// mismatch and not by a race where the second pod's volumes are not yet in DesiredStateOfWorld.
ginkgo.By("Waiting for the second pod to fail to start because of ReadWriteOncePod.")
eventSelector := fields.Set{
"involvedObject.kind": "Pod",
"involvedObject.name": pod2.Name,
"involvedObject.namespace": pod2.Namespace,
"reason": events.FailedMountVolume,
}.AsSelector().String()
var msg string
if t.expectedUnstage {
// This message is emitted before kubelet checks for ReadWriteOncePod
msg = "conflicting SELinux labels of volume"
} else {
msg = "volume uses the ReadWriteOncePod access mode and is already in use by another pod"
}
err = e2eevents.WaitTimeoutForEvent(ctx, m.cs, pod2.Namespace, eventSelector, msg, f.Timeouts.PodStart)
framework.ExpectNoError(err, "faile to wait for event in the second test pod:", msg)
// Reset the recorded mount options and counters so that only CSI driver calls between the first and the second pod are counted.
nodeStageMountOpts = nil
nodePublishMountOpts = nil
unstageCalls.Store(0)
unpublishCalls.Store(0)
stageCalls.Store(0)
publishCalls.Store(0)
// Act 2nd part of the test
ginkgo.By("Deleting the initial pod")
err = e2epod.DeletePodWithWait(ctx, m.cs, pod)
framework.ExpectNoError(err, "failed to delete the initial pod")
// Assert 2nd part of the test
ginkgo.By("Waiting for the second pod to start")
err = e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod2.Name, pod2.Namespace)
framework.ExpectNoError(err, "failed to start the second pod")
ginkgo.By("Checking CSI driver calls for the second pod")
if t.expectedUnstage {
// Volume should be fully unstaged between the first and the second pod
gomega.Expect(unstageCalls.Load()).To(gomega.BeNumerically(">", 0), "NodeUnstage was not called for the second pod")
gomega.Expect(stageCalls.Load()).To(gomega.BeNumerically(">", 0), "NodeStage was not called for the second pod")
// The second pod got the right mount option
framework.ExpectEqual(nodeStageMountOpts, t.expectedSecondMountOptions, "Expect NodeStageVolumeRequest.VolumeCapability.MountVolume.MountFlags to equal %q; got: %q", t.expectedSecondMountOptions, nodeStageMountOpts)
} else {
// Volume should not be fully unstaged between the first and the second pod
gomega.Expect(unstageCalls.Load()).To(gomega.BeNumerically("==", 0), "NodeUnstage was unexpectedly called for the second pod")
gomega.Expect(stageCalls.Load()).To(gomega.BeNumerically("==", 0), "NodeStage was unexpectedly called for the second pod")
}
// In both cases, NodeUnpublish and NodePublish are called with the right mount options.
gomega.Expect(unpublishCalls.Load()).To(gomega.BeNumerically(">", 0), "NodeUnpublish was not called for the second pod")
gomega.Expect(publishCalls.Load()).To(gomega.BeNumerically(">", 0), "NodePublish was not called for the second pod")
framework.ExpectEqual(nodePublishMountOpts, t.expectedSecondMountOptions, "Expect NodePublishVolumeRequest.VolumeCapability.MountVolume.MountFlags to equal %q; got: %q", t.expectedSecondMountOptions, nodePublishMountOpts)
})
}
})