From e1f62a9bc56dcb6fd7eee7d5fbe90a26b0f2a943 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Wed, 9 Nov 2022 14:17:34 +0100 Subject: [PATCH 1/5] Add multi-pod tests with SELinux mounts Check that a volume is fully unmounted (Unstaged) before kubelet starts a pod with the same volume, but with a different SELinux context. --- test/e2e/storage/csi_mock/base.go | 17 +- .../e2e/storage/csi_mock/csi_selinux_mount.go | 210 ++++++++++++++---- 2 files changed, 180 insertions(+), 47 deletions(-) diff --git a/test/e2e/storage/csi_mock/base.go b/test/e2e/storage/csi_mock/base.go index 4f198097d2a..cbbf1a484e3 100644 --- a/test/e2e/storage/csi_mock/base.go +++ b/test/e2e/storage/csi_mock/base.go @@ -664,6 +664,11 @@ func startPausePodWithSELinuxOptions(cs clientset.Interface, pvc *v1.PersistentV }, }, } + if node.Name != "" { + // Force schedule the pod to skip scheduler RWOP checks + framework.Logf("Forcing node name %s", node.Name) + pod.Spec.NodeName = node.Name + } e2epod.SetNodeSelection(&pod.Spec, node) return cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) } @@ -821,11 +826,12 @@ func compareCSICalls(ctx context.Context, trackedCalls []string, expectedCallSeq // createSELinuxMountPreHook creates a hook that records the mountOptions passed in // through NodeStageVolume and NodePublishVolume calls. -func createSELinuxMountPreHook(nodeStageMountOpts, nodePublishMountOpts *[]string) *drivers.Hooks { +func createSELinuxMountPreHook(nodeStageMountOpts, nodePublishMountOpts *[]string, stageCalls, unstageCalls, publishCalls, unpublishCalls *atomic.Int32) *drivers.Hooks { return &drivers.Hooks{ Pre: func(ctx context.Context, fullMethod string, request interface{}) (reply interface{}, err error) { nodeStageRequest, ok := request.(*csipbv1.NodeStageVolumeRequest) if ok { + stageCalls.Add(1) mountVolume := nodeStageRequest.GetVolumeCapability().GetMount() if mountVolume != nil { *nodeStageMountOpts = mountVolume.MountFlags @@ -833,11 +839,20 @@ func createSELinuxMountPreHook(nodeStageMountOpts, nodePublishMountOpts *[]strin } nodePublishRequest, ok := request.(*csipbv1.NodePublishVolumeRequest) if ok { + publishCalls.Add(1) mountVolume := nodePublishRequest.GetVolumeCapability().GetMount() if mountVolume != nil { *nodePublishMountOpts = mountVolume.MountFlags } } + _, ok = request.(*csipbv1.NodeUnstageVolumeRequest) + if ok { + unstageCalls.Add(1) + } + _, ok = request.(*csipbv1.NodeUnpublishVolumeRequest) + if ok { + unpublishCalls.Add(1) + } return nil, nil }, } diff --git a/test/e2e/storage/csi_mock/csi_selinux_mount.go b/test/e2e/storage/csi_mock/csi_selinux_mount.go index d6c6e4265d1..4a4c8f8184c 100644 --- a/test/e2e/storage/csi_mock/csi_selinux_mount.go +++ b/test/e2e/storage/csi_mock/csi_selinux_mount.go @@ -18,10 +18,17 @@ package csi_mock import ( "context" + "fmt" + "sync/atomic" "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/kubernetes/pkg/kubelet/events" "k8s.io/kubernetes/test/e2e/framework" + e2eevents "k8s.io/kubernetes/test/e2e/framework/events" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" "k8s.io/kubernetes/test/e2e/storage/utils" @@ -35,57 +42,92 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount", func() { ginkgo.Context("SELinuxMount [LinuxOnly][Feature:SELinux][Feature:SELinuxMountReadWriteOncePod]", func() { // Make sure all options are set so system specific 
defaults are not used. - seLinuxOpts := v1.SELinuxOptions{ + seLinuxOpts1 := v1.SELinuxOptions{ User: "system_u", Role: "object_r", Type: "container_file_t", Level: "s0:c0,c1", } - seLinuxMountOption := "context=\"system_u:object_r:container_file_t:s0:c0,c1\"" + seLinuxMountOption1 := "context=\"system_u:object_r:container_file_t:s0:c0,c1\"" + seLinuxOpts2 := v1.SELinuxOptions{ + User: "system_u", + Role: "object_r", + Type: "container_file_t", + Level: "s0:c98,c99", + } + seLinuxMountOption2 := "context=\"system_u:object_r:container_file_t:s0:c98,c99\"" tests := []struct { - name string - seLinuxEnabled bool - seLinuxSetInPod bool - mountOptions []string - volumeMode v1.PersistentVolumeAccessMode - expectedMountOptions []string + name string + csiDriverSELinuxEnabled bool + firstPodSELinuxOpts *v1.SELinuxOptions + startSecondPod bool + secondPodSELinuxOpts *v1.SELinuxOptions + mountOptions []string + volumeMode v1.PersistentVolumeAccessMode + expectedFirstMountOptions []string + expectedSecondMountOptions []string + expectedUnstage bool }{ + // Start just a single pod and check its volume is mounted correctly { - name: "should pass SELinux mount option for RWOP volume and Pod with SELinux context set", - seLinuxEnabled: true, - seLinuxSetInPod: true, - volumeMode: v1.ReadWriteOncePod, - expectedMountOptions: []string{seLinuxMountOption}, + name: "should pass SELinux mount option for RWOP volume and Pod with SELinux context set", + csiDriverSELinuxEnabled: true, + firstPodSELinuxOpts: &seLinuxOpts1, + volumeMode: v1.ReadWriteOncePod, + expectedFirstMountOptions: []string{seLinuxMountOption1}, }, { - name: "should add SELinux mount option to existing mount options", - seLinuxEnabled: true, - seLinuxSetInPod: true, - mountOptions: []string{"noexec", "noatime"}, - volumeMode: v1.ReadWriteOncePod, - expectedMountOptions: []string{"noexec", "noatime", seLinuxMountOption}, + name: "should add SELinux mount option to existing mount options", + csiDriverSELinuxEnabled: true, + firstPodSELinuxOpts: &seLinuxOpts1, + mountOptions: []string{"noexec", "noatime"}, + volumeMode: v1.ReadWriteOncePod, + expectedFirstMountOptions: []string{"noexec", "noatime", seLinuxMountOption1}, }, { - name: "should not pass SELinux mount option for RWO volume", - seLinuxEnabled: true, - seLinuxSetInPod: true, - volumeMode: v1.ReadWriteOnce, - expectedMountOptions: nil, + name: "should not pass SELinux mount option for RWO volume", + csiDriverSELinuxEnabled: true, + firstPodSELinuxOpts: &seLinuxOpts1, + volumeMode: v1.ReadWriteOnce, + expectedFirstMountOptions: nil, }, { - name: "should not pass SELinux mount option for Pod without SELinux context", - seLinuxEnabled: true, - seLinuxSetInPod: false, - volumeMode: v1.ReadWriteOncePod, - expectedMountOptions: nil, + name: "should not pass SELinux mount option for Pod without SELinux context", + csiDriverSELinuxEnabled: true, + firstPodSELinuxOpts: nil, + volumeMode: v1.ReadWriteOncePod, + expectedFirstMountOptions: nil, }, { - name: "should not pass SELinux mount option for CSI driver that does not support SELinux mount", - seLinuxEnabled: false, - seLinuxSetInPod: true, - volumeMode: v1.ReadWriteOncePod, - expectedMountOptions: nil, + name: "should not pass SELinux mount option for CSI driver that does not support SELinux mount", + csiDriverSELinuxEnabled: false, + firstPodSELinuxOpts: &seLinuxOpts1, + volumeMode: v1.ReadWriteOncePod, + expectedFirstMountOptions: nil, + }, + // Start two pods in a sequence and check their volume is / is not unmounted in between + { + name: 
"should not unstage volume when starting a second pod with the same SELinux context", + csiDriverSELinuxEnabled: true, + firstPodSELinuxOpts: &seLinuxOpts1, + startSecondPod: true, + secondPodSELinuxOpts: &seLinuxOpts1, + volumeMode: v1.ReadWriteOncePod, + expectedFirstMountOptions: []string{seLinuxMountOption1}, + expectedSecondMountOptions: []string{seLinuxMountOption1}, + expectedUnstage: false, + }, + { + name: "should unstage volume when starting a second pod with different SELinux context", + csiDriverSELinuxEnabled: true, + firstPodSELinuxOpts: &seLinuxOpts1, + startSecondPod: true, + secondPodSELinuxOpts: &seLinuxOpts2, + volumeMode: v1.ReadWriteOncePod, + expectedFirstMountOptions: []string{seLinuxMountOption1}, + expectedSecondMountOptions: []string{seLinuxMountOption2}, + expectedUnstage: true, }, } for _, t := range tests { @@ -95,27 +137,103 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount", func() { e2eskipper.Skipf("SELinuxMount is only applied on linux nodes -- skipping") } var nodeStageMountOpts, nodePublishMountOpts []string + var unstageCalls, stageCalls, unpublishCalls, publishCalls atomic.Int32 m.init(ctx, testParameters{ disableAttach: true, registerDriver: true, - enableSELinuxMount: &t.seLinuxEnabled, - hooks: createSELinuxMountPreHook(&nodeStageMountOpts, &nodePublishMountOpts), + enableSELinuxMount: &t.csiDriverSELinuxEnabled, + hooks: createSELinuxMountPreHook(&nodeStageMountOpts, &nodePublishMountOpts, &stageCalls, &unstageCalls, &publishCalls, &unpublishCalls), }) - ginkgo.DeferCleanup(m.cleanup) + defer m.cleanup(ctx) + // Act + ginkgo.By("Starting the initial pod") accessModes := []v1.PersistentVolumeAccessMode{t.volumeMode} - var podSELinuxOpts *v1.SELinuxOptions - if t.seLinuxSetInPod { - // Make sure all options are set so system specific defaults are not used. 
- podSELinuxOpts = &seLinuxOpts + _, claim, pod := m.createPodWithSELinux(ctx, accessModes, t.mountOptions, t.firstPodSELinuxOpts) + err := e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace) + framework.ExpectNoError(err, "failed to start the initial pod") + + // Assert + ginkgo.By("Checking the initial pod mount options") + framework.ExpectEqual(nodeStageMountOpts, t.expectedFirstMountOptions, "Expect NodeStageVolumeRequest.VolumeCapability.MountVolume.MountFlags to equal %q; got: %q", t.expectedFirstMountOptions, nodeStageMountOpts) + framework.ExpectEqual(nodePublishMountOpts, t.expectedFirstMountOptions, "Expect NodePublishVolumeRequest.VolumeCapability.MountVolume.MountFlags to equal %q; got: %q", t.expectedFirstMountOptions, nodeStageMountOpts) + + ginkgo.By("Checking the CSI driver calls for the initial pod") + gomega.Expect(unstageCalls.Load()).To(gomega.BeNumerically("==", 0), "NodeUnstage was unexpectedly called for the initial pod") + gomega.Expect(unpublishCalls.Load()).To(gomega.BeNumerically("==", 0), "NodeUnpublish was unexpectedly called for the initial pod") + gomega.Expect(stageCalls.Load()).To(gomega.BeNumerically(">", 0), "NodeStage was not called for the initial pod") + gomega.Expect(publishCalls.Load()).To(gomega.BeNumerically(">", 0), "NodePublish was not called for the initial pod") + + if !t.startSecondPod { + return } - _, _, pod := m.createPodWithSELinux(ctx, accessModes, t.mountOptions, podSELinuxOpts) - err := e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace) - framework.ExpectNoError(err, "failed to start pod") + // Arrange 2nd part of the test + ginkgo.By("Starting the second pod to check if a volume used by the initial pod is / is not unmounted based on SELinux context") - framework.ExpectEqual(nodeStageMountOpts, t.expectedMountOptions, "Expect NodeStageVolumeRequest.VolumeCapability.MountVolume. to equal %q; got: %q", t.expectedMountOptions, nodeStageMountOpts) - framework.ExpectEqual(nodePublishMountOpts, t.expectedMountOptions, "Expect NodePublishVolumeRequest.VolumeCapability.MountVolume.VolumeMountGroup to equal %q; got: %q", t.expectedMountOptions, nodeStageMountOpts) + // Skip scheduler, it would block scheduling the second pod with ReadWriteOncePod PV. + pod, err = m.cs.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) + framework.ExpectNoError(err, fmt.Sprintf("get the initial pod")) + nodeSelection := e2epod.NodeSelection{Name: pod.Spec.NodeName} + pod2, err := startPausePodWithSELinuxOptions(f.ClientSet, claim, nodeSelection, f.Namespace.Name, t.secondPodSELinuxOpts) + framework.ExpectNoError(err, "Failed to create second pod with SELinux context %s: %v", t.secondPodSELinuxOpts, err) + m.pods = append(m.pods, pod2) + + // Delete the initial pod only after kubelet processes the second pod and adds its volumes to + // DesiredStateOfWorld. + // In this state, any volume UnPublish / UnStage must be done because of SELinux contexts and not + // because of random races caused by the second pod's volumes not being in DesiredStateOfWorld yet.
+ ginkgo.By("Waiting for the second pod to fail to start because of ReadWriteOncePod.") + eventSelector := fields.Set{ + "involvedObject.kind": "Pod", + "involvedObject.name": pod2.Name, + "involvedObject.namespace": pod2.Namespace, + "reason": events.FailedMountVolume, + }.AsSelector().String() + var msg string + if t.expectedUnstage { + // This message is emitted before kubelet checks for ReadWriteOncePod + msg = "conflicting SELinux labels of volume" + } else { + msg = "volume uses the ReadWriteOncePod access mode and is already in use by another pod" + } + err = e2eevents.WaitTimeoutForEvent(ctx, m.cs, pod2.Namespace, eventSelector, msg, f.Timeouts.PodStart) + framework.ExpectNoError(err, "faile to wait for event in the second test pod:", msg) + + // count fresh CSI driver calls between the first and the second pod + nodeStageMountOpts = nil + nodePublishMountOpts = nil + unstageCalls.Store(0) + unpublishCalls.Store(0) + stageCalls.Store(0) + publishCalls.Store(0) + + // Act 2nd part of the test + ginkgo.By("Deleting the initial pod") + err = e2epod.DeletePodWithWait(ctx, m.cs, pod) + framework.ExpectNoError(err, "failed to delete the initial pod") + + // Assert 2nd part of the test + ginkgo.By("Waiting for the second pod to start") + err = e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod2.Name, pod2.Namespace) + framework.ExpectNoError(err, "failed to start the second pod") + + ginkgo.By("Checking CSI driver calls for the second pod") + if t.expectedUnstage { + // Volume should be fully unstaged between the first and the second pod + gomega.Expect(unstageCalls.Load()).To(gomega.BeNumerically(">", 0), "NodeUnstage was not called for the second pod") + gomega.Expect(stageCalls.Load()).To(gomega.BeNumerically(">", 0), "NodeStage was not called for the second pod") + // The second pod got the right mount option + framework.ExpectEqual(nodeStageMountOpts, t.expectedSecondMountOptions, "Expect NodeStageVolumeRequest.VolumeCapability.MountVolume.MountFlags to equal %q; got: %q", t.expectedSecondMountOptions, nodeStageMountOpts) + } else { + // Volume should not be fully unstaged between the first and the second pod + gomega.Expect(unstageCalls.Load()).To(gomega.BeNumerically("==", 0), "NodeUnstage was unexpectedly called for the second pod") + gomega.Expect(stageCalls.Load()).To(gomega.BeNumerically("==", 0), "NodeStage was unexpectedly called for the second pod") + } + // In both cases, Unublish and Publish is called, with the right mount opts + gomega.Expect(unpublishCalls.Load()).To(gomega.BeNumerically(">", 0), "NodeUnpublish was not called for the second pod") + gomega.Expect(publishCalls.Load()).To(gomega.BeNumerically(">", 0), "NodePublish was not called for the second pod") + framework.ExpectEqual(nodePublishMountOpts, t.expectedSecondMountOptions, "Expect NodePublishVolumeRequest.VolumeCapability.MountVolume.MountFlags to equal %q; got: %q", t.expectedSecondMountOptions, nodeStageMountOpts) }) } }) From 03beb014f983418933a4a463b286f8a38e17b4f1 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Mon, 9 Jan 2023 13:53:28 +0100 Subject: [PATCH 2/5] Update error context messages --- .../e2e/storage/csi_mock/csi_selinux_mount.go | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/test/e2e/storage/csi_mock/csi_selinux_mount.go b/test/e2e/storage/csi_mock/csi_selinux_mount.go index 4a4c8f8184c..297dd09b06a 100644 --- a/test/e2e/storage/csi_mock/csi_selinux_mount.go +++ b/test/e2e/storage/csi_mock/csi_selinux_mount.go @@ -151,18 +151,18 @@ var _ = 
utils.SIGDescribe("CSI Mock selinux on mount", func() { accessModes := []v1.PersistentVolumeAccessMode{t.volumeMode} _, claim, pod := m.createPodWithSELinux(ctx, accessModes, t.mountOptions, t.firstPodSELinuxOpts) err := e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace) - framework.ExpectNoError(err, "failed to start the initial pod") + framework.ExpectNoError(err, "starting the initial pod") // Assert ginkgo.By("Checking the initial pod mount options") - framework.ExpectEqual(nodeStageMountOpts, t.expectedFirstMountOptions, "Expect NodeStageVolumeRequest.VolumeCapability.MountVolume.MountFlags to equal %q; got: %q", t.expectedFirstMountOptions, nodeStageMountOpts) - framework.ExpectEqual(nodePublishMountOpts, t.expectedFirstMountOptions, "Expect NodePublishVolumeRequest.VolumeCapability.MountVolume.MountFlags to equal %q; got: %q", t.expectedFirstMountOptions, nodeStageMountOpts) + framework.ExpectEqual(nodeStageMountOpts, t.expectedFirstMountOptions, "NodeStage MountFlags for the initial pod") + framework.ExpectEqual(nodePublishMountOpts, t.expectedFirstMountOptions, "NodePublish MountFlags for the initial pod") ginkgo.By("Checking the CSI driver calls for the initial pod") - gomega.Expect(unstageCalls.Load()).To(gomega.BeNumerically("==", 0), "NodeUnstage was unexpectedly called for the initial pod") - gomega.Expect(unpublishCalls.Load()).To(gomega.BeNumerically("==", 0), "NodeUnpublish was unexpectedly called for the initial pod") - gomega.Expect(stageCalls.Load()).To(gomega.BeNumerically(">", 0), "NodeStage was not called for the initial pod") - gomega.Expect(publishCalls.Load()).To(gomega.BeNumerically(">", 0), "NodePublish was not called for the initial pod") + gomega.Expect(unstageCalls.Load()).To(gomega.BeNumerically("==", 0), "NodeUnstage call count for the initial pod") + gomega.Expect(unpublishCalls.Load()).To(gomega.BeNumerically("==", 0), "NodeUnpublish call count for the initial pod") + gomega.Expect(stageCalls.Load()).To(gomega.BeNumerically(">", 0), "NodeStage for the initial pod") + gomega.Expect(publishCalls.Load()).To(gomega.BeNumerically(">", 0), "NodePublish for the initial pod") if !t.startSecondPod { return @@ -173,10 +173,10 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount", func() { // Skip scheduler, it would block scheduling the second pod with ReadWriteOncePod PV. 
pod, err = m.cs.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) - framework.ExpectNoError(err, fmt.Sprintf("get the initial pod")) + framework.ExpectNoError(err, fmt.Sprintf("getting the initial pod")) nodeSelection := e2epod.NodeSelection{Name: pod.Spec.NodeName} pod2, err := startPausePodWithSELinuxOptions(f.ClientSet, claim, nodeSelection, f.Namespace.Name, t.secondPodSELinuxOpts) - framework.ExpectNoError(err, "Failed to create second pod with SELinux context %s: %v", t.secondPodSELinuxOpts, err) + framework.ExpectNoError(err, "creating second pod with SELinux context %s", t.secondPodSELinuxOpts) m.pods = append(m.pods, pod2) // Delete the initial pod only after kubelet processes the second pod and adds its volumes to @@ -198,7 +198,7 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount", func() { msg = "volume uses the ReadWriteOncePod access mode and is already in use by another pod" } err = e2eevents.WaitTimeoutForEvent(ctx, m.cs, pod2.Namespace, eventSelector, msg, f.Timeouts.PodStart) - framework.ExpectNoError(err, "faile to wait for event in the second test pod:", msg) + framework.ExpectNoError(err, "waiting for event %q in the second test pod", msg) // count fresh CSI driver calls between the first and the second pod nodeStageMountOpts = nil @@ -211,29 +211,29 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount", func() { // Act 2nd part of the test ginkgo.By("Deleting the initial pod") err = e2epod.DeletePodWithWait(ctx, m.cs, pod) - framework.ExpectNoError(err, "failed to delete the initial pod") + framework.ExpectNoError(err, "deleting the initial pod") // Assert 2nd part of the test ginkgo.By("Waiting for the second pod to start") err = e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod2.Name, pod2.Namespace) - framework.ExpectNoError(err, "failed to start the second pod") + framework.ExpectNoError(err, "starting the second pod") ginkgo.By("Checking CSI driver calls for the second pod") if t.expectedUnstage { // Volume should be fully unstaged between the first and the second pod - gomega.Expect(unstageCalls.Load()).To(gomega.BeNumerically(">", 0), "NodeUnstage was not called for the second pod") - gomega.Expect(stageCalls.Load()).To(gomega.BeNumerically(">", 0), "NodeStage was not called for the second pod") + gomega.Expect(unstageCalls.Load()).To(gomega.BeNumerically(">", 0), "NodeUnstage calls after the first pod is deleted") + gomega.Expect(stageCalls.Load()).To(gomega.BeNumerically(">", 0), "NodeStage calls for the second pod") // The second pod got the right mount option - framework.ExpectEqual(nodeStageMountOpts, t.expectedSecondMountOptions, "Expect NodeStageVolumeRequest.VolumeCapability.MountVolume.MountFlags to equal %q; got: %q", t.expectedSecondMountOptions, nodeStageMountOpts) + framework.ExpectEqual(nodeStageMountOpts, t.expectedSecondMountOptions, "NodeStage MountFlags for the second pod") } else { // Volume should not be fully unstaged between the first and the second pod - gomega.Expect(unstageCalls.Load()).To(gomega.BeNumerically("==", 0), "NodeUnstage was unexpectedly called for the second pod") - gomega.Expect(stageCalls.Load()).To(gomega.BeNumerically("==", 0), "NodeStage was unexpectedly called for the second pod") + gomega.Expect(unstageCalls.Load()).To(gomega.BeNumerically("==", 0), "NodeUnstage calls after the first pod is deleted") + gomega.Expect(stageCalls.Load()).To(gomega.BeNumerically("==", 0), "NodeStage calls for the second pod") } // In both cases, Unpublish and Publish are called, with the right mount opts -
gomega.Expect(unpublishCalls.Load()).To(gomega.BeNumerically(">", 0), "NodeUnpublish was not called for the second pod") - gomega.Expect(publishCalls.Load()).To(gomega.BeNumerically(">", 0), "NodePublish was not called for the second pod") - framework.ExpectEqual(nodePublishMountOpts, t.expectedSecondMountOptions, "Expect NodePublishVolumeRequest.VolumeCapability.MountVolume.MountFlags to equal %q; got: %q", t.expectedSecondMountOptions, nodeStageMountOpts) + gomega.Expect(unpublishCalls.Load()).To(gomega.BeNumerically(">", 0), "NodeUnpublish calls after the first pod is deleted") + gomega.Expect(publishCalls.Load()).To(gomega.BeNumerically(">", 0), "NodePublish calls for the second pod") + framework.ExpectEqual(nodePublishMountOpts, t.expectedSecondMountOptions, "NodePublish MountFlags for the second pod") }) } }) From de4ce7b58ca771c95cf7f7a44394c2dbe94498f1 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Tue, 17 Jan 2023 11:21:44 +0100 Subject: [PATCH 3/5] Remove defer Replace it with ginkgo.DeferCleanup --- test/e2e/storage/csi_mock/csi_selinux_mount.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/storage/csi_mock/csi_selinux_mount.go b/test/e2e/storage/csi_mock/csi_selinux_mount.go index 297dd09b06a..fdbcee979d0 100644 --- a/test/e2e/storage/csi_mock/csi_selinux_mount.go +++ b/test/e2e/storage/csi_mock/csi_selinux_mount.go @@ -144,7 +144,7 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount", func() { enableSELinuxMount: &t.csiDriverSELinuxEnabled, hooks: createSELinuxMountPreHook(&nodeStageMountOpts, &nodePublishMountOpts, &stageCalls, &unstageCalls, &publishCalls, &unpublishCalls), }) - defer m.cleanup(ctx) + ginkgo.DeferCleanup(m.cleanup) // Act ginkgo.By("Starting the initial pod") From d2bb866d3f4a630a28900c870cf274141154b1e8 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Tue, 17 Jan 2023 11:22:24 +0100 Subject: [PATCH 4/5] Remove useless Sprintf --- test/e2e/storage/csi_mock/csi_selinux_mount.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/test/e2e/storage/csi_mock/csi_selinux_mount.go b/test/e2e/storage/csi_mock/csi_selinux_mount.go index fdbcee979d0..b6b74c78c69 100644 --- a/test/e2e/storage/csi_mock/csi_selinux_mount.go +++ b/test/e2e/storage/csi_mock/csi_selinux_mount.go @@ -18,7 +18,6 @@ package csi_mock import ( "context" - "fmt" "sync/atomic" "github.com/onsi/ginkgo/v2" @@ -173,7 +172,7 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount", func() { // Skip scheduler, it would block scheduling the second pod with ReadWriteOncePod PV. 
pod, err = m.cs.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) - framework.ExpectNoError(err, fmt.Sprintf("getting the initial pod")) + framework.ExpectNoError(err, "getting the initial pod") nodeSelection := e2epod.NodeSelection{Name: pod.Spec.NodeName} pod2, err := startPausePodWithSELinuxOptions(f.ClientSet, claim, nodeSelection, f.Namespace.Name, t.secondPodSELinuxOpts) framework.ExpectNoError(err, "creating second pod with SELinux context %s", t.secondPodSELinuxOpts) From b9c244956959abe2e013c3b2cf1b3bd520326309 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Tue, 17 Jan 2023 11:23:09 +0100 Subject: [PATCH 5/5] Rework createSELinuxMountPreHook to switch() --- test/e2e/storage/csi_mock/base.go | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/test/e2e/storage/csi_mock/base.go b/test/e2e/storage/csi_mock/base.go index cbbf1a484e3..0d56cb61f6d 100644 --- a/test/e2e/storage/csi_mock/base.go +++ b/test/e2e/storage/csi_mock/base.go @@ -829,28 +829,22 @@ func compareCSICalls(ctx context.Context, trackedCalls []string, expectedCallSeq func createSELinuxMountPreHook(nodeStageMountOpts, nodePublishMountOpts *[]string, stageCalls, unstageCalls, publishCalls, unpublishCalls *atomic.Int32) *drivers.Hooks { return &drivers.Hooks{ Pre: func(ctx context.Context, fullMethod string, request interface{}) (reply interface{}, err error) { - nodeStageRequest, ok := request.(*csipbv1.NodeStageVolumeRequest) - if ok { + switch req := request.(type) { + case *csipbv1.NodeStageVolumeRequest: stageCalls.Add(1) - mountVolume := nodeStageRequest.GetVolumeCapability().GetMount() + mountVolume := req.GetVolumeCapability().GetMount() if mountVolume != nil { *nodeStageMountOpts = mountVolume.MountFlags } - } - nodePublishRequest, ok := request.(*csipbv1.NodePublishVolumeRequest) - if ok { + case *csipbv1.NodePublishVolumeRequest: publishCalls.Add(1) - mountVolume := nodePublishRequest.GetVolumeCapability().GetMount() + mountVolume := req.GetVolumeCapability().GetMount() if mountVolume != nil { *nodePublishMountOpts = mountVolume.MountFlags } - } - _, ok = request.(*csipbv1.NodeUnstageVolumeRequest) - if ok { + case *csipbv1.NodeUnstageVolumeRequest: unstageCalls.Add(1) - } - _, ok = request.(*csipbv1.NodeUnpublishVolumeRequest) - if ok { + case *csipbv1.NodeUnpublishVolumeRequest: unpublishCalls.Add(1) } return nil, nil