From 052f1fe8203fc95ae8293f3aafad2bda6cd8b5c7 Mon Sep 17 00:00:00 2001
From: Jan Safranek
Date: Wed, 26 Feb 2025 14:57:58 +0100
Subject: [PATCH] Update tests

Volumes of pods with the Recursive SELinux change policy are now cached
with an empty label, so the controller reports only SELinuxChangePolicy
conflicts for them and no longer emits SELinuxLabel conflicts. Update
the cache, controller and e2e test expectations accordingly, and enable
the SELinuxChangePolicy feature gate in the controller unit tests.
---
 .../selinuxwarning/cache/volumecache_test.go  | 49 ++----
 .../selinux_warning_controller_test.go        | 76 ++++++++++++++++++-
 test/e2e/storage/csimock/csi_selinux_mount.go |  4 +-
 3 files changed, 83 insertions(+), 46 deletions(-)

diff --git a/pkg/controller/volume/selinuxwarning/cache/volumecache_test.go b/pkg/controller/volume/selinuxwarning/cache/volumecache_test.go
index 8cddb3098c3..f951a082b2e 100644
--- a/pkg/controller/volume/selinuxwarning/cache/volumecache_test.go
+++ b/pkg/controller/volume/selinuxwarning/cache/volumecache_test.go
@@ -119,21 +119,21 @@ func TestVolumeCache_AddVolumeSendConflicts(t *testing.T) {
 			podNamespace: "ns2",
 			podName:      "pod2-recursive",
 			volumeName:   "vol2",
-			label:        "system_u:system_r:label2",
+			label:        "", // labels on volumes with Recursive policy are cleared; we don't want the controller to report label conflicts on them
 			changePolicy: v1.SELinuxChangePolicyRecursive,
 		},
 		{
 			podNamespace: "ns3",
 			podName:      "pod3-1",
 			volumeName:   "vol3", // vol3 is used by 2 pods with the same label + recursive policy
-			label:        "system_u:system_r:label3",
+			label:        "", // labels on volumes with Recursive policy are cleared; we don't want the controller to report label conflicts on them
 			changePolicy: v1.SELinuxChangePolicyRecursive,
 		},
 		{
 			podNamespace: "ns3",
 			podName:      "pod3-2",
 			volumeName:   "vol3", // vol3 is used by 2 pods with the same label + recursive policy
-			label:        "system_u:system_r:label3",
+			label:        "", // labels on volumes with Recursive policy are cleared; we don't want the controller to report label conflicts on them
 			changePolicy: v1.SELinuxChangePolicyRecursive,
 		},
 		{
@@ -244,13 +244,13 @@ func TestVolumeCache_AddVolumeSendConflicts(t *testing.T) {
 			},
 		},
 		{
-			name:        "existing volume in a new pod with new conflicting policy and existing label",
+			name:        "existing volume in a new pod with new conflicting policy",
 			initialPods: existingPods,
 			podToAdd: podWithVolume{
 				podNamespace: "testns",
 				podName:      "testpod",
 				volumeName:   "vol1",
-				label:        "system_u:system_r:label1",
+				label:        "",
 				changePolicy: v1.SELinuxChangePolicyRecursive,
 			},
 			expectedConflicts: []Conflict{
@@ -264,35 +264,6 @@ func TestVolumeCache_AddVolumeSendConflicts(t *testing.T) {
 				},
 			},
 		},
-		{
-			name:        "existing volume in a new pod with new conflicting policy and new conflicting label",
-			initialPods: existingPods,
-			podToAdd: podWithVolume{
-				podNamespace: "testns",
-				podName:      "testpod",
-				volumeName:   "vol1",
-				label:        "system_u:system_r:label-new",
-				changePolicy: v1.SELinuxChangePolicyRecursive,
-			},
-			expectedConflicts: []Conflict{
-				{
-					PropertyName:       "SELinuxChangePolicy",
-					EventReason:        "SELinuxChangePolicyConflict",
-					Pod:                cache.ObjectName{Namespace: "testns", Name: "testpod"},
-					PropertyValue:      "Recursive",
-					OtherPod:           cache.ObjectName{Namespace: "ns1", Name: "pod1-mountOption"},
-					OtherPropertyValue: "MountOption",
-				},
-				{
-					PropertyName:       "SELinuxLabel",
-					EventReason:        "SELinuxLabelConflict",
-					Pod:                cache.ObjectName{Namespace: "testns", Name: "testpod"},
-					PropertyValue:      "system_u:system_r:label-new",
-					OtherPod:           cache.ObjectName{Namespace: "ns1", Name: "pod1-mountOption"},
-					OtherPropertyValue: "system_u:system_r:label1",
-				},
-			},
-		},
 		{
 			name:        "existing pod is replaced with different non-conflicting policy and label",
 			initialPods: existingPods,
@@ -307,7 +278,7 @@ func TestVolumeCache_AddVolumeSendConflicts(t *testing.T) {
 			expectedConflicts: nil,
 		},
 		{
-			name:        "existing pod is replaced with conflicting policy and label",
+			name:        "existing pod is replaced with conflicting policy",
 			initialPods: existingPods,
 			podToAdd: podWithVolume{
@@ -326,14 +297,6 @@ func TestVolumeCache_AddVolumeSendConflicts(t *testing.T) {
 					OtherPod:           cache.ObjectName{Namespace: "ns3", Name: "pod3-2"},
 					OtherPropertyValue: "Recursive",
 				},
-				{
-					PropertyName:       "SELinuxLabel",
-					EventReason:        "SELinuxLabelConflict",
-					Pod:                cache.ObjectName{Namespace: "ns3", Name: "pod3-1"},
-					PropertyValue:      "system_u:system_r:label-new",
-					OtherPod:           cache.ObjectName{Namespace: "ns3", Name: "pod3-2"},
-					OtherPropertyValue: "system_u:system_r:label3",
-				},
 			},
 		},
 		{
diff --git a/pkg/controller/volume/selinuxwarning/selinux_warning_controller_test.go b/pkg/controller/volume/selinuxwarning/selinux_warning_controller_test.go
index f61a26a04c1..3f863cac8ab 100644
--- a/pkg/controller/volume/selinuxwarning/selinux_warning_controller_test.go
+++ b/pkg/controller/volume/selinuxwarning/selinux_warning_controller_test.go
@@ -25,14 +25,17 @@ import (
 	storagev1 "k8s.io/api/storage/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/sets"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes/fake"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/record"
+	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	"k8s.io/klog/v2"
 	"k8s.io/klog/v2/ktesting"
 	"k8s.io/kubernetes/pkg/controller"
 	volumecache "k8s.io/kubernetes/pkg/controller/volume/selinuxwarning/cache"
+	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/volume"
 	volumetesting "k8s.io/kubernetes/pkg/volume/testing"
 	"k8s.io/utils/ptr"
@@ -117,7 +120,7 @@ func TestSELinuxWarningController_Sync(t *testing.T) {
 				{
 					volumeName:   "fake-plugin/pv1",
 					podKey:       cache.ObjectName{Namespace: namespace, Name: "pod1"},
-					label:        ":::s0:c1,c2",
+					label:        "", // Label is cleared with the Recursive policy
 					changePolicy: v1.SELinuxChangePolicyRecursive,
 					csiDriver:    "ebs.csi.aws.com", // The PV is a fake EBS volume
 				},
@@ -221,6 +224,75 @@ func TestSELinuxWarningController_Sync(t *testing.T) {
 				`Normal SELinuxLabelConflict SELinuxLabel ":::s0:c98,c99" conflicts with pod pod1 that uses the same volume as this pod with SELinuxLabel ":::s0:c1,c2". If both pods land on the same node, only one of them may access the volume.`,
 			},
 		},
+		{
+			name: "existing pod with Recursive policy does not generate conflicts",
+			existingPVCs: []*v1.PersistentVolumeClaim{
+				pvcBoundToPV("pv1", "pvc1"),
+			},
+			existingPVs: []*v1.PersistentVolume{
+				pvBoundToPVC("pv1", "pvc1"),
+			},
+			existingPods: []*v1.Pod{
+				podWithPVC("pod1", "s0:c1,c2", ptr.To(v1.SELinuxChangePolicyRecursive), "pvc1", "vol1"),
+				pod("pod2", "s0:c98,c99", ptr.To(v1.SELinuxChangePolicyRecursive)),
+			},
+			pod:       cache.ObjectName{Namespace: namespace, Name: "pod1"},
+			conflicts: []volumecache.Conflict{},
+			expectedAddedVolumes: []addedVolume{
+				{
+					volumeName:   "fake-plugin/pv1",
+					podKey:       cache.ObjectName{Namespace: namespace, Name: "pod1"},
+					label:        "", // Label is cleared with the Recursive policy
+					changePolicy: v1.SELinuxChangePolicyRecursive,
+					csiDriver:    "ebs.csi.aws.com", // The PV is a fake EBS volume
+				},
+			},
+		},
+		{
+			name: "existing pod with Recursive policy conflicts with a MountOption policy pod only on the policy, not on the label",
+			existingPVCs: []*v1.PersistentVolumeClaim{
+				pvcBoundToPV("pv1", "pvc1"),
+			},
+			existingPVs: []*v1.PersistentVolume{
+				pvBoundToPVC("pv1", "pvc1"),
+			},
+			existingPods: []*v1.Pod{
+				podWithPVC("pod1", "s0:c1,c2", ptr.To(v1.SELinuxChangePolicyRecursive), "pvc1", "vol1"),
+				podWithPVC("pod2", "s0:c98,c99", ptr.To(v1.SELinuxChangePolicyMountOption), "pvc1", "vol1"),
+			},
+			pod: cache.ObjectName{Namespace: namespace, Name: "pod1"},
+			conflicts: []volumecache.Conflict{
+				{
+					PropertyName:       "SELinuxChangePolicy",
+					EventReason:        "SELinuxChangePolicyConflict",
+					Pod:                cache.ObjectName{Namespace: namespace, Name: "pod1"},
+					PropertyValue:      string(v1.SELinuxChangePolicyRecursive),
+					OtherPod:           cache.ObjectName{Namespace: namespace, Name: "pod2"},
+					OtherPropertyValue: string(v1.SELinuxChangePolicyMountOption),
+				},
+				{
+					PropertyName:       "SELinuxChangePolicy",
+					EventReason:        "SELinuxChangePolicyConflict",
+					Pod:                cache.ObjectName{Namespace: namespace, Name: "pod2"},
+					PropertyValue:      string(v1.SELinuxChangePolicyMountOption),
+					OtherPod:           cache.ObjectName{Namespace: namespace, Name: "pod1"},
+					OtherPropertyValue: string(v1.SELinuxChangePolicyRecursive),
+				},
+			},
+			expectedAddedVolumes: []addedVolume{
+				{
+					volumeName:   "fake-plugin/pv1",
+					podKey:       cache.ObjectName{Namespace: namespace, Name: "pod1"},
+					label:        "", // Label is cleared with the Recursive policy
+					changePolicy: v1.SELinuxChangePolicyRecursive,
+					csiDriver:    "ebs.csi.aws.com", // The PV is a fake EBS volume
+				},
+			},
+			expectedEvents: []string{
+				`Normal SELinuxChangePolicyConflict SELinuxChangePolicy "Recursive" conflicts with pod pod2 that uses the same volume as this pod with SELinuxChangePolicy "MountOption". If both pods land on the same node, only one of them may access the volume.`,
+				`Normal SELinuxChangePolicyConflict SELinuxChangePolicy "MountOption" conflicts with pod pod1 that uses the same volume as this pod with SELinuxChangePolicy "Recursive". If both pods land on the same node, only one of them may access the volume.`,
+			},
+		},
 		{
 			name: "existing pod with PVC generates conflict, the other pod doesn't exist",
 			existingPVCs: []*v1.PersistentVolumeClaim{
@@ -281,6 +353,8 @@ func TestSELinuxWarningController_Sync(t *testing.T) {
 
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
+			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SELinuxChangePolicy, true)
+
 			_, ctx := ktesting.NewTestContext(t)
 			_, plugin := volumetesting.GetTestKubeletVolumePluginMgr(t)
 			plugin.SupportsSELinux = true
diff --git a/test/e2e/storage/csimock/csi_selinux_mount.go b/test/e2e/storage/csimock/csi_selinux_mount.go
index d2fddeb421b..25a1e3d78ad 100644
--- a/test/e2e/storage/csimock/csi_selinux_mount.go
+++ b/test/e2e/storage/csimock/csi_selinux_mount.go
@@ -506,7 +506,7 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount metrics and SELinuxWarningC
 			volumeMode:            v1.ReadWriteOnce,
 			waitForSecondPodStart: true,
 			expectNodeIncreases:   sets.New[string]( /* no metric is increased, admitted_total was already increased when the first pod started */ ),
-			expectControllerConflictProperty: "SELinuxLabel", /* SELinuxController does emit a warning for Recursive policy, while kubelet does not! */
+			expectControllerConflictProperty: "", /* SELinuxController does not emit any warning for Recursive policy either */
 			testTags: []interface{}{framework.WithFeatureGate(features.SELinuxMountReadWriteOncePod), framework.WithFeatureGate(features.SELinuxChangePolicy), feature.SELinuxMountReadWriteOncePodOnly},
 		},
 		{
@@ -595,7 +595,7 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount metrics and SELinuxWarningC
 			volumeMode:            v1.ReadWriteMany,
 			waitForSecondPodStart: true,
 			expectNodeIncreases:   sets.New[string]( /* no metric is increased, admitted_total was already increased when the first pod started */ ),
-			expectControllerConflictProperty: "SELinuxLabel", /* SELinuxController does emit a warning for Recursive policy, while kubelet does not! */
+			expectControllerConflictProperty: "", /* SELinuxController does not emit any warning for Recursive policy either */
 			testTags: []interface{}{framework.WithFeatureGate(features.SELinuxMountReadWriteOncePod), framework.WithFeatureGate(features.SELinuxChangePolicy), framework.WithFeatureGate(features.SELinuxMount)},
 		},
 		{
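Not part of the patch: a minimal, self-contained Go sketch of the invariant the updated test expectations encode. Volumes cached for pods with the Recursive change policy get an empty SELinux label, so two pods sharing a volume can conflict only on the change policy, never on the label. The podEntry, normalize and conflicts names below are hypothetical stand-ins for illustration, not the real controller API in pkg/controller/volume/selinuxwarning/cache.

package main

import "fmt"

// podEntry is a simplified, hypothetical stand-in for what the SELinux
// warning controller caches per pod and volume.
type podEntry struct {
	label        string // SELinux label, e.g. "system_u:system_r:label1"
	changePolicy string // "Recursive" or "MountOption"
}

// normalize clears the label for the Recursive policy, mirroring the
// behavior the updated tests expect: cleared labels are not tracked,
// so they can never produce label conflicts.
func normalize(e podEntry) podEntry {
	if e.changePolicy == "Recursive" {
		e.label = ""
	}
	return e
}

// conflicts lists the properties on which two cached entries for the
// same volume disagree.
func conflicts(a, b podEntry) []string {
	a, b = normalize(a), normalize(b)
	var out []string
	if a.changePolicy != b.changePolicy {
		out = append(out, "SELinuxChangePolicy")
	}
	// An empty (cleared) label conflicts with nothing.
	if a.label != "" && b.label != "" && a.label != b.label {
		out = append(out, "SELinuxLabel")
	}
	return out
}

func main() {
	recursive := podEntry{label: "system_u:system_r:label-new", changePolicy: "Recursive"}
	mountOption := podEntry{label: "system_u:system_r:label1", changePolicy: "MountOption"}
	// Prints [SELinuxChangePolicy]: only the policy conflict remains,
	// because the Recursive pod's label was cleared before comparison.
	fmt.Println(conflicts(recursive, mountOption))
}

Running the sketch prints [SELinuxChangePolicy], matching the single conflict the updated tests expect for the Recursive plus MountOption pair.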