Merge pull request #130472 from jsafrane/selinux-controller-ignore-recursive

selinux: Ignore pods with Recursive policy
Kubernetes Prow Robot 2025-03-03 14:29:56 -08:00 committed by GitHub
commit df030f3851
4 changed files with 98 additions and 57 deletions
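At a glance: the warning controller now feeds the SELinux mount label into its conflict check instead of the process label, and that mount label is empty for pods that opt into the Recursive change policy, so such pods can no longer trigger SELinuxLabel conflicts. The sketch below is illustrative only; volumeMountInfo, labelForConflictCheck and the example label values are hypothetical, while SELinuxProcessLabel, SELinuxMountLabel and the v1 change-policy constants are the names that appear in the diff.

// Illustrative sketch only: volumeMountInfo and labelForConflictCheck are
// hypothetical stand-ins; SELinuxProcessLabel, SELinuxMountLabel and the
// v1 change-policy constants are the names used in the diff below.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

type volumeMountInfo struct {
	SELinuxProcessLabel string // label the container processes run with
	SELinuxMountLabel   string // label kubelet would pass to mount -o context; "" with the Recursive policy
}

// labelForConflictCheck mirrors the new syncPod behaviour: the conflict check
// sees the mount label, not the process label.
func labelForConflictCheck(info volumeMountInfo) string {
	return info.SELinuxMountLabel
}

func main() {
	recursivePod := volumeMountInfo{
		SELinuxProcessLabel: "system_u:system_r:container_t:s0:c1,c2",
		SELinuxMountLabel:   "", // cleared because the pod opted into v1.SELinuxChangePolicyRecursive
	}
	fmt.Printf("label used for conflict checks: %q (policy %s)\n",
		labelForConflictCheck(recursivePod), v1.SELinuxChangePolicyRecursive)
}

Because the check never sees a non-empty label from a Recursive pod, only the SELinuxChangePolicy comparison can still fire for such pods, which is what the updated tests below assert.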


@@ -119,21 +119,21 @@ func TestVolumeCache_AddVolumeSendConflicts(t *testing.T) {
podNamespace: "ns2",
podName: "pod2-recursive",
volumeName: "vol2",
label: "system_u:system_r:label2",
label: "", // labels on volumes with Recursive policy are cleared, we don't want the controller to report label conflicts on them
changePolicy: v1.SELinuxChangePolicyRecursive,
},
{
podNamespace: "ns3",
podName: "pod3-1",
volumeName: "vol3", // vol3 is used by 2 pods with the same label + recursive policy
label: "system_u:system_r:label3",
label: "", // labels on volumes with Recursive policy are cleared, we don't want the controller to report label conflicts on them
changePolicy: v1.SELinuxChangePolicyRecursive,
},
{
podNamespace: "ns3",
podName: "pod3-2",
volumeName: "vol3", // vol3 is used by 2 pods with the same label + recursive policy
label: "system_u:system_r:label3",
label: "", // labels on volumes with Recursive policy are cleared, we don't want the controller to report label conflicts on them
changePolicy: v1.SELinuxChangePolicyRecursive,
},
{
@@ -244,13 +244,13 @@ func TestVolumeCache_AddVolumeSendConflicts(t *testing.T) {
},
},
{
name: "existing volume in a new pod with new conflicting policy and existing label",
name: "existing volume in a new pod with new conflicting policy",
initialPods: existingPods,
podToAdd: podWithVolume{
podNamespace: "testns",
podName: "testpod",
volumeName: "vol1",
label: "system_u:system_r:label1",
label: "",
changePolicy: v1.SELinuxChangePolicyRecursive,
},
expectedConflicts: []Conflict{
@@ -264,35 +264,6 @@ func TestVolumeCache_AddVolumeSendConflicts(t *testing.T) {
},
},
},
{
name: "existing volume in a new pod with new conflicting policy and new conflicting label",
initialPods: existingPods,
podToAdd: podWithVolume{
podNamespace: "testns",
podName: "testpod",
volumeName: "vol1",
label: "system_u:system_r:label-new",
changePolicy: v1.SELinuxChangePolicyRecursive,
},
expectedConflicts: []Conflict{
{
PropertyName: "SELinuxChangePolicy",
EventReason: "SELinuxChangePolicyConflict",
Pod: cache.ObjectName{Namespace: "testns", Name: "testpod"},
PropertyValue: "Recursive",
OtherPod: cache.ObjectName{Namespace: "ns1", Name: "pod1-mountOption"},
OtherPropertyValue: "MountOption",
},
{
PropertyName: "SELinuxLabel",
EventReason: "SELinuxLabelConflict",
Pod: cache.ObjectName{Namespace: "testns", Name: "testpod"},
PropertyValue: "system_u:system_r:label-new",
OtherPod: cache.ObjectName{Namespace: "ns1", Name: "pod1-mountOption"},
OtherPropertyValue: "system_u:system_r:label1",
},
},
},
{
name: "existing pod is replaced with different non-conflicting policy and label",
initialPods: existingPods,
@@ -307,7 +278,7 @@ func TestVolumeCache_AddVolumeSendConflicts(t *testing.T) {
expectedConflicts: nil,
},
{
name: "existing pod is replaced with conflicting policy and label",
name: "existing pod is replaced with conflicting policy",
initialPods: existingPods,
podToAdd: podWithVolume{
@@ -326,14 +297,6 @@ func TestVolumeCache_AddVolumeSendConflicts(t *testing.T) {
OtherPod: cache.ObjectName{Namespace: "ns3", Name: "pod3-2"},
OtherPropertyValue: "Recursive",
},
{
PropertyName: "SELinuxLabel",
EventReason: "SELinuxLabelConflict",
Pod: cache.ObjectName{Namespace: "ns3", Name: "pod3-1"},
PropertyValue: "system_u:system_r:label-new",
OtherPod: cache.ObjectName{Namespace: "ns3", Name: "pod3-2"},
OtherPropertyValue: "system_u:system_r:label3",
},
},
},
{


@@ -451,10 +451,9 @@ func (c *Controller) syncPod(ctx context.Context, pod *v1.Pod) error {
continue
}
// Ignore how the volume is going to be mounted.
// Report any errors when a volume is used by two pods with different SELinux labels regardless of their
// SELinuxChangePolicy
seLinuxLabel := mountInfo.SELinuxProcessLabel
// Use the same label as kubelet will use for mount -o context.
// If the Pod has opted in to Recursive policy, it will be empty string here and no conflicts will be reported for it.
seLinuxLabel := mountInfo.SELinuxMountLabel
err = c.syncVolume(logger, pod, spec, seLinuxLabel, mountInfo.PluginSupportsSELinuxContextMount)
if err != nil {

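Together with the cache test changes above, the effective pairwise rule for two pods sharing a volume becomes: change policies are always compared, while an empty label (what a Recursive pod now contributes) never clashes with anything. Below is a minimal sketch of that rule with a hypothetical usage type and conflictsWith helper; only the v1 policy constants and the property names come from the diff.

// Hypothetical sketch of the pairwise rule; only the v1 policy constants and
// the property names come from the diff.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

type usage struct {
	label        string // "" for pods with the Recursive policy
	changePolicy string
}

// conflictsWith lists the properties on which two uses of the same volume clash.
func (a usage) conflictsWith(b usage) []string {
	var conflicts []string
	if a.changePolicy != b.changePolicy {
		conflicts = append(conflicts, "SELinuxChangePolicy")
	}
	// An empty label expresses no requirement, so it cannot clash with anything.
	if a.label != "" && b.label != "" && a.label != b.label {
		conflicts = append(conflicts, "SELinuxLabel")
	}
	return conflicts
}

func main() {
	recursive := usage{label: "", changePolicy: string(v1.SELinuxChangePolicyRecursive)}
	mountOption := usage{
		label:        "system_u:object_r:container_file_t:s0:c1,c2",
		changePolicy: string(v1.SELinuxChangePolicyMountOption),
	}
	fmt.Println(recursive.conflictsWith(mountOption)) // prints [SELinuxChangePolicy] only
}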

@@ -25,14 +25,17 @@ import (
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/klog/v2"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/controller"
volumecache "k8s.io/kubernetes/pkg/controller/volume/selinuxwarning/cache"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume"
volumetesting "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/utils/ptr"
@@ -117,7 +120,7 @@ func TestSELinuxWarningController_Sync(t *testing.T) {
{
volumeName: "fake-plugin/pv1",
podKey: cache.ObjectName{Namespace: namespace, Name: "pod1"},
label: ":::s0:c1,c2",
label: "", // Label is cleared with the Recursive policy
changePolicy: v1.SELinuxChangePolicyRecursive,
csiDriver: "ebs.csi.aws.com", // The PV is a fake EBS volume
},
@@ -221,6 +224,75 @@ func TestSELinuxWarningController_Sync(t *testing.T) {
`Normal SELinuxLabelConflict SELinuxLabel ":::s0:c98,c99" conflicts with pod pod1 that uses the same volume as this pod with SELinuxLabel ":::s0:c1,c2". If both pods land on the same node, only one of them may access the volume.`,
},
},
{
name: "existing pod with Recursive policy does not generate conflicts",
existingPVCs: []*v1.PersistentVolumeClaim{
pvcBoundToPV("pv1", "pvc1"),
},
existingPVs: []*v1.PersistentVolume{
pvBoundToPVC("pv1", "pvc1"),
},
existingPods: []*v1.Pod{
podWithPVC("pod1", "s0:c1,c2", ptr.To(v1.SELinuxChangePolicyRecursive), "pvc1", "vol1"),
pod("pod2", "s0:c98,c99", ptr.To(v1.SELinuxChangePolicyRecursive)),
},
pod: cache.ObjectName{Namespace: namespace, Name: "pod1"},
conflicts: []volumecache.Conflict{},
expectedAddedVolumes: []addedVolume{
{
volumeName: "fake-plugin/pv1",
podKey: cache.ObjectName{Namespace: namespace, Name: "pod1"},
label: "", // Label is cleared with the Recursive policy
changePolicy: v1.SELinuxChangePolicyRecursive,
csiDriver: "ebs.csi.aws.com", // The PV is a fake EBS volume
},
},
},
{
name: "existing pod with Recursive policy does not conflict with pod with MountOption policy label, only with the policy",
existingPVCs: []*v1.PersistentVolumeClaim{
pvcBoundToPV("pv1", "pvc1"),
},
existingPVs: []*v1.PersistentVolume{
pvBoundToPVC("pv1", "pvc1"),
},
existingPods: []*v1.Pod{
podWithPVC("pod1", "s0:c1,c2", ptr.To(v1.SELinuxChangePolicyRecursive), "pvc1", "vol1"),
podWithPVC("pod2", "s0:c98,c99", ptr.To(v1.SELinuxChangePolicyMountOption), "pvc1", "vol1"),
},
pod: cache.ObjectName{Namespace: namespace, Name: "pod1"},
conflicts: []volumecache.Conflict{
{
PropertyName: "SELinuxChangePolicy",
EventReason: "SELinuxChangePolicyConflict",
Pod: cache.ObjectName{Namespace: namespace, Name: "pod1"},
PropertyValue: string(v1.SELinuxChangePolicyRecursive),
OtherPod: cache.ObjectName{Namespace: namespace, Name: "pod2"},
OtherPropertyValue: string(v1.SELinuxChangePolicyMountOption),
},
{
PropertyName: "SELinuxChangePolicy",
EventReason: "SELinuxChangePolicyConflict",
Pod: cache.ObjectName{Namespace: namespace, Name: "pod2"},
PropertyValue: string(v1.SELinuxChangePolicyMountOption),
OtherPod: cache.ObjectName{Namespace: namespace, Name: "pod1"},
OtherPropertyValue: string(v1.SELinuxChangePolicyRecursive),
},
},
expectedAddedVolumes: []addedVolume{
{
volumeName: "fake-plugin/pv1",
podKey: cache.ObjectName{Namespace: namespace, Name: "pod1"},
label: "", // Label is cleared with the Recursive policy
changePolicy: v1.SELinuxChangePolicyRecursive,
csiDriver: "ebs.csi.aws.com", // The PV is a fake EBS volume
},
},
expectedEvents: []string{
`Normal SELinuxChangePolicyConflict SELinuxChangePolicy "Recursive" conflicts with pod pod2 that uses the same volume as this pod with SELinuxChangePolicy "MountOption". If both pods land on the same node, only one of them may access the volume.`,
`Normal SELinuxChangePolicyConflict SELinuxChangePolicy "MountOption" conflicts with pod pod1 that uses the same volume as this pod with SELinuxChangePolicy "Recursive". If both pods land on the same node, only one of them may access the volume.`,
},
},
{
name: "existing pod with PVC generates conflict, the other pod doesn't exist",
existingPVCs: []*v1.PersistentVolumeClaim{
@@ -281,6 +353,8 @@ func TestSELinuxWarningController_Sync(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SELinuxChangePolicy, true)
_, ctx := ktesting.NewTestContext(t)
_, plugin := volumetesting.GetTestKubeletVolumePluginMgr(t)
plugin.SupportsSELinux = true

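The expectedEvents strings in the new controller test cases follow a single template. The following small formatter is hypothetical and reproduces only that template; the Conflict fields mirror those used in the test table, but this is not the volumecache package's actual code.

// Hypothetical formatter; the Conflict fields mirror the test table above,
// but this is not the volumecache package's actual code.
package main

import "fmt"

type Conflict struct {
	PropertyName       string
	EventReason        string
	PropertyValue      string
	OtherPodName       string
	OtherPropertyValue string
}

// eventMessage reproduces the message template used by the expectedEvents strings.
func eventMessage(c Conflict) string {
	return fmt.Sprintf(
		"%s %q conflicts with pod %s that uses the same volume as this pod with %s %q. If both pods land on the same node, only one of them may access the volume.",
		c.PropertyName, c.PropertyValue, c.OtherPodName, c.PropertyName, c.OtherPropertyValue)
}

func main() {
	c := Conflict{
		PropertyName:       "SELinuxChangePolicy",
		EventReason:        "SELinuxChangePolicyConflict",
		PropertyValue:      "Recursive",
		OtherPodName:       "pod2",
		OtherPropertyValue: "MountOption",
	}
	// Matches the first expectedEvents entry in the new test case.
	fmt.Printf("Normal %s %s\n", c.EventReason, eventMessage(c))
}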

@@ -72,6 +72,10 @@ import (
//
// All other feature gate combinations should be invalid.
const (
controllerSELinuxMetricName = "selinux_warning_controller_selinux_volume_conflict"
)
var (
defaultSELinuxLabels = map[string]struct{ defaultProcessLabel, defaultFileLabel string }{
"debian": {"svirt_lxc_net_t", "svirt_lxc_file_t"},
@@ -502,7 +506,7 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount metrics and SELinuxWarningC
volumeMode: v1.ReadWriteOnce,
waitForSecondPodStart: true,
expectNodeIncreases: sets.New[string]( /* no metric is increased, admitted_total was already increased when the first pod started */ ),
expectControllerConflictProperty: "SELinuxLabel", /* SELinuxController does emit a warning for Recursive policy, while kubelet does not! */
expectControllerConflictProperty: "", /* SELinuxController does not emit any warning either */
testTags: []interface{}{framework.WithFeatureGate(features.SELinuxMountReadWriteOncePod), framework.WithFeatureGate(features.SELinuxChangePolicy), feature.SELinuxMountReadWriteOncePodOnly},
},
{
@@ -591,7 +595,7 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount metrics and SELinuxWarningC
volumeMode: v1.ReadWriteMany,
waitForSecondPodStart: true,
expectNodeIncreases: sets.New[string]( /* no metric is increased, admitted_total was already increased when the first pod started */ ),
expectControllerConflictProperty: "SELinuxLabel", /* SELinuxController does emit a warning for Recursive policy, while kubelet does not! */
expectControllerConflictProperty: "", /* SELinuxController does not emit any warning either */
testTags: []interface{}{framework.WithFeatureGate(features.SELinuxMountReadWriteOncePod), framework.WithFeatureGate(features.SELinuxChangePolicy), framework.WithFeatureGate(features.SELinuxMount)},
},
{
@@ -713,13 +717,13 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount metrics and SELinuxWarningC
// We don't need to compare the initial and final KCM metrics,
// KCM metrics report exact pod namespaces+names as labels and the metric value is always "1".
err = waitForControllerMetric(ctx, grabber, f.Namespace.Name, pod.Name, pod2.Name, t.expectControllerConflictProperty, framework.PodStartShortTimeout)
framework.ExpectNoError(err, "failed to get metrics from KCM")
framework.ExpectNoError(err, "while waiting for metrics from KCM")
// Check the controller generated a conflict event on the first pod
err = waitForConflictEvent(ctx, m.cs, pod, pod2, t.expectControllerConflictProperty, f.Timeouts.PodStart)
framework.ExpectNoError(err, "failed to receive event on the first pod")
framework.ExpectNoError(err, "while waiting for an event on the first pod")
// Check the controller generated an event on the second pod
err = waitForConflictEvent(ctx, m.cs, pod2, pod, t.expectControllerConflictProperty, f.Timeouts.PodStart)
framework.ExpectNoError(err, "failed to receive event on the second pod")
framework.ExpectNoError(err, "while waiting for an event on the second pod")
}
}
// t.testTags is array and it's not possible to use It("name", func(){xxx}, t.testTags...)
@@ -778,7 +782,7 @@ func grabKCMSELinuxMetrics(ctx context.Context, grabber *e2emetrics.Grabber, nam
for i := range samples {
// E.g. "selinux_warning_controller_selinux_volume_conflict"
metricName := samples[i].Metric[testutil.MetricNameLabel]
if metricName != "selinux_warning_controller_selinux_volume_conflict" {
if metricName != controllerSELinuxMetricName {
continue
}
@@ -836,7 +840,6 @@ func waitForNodeMetricIncrease(ctx context.Context, grabber *e2emetrics.Grabber,
}
func waitForControllerMetric(ctx context.Context, grabber *e2emetrics.Grabber, namespace, pod1Name, pod2Name, propertyName string, timeout time.Duration) error {
var noIncreaseMetrics sets.Set[string]
var metrics map[string]float64
expectLabels := []string{
@@ -846,6 +849,8 @@ func waitForControllerMetric(ctx context.Context, grabber *e2emetrics.Grabber, n
fmt.Sprintf("pod2_namespace=%q", namespace),
fmt.Sprintf("property=%q", propertyName),
}
framework.Logf("Waiting for KCM metric %s{%+v}", controllerSELinuxMetricName, expectLabels)
err := wait.PollUntilContextTimeout(ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) {
var err error
metrics, err = grabKCMSELinuxMetrics(ctx, grabber, namespace)
@@ -876,8 +881,8 @@ func waitForControllerMetric(ctx context.Context, grabber *e2emetrics.Grabber, n
ginkgo.By("Dumping final KCM metrics")
dumpMetrics(metrics)
if err == context.DeadlineExceeded {
return fmt.Errorf("timed out waiting for KCM metrics %v", noIncreaseMetrics.UnsortedList())
if err != nil {
return fmt.Errorf("error waiting for KCM metrics %s{%+v}: %w", controllerSELinuxMetricName, expectLabels, err)
}
return err
}
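
For reference, the e2e change waits for a selinux_warning_controller_selinux_volume_conflict sample whose labels include the pod names, namespaces and the conflicted property. The matching logic itself is not shown in the diff; the sketch below is only a guess at its shape (matches, the sample string and the e2e-ns namespace are made up), illustrating how the expectLabels fragments built with fmt.Sprintf could be checked against a sample's label set.

// A guess at the shape of the label matching, not the e2e framework's actual
// helper; matches, the sample string and the e2e-ns namespace are made up.
package main

import (
	"fmt"
	"strings"
)

// matches reports whether every expected label fragment appears in a metric
// sample's label string.
func matches(sampleLabels string, expect []string) bool {
	for _, fragment := range expect {
		if !strings.Contains(sampleLabels, fragment) {
			return false
		}
	}
	return true
}

func main() {
	sample := `pod1="testpod", pod1_namespace="e2e-ns", pod2="testpod-2", pod2_namespace="e2e-ns", property="SELinuxChangePolicy"`
	expect := []string{
		fmt.Sprintf("pod1_namespace=%q", "e2e-ns"),
		fmt.Sprintf("property=%q", "SELinuxChangePolicy"),
	}
	fmt.Println(matches(sample, expect)) // true
}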