selinux: Do not report conflicts with finished pods

When a Pod reaches its final state (Succeeded or Failed), its volumes
are unmounted, and therefore their SELinux mount options can no longer
conflict with those of any other pod.

Let the SELinux controller monitor "pod updated" events to detect when
a pod is finished.
commit 9222f08d22 (parent f02a1fc357)
Author: Jan Safranek
Date:   2025-12-04 14:21:47 +01:00

2 changed files with 41 additions and 3 deletions
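
For context: a pod is "finished" exactly when its phase reaches one of the two
terminal values. A minimal sketch of that check, using a hypothetical helper
name (the change below inlines the comparison instead of defining a helper):

package selinuxsketch

import v1 "k8s.io/api/core/v1"

// podIsFinished is an illustrative helper, not part of this commit.
// Succeeded and Failed are the only terminal pod phases; once either is
// reached, kubelet unmounts the pod's volumes, so their SELinux mount
// options cannot conflict with any other pod.
func podIsFinished(pod *v1.Pod) bool {
	return pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed
}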


@@ -114,11 +114,19 @@ func (c *volumeCache) AddVolume(logger klog.Logger, volumeName v1.UniqueVolumeName
 	}
 	// The volume is already known
 	// Add the pod to the cache or update its properties
-	volume.pods[podKey] = podInfo{
+	podInfo := podInfo{
 		seLinuxLabel: label,
 		changePolicy: changePolicy,
 	}
+	oldPodInfo, found := volume.pods[podKey]
+	if found && oldPodInfo == podInfo {
+		// The Pod is already known too and nothing changed since the last update.
+		// All conflicts were already reported when the Pod was added / updated in the cache last time.
+		return conflicts
+	}
+	// Add the updated pod info to the cache
+	volume.pods[podKey] = podInfo
 	// Emit conflicts for the pod
 	for otherPodKey, otherPodInfo := range volume.pods {
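
The early return above relies on podInfo being a comparable struct, so ==
compares every field at once. A self-contained sketch of that property, with
the field types simplified for illustration (the real definitions live in the
controller's cache package):

package main

import "fmt"

// Simplified stand-in for the cache's podInfo; field types are assumed
// here for illustration.
type podInfo struct {
	seLinuxLabel string
	changePolicy string
}

func main() {
	cached := podInfo{seLinuxLabel: "system_u:object_r:container_file_t:s0:c1,c2", changePolicy: "MountOption"}
	update := podInfo{seLinuxLabel: "system_u:object_r:container_file_t:s0:c1,c2", changePolicy: "MountOption"}
	// Because all fields are comparable, == is a full value comparison.
	// This is what lets AddVolume return early on a no-op update instead
	// of re-emitting conflicts that were already reported.
	fmt.Println(cached == update) // true
}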


@@ -141,8 +141,8 @@ func NewController(
 	logger := klog.FromContext(ctx)
 	_, err = podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc:    func(obj interface{}) { c.enqueuePod(logger, obj) },
+		UpdateFunc: func(oldObj, newObj interface{}) { c.updatePod(logger, oldObj, newObj) },
 		DeleteFunc: func(obj interface{}) { c.enqueuePod(logger, obj) },
-		// Not watching updates: Pod volumes and SecurityContext are immutable after creation
 	})
 	if err != nil {
 		return nil, err
@@ -186,6 +186,31 @@ func (c *Controller) enqueuePod(_ klog.Logger, obj interface{}) {
 	c.queue.Add(podRef)
 }
 
+func (c *Controller) updatePod(logger klog.Logger, oldObj, newObj interface{}) {
+	// Pod.Spec fields that are relevant to this controller are immutable after creation (i.e.
+	// pod volumes, SELinux labels, privileged flag). React to updates only when the Pod
+	// reaches its final state - kubelet will unmount the Pod volumes and the controller should
+	// therefore remove them from the cache.
+	oldPod, ok := oldObj.(*v1.Pod)
+	if !ok {
+		return
+	}
+	newPod, ok := newObj.(*v1.Pod)
+	if !ok {
+		return
+	}
+	// This is an optimization. In theory, passing most pod updates to the controller queue should be a no-op.
+	// To save some CPU, pass only pod updates that can cause any action in the controller.
+	if oldPod.Status.Phase == newPod.Status.Phase {
+		return
+	}
+	if newPod.Status.Phase != v1.PodFailed && newPod.Status.Phase != v1.PodSucceeded {
+		return
+	}
+	c.enqueuePod(logger, newObj)
+}
+
 func (c *Controller) addPVC(logger klog.Logger, obj interface{}) {
 	pvc, ok := obj.(*v1.PersistentVolumeClaim)
 	if !ok {
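
updatePod is deliberately a cheap pre-filter: most pod status updates
(conditions, container statuses, informer resyncs) keep the phase unchanged
and are dropped before they ever touch the workqueue. The same logic restated
as a standalone predicate, with a hypothetical name:

package selinuxsketch

import v1 "k8s.io/api/core/v1"

// podBecameFinal is an illustrative restatement of the filter in updatePod,
// not part of this commit.
func podBecameFinal(oldPod, newPod *v1.Pod) bool {
	if oldPod.Status.Phase == newPod.Status.Phase {
		// Also covers informer resyncs, which redeliver unchanged objects.
		return false
	}
	return newPod.Status.Phase == v1.PodFailed || newPod.Status.Phase == v1.PodSucceeded
}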
@@ -392,6 +417,11 @@ func (c *Controller) sync(ctx context.Context, podRef cache.ObjectName) error {
 		logger.V(5).Info("Error getting pod from informer", "pod", klog.KObj(pod), "podUID", pod.UID, "err", err)
 		return err
 	}
+	if pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodSucceeded {
+		// The pod has reached its final state and kubelet is unmounting its volumes.
+		// Remove them from the cache.
+		return c.syncPodDelete(ctx, podRef)
+	}
 	return c.syncPod(ctx, pod)
 }
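
Design note: routing finished pods through syncPodDelete makes a terminal pod
behave exactly like a deleted one, so both paths converge on removing the
pod's volumes from the cache; the DeleteFunc event that eventually follows for
the same pod should then be a harmless no-op.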