diff --git a/pkg/controller/disruption/disruption.go b/pkg/controller/disruption/disruption.go
index 31a7228d4f5..6ebd8fb8dd1 100644
--- a/pkg/controller/disruption/disruption.go
+++ b/pkg/controller/disruption/disruption.go
@@ -19,7 +19,6 @@ package disruption
 import (
 	"context"
 	"fmt"
-	"strings"
 	"time"
 
 	apps "k8s.io/api/apps/v1beta1"
@@ -706,8 +705,11 @@ func (dc *DisruptionController) trySync(ctx context.Context, pdb *policy.PodDisr
 	}
 	// We have unmamanged pods, instead of erroring and hotlooping in disruption controller, log and continue.
 	if len(unmanagedPods) > 0 {
-		klog.V(4).Infof("found unmanaged pods associated with this PDB: %v",
-			strings.Join(unmanagedPods, ",'"))
+		klog.V(4).Infof("found unmanaged pods associated with this PDB: %v", unmanagedPods)
+		dc.recorder.Eventf(pdb, v1.EventTypeWarning, "UnmanagedPods", "Pods selected by this PodDisruptionBudget (selector: %v) were found "+
+			"to be unmanaged or managed by an unsupported controller (missing a /scale subresource). As a result, the status of the PDB "+
+			"cannot be calculated correctly, which may result in undefined behavior. To account for these pods please set \".spec.minAvailable\" "+
+			"field of the PDB to an integer value.", pdb.Spec.Selector)
 	}
 
 	currentTime := dc.clock.Now()
diff --git a/pkg/controller/disruption/disruption_test.go b/pkg/controller/disruption/disruption_test.go
index 6b0a95fa8d8..56c2035125b 100644
--- a/pkg/controller/disruption/disruption_test.go
+++ b/pkg/controller/disruption/disruption_test.go
@@ -49,6 +49,7 @@ import (
 	scalefake "k8s.io/client-go/scale/fake"
 	core "k8s.io/client-go/testing"
 	"k8s.io/client-go/tools/cache"
+	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/klog/v2"
 	_ "k8s.io/kubernetes/pkg/apis/core/install"
@@ -187,6 +188,7 @@ func newFakeDisruptionControllerWithTime(ctx context.Context, now time.Time) (*d
 	dc.rsListerSynced = alwaysReady
 	dc.dListerSynced = alwaysReady
 	dc.ssListerSynced = alwaysReady
+	dc.recorder = record.NewFakeRecorder(10)
 
 	informerFactory.Start(ctx.Done())
 	informerFactory.WaitForCacheSync(ctx.Done())
@@ -485,6 +487,8 @@ func TestIntegerMaxUnavailable(t *testing.T) {
 	dc.sync(ctx, pdbName)
 	ps.VerifyDisruptionAllowed(t, pdbName, 0)
 
+	verifyUnsupportedControllerEventEmitted(t, dc)
+
 }
 
 // Verify that an integer MaxUnavailable will recompute allowed disruptions when the scale of
@@ -556,6 +560,7 @@ func TestNakedPod(t *testing.T) {
 	dc.sync(ctx, pdbName)
 
 	ps.VerifyDisruptionAllowed(t, pdbName, 0)
+	verifyUnsupportedControllerEventEmitted(t, dc)
 }
 
 // Verify that disruption controller is not erroring when unmanaged pods are found
@@ -572,9 +577,8 @@ func TestStatusForUnmanagedPod(t *testing.T) {
 	pod, _ := newPod(t, "unmanaged")
 	add(t, dc.podStore, pod)
 	dc.sync(ctx, pdbName)
-
 	ps.VerifyNoStatusError(t, pdbName)
-
+	verifyUnsupportedControllerEventEmitted(t, dc)
 }
 
 // Check if the unmanaged pods are correctly collected or not
@@ -598,7 +602,7 @@ func TestTotalUnmanagedPods(t *testing.T) {
 		t.Fatalf("expected one pod to be unmanaged pod but found %d", len(unmanagedPods))
 	}
 	ps.VerifyNoStatusError(t, pdbName)
-
+	verifyUnsupportedControllerEventEmitted(t, dc)
 }
 
 // Verify that we count the scale of a ReplicaSet even when it has no Deployment.
@@ -1538,6 +1542,20 @@ func waitForCacheCount(store cache.Store, n int) error {
 	})
 }
 
+func verifyUnsupportedControllerEventEmitted(t *testing.T, dc *disruptionController) {
+	// Verify that an UnmanagedPod event is generated
+	found := false
+	for e := range dc.recorder.(*record.FakeRecorder).Events {
+		if strings.Contains(e, "managed by an unsupported controller") {
+			found = true
+			break
+		}
+	}
+	if !found {
+		t.Fatalf("UnmanagedPod event not generated")
+	}
+}
+
 // TestMain adds klog flags to make debugging tests easier.
 func TestMain(m *testing.M) {
 	klog.InitFlags(flag.CommandLine)
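Note (not part of the patch above): the new test helper relies on client-go's FakeRecorder, which buffers each emitted event as a formatted "<type> <reason> <message>" string on its Events channel. Because that channel is never closed, a bare range over it only terminates once a matching event is read. The standalone sketch below illustrates the same emit-and-match pattern with a non-blocking drain instead; it assumes only the standard k8s.io/client-go/tools/record and k8s.io/api/core/v1 APIs, and the event text is made up for illustration.

package main

import (
	"fmt"
	"strings"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/record"
)

func main() {
	// Buffered fake recorder, capacity 10, mirroring the patch's
	// newFakeDisruptionControllerWithTime setup.
	recorder := record.NewFakeRecorder(10)

	// Eventf pushes "<type> <reason> <message>" onto recorder.Events.
	recorder.Eventf(&v1.Pod{}, v1.EventTypeWarning, "UnmanagedPods",
		"pods are unmanaged or managed by an unsupported controller")

	// Drain the buffered channel without blocking and look for the expected substring.
	found := false
drain:
	for {
		select {
		case e := <-recorder.Events:
			if strings.Contains(e, "managed by an unsupported controller") {
				found = true
				break drain
			}
		default:
			break drain // buffer empty, stop looking
		}
	}
	fmt.Println("event found:", found)
}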