Merge pull request #116256 from atiratree/unmanaged-pods

improve message, log level and testing for unmanaged pods in disruption controller
Kubernetes Prow Robot 2023-03-06 08:19:14 -08:00 committed by GitHub
commit 0270fc75d0
2 changed files with 47 additions and 18 deletions

--- a/pkg/controller/disruption/disruption.go
+++ b/pkg/controller/disruption/disruption.go

@@ -705,10 +705,10 @@ func (dc *DisruptionController) trySync(ctx context.Context, pdb *policy.PodDisr
     }
     // We have unmamanged pods, instead of erroring and hotlooping in disruption controller, log and continue.
     if len(unmanagedPods) > 0 {
-        klog.V(4).Infof("found unmanaged pods associated with this PDB: %v", unmanagedPods)
+        klog.Warningf("found unmanaged pods associated with this PDB: %v", unmanagedPods)
         dc.recorder.Eventf(pdb, v1.EventTypeWarning, "UnmanagedPods", "Pods selected by this PodDisruptionBudget (selector: %v) were found "+
-            "to be unmanaged or managed by an unsupported controller (missing a /scale subresource). As a result, the status of the PDB "+
-            "cannot be calculated correctly, which may result in undefined behavior. To account for these pods please set \".spec.minAvailable\" "+
+            "to be unmanaged. As a result, the status of the PDB cannot be calculated correctly, which may result in undefined behavior. "+
+            "To account for these pods please set \".spec.minAvailable\" "+
             "field of the PDB to an integer value.", pdb.Spec.Selector)
     }
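
A note on the reworded event: together with the new tests below, it separates two cases that the old text lumped together. Only pods with no controlling owner reference are collected into unmanagedPods and reported through the UnmanagedPods event; a pod that does have a controller, but one without a usable /scale subresource, makes the expected-pod-count calculation fail and is reported via a CalculateExpectedPodCountFailed event instead (as the new TestUnsupportedControllerPod below expects). A minimal sketch of that first check, using a hypothetical isUnmanaged helper rather than the controller's own code:

package example

import (
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// isUnmanaged reports whether the pod has no owner reference with
// Controller=true, i.e. the case the UnmanagedPods event above describes.
// Pods that do have a controlling owner are handled by a different code path.
func isUnmanaged(pod *v1.Pod) bool {
    return metav1.GetControllerOf(pod) == nil
}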

--- a/pkg/controller/disruption/disruption_test.go
+++ b/pkg/controller/disruption/disruption_test.go

@@ -188,7 +188,7 @@ func newFakeDisruptionControllerWithTime(ctx context.Context, now time.Time) (*d
     dc.rsListerSynced = alwaysReady
     dc.dListerSynced = alwaysReady
     dc.ssListerSynced = alwaysReady
-    dc.recorder = record.NewFakeRecorder(10)
+    dc.recorder = record.NewFakeRecorder(100)

     informerFactory.Start(ctx.Done())
     informerFactory.WaitForCacheSync(ctx.Done())
@@ -487,7 +487,7 @@ func TestIntegerMaxUnavailable(t *testing.T) {
     dc.sync(ctx, pdbName)
     ps.VerifyDisruptionAllowed(t, pdbName, 0)
-    verifyUnsupportedControllerEventEmitted(t, dc)
+    verifyEventEmitted(t, dc, "UnmanagedPods")
 }
@@ -560,7 +560,36 @@ func TestNakedPod(t *testing.T) {
     dc.sync(ctx, pdbName)
     ps.VerifyDisruptionAllowed(t, pdbName, 0)
-    verifyUnsupportedControllerEventEmitted(t, dc)
+    verifyEventEmitted(t, dc, "UnmanagedPods")
+}
+
+// Create a pod with unsupported controller, and verify that a PDB with a percentage
+// specified won't allow a disruption.
+func TestUnsupportedControllerPod(t *testing.T) {
+    dc, ps := newFakeDisruptionController()
+
+    pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("28%"))
+    add(t, dc.pdbStore, pdb)
+    ctx := context.TODO()
+    dc.sync(ctx, pdbName)
+    // This verifies that when a PDB has 0 pods, disruptions are not allowed.
+    ps.VerifyDisruptionAllowed(t, pdbName, 0)
+
+    pod, _ := newPod(t, "naked")
+    isController := true
+    pod.OwnerReferences = append(pod.OwnerReferences, metav1.OwnerReference{
+        APIVersion: "apps.test.io/v1",
+        Kind:       "TestWorkload",
+        Name:       "fake-controller",
+        UID:        "b7329742-8daa-493a-8881-6ca07139172b",
+        Controller: &isController,
+    })
+
+    add(t, dc.podStore, pod)
+    dc.sync(ctx, pdbName)
+    ps.VerifyDisruptionAllowed(t, pdbName, 0)
+    verifyEventEmitted(t, dc, "CalculateExpectedPodCountFailed")
 }

 // Verify that disruption controller is not erroring when unmanaged pods are found
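
The new TestUnsupportedControllerPod pairs with the event's advice: a percentage-based minAvailable (or any maxUnavailable) forces the controller to resolve the expected pod count through the pods' owners, which cannot work when the owner exposes no /scale subresource, whereas with an integer minAvailable the controller only needs to count the selected pods. A minimal sketch of a PDB written that way, with a placeholder name and selector:

package example

import (
    policyv1 "k8s.io/api/policy/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/intstr"
)

// newIntegerMinAvailablePDB builds a PodDisruptionBudget whose minAvailable is an
// integer, as the UnmanagedPods event recommends. "example-pdb" and the
// app=example selector are illustrative, not names from the test suite.
func newIntegerMinAvailablePDB() *policyv1.PodDisruptionBudget {
    minAvailable := intstr.FromInt(2)
    return &policyv1.PodDisruptionBudget{
        ObjectMeta: metav1.ObjectMeta{Name: "example-pdb"},
        Spec: policyv1.PodDisruptionBudgetSpec{
            MinAvailable: &minAvailable,
            Selector:     &metav1.LabelSelector{MatchLabels: map[string]string{"app": "example"}},
        },
    }
}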
@@ -578,7 +607,7 @@ func TestStatusForUnmanagedPod(t *testing.T) {
     add(t, dc.podStore, pod)
     dc.sync(ctx, pdbName)
     ps.VerifyNoStatusError(t, pdbName)
-    verifyUnsupportedControllerEventEmitted(t, dc)
+    verifyEventEmitted(t, dc, "UnmanagedPods")
 }

 // Check if the unmanaged pods are correctly collected or not
@@ -602,7 +631,7 @@ func TestTotalUnmanagedPods(t *testing.T) {
         t.Fatalf("expected one pod to be unmanaged pod but found %d", len(unmanagedPods))
     }
     ps.VerifyNoStatusError(t, pdbName)
-    verifyUnsupportedControllerEventEmitted(t, dc)
+    verifyEventEmitted(t, dc, "UnmanagedPods")
 }

 // Verify that we count the scale of a ReplicaSet even when it has no Deployment.
@@ -1542,17 +1571,17 @@ func waitForCacheCount(store cache.Store, n int) error {
     })
 }

-func verifyUnsupportedControllerEventEmitted(t *testing.T, dc *disruptionController) {
-    // Verify that an UnmanagedPod event is generated
-    found := false
-    for e := range dc.recorder.(*record.FakeRecorder).Events {
-        if strings.Contains(e, "managed by an unsupported controller") {
-            found = true
-            break
-        }
-    }
-    if !found {
-        t.Fatalf("UnmanagedPod event not generated")
-    }
-}
+func verifyEventEmitted(t *testing.T, dc *disruptionController, expectedEvent string) {
+    ticker := time.NewTicker(500 * time.Millisecond)
+    for {
+        select {
+        case e := <-dc.recorder.(*record.FakeRecorder).Events:
+            if strings.Contains(e, expectedEvent) {
+                return
+            }
+        case <-ticker.C:
+            t.Fatalf("Timed out: expected event not generated: %v", expectedEvent)
+        }
+    }
+}
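
The rewritten helper also explains the larger FakeRecorder buffer above: FakeRecorder delivers events with a plain channel send on its Events channel, so a small buffer can stall the controller under test, and the channel is never closed, so the old range-based loop could only end by finding a match. Polling with a select and a deadline avoids both problems. A standalone sketch of the same pattern, using time.After rather than a ticker and an assumed waitForEvent name; neither is part of the disruption controller's test suite:

package example

import (
    "strings"
    "testing"
    "time"

    "k8s.io/client-go/tools/record"
)

// waitForEvent drains a FakeRecorder until an event containing substr arrives,
// or fails the test once the timeout elapses. The helper name and timeout are
// illustrative.
func waitForEvent(t *testing.T, rec *record.FakeRecorder, substr string, timeout time.Duration) {
    t.Helper()
    deadline := time.After(timeout)
    for {
        select {
        case e := <-rec.Events:
            if strings.Contains(e, substr) {
                return
            }
        case <-deadline:
            t.Fatalf("timed out waiting for an event containing %q", substr)
        }
    }
}

For example, a test that creates its recorder with record.NewFakeRecorder(100) can call waitForEvent(t, rec, "UnmanagedPods", time.Second) after driving a sync.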