[disruptioncontroller] Don't error for unmanaged pods

As of now, we allow PDBs to be applied to pods via
selectors, so a PDB may match unmanaged pods (pods that
have no backing controller). Such pods are now logged
instead of immediately causing a sync error. This keeps
the disruption controller from repeatedly updating the
status subresource, preventing excessive and expensive
writes to etcd.
This commit is contained in:
ravisantoshgudimetla
2021-07-01 16:10:17 -04:00
parent 1861e4756d
commit 2c116055f7
3 changed files with 105 additions and 10 deletions

View File

@@ -22,6 +22,7 @@ import (
"fmt"
"os"
"runtime/debug"
"strings"
"sync"
"testing"
"time"
@@ -115,6 +116,15 @@ func (ps *pdbStates) VerifyDisruptionAllowed(t *testing.T, key string, disruptio
}
}
// VerifyNoStatusError fails the test if the PDB identified by key carries a
// sync-failure condition caused by unmanaged pods (pods with no controller
// ref). The disruption controller is expected to tolerate such pods rather
// than surface them as a status error.
func (ps *pdbStates) VerifyNoStatusError(t *testing.T, key string) {
	// Mark this function as a test helper so failures are attributed to the
	// caller's line, not this one.
	t.Helper()
	pdb := ps.Get(key)
	for _, condition := range pdb.Status.Conditions {
		// A "found no controller ref" message combined with SyncFailedReason
		// means the controller treated an unmanaged pod as a sync error.
		if strings.Contains(condition.Message, "found no controller ref") && condition.Reason == policy.SyncFailedReason {
			t.Fatalf("PodDisruption Controller should not error when unmanaged pods are found but it failed for %q", key)
		}
	}
}
type disruptionController struct {
*DisruptionController
@@ -534,6 +544,47 @@ func TestNakedPod(t *testing.T) {
ps.VerifyDisruptionAllowed(t, pdbName, 0)
}
// TestStatusForUnmanagedPod checks that the disruption controller does not
// record a sync-failure status when a PDB's selector matches a pod that has
// no owning controller.
func TestStatusForUnmanagedPod(t *testing.T) {
	ctrl, states := newFakeDisruptionController()
	budget, key := newMinAvailablePodDisruptionBudget(t, intstr.FromString("28%"))
	add(t, ctrl.pdbStore, budget)
	ctrl.sync(key)
	// With no pods behind the PDB, no disruptions may be allowed.
	states.VerifyDisruptionAllowed(t, key, 0)

	// Introduce a pod without a controller ref and resync; the controller
	// should tolerate it without reporting a sync error.
	unmanagedPod, _ := newPod(t, "unmanaged")
	add(t, ctrl.podStore, unmanagedPod)
	ctrl.sync(key)
	states.VerifyNoStatusError(t, key)
}
// TestTotalUnmanagedPods verifies that getExpectedScale correctly collects
// pods that have no backing controller into its unmanaged-pods result.
func TestTotalUnmanagedPods(t *testing.T) {
	dc, ps := newFakeDisruptionController()
	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("28%"))
	add(t, dc.pdbStore, pdb)
	dc.sync(pdbName)
	// This verifies that when a PDB has 0 pods, disruptions are not allowed.
	ps.VerifyDisruptionAllowed(t, pdbName, 0)
	pod, _ := newPod(t, "unmanaged")
	add(t, dc.podStore, pod)
	dc.sync(pdbName)
	// Composite literal instead of var + append for a single known element.
	pods := []*v1.Pod{pod}
	// The unmanaged pod must be reported separately rather than counted
	// toward the expected scale.
	_, unmanagedPods, _ := dc.getExpectedScale(pdb, pods)
	if len(unmanagedPods) != 1 {
		t.Fatalf("expected one pod to be unmanaged pod but found %d", len(unmanagedPods))
	}
	ps.VerifyNoStatusError(t, pdbName)
}
// Verify that we count the scale of a ReplicaSet even when it has no Deployment.
func TestReplicaSet(t *testing.T) {
dc, ps := newFakeDisruptionController()
@@ -752,7 +803,7 @@ func TestReplicationController(t *testing.T) {
rogue, _ := newPod(t, "rogue")
add(t, dc.podStore, rogue)
dc.sync(pdbName)
ps.VerifyDisruptionAllowed(t, pdbName, 0)
ps.VerifyDisruptionAllowed(t, pdbName, 2)
}
func TestStatefulSetController(t *testing.T) {