Mirror of https://github.com/k3s-io/kubernetes.git
Add a warning event when pdb has found an unmanaged pod

commit 1f42ebc013
parent e55f2a9b54
pkg/controller/disruption/disruption.go

@@ -19,7 +19,6 @@ package disruption
 import (
 	"context"
 	"fmt"
-	"strings"
 	"time"

 	apps "k8s.io/api/apps/v1beta1"
@@ -706,8 +705,11 @@ func (dc *DisruptionController) trySync(ctx context.Context, pdb *policy.PodDisruptionBudget
 	}
 	// We have unmamanged pods, instead of erroring and hotlooping in disruption controller, log and continue.
 	if len(unmanagedPods) > 0 {
-		klog.V(4).Infof("found unmanaged pods associated with this PDB: %v",
-			strings.Join(unmanagedPods, ",'"))
+		klog.V(4).Infof("found unmanaged pods associated with this PDB: %v", unmanagedPods)
+		dc.recorder.Eventf(pdb, v1.EventTypeWarning, "UnmanagedPods", "Pods selected by this PodDisruptionBudget (selector: %v) were found "+
+			"to be unmanaged or managed by an unsupported controller (missing a /scale subresource). As a result, the status of the PDB "+
+			"cannot be calculated correctly, which may result in undefined behavior. To account for these pods please set \".spec.minAvailable\" "+
+			"field of the PDB to an integer value.", pdb.Spec.Selector)
 	}

 	currentTime := dc.clock.Now()
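Note: with this hunk the controller keeps the verbosity-4 log line (now folded into a single call) and additionally records a Warning event with reason UnmanagedPods on the PDB object itself, so `kubectl describe pdb <name>` surfaces the problem without raising log verbosity. The dc.recorder used here is a client-go record.EventRecorder. As background, a minimal sketch of how controllers typically wire up such a recorder; the helper name newRecorder and the component name are illustrative assumptions, not taken from this commit:

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
)

// newRecorder wires an EventRecorder the way controllers usually do:
// a broadcaster fans recorded events out to the API server as Event
// objects, and the recorder stamps each one with the component name.
func newRecorder(client kubernetes.Interface) record.EventRecorder {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{
		Interface: client.CoreV1().Events(""),
	})
	return broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "disruption-controller"})
}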
pkg/controller/disruption/disruption_test.go

@@ -49,6 +49,7 @@ import (
 	scalefake "k8s.io/client-go/scale/fake"
 	core "k8s.io/client-go/testing"
 	"k8s.io/client-go/tools/cache"
+	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/klog/v2"
 	_ "k8s.io/kubernetes/pkg/apis/core/install"
@@ -187,6 +188,7 @@ func newFakeDisruptionControllerWithTime(ctx context.Context, now time.Time) (*disruptionController, *pdbStates) {
 	dc.rsListerSynced = alwaysReady
 	dc.dListerSynced = alwaysReady
 	dc.ssListerSynced = alwaysReady
+	dc.recorder = record.NewFakeRecorder(10)
 	informerFactory.Start(ctx.Done())
 	informerFactory.WaitForCacheSync(ctx.Done())

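Note: record.NewFakeRecorder(10) replaces the real recorder with one that pushes each formatted event onto a buffered channel (capacity 10), which is what lets the tests below assert that the warning was emitted. A self-contained sketch of that behavior; the pod object is an illustrative stand-in:

package example

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/record"
)

func Demo() {
	// FakeRecorder never contacts the API server; it formats each event
	// as "<type> <reason> <message>" and sends it on the Events channel.
	rec := record.NewFakeRecorder(10)
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "unmanaged"}}
	rec.Eventf(pod, v1.EventTypeWarning, "UnmanagedPods", "found %d unmanaged pods", 1)
	fmt.Println(<-rec.Events) // prints: Warning UnmanagedPods found 1 unmanaged pods
}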
@@ -485,6 +487,8 @@ func TestIntegerMaxUnavailable(t *testing.T) {
 	dc.sync(ctx, pdbName)

 	ps.VerifyDisruptionAllowed(t, pdbName, 0)
+	verifyUnsupportedControllerEventEmitted(t, dc)
+
 }

 // Verify that an integer MaxUnavailable will recompute allowed disruptions when the scale of
@@ -556,6 +560,7 @@ func TestNakedPod(t *testing.T) {
 	dc.sync(ctx, pdbName)

 	ps.VerifyDisruptionAllowed(t, pdbName, 0)
+	verifyUnsupportedControllerEventEmitted(t, dc)
 }

 // Verify that disruption controller is not erroring when unmanaged pods are found
@@ -572,9 +577,8 @@ func TestStatusForUnmanagedPod(t *testing.T) {
 	pod, _ := newPod(t, "unmanaged")
 	add(t, dc.podStore, pod)
 	dc.sync(ctx, pdbName)

 	ps.VerifyNoStatusError(t, pdbName)
-
+	verifyUnsupportedControllerEventEmitted(t, dc)
 }

 // Check if the unmanaged pods are correctly collected or not
@@ -598,7 +602,7 @@ func TestTotalUnmanagedPods(t *testing.T) {
 		t.Fatalf("expected one pod to be unmanaged pod but found %d", len(unmanagedPods))
 	}
 	ps.VerifyNoStatusError(t, pdbName)
-
+	verifyUnsupportedControllerEventEmitted(t, dc)
 }

 // Verify that we count the scale of a ReplicaSet even when it has no Deployment.
@@ -1538,6 +1542,20 @@ func waitForCacheCount(store cache.Store, n int) error {
 	})
 }

+func verifyUnsupportedControllerEventEmitted(t *testing.T, dc *disruptionController) {
+	// Verify that an UnmanagedPod event is generated
+	found := false
+	for e := range dc.recorder.(*record.FakeRecorder).Events {
+		if strings.Contains(e, "managed by an unsupported controller") {
+			found = true
+			break
+		}
+	}
+	if !found {
+		t.Fatalf("UnmanagedPod event not generated")
+	}
+}
+
 // TestMain adds klog flags to make debugging tests easier.
 func TestMain(m *testing.M) {
 	klog.InitFlags(flag.CommandLine)
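Note: one caveat in verifyUnsupportedControllerEventEmitted as committed: ranging over the FakeRecorder's Events channel blocks once the buffer is drained, so a test whose event was never emitted hangs until the test timeout instead of failing fast. A sketch of a non-blocking alternative, offered as an assumption about how this could be tightened, not as part of this commit:

package example

import (
	"strings"

	"k8s.io/client-go/tools/record"
)

// eventBuffered drains only the events the FakeRecorder has already
// buffered and reports whether any contains substr; the default case
// returns instead of blocking when no matching event was emitted.
func eventBuffered(rec *record.FakeRecorder, substr string) bool {
	for {
		select {
		case e := <-rec.Events:
			if strings.Contains(e, substr) {
				return true
			}
		default:
			return false
		}
	}
}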