Merge pull request #30800 from mml/db.controller.followup
Automatic merge from submit-queue

Followup fixes for disruption controller.

Part of #12611.

- Record an event when a pod does not have exactly 1 controller.
- Add TODO comment suggesting we simplify the two cases: integer and percentage.
@@ -462,6 +462,10 @@ func (dc *DisruptionController) trySync(pdb *policy.PodDisruptionBudget) error {
 
 func (dc *DisruptionController) getExpectedPodCount(pdb *policy.PodDisruptionBudget, pods []*api.Pod) (expectedCount, desiredHealthy int32, err error) {
 	err = nil
+	// TODO(davidopp): consider making the way expectedCount and rules about
+	// permitted controller configurations (specifically, considering it an error
+	// if a pod covered by a PDB has 0 controllers or > 1 controller) should be
+	// handled the same way for integer and percentage minAvailable
 	if pdb.Spec.MinAvailable.Type == intstr.Int {
 		desiredHealthy = pdb.Spec.MinAvailable.IntVal
 		expectedCount = int32(len(pods))
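An aside on the TODO above: MinAvailable is an intstr.IntOrString, and the two branches differ in how desiredHealthy is derived. A minimal standalone sketch of the distinction, assuming the FromInt/FromString constructors and the GetValueFromIntOrPercent helper from the era's pkg/util/intstr package (illustration only, not code from this PR):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/intstr"
)

func main() {
	// Integer form: desiredHealthy is taken verbatim and expectedCount is
	// simply len(pods), as in the branch shown above.
	min := intstr.FromInt(3)
	fmt.Println(min.Type == intstr.Int, min.IntVal) // true 3

	// Percentage form: the value must be resolved against a total. The
	// controller derives that total by walking each pod's controller, which
	// is where the 0-controllers / >1-controllers error cases below arise.
	pct := intstr.FromString("50%")
	desired, err := intstr.GetValueFromIntOrPercent(&pct, 7, true) // round up
	fmt.Println(desired, err)                                      // 4 <nil>
}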
@@ -498,9 +502,11 @@ func (dc *DisruptionController) getExpectedPodCount(pdb *policy.PodDisruptionBud
 			}
 			if controllerCount == 0 {
 				err = fmt.Errorf("asked for percentage, but found no controllers for pod %q", pod.Name)
+				dc.recorder.Event(pdb, api.EventTypeWarning, "NoControllers", err.Error())
 				return
 			} else if controllerCount > 1 {
 				err = fmt.Errorf("pod %q has %v>1 controllers", pod.Name, controllerCount)
+				dc.recorder.Event(pdb, api.EventTypeWarning, "TooManyControllers", err.Error())
 				return
 			}
 		}
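For context on the recorder calls added above: an EventBroadcaster fans events out to pluggable sinks, and each component writes through an EventRecorder stamped with its EventSource. A minimal standalone sketch of that pattern, assuming the era's pkg/client/record API (the Pod is just a stand-in for the PodDisruptionBudget; any runtime.Object works):

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/record"
)

func main() {
	// The broadcaster fans events out to sinks: logs, the API server, or
	// test watchers.
	broadcaster := record.NewBroadcaster()
	defer broadcaster.StartLogging(func(format string, args ...interface{}) {
		fmt.Printf(format+"\n", args...)
	}).Stop()

	// Each component gets its own recorder; the Component is what shows up
	// as the event source in `kubectl describe` output.
	recorder := broadcaster.NewRecorder(api.EventSource{Component: "sketch"})

	// Event(object, eventtype, reason, message) mirrors the calls added in
	// this commit.
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "nginx", Namespace: "default"}}
	recorder.Event(pod, api.EventTypeWarning, "NoControllers", "example warning")

	// Delivery is asynchronous; give the watch goroutine a moment to drain.
	time.Sleep(100 * time.Millisecond)
}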
@@ -27,6 +27,7 @@ import (
 	"k8s.io/kubernetes/pkg/apis/extensions"
 	"k8s.io/kubernetes/pkg/apis/policy"
 	"k8s.io/kubernetes/pkg/client/cache"
+	"k8s.io/kubernetes/pkg/client/record"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/util/intstr"
 	"k8s.io/kubernetes/pkg/util/uuid"
@@ -76,14 +77,17 @@ func newFakeDisruptionController() (*DisruptionController, *pdbStates) {
 	ps := &pdbStates{}
 
 	dc := &DisruptionController{
-		pdbLister:  cache.StoreToPodDisruptionBudgetLister{Store: cache.NewStore(controller.KeyFunc)},
-		podLister:  cache.StoreToPodLister{Indexer: cache.NewIndexer(controller.KeyFunc, cache.Indexers{})},
-		rcLister:   cache.StoreToReplicationControllerLister{Indexer: cache.NewIndexer(controller.KeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})},
-		rsLister:   cache.StoreToReplicaSetLister{Store: cache.NewStore(controller.KeyFunc)},
-		dLister:    cache.StoreToDeploymentLister{Store: cache.NewStore(controller.KeyFunc)},
-		getUpdater: func() updater { return ps.Set },
+		pdbLister:   cache.StoreToPodDisruptionBudgetLister{Store: cache.NewStore(controller.KeyFunc)},
+		podLister:   cache.StoreToPodLister{Indexer: cache.NewIndexer(controller.KeyFunc, cache.Indexers{})},
+		rcLister:    cache.StoreToReplicationControllerLister{Indexer: cache.NewIndexer(controller.KeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})},
+		rsLister:    cache.StoreToReplicaSetLister{Store: cache.NewStore(controller.KeyFunc)},
+		dLister:     cache.StoreToDeploymentLister{Store: cache.NewStore(controller.KeyFunc)},
+		getUpdater:  func() updater { return ps.Set },
+		broadcaster: record.NewBroadcaster(),
 	}
 
+	dc.recorder = dc.broadcaster.NewRecorder(api.EventSource{Component: "disruption_test"})
+
 	return dc, ps
 }
 
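The fixture change above only wires the broadcaster and recorder in; it asserts nothing about events. If a test wanted to observe what the controller records, a hypothetical helper along these lines could sit next to the fixture, assuming the StartEventWatcher hook on the era's EventBroadcaster (not part of this PR):

// Hypothetical helper for disruption_test.go: buffer the events emitted
// through the fixture's broadcaster so a test can assert on them.
func collectEvents(dc *DisruptionController) <-chan *api.Event {
	ch := make(chan *api.Event, 16) // buffered so the watch callback doesn't block
	dc.broadcaster.StartEventWatcher(func(e *api.Event) {
		ch <- e
	})
	return ch
}

Sketch of usage in a test:

	dc, _ := newFakeDisruptionController()
	events := collectEvents(dc)
	// ...drive trySync into the zero-controllers error path...
	if e := <-events; e.Reason != "NoControllers" {
		t.Errorf("unexpected event reason %q", e.Reason)
	}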