Support scale subresource for PDBs (#76294)
* Support scale subresource for PDBs
* Check group in finder functions
* Small fixes and more tests
committed by Kubernetes Prow Robot
parent cdff17a96b
commit f1883c9e8c
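In short: after this change the disruption controller can size a PodDisruptionBudget for pods whose owner is not a built-in workload, as long as the owner's resource exposes the scale subresource. A hedged sketch (not part of this commit) of what a custom resource would have to declare for that to work; the group/kind are hypothetical and the CRD types are the era-appropriate apiextensions v1beta1:

package example

import (
	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
)

// crdWithScale sketches a hypothetical CRD whose /scale subresource lets
// generic clients, including the disruption controller after this change,
// read the owner's replica count.
func crdWithScale() *apiextensionsv1beta1.CustomResourceDefinition {
	return &apiextensionsv1beta1.CustomResourceDefinition{
		Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{
			Group:   "example.com", // hypothetical group
			Version: "v1",
			Names: apiextensionsv1beta1.CustomResourceDefinitionNames{
				Plural: "widgets", // hypothetical resource
				Kind:   "Widget",
			},
			Subresources: &apiextensionsv1beta1.CustomResourceSubresources{
				Scale: &apiextensionsv1beta1.CustomResourceSubresourceScale{
					SpecReplicasPath:   ".spec.replicas",
					StatusReplicasPath: ".status.replicas",
				},
			},
		},
	}
}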
@@ -26,7 +26,9 @@ import (
 	policy "k8s.io/api/policy/v1beta1"
 	apiequality "k8s.io/apimachinery/pkg/api/equality"
 	"k8s.io/apimachinery/pkg/api/errors"
+	apimeta "k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -41,6 +43,7 @@ import (
 	appsv1listers "k8s.io/client-go/listers/apps/v1"
 	corelisters "k8s.io/client-go/listers/core/v1"
 	policylisters "k8s.io/client-go/listers/policy/v1beta1"
+	scaleclient "k8s.io/client-go/scale"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/workqueue"
@@ -67,6 +70,9 @@ type updater func(*policy.PodDisruptionBudget) error
 
 type DisruptionController struct {
 	kubeClient clientset.Interface
+	mapper     apimeta.RESTMapper
+
+	scaleNamespacer scaleclient.ScalesGetter
 
 	pdbLister       policylisters.PodDisruptionBudgetLister
 	pdbListerSynced cache.InformerSynced
@@ -105,7 +111,7 @@ type controllerAndScale struct {
 
 // podControllerFinder is a function type that maps a pod to a list of
 // controllers and their scale.
-type podControllerFinder func(*v1.Pod) (*controllerAndScale, error)
+type podControllerFinder func(controllerRef *metav1.OwnerReference, namespace string) (*controllerAndScale, error)
 
 func NewDisruptionController(
 	podInformer coreinformers.PodInformer,
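The finder now receives the pod's resolved controllerRef plus its namespace, so each finder only decides whether the ref belongs to it. An in-package sketch of a conforming finder (hypothetical, not in this commit; assumes the file's metav1/schema imports and the package-local controllerAndScale type):

// widgetFinder would claim refs to a hypothetical example.com "Widget" owner
// and report a fixed scale; returning (nil, nil) means "not mine, try the
// next finder".
func widgetFinder(controllerRef *metav1.OwnerReference, namespace string) (*controllerAndScale, error) {
	gv, err := schema.ParseGroupVersion(controllerRef.APIVersion)
	if err != nil {
		return nil, err
	}
	if gv.Group != "example.com" || controllerRef.Kind != "Widget" {
		return nil, nil
	}
	return &controllerAndScale{controllerRef.UID, 3}, nil
}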
@@ -115,6 +121,8 @@ func NewDisruptionController(
 	dInformer appsv1informers.DeploymentInformer,
 	ssInformer appsv1informers.StatefulSetInformer,
 	kubeClient clientset.Interface,
+	restMapper apimeta.RESTMapper,
+	scaleNamespacer scaleclient.ScalesGetter,
 ) *DisruptionController {
 	dc := &DisruptionController{
 		kubeClient: kubeClient,
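Callers must now supply the two new dependencies. A hedged sketch of building them from a rest.Config with standard client-go helpers (this is not the exact kube-controller-manager wiring):

package example

import (
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/discovery/cached/memory"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/restmapper"
	"k8s.io/client-go/scale"
)

// scaleDeps builds the RESTMapper and ScalesGetter that the updated
// NewDisruptionController signature expects.
func scaleDeps(cfg *rest.Config) (*restmapper.DeferredDiscoveryRESTMapper, scale.ScalesGetter, error) {
	disco, err := discovery.NewDiscoveryClientForConfig(cfg)
	if err != nil {
		return nil, nil, err
	}
	// The deferred mapper consults cached discovery lazily, so resources
	// added after startup can still be mapped once the cache is refreshed.
	mapper := restmapper.NewDeferredDiscoveryRESTMapper(memory.NewMemCacheClient(disco))
	resolver := scale.NewDiscoveryScaleKindResolver(disco)
	scales, err := scale.NewForConfig(cfg, mapper, dynamic.LegacyAPIPathResolverFunc, resolver)
	if err != nil {
		return nil, nil, err
	}
	return mapper, scales, nil
}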
@@ -157,19 +165,19 @@ func NewDisruptionController(
 	dc.ssLister = ssInformer.Lister()
 	dc.ssListerSynced = ssInformer.Informer().HasSynced
 
+	dc.mapper = restMapper
+	dc.scaleNamespacer = scaleNamespacer
+
 	return dc
 }
 
-// TODO(mml): When controllerRef is implemented (#2210), we *could* simply
-// return controllers without their scales, and access scale type-generically
-// via the scale subresource. That may not be as much of a win as it sounds,
-// however. We are accessing everything through the pkg/client/cache API that
-// we have to set up and tune to the types we know we'll be accessing anyway,
-// and we may well need further tweaks just to be able to access scale
-// subresources.
+// The workload resources do implement the scale subresource, so it would
+// be possible to only check the scale subresource here. But since there is no
+// way to take advantage of listers with scale subresources, we use the workload
+// resources directly and only fall back to the scale subresource when needed.
 func (dc *DisruptionController) finders() []podControllerFinder {
 	return []podControllerFinder{dc.getPodReplicationController, dc.getPodDeployment, dc.getPodReplicaSet,
-		dc.getPodStatefulSet}
+		dc.getPodStatefulSet, dc.getScaleController}
 }
 
 var (
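The ordering matters: the four lister-backed finders answer from informer caches, while getScaleController issues a live API request, so it is tried last. An illustrative in-package helper (not from this commit) showing the first-match contract that callers rely on:

// resolveScale tries each finder in order; a finder either claims the ref
// (non-nil result), rejects it (nil, nil), or fails with an error.
func (dc *DisruptionController) resolveScale(ref *metav1.OwnerReference, ns string) (*controllerAndScale, error) {
	for _, finder := range dc.finders() {
		cas, err := finder(ref, ns)
		if err != nil {
			return nil, err
		}
		if cas != nil {
			return cas, nil // first finder to claim the ref wins
		}
	}
	return nil, nil // no known controller matched
}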
@@ -180,15 +188,12 @@ var (
 )
 
 // getPodReplicaSet finds a replicaset which has no matching deployments.
-func (dc *DisruptionController) getPodReplicaSet(pod *v1.Pod) (*controllerAndScale, error) {
-	controllerRef := metav1.GetControllerOf(pod)
-	if controllerRef == nil {
-		return nil, nil
-	}
-	if controllerRef.Kind != controllerKindRS.Kind {
-		return nil, nil
-	}
-	rs, err := dc.rsLister.ReplicaSets(pod.Namespace).Get(controllerRef.Name)
+func (dc *DisruptionController) getPodReplicaSet(controllerRef *metav1.OwnerReference, namespace string) (*controllerAndScale, error) {
+	ok, err := verifyGroupKind(controllerRef, controllerKindRS.Kind, []string{"apps", "extensions"})
+	if !ok || err != nil {
+		return nil, err
+	}
+	rs, err := dc.rsLister.ReplicaSets(namespace).Get(controllerRef.Name)
 	if err != nil {
 		// The only possible error is NotFound, which is ok here.
 		return nil, nil
@@ -204,16 +209,13 @@ func (dc *DisruptionController) getPodReplicaSet(pod *v1.Pod) (*controllerAndSca
 	return &controllerAndScale{rs.UID, *(rs.Spec.Replicas)}, nil
 }
 
-// getPodStatefulSet returns the statefulset managing the given pod.
-func (dc *DisruptionController) getPodStatefulSet(pod *v1.Pod) (*controllerAndScale, error) {
-	controllerRef := metav1.GetControllerOf(pod)
-	if controllerRef == nil {
-		return nil, nil
-	}
-	if controllerRef.Kind != controllerKindSS.Kind {
-		return nil, nil
-	}
-	ss, err := dc.ssLister.StatefulSets(pod.Namespace).Get(controllerRef.Name)
+// getPodStatefulSet returns the statefulset referenced by the provided controllerRef.
+func (dc *DisruptionController) getPodStatefulSet(controllerRef *metav1.OwnerReference, namespace string) (*controllerAndScale, error) {
+	ok, err := verifyGroupKind(controllerRef, controllerKindSS.Kind, []string{"apps"})
+	if !ok || err != nil {
+		return nil, err
+	}
+	ss, err := dc.ssLister.StatefulSets(namespace).Get(controllerRef.Name)
 	if err != nil {
 		// The only possible error is NotFound, which is ok here.
 		return nil, nil
@@ -226,15 +228,12 @@ func (dc *DisruptionController) getPodStatefulSet(pod *v1.Pod) (*controllerAndSc
 }
 
 // getPodDeployments finds deployments for any replicasets which are being managed by deployments.
-func (dc *DisruptionController) getPodDeployment(pod *v1.Pod) (*controllerAndScale, error) {
-	controllerRef := metav1.GetControllerOf(pod)
-	if controllerRef == nil {
-		return nil, nil
-	}
-	if controllerRef.Kind != controllerKindRS.Kind {
-		return nil, nil
-	}
-	rs, err := dc.rsLister.ReplicaSets(pod.Namespace).Get(controllerRef.Name)
+func (dc *DisruptionController) getPodDeployment(controllerRef *metav1.OwnerReference, namespace string) (*controllerAndScale, error) {
+	ok, err := verifyGroupKind(controllerRef, controllerKindRS.Kind, []string{"apps", "extensions"})
+	if !ok || err != nil {
+		return nil, err
+	}
+	rs, err := dc.rsLister.ReplicaSets(namespace).Get(controllerRef.Name)
 	if err != nil {
 		// The only possible error is NotFound, which is ok here.
 		return nil, nil
@@ -246,8 +245,10 @@ func (dc *DisruptionController) getPodDeployment(pod *v1.Pod) (*controllerAndSca
 	if controllerRef == nil {
 		return nil, nil
 	}
-	if controllerRef.Kind != controllerKindDep.Kind {
-		return nil, nil
-	}
+
+	ok, err = verifyGroupKind(controllerRef, controllerKindDep.Kind, []string{"apps", "extensions"})
+	if !ok || err != nil {
+		return nil, err
+	}
 	deployment, err := dc.dLister.Deployments(rs.Namespace).Get(controllerRef.Name)
 	if err != nil {
@@ -260,15 +261,12 @@ func (dc *DisruptionController) getPodDeployment(pod *v1.Pod) (*controllerAndSca
 	return &controllerAndScale{deployment.UID, *(deployment.Spec.Replicas)}, nil
 }
 
-func (dc *DisruptionController) getPodReplicationController(pod *v1.Pod) (*controllerAndScale, error) {
-	controllerRef := metav1.GetControllerOf(pod)
-	if controllerRef == nil {
-		return nil, nil
-	}
-	if controllerRef.Kind != controllerKindRC.Kind {
-		return nil, nil
-	}
-	rc, err := dc.rcLister.ReplicationControllers(pod.Namespace).Get(controllerRef.Name)
+func (dc *DisruptionController) getPodReplicationController(controllerRef *metav1.OwnerReference, namespace string) (*controllerAndScale, error) {
+	ok, err := verifyGroupKind(controllerRef, controllerKindRC.Kind, []string{""})
+	if !ok || err != nil {
+		return nil, err
+	}
+	rc, err := dc.rcLister.ReplicationControllers(namespace).Get(controllerRef.Name)
 	if err != nil {
 		// The only possible error is NotFound, which is ok here.
 		return nil, nil
@@ -279,6 +277,55 @@ func (dc *DisruptionController) getPodReplicationController(pod *v1.Pod) (*contr
 	return &controllerAndScale{rc.UID, *(rc.Spec.Replicas)}, nil
 }
 
+func (dc *DisruptionController) getScaleController(controllerRef *metav1.OwnerReference, namespace string) (*controllerAndScale, error) {
+	gv, err := schema.ParseGroupVersion(controllerRef.APIVersion)
+	if err != nil {
+		return nil, err
+	}
+
+	gk := schema.GroupKind{
+		Group: gv.Group,
+		Kind:  controllerRef.Kind,
+	}
+
+	mapping, err := dc.mapper.RESTMapping(gk, gv.Version)
+	if err != nil {
+		return nil, err
+	}
+	gr := mapping.Resource.GroupResource()
+
+	scale, err := dc.scaleNamespacer.Scales(namespace).Get(gr, controllerRef.Name)
+	if err != nil {
+		if errors.IsNotFound(err) {
+			return nil, nil
+		}
+		return nil, err
+	}
+	if scale.UID != controllerRef.UID {
+		return nil, nil
+	}
+	return &controllerAndScale{scale.UID, scale.Spec.Replicas}, nil
+}
+
+func verifyGroupKind(controllerRef *metav1.OwnerReference, expectedKind string, expectedGroups []string) (bool, error) {
+	gv, err := schema.ParseGroupVersion(controllerRef.APIVersion)
+	if err != nil {
+		return false, err
+	}
+
+	if controllerRef.Kind != expectedKind {
+		return false, nil
+	}
+
+	for _, group := range expectedGroups {
+		if group == gv.Group {
+			return true, nil
+		}
+	}
+
+	return false, nil
+}
+
 func (dc *DisruptionController) Run(stopCh <-chan struct{}) {
 	defer utilruntime.HandleCrash()
 	defer dc.queue.ShutDown()
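Note that verifyGroupKind compares only the ref's kind and API group, never the version, so ReplicaSets match whether they are owned via apps/v1 or the legacy extensions/v1beta1. A test-style in-package sketch of both outcomes (values hypothetical):

func TestVerifyGroupKindSketch(t *testing.T) {
	ref := &metav1.OwnerReference{APIVersion: "extensions/v1beta1", Kind: "ReplicaSet"}

	// Accepted: the kind matches and "extensions" is in the allowed groups.
	if ok, err := verifyGroupKind(ref, "ReplicaSet", []string{"apps", "extensions"}); !ok || err != nil {
		t.Fatalf("expected match, got ok=%v err=%v", ok, err)
	}

	// Rejected: a custom group falls through every workload finder, leaving
	// getScaleController to resolve it via the scale subresource.
	ref.APIVersion = "example.com/v1" // hypothetical CRD group
	if ok, _ := verifyGroupKind(ref, "ReplicaSet", []string{"apps", "extensions"}); ok {
		t.Fatal("expected no match for custom group")
	}
}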
@@ -583,10 +630,23 @@ func (dc *DisruptionController) getExpectedScale(pdb *policy.PodDisruptionBudget
 	// 1. Find the controller for each pod. If any pod has 0 controllers,
 	// that's an error. With ControllerRef, a pod can only have 1 controller.
 	for _, pod := range pods {
+		controllerRef := metav1.GetControllerOf(pod)
+		if controllerRef == nil {
+			err = fmt.Errorf("found no controller ref for pod %q", pod.Name)
+			dc.recorder.Event(pdb, v1.EventTypeWarning, "NoControllerRef", err.Error())
+			return
+		}
+
+		// If we already know the scale of the controller there is no need to do anything.
+		if _, found := controllerScale[controllerRef.UID]; found {
+			continue
+		}
+
+		// Check all the supported controllers to find the desired scale.
 		foundController := false
 		for _, finder := range dc.finders() {
 			var controllerNScale *controllerAndScale
-			controllerNScale, err = finder(pod)
+			controllerNScale, err = finder(controllerRef, pod.Namespace)
 			if err != nil {
 				return
 			}
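The loop above now rejects pods without a controller up front. metav1.GetControllerOf returns the single owner reference marked Controller=true, or nil when there is none; a self-contained illustration (names hypothetical):

package example

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func controllerOfExample() {
	isController := true
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: "web-0", // hypothetical pod
			OwnerReferences: []metav1.OwnerReference{{
				APIVersion: "apps/v1",
				Kind:       "StatefulSet",
				Name:       "web",
				Controller: &isController,
			}},
		},
	}
	// Prints the managing controller; a pod with no Controller=true owner
	// would yield nil, which getExpectedScale reports as an error event.
	if ref := metav1.GetControllerOf(pod); ref != nil {
		fmt.Printf("pod %s is managed by %s %s\n", pod.Name, ref.Kind, ref.Name)
	}
}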