Use Patch instead of SSA for Pod Disruption condition

Michal Wozniak
2023-10-10 12:44:44 +02:00
parent 145b84b0b1
commit 32fdb55192
8 changed files with 167 additions and 73 deletions
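
The hunks below drop the server-side apply (SSA) path: the corev1apply import and the DisruptionController field manager go away, and the stale DisruptionTarget condition is now cleared by mutating a deep copy of the pod and writing its status back. For orientation, here is a minimal, self-contained sketch of the kind of ApplyStatus call being removed; the package name, function name, and the client/pod arguments are illustrative, not the controller's own code.

package sketch

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1apply "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes"
)

// clearDisruptionTargetSSA shows the server-side-apply style this commit removes:
// an apply configuration carrying only the DisruptionTarget condition is sent to
// the apiserver, which merges it under the given field manager.
func clearDisruptionTargetSSA(ctx context.Context, client kubernetes.Interface, pod *v1.Pod) error {
	podApply := corev1apply.Pod(pod.Name, pod.Namespace).
		WithStatus(corev1apply.PodStatus().
			WithConditions(corev1apply.PodCondition().
				WithType(v1.DisruptionTarget).
				WithStatus(v1.ConditionFalse).
				WithLastTransitionTime(metav1.Now()))).
		WithResourceVersion(pod.ResourceVersion)
	_, err := client.CoreV1().Pods(pod.Namespace).ApplyStatus(ctx, podApply, metav1.ApplyOptions{
		FieldManager: "DisruptionController",
		Force:        true,
	})
	return err
}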


@@ -34,7 +34,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/intstr"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
-	corev1apply "k8s.io/client-go/applyconfigurations/core/v1"
 	"k8s.io/client-go/discovery"
 	appsv1informers "k8s.io/client-go/informers/apps/v1"
 	coreinformers "k8s.io/client-go/informers/core/v1"
@@ -74,9 +73,6 @@ const (
 	// Once the timeout is reached, this controller attempts to set the status
 	// of the condition to False.
 	stalePodDisruptionTimeout = 2 * time.Minute
-
-	// field manager used to disable the pod failure condition
-	fieldManager = "DisruptionController"
 )
 
 type updater func(context.Context, *policy.PodDisruptionBudget) error
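
The stalePodDisruptionTimeout constant kept above is the only piece of this block the controller still needs: per the comment, once that timeout elapses the controller resets the condition to False. A rough sketch of that staleness test, under the assumption (not shown in this hunk) that the timeout is measured from the condition's LastTransitionTime:

package sketch

import (
	"time"

	v1 "k8s.io/api/core/v1"
)

const stalePodDisruptionTimeout = 2 * time.Minute

// podDisruptionConditionIsStale is an illustrative helper, not controller code: it
// treats a DisruptionTarget condition that has been True for longer than
// stalePodDisruptionTimeout as stale and therefore due to be reset to False.
func podDisruptionConditionIsStale(pod *v1.Pod, now time.Time) bool {
	for _, cond := range pod.Status.Conditions {
		if cond.Type == v1.DisruptionTarget && cond.Status == v1.ConditionTrue {
			return now.Sub(cond.LastTransitionTime.Time) >= stalePodDisruptionTimeout
		}
	}
	return false
}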
@@ -770,16 +766,15 @@ func (dc *DisruptionController) syncStalePodDisruption(ctx context.Context, key
 		return nil
 	}
-	podApply := corev1apply.Pod(pod.Name, pod.Namespace).
-		WithStatus(corev1apply.PodStatus()).
-		WithResourceVersion(pod.ResourceVersion)
-	podApply.Status.WithConditions(corev1apply.PodCondition().
-		WithType(v1.DisruptionTarget).
-		WithStatus(v1.ConditionFalse).
-		WithLastTransitionTime(metav1.Now()),
-	)
-	if _, err := dc.kubeClient.CoreV1().Pods(pod.Namespace).ApplyStatus(ctx, podApply, metav1.ApplyOptions{FieldManager: fieldManager, Force: true}); err != nil {
+	newPod := pod.DeepCopy()
+	updated := apipod.UpdatePodCondition(&newPod.Status, &v1.PodCondition{
+		Type:   v1.DisruptionTarget,
+		Status: v1.ConditionFalse,
+	})
+	if !updated {
+		return nil
+	}
+	if _, err := dc.kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(ctx, newPod, metav1.UpdateOptions{}); err != nil {
 		return err
 	}
 	logger.V(2).Info("Reset stale DisruptionTarget condition to False", "pod", klog.KObj(pod))
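
For comparison outside the diff, a self-contained sketch of the update-based path introduced above. apipod.UpdatePodCondition comes from k8s.io/kubernetes/pkg/api/v1/pod and is awkward to import outside the main repository, so a simplified stand-in (skip the write when the condition is already False, otherwise set it with a fresh transition time) is inlined; the package, function, and argument names are illustrative.

package sketch

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// clearDisruptionTargetUpdate mirrors the shape of the replacement code: work on a
// deep copy, flip the DisruptionTarget condition to False locally, and only call
// UpdateStatus when that actually changed something.
func clearDisruptionTargetUpdate(ctx context.Context, client kubernetes.Interface, pod *v1.Pod) error {
	newPod := pod.DeepCopy()
	if !setConditionFalse(&newPod.Status, v1.DisruptionTarget) {
		// Already False: avoid a no-op status write.
		return nil
	}
	_, err := client.CoreV1().Pods(pod.Namespace).UpdateStatus(ctx, newPod, metav1.UpdateOptions{})
	return err
}

// setConditionFalse is a simplified stand-in for apipod.UpdatePodCondition: it
// reports false when the condition is already False, and otherwise rewrites it in
// place (or appends it) with a new LastTransitionTime.
func setConditionFalse(status *v1.PodStatus, condType v1.PodConditionType) bool {
	newCond := v1.PodCondition{
		Type:               condType,
		Status:             v1.ConditionFalse,
		LastTransitionTime: metav1.Now(),
	}
	for i := range status.Conditions {
		if status.Conditions[i].Type != condType {
			continue
		}
		if status.Conditions[i].Status == v1.ConditionFalse {
			return false
		}
		status.Conditions[i] = newCond
		return true
	}
	status.Conditions = append(status.Conditions, newCond)
	return true
}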