Do not revert the pod condition if there might be running containers; skip the condition update instead.

Michal Wozniak 2022-11-07 11:57:56 +01:00
parent 52cd6755eb
commit 4e732e20d0
5 changed files with 170 additions and 34 deletions
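
The intent, as a hedged and self-contained sketch (local stand-in types and simplified names; not the kubelet's actual code): when merging the status to report, the DisruptionTarget condition from the new status is only carried over once the pod really transitions to a terminal phase and no containers could still be running; otherwise the previously reported condition, if any, is left untouched rather than reverted, so no unnecessary PATCH request is issued.

// merge_sketch.go — simplified illustration of the rule this commit introduces.
// Stand-in types only; not the actual kubelet implementation.
package main

import "fmt"

type podCondition struct {
	Type, Status, Reason string
}

type podStatus struct {
	Phase      string
	Conditions []podCondition
}

func isTerminal(phase string) bool { return phase == "Failed" || phase == "Succeeded" }

// replaceOrAppend mirrors the helper added in pkg/util/pod: replace the first
// condition of the same type, or append the condition if none exists yet.
func replaceOrAppend(conditions []podCondition, c podCondition) []podCondition {
	for i := range conditions {
		if conditions[i].Type == c.Type {
			conditions[i] = c
			return conditions
		}
	}
	return append(conditions, c)
}

// mergeConditions copies conditions from newStatus, but the DisruptionTarget
// condition is copied only once the pod transitions to a terminal phase and no
// containers could still be running; otherwise the update is skipped, leaving
// whatever oldStatus already reports, so nothing has to be reverted later.
func mergeConditions(oldStatus, newStatus podStatus, couldHaveRunningContainers bool) []podCondition {
	transitioningToTerminal := !isTerminal(oldStatus.Phase) && isTerminal(newStatus.Phase)
	merged := append([]podCondition(nil), oldStatus.Conditions...)
	for _, c := range newStatus.Conditions {
		if c.Type == "DisruptionTarget" && (!transitioningToTerminal || couldHaveRunningContainers) {
			continue // skip the condition update instead of reverting it afterwards
		}
		merged = replaceOrAppend(merged, c)
	}
	return merged
}

func main() {
	oldStatus := podStatus{Phase: "Running"}
	newStatus := podStatus{
		Phase:      "Failed",
		Conditions: []podCondition{{Type: "DisruptionTarget", Status: "True", Reason: "TerminationByKubelet"}},
	}
	fmt.Println(mergeConditions(oldStatus, newStatus, true))  // containers may still run: condition not reported yet
	fmt.Println(mergeConditions(oldStatus, newStatus, false)) // containers are gone: report it with the terminal phase
}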

View File

@@ -57,6 +57,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/status"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util"
utilpod "k8s.io/kubernetes/pkg/util/pod"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/hostutil"
"k8s.io/kubernetes/pkg/volume/util/subpath"
@@ -1518,11 +1519,7 @@ func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.Po
// on the container statuses as they are added based on one-time events.
cType := v1.AlphaNoCompatGuaranteeDisruptionTarget
if _, condition := podutil.GetPodConditionFromList(oldPodStatus.Conditions, cType); condition != nil {
if i, _ := podutil.GetPodConditionFromList(s.Conditions, cType); i >= 0 {
s.Conditions[i] = *condition
} else {
s.Conditions = append(s.Conditions, *condition)
}
s.Conditions = utilpod.ReplaceOrAppendPodCondition(s.Conditions, condition)
}
}
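
For reference, a hedged usage sketch of the extracted helper as it is now called above (this assumes k8s.io/kubernetes is importable as a module, which is generally discouraged outside the project itself; the call shape matches ReplaceOrAppendPodCondition as shown in the last file of this commit). Pulling the replace-or-append logic into a shared helper removes the duplicated index-based branch here and in mergePodStatus.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	utilpod "k8s.io/kubernetes/pkg/util/pod"
)

func main() {
	conditions := []v1.PodCondition{
		{Type: v1.PodScheduled, Status: v1.ConditionTrue},
	}
	disruption := v1.PodCondition{
		Type:   v1.AlphaNoCompatGuaranteeDisruptionTarget,
		Status: v1.ConditionTrue,
		Reason: "TerminationByKubelet",
	}
	// Replaces an existing condition of the same type, or appends a new one.
	// Use the returned slice: append may reallocate when nothing is replaced.
	conditions = utilpod.ReplaceOrAppendPodCondition(conditions, &disruption)
	fmt.Println(conditions)
}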

View File

@@ -888,15 +888,24 @@ func mergePodStatus(oldPodStatus, newPodStatus v1.PodStatus, couldHaveRunningCon
}
}
transitioningToTerminalPhase := !podutil.IsPodPhaseTerminal(oldPodStatus.Phase) && podutil.IsPodPhaseTerminal(newPodStatus.Phase)
for _, c := range newPodStatus.Conditions {
if kubetypes.PodConditionByKubelet(c.Type) {
podConditions = append(podConditions, c)
} else if kubetypes.PodConditionSharedByKubelet(c.Type) {
if c.Type == v1.AlphaNoCompatGuaranteeDisruptionTarget {
// update the pod disruption condition only when transitioning to a terminal phase. In particular, check whether
// there might still be running containers, to avoid sending an unnecessary PATCH request if the
// actual transition is delayed (see below)
if transitioningToTerminalPhase && !couldHaveRunningContainers {
// update the LastTransitionTime
updateLastTransitionTime(&newPodStatus, &oldPodStatus, c.Type)
if _, c := podutil.GetPodConditionFromList(newPodStatus.Conditions, c.Type); c != nil {
// for shared conditions we update or append in podConditions
if i, _ := podutil.GetPodConditionFromList(podConditions, c.Type); i >= 0 {
podConditions[i] = c
} else {
podConditions = append(podConditions, c)
podConditions = statusutil.ReplaceOrAppendPodCondition(podConditions, c)
}
}
}
}
}
@@ -911,21 +920,11 @@ func mergePodStatus(oldPodStatus, newPodStatus v1.PodStatus, couldHaveRunningCon
// the Kubelet exclusively owns must be released prior to a pod being reported terminal,
// while resources that have participating components above the API use the pod's
// transition to a terminal phase (or full deletion) to release those resources.
if !podutil.IsPodPhaseTerminal(oldPodStatus.Phase) && podutil.IsPodPhaseTerminal(newPodStatus.Phase) {
if transitioningToTerminalPhase {
if couldHaveRunningContainers {
newPodStatus.Phase = oldPodStatus.Phase
newPodStatus.Reason = oldPodStatus.Reason
newPodStatus.Message = oldPodStatus.Message
if utilfeature.DefaultFeatureGate.Enabled(features.PodDisruptionConditions) {
// revert setting of the pod disruption condition until the pod is terminal in order not to issue
// an unnecessary PATCH request
revertPodCondition(&oldPodStatus, &newPodStatus, v1.AlphaNoCompatGuaranteeDisruptionTarget)
}
} else {
if utilfeature.DefaultFeatureGate.Enabled(features.PodDisruptionConditions) {
// update the LastTransitionTime when transitioning into the failed state
updateLastTransitionTime(&newPodStatus, &oldPodStatus, v1.AlphaNoCompatGuaranteeDisruptionTarget)
}
}
}
@@ -946,18 +945,6 @@ func mergePodStatus(oldPodStatus, newPodStatus v1.PodStatus, couldHaveRunningCon
return newPodStatus
}
func revertPodCondition(oldPodStatus, newPodStatus *v1.PodStatus, cType v1.PodConditionType) {
if newIndex, newCondition := podutil.GetPodConditionFromList(newPodStatus.Conditions, cType); newCondition != nil {
if _, oldCondition := podutil.GetPodConditionFromList(oldPodStatus.Conditions, cType); oldCondition != nil {
// revert the new condition to what was before
newPodStatus.Conditions[newIndex] = *oldCondition
} else {
// delete the new condition as it wasn't there before
newPodStatus.Conditions = append(newPodStatus.Conditions[:newIndex], newPodStatus.Conditions[newIndex+1:]...)
}
}
}
// NeedToReconcilePodReadiness returns whether the pod "Ready" condition needs to be reconciled
func NeedToReconcilePodReadiness(pod *v1.Pod) bool {
if len(pod.Spec.ReadinessGates) == 0 {

View File

@@ -1569,6 +1569,53 @@ func TestMergePodStatus(t *testing.T) {
})
return input
},
func(input v1.PodStatus) v1.PodStatus {
input.Phase = v1.PodFailed
input.Conditions = append(input.Conditions, v1.PodCondition{
Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
Status: v1.ConditionTrue,
Reason: "TerminationByKubelet",
})
return input
},
v1.PodStatus{
Phase: v1.PodFailed,
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
Status: v1.ConditionFalse,
Reason: "PodFailed",
},
{
Type: v1.ContainersReady,
Status: v1.ConditionFalse,
Reason: "PodFailed",
},
{
Type: v1.PodScheduled,
Status: v1.ConditionTrue,
},
{
Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
Status: v1.ConditionTrue,
Reason: "TerminationByKubelet",
},
},
Message: "Message",
},
},
{
"don't override DisruptionTarget condition when remaining in running phase; PodDisruptionConditions enabled",
true,
false,
func(input v1.PodStatus) v1.PodStatus {
input.Conditions = append(input.Conditions, v1.PodCondition{
Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
Status: v1.ConditionTrue,
Reason: "EvictedByEvictionAPI",
})
return input
},
func(input v1.PodStatus) v1.PodStatus {
input.Conditions = append(input.Conditions, v1.PodCondition{
Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
@@ -1580,6 +1627,11 @@ func TestMergePodStatus(t *testing.T) {
v1.PodStatus{
Phase: v1.PodRunning,
Conditions: []v1.PodCondition{
{
Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
Status: v1.ConditionTrue,
Reason: "EvictedByEvictionAPI",
},
{
Type: v1.PodReady,
Status: v1.ConditionTrue,
@@ -1588,10 +1640,46 @@ func TestMergePodStatus(t *testing.T) {
Type: v1.PodScheduled,
Status: v1.ConditionTrue,
},
},
Message: "Message",
},
},
{
"don't override DisruptionTarget condition when transitioning to failed phase but there might still be running containers; PodDisruptionConditions enabled",
true,
true,
func(input v1.PodStatus) v1.PodStatus {
input.Conditions = append(input.Conditions, v1.PodCondition{
Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
Status: v1.ConditionTrue,
Reason: "EvictedByEvictionAPI",
})
return input
},
func(input v1.PodStatus) v1.PodStatus {
input.Phase = v1.PodFailed
input.Conditions = append(input.Conditions, v1.PodCondition{
Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
Status: v1.ConditionTrue,
Reason: "TerminationByKubelet",
})
return input
},
v1.PodStatus{
Phase: v1.PodRunning,
Conditions: []v1.PodCondition{
{
Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
Status: v1.ConditionTrue,
Reason: "EvictedByEvictionAPI",
},
{
Type: v1.PodReady,
Status: v1.ConditionTrue,
},
{
Type: v1.PodScheduled,
Status: v1.ConditionTrue,
},
},
Message: "Message",
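
To run the new and updated tests locally, something like the following should work from the repository root (package paths inferred from the imports above, so treat them as assumptions):

go test ./pkg/kubelet/status/... -run TestMergePodStatus
go test ./pkg/util/pod/... -run TestReplaceOrAppendPodCondition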

View File

@@ -27,6 +27,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
clientset "k8s.io/client-go/kubernetes"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)
// PatchPodStatus patches pod status. It returns true and avoids an update if the patch contains no changes.
@@ -68,3 +69,13 @@ func preparePatchBytesForPodStatus(namespace, name string, uid types.UID, oldPod
}
return patchBytes, bytes.Equal(patchBytes, []byte(fmt.Sprintf(`{"metadata":{"uid":%q}}`, uid))), nil
}
// ReplaceOrAppendPodCondition replaces the first pod condition with an equal type, or appends the condition if there is none
func ReplaceOrAppendPodCondition(conditions []v1.PodCondition, condition *v1.PodCondition) []v1.PodCondition {
if i, _ := podutil.GetPodConditionFromList(conditions, condition.Type); i >= 0 {
conditions[i] = *condition
} else {
conditions = append(conditions, *condition)
}
return conditions
}

View File

@@ -23,6 +23,7 @@ import (
"reflect"
"github.com/google/go-cmp/cmp"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
@@ -128,3 +129,55 @@ func getPodStatus() v1.PodStatus {
Message: "Message",
}
}
func TestReplaceOrAppendPodCondition(t *testing.T) {
cType := v1.PodConditionType("ExampleType")
testCases := []struct {
description string
conditions []v1.PodCondition
condition v1.PodCondition
wantConditions []v1.PodCondition
}{
{
description: "append",
conditions: []v1.PodCondition{},
condition: v1.PodCondition{
Type: cType,
Status: v1.ConditionTrue,
},
wantConditions: []v1.PodCondition{
{
Type: cType,
Status: v1.ConditionTrue,
},
},
},
{
description: "replace",
conditions: []v1.PodCondition{
{
Type: cType,
Status: v1.ConditionTrue,
},
},
condition: v1.PodCondition{
Type: cType,
Status: v1.ConditionFalse,
},
wantConditions: []v1.PodCondition{
{
Type: cType,
Status: v1.ConditionFalse,
},
},
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
gotConditions := ReplaceOrAppendPodCondition(tc.conditions, &tc.condition)
if diff := cmp.Diff(tc.wantConditions, gotConditions); diff != "" {
t.Errorf("Unexpected conditions: %s", diff)
}
})
}
}