Do not revert the pod condition if there might be running containers; skip the condition update instead.
Commit 4e732e20d0 (parent 52cd6755eb)
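Before the diff itself, a condensed sketch of the behavior described in the commit message may help. This is an illustrative, standalone Go snippet, not the kubelet's code; the names isTerminal and shouldUpdateDisruptionCondition are invented for the example. The point: the shared DisruptionTarget condition is propagated only when the pod is actually moving to a terminal phase and no containers could still be running; otherwise the update is skipped up front instead of being applied and reverted later.

package main

import "fmt"

// isTerminal reports whether a pod phase is terminal (illustrative helper).
func isTerminal(phase string) bool {
    return phase == "Failed" || phase == "Succeeded"
}

// shouldUpdateDisruptionCondition mirrors the gate this commit adds to mergePodStatus:
// propagate the DisruptionTarget condition only when the pod moves from a
// non-terminal to a terminal phase and no containers may still be running.
func shouldUpdateDisruptionCondition(oldPhase, newPhase string, couldHaveRunningContainers bool) bool {
    transitioningToTerminalPhase := !isTerminal(oldPhase) && isTerminal(newPhase)
    return transitioningToTerminalPhase && !couldHaveRunningContainers
}

func main() {
    // Containers may still be running: skip the update for now (nothing to revert later).
    fmt.Println(shouldUpdateDisruptionCondition("Running", "Failed", true)) // false
    // The transition is final and the containers are gone: report the condition.
    fmt.Println(shouldUpdateDisruptionCondition("Running", "Failed", false)) // true
}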
@@ -57,6 +57,7 @@ import (
     "k8s.io/kubernetes/pkg/kubelet/status"
     kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
     "k8s.io/kubernetes/pkg/kubelet/util"
+    utilpod "k8s.io/kubernetes/pkg/util/pod"
     volumeutil "k8s.io/kubernetes/pkg/volume/util"
     "k8s.io/kubernetes/pkg/volume/util/hostutil"
     "k8s.io/kubernetes/pkg/volume/util/subpath"
@@ -1518,11 +1519,7 @@ func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.Po
         // on the container statuses as they are added based on one-time events.
         cType := v1.AlphaNoCompatGuaranteeDisruptionTarget
         if _, condition := podutil.GetPodConditionFromList(oldPodStatus.Conditions, cType); condition != nil {
-            if i, _ := podutil.GetPodConditionFromList(s.Conditions, cType); i >= 0 {
-                s.Conditions[i] = *condition
-            } else {
-                s.Conditions = append(s.Conditions, *condition)
-            }
+            s.Conditions = utilpod.ReplaceOrAppendPodCondition(s.Conditions, condition)
         }
     }
 
@@ -888,15 +888,24 @@ func mergePodStatus(oldPodStatus, newPodStatus v1.PodStatus, couldHaveRunningCon
         }
     }
 
+    transitioningToTerminalPhase := !podutil.IsPodPhaseTerminal(oldPodStatus.Phase) && podutil.IsPodPhaseTerminal(newPodStatus.Phase)
+
     for _, c := range newPodStatus.Conditions {
         if kubetypes.PodConditionByKubelet(c.Type) {
             podConditions = append(podConditions, c)
         } else if kubetypes.PodConditionSharedByKubelet(c.Type) {
-            // for shared conditions we update or append in podConditions
-            if i, _ := podutil.GetPodConditionFromList(podConditions, c.Type); i >= 0 {
-                podConditions[i] = c
-            } else {
-                podConditions = append(podConditions, c)
+            if c.Type == v1.AlphaNoCompatGuaranteeDisruptionTarget {
+                // update the pod disruption condition only if transitioning to terminal phase. In particular, check if
+                // there might still be running containers to avoid sending an unnecessary PATCH request if the
+                // actual transition is delayed (see below)
+                if transitioningToTerminalPhase && !couldHaveRunningContainers {
+                    // update the LastTransitionTime
+                    updateLastTransitionTime(&newPodStatus, &oldPodStatus, c.Type)
+                    if _, c := podutil.GetPodConditionFromList(newPodStatus.Conditions, c.Type); c != nil {
+                        // for shared conditions we update or append in podConditions
+                        podConditions = statusutil.ReplaceOrAppendPodCondition(podConditions, c)
+                    }
+                }
             }
         }
     }
@@ -911,21 +920,11 @@ func mergePodStatus(oldPodStatus, newPodStatus v1.PodStatus, couldHaveRunningCon
     // the Kubelet exclusively owns must be released prior to a pod being reported terminal,
     // while resources that have participanting components above the API use the pod's
     // transition to a terminal phase (or full deletion) to release those resources.
-    if !podutil.IsPodPhaseTerminal(oldPodStatus.Phase) && podutil.IsPodPhaseTerminal(newPodStatus.Phase) {
+    if transitioningToTerminalPhase {
         if couldHaveRunningContainers {
             newPodStatus.Phase = oldPodStatus.Phase
             newPodStatus.Reason = oldPodStatus.Reason
             newPodStatus.Message = oldPodStatus.Message
-            if utilfeature.DefaultFeatureGate.Enabled(features.PodDisruptionConditions) {
-                // revert setting of the pod disruption condition until the pod is terminal in order to do not issue
-                // an unnecessary PATCH request
-                revertPodCondition(&oldPodStatus, &newPodStatus, v1.AlphaNoCompatGuaranteeDisruptionTarget)
-            }
-        } else {
-            if utilfeature.DefaultFeatureGate.Enabled(features.PodDisruptionConditions) {
-                // update the LastTransitionTime when transitioning into the failed state
-                updateLastTransitionTime(&newPodStatus, &oldPodStatus, v1.AlphaNoCompatGuaranteeDisruptionTarget)
-            }
         }
     }
 
@@ -946,18 +945,6 @@ func mergePodStatus(oldPodStatus, newPodStatus v1.PodStatus, couldHaveRunningCon
     return newPodStatus
 }
 
-func revertPodCondition(oldPodStatus, newPodStatus *v1.PodStatus, cType v1.PodConditionType) {
-    if newIndex, newCondition := podutil.GetPodConditionFromList(newPodStatus.Conditions, cType); newCondition != nil {
-        if _, oldCondition := podutil.GetPodConditionFromList(oldPodStatus.Conditions, cType); oldCondition != nil {
-            // revert the new condition to what was before
-            newPodStatus.Conditions[newIndex] = *oldCondition
-        } else {
-            // delete the new condition as it wasn't there before
-            newPodStatus.Conditions = append(newPodStatus.Conditions[:newIndex], newPodStatus.Conditions[newIndex+1:]...)
-        }
-    }
-}
-
 // NeedToReconcilePodReadiness returns if the pod "Ready" condition need to be reconcile
 func NeedToReconcilePodReadiness(pod *v1.Pod) bool {
     if len(pod.Spec.ReadinessGates) == 0 {
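With the gate shown in the previous hunk, the condition is never written prematurely, so there is nothing left to undo and revertPodCondition can be deleted. A minimal sketch of the contrast, with invented function names rather than kubelet code:

package main

import "fmt"

// oldFlow sketches the previous behavior: add the condition eagerly, then
// revert it when containers might still be running (extra work and a possible
// needless PATCH request).
func oldFlow(couldHaveRunningContainers bool) []string {
    conditions := []string{"DisruptionTarget"}
    if couldHaveRunningContainers {
        conditions = conditions[:0] // revert
    }
    return conditions
}

// newFlow sketches the commit's behavior: check first and skip the add, so no
// revert step is needed at all.
func newFlow(couldHaveRunningContainers bool) []string {
    var conditions []string
    if !couldHaveRunningContainers {
        conditions = append(conditions, "DisruptionTarget")
    }
    return conditions
}

func main() {
    fmt.Println(oldFlow(true), newFlow(true))   // [] []
    fmt.Println(oldFlow(false), newFlow(false)) // [DisruptionTarget] [DisruptionTarget]
}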
@@ -1569,6 +1569,53 @@ func TestMergePodStatus(t *testing.T) {
                 })
                 return input
             },
+            func(input v1.PodStatus) v1.PodStatus {
+                input.Phase = v1.PodFailed
+                input.Conditions = append(input.Conditions, v1.PodCondition{
+                    Type:   v1.AlphaNoCompatGuaranteeDisruptionTarget,
+                    Status: v1.ConditionTrue,
+                    Reason: "TerminationByKubelet",
+                })
+                return input
+            },
+            v1.PodStatus{
+                Phase: v1.PodFailed,
+                Conditions: []v1.PodCondition{
+                    {
+                        Type:   v1.PodReady,
+                        Status: v1.ConditionFalse,
+                        Reason: "PodFailed",
+                    },
+                    {
+                        Type:   v1.ContainersReady,
+                        Status: v1.ConditionFalse,
+                        Reason: "PodFailed",
+                    },
+                    {
+                        Type:   v1.PodScheduled,
+                        Status: v1.ConditionTrue,
+                    },
+                    {
+                        Type:   v1.AlphaNoCompatGuaranteeDisruptionTarget,
+                        Status: v1.ConditionTrue,
+                        Reason: "TerminationByKubelet",
+                    },
+                },
+                Message: "Message",
+            },
+        },
+        {
+            "don't override DisruptionTarget condition when remaining in running phase; PodDisruptionConditions enabled",
+            true,
+            false,
+            func(input v1.PodStatus) v1.PodStatus {
+                input.Conditions = append(input.Conditions, v1.PodCondition{
+                    Type:   v1.AlphaNoCompatGuaranteeDisruptionTarget,
+                    Status: v1.ConditionTrue,
+                    Reason: "EvictedByEvictionAPI",
+                })
+                return input
+            },
             func(input v1.PodStatus) v1.PodStatus {
                 input.Conditions = append(input.Conditions, v1.PodCondition{
                     Type:   v1.AlphaNoCompatGuaranteeDisruptionTarget,
@@ -1580,6 +1627,11 @@ func TestMergePodStatus(t *testing.T) {
             v1.PodStatus{
                 Phase: v1.PodRunning,
                 Conditions: []v1.PodCondition{
+                    {
+                        Type:   v1.AlphaNoCompatGuaranteeDisruptionTarget,
+                        Status: v1.ConditionTrue,
+                        Reason: "EvictedByEvictionAPI",
+                    },
                     {
                         Type:   v1.PodReady,
                         Status: v1.ConditionTrue,
@@ -1588,10 +1640,46 @@ func TestMergePodStatus(t *testing.T) {
                         Type:   v1.PodScheduled,
                         Status: v1.ConditionTrue,
                     },
+                },
+                Message: "Message",
+            },
+        },
+        {
+            "don't override DisruptionTarget condition when transitioning to failed phase but there might still be running containers; PodDisruptionConditions enabled",
+            true,
+            true,
+            func(input v1.PodStatus) v1.PodStatus {
+                input.Conditions = append(input.Conditions, v1.PodCondition{
+                    Type:   v1.AlphaNoCompatGuaranteeDisruptionTarget,
+                    Status: v1.ConditionTrue,
+                    Reason: "EvictedByEvictionAPI",
+                })
+                return input
+            },
+            func(input v1.PodStatus) v1.PodStatus {
+                input.Phase = v1.PodFailed
+                input.Conditions = append(input.Conditions, v1.PodCondition{
+                    Type:   v1.AlphaNoCompatGuaranteeDisruptionTarget,
+                    Status: v1.ConditionTrue,
+                    Reason: "TerminationByKubelet",
+                })
+                return input
+            },
+            v1.PodStatus{
+                Phase: v1.PodRunning,
+                Conditions: []v1.PodCondition{
                     {
                         Type:   v1.AlphaNoCompatGuaranteeDisruptionTarget,
                         Status: v1.ConditionTrue,
-                        Reason: "TerminationByKubelet",
+                        Reason: "EvictedByEvictionAPI",
+                    },
+                    {
+                        Type:   v1.PodReady,
+                        Status: v1.ConditionTrue,
+                    },
+                    {
+                        Type:   v1.PodScheduled,
+                        Status: v1.ConditionTrue,
                     },
                 },
                 Message: "Message",
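The TestMergePodStatus cases above cover the three interesting combinations: a terminal transition with no running containers, no phase transition at all, and a delayed transition where containers might still be running. To run them locally, presumably something like the following from the repository root (the package path is inferred from the kubelet/status import earlier in the diff):

go test ./pkg/kubelet/status/... -run TestMergePodStatus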
@@ -27,6 +27,7 @@ import (
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/apimachinery/pkg/util/strategicpatch"
     clientset "k8s.io/client-go/kubernetes"
+    podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 )
 
 // PatchPodStatus patches pod status. It returns true and avoids an update if the patch contains no changes.
@@ -68,3 +69,13 @@ func preparePatchBytesForPodStatus(namespace, name string, uid types.UID, oldPod
     }
     return patchBytes, bytes.Equal(patchBytes, []byte(fmt.Sprintf(`{"metadata":{"uid":%q}}`, uid))), nil
 }
+
+// ReplaceOrAppendPodCondition replaces the first pod condition with equal type or appends if there is none
+func ReplaceOrAppendPodCondition(conditions []v1.PodCondition, condition *v1.PodCondition) []v1.PodCondition {
+    if i, _ := podutil.GetPodConditionFromList(conditions, condition.Type); i >= 0 {
+        conditions[i] = *condition
+    } else {
+        conditions = append(conditions, *condition)
+    }
+    return conditions
+}
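For illustration, a standalone sketch of the semantics of the new ReplaceOrAppendPodCondition helper. The replaceOrAppend function below is a local stand-in that inlines the type lookup instead of calling podutil.GetPodConditionFromList, so the example depends only on k8s.io/api; it is not the repository's code.

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
)

// replaceOrAppend mirrors the new helper: overwrite the first condition with
// the same type, or append the condition if no such entry exists.
func replaceOrAppend(conditions []v1.PodCondition, condition *v1.PodCondition) []v1.PodCondition {
    for i := range conditions {
        if conditions[i].Type == condition.Type {
            conditions[i] = *condition
            return conditions
        }
    }
    return append(conditions, *condition)
}

func main() {
    conditions := []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}}

    // Appends: no condition of this type exists yet.
    conditions = replaceOrAppend(conditions, &v1.PodCondition{Type: v1.PodScheduled, Status: v1.ConditionTrue})

    // Replaces: the existing PodReady entry is overwritten in place.
    conditions = replaceOrAppend(conditions, &v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionFalse})

    fmt.Println(len(conditions), conditions[0].Status) // 2 False
}

The shared helper centralizes the replace-or-append pattern that previously lived inline in both generateAPIPodStatus and mergePodStatus, as shown in the hunks above.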
@@ -23,6 +23,7 @@ import (
 
     "reflect"
 
+    "github.com/google/go-cmp/cmp"
     v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/types"
@@ -128,3 +129,55 @@ func getPodStatus() v1.PodStatus {
         Message: "Message",
     }
 }
+
+func TestReplaceOrAppendPodCondition(t *testing.T) {
+    cType := v1.PodConditionType("ExampleType")
+    testCases := []struct {
+        description    string
+        conditions     []v1.PodCondition
+        condition      v1.PodCondition
+        wantConditions []v1.PodCondition
+    }{
+        {
+            description: "append",
+            conditions:  []v1.PodCondition{},
+            condition: v1.PodCondition{
+                Type:   cType,
+                Status: v1.ConditionTrue,
+            },
+            wantConditions: []v1.PodCondition{
+                {
+                    Type:   cType,
+                    Status: v1.ConditionTrue,
+                },
+            },
+        },
+        {
+            description: "replace",
+            conditions: []v1.PodCondition{
+                {
+                    Type:   cType,
+                    Status: v1.ConditionTrue,
+                },
+            },
+            condition: v1.PodCondition{
+                Type:   cType,
+                Status: v1.ConditionFalse,
+            },
+            wantConditions: []v1.PodCondition{
+                {
+                    Type:   cType,
+                    Status: v1.ConditionFalse,
+                },
+            },
+        },
+    }
+    for _, tc := range testCases {
+        t.Run(tc.description, func(t *testing.T) {
+            gotConditions := ReplaceOrAppendPodCondition(tc.conditions, &tc.condition)
+            if diff := cmp.Diff(tc.wantConditions, gotConditions); diff != "" {
+                t.Errorf("Unexpected conditions: %s", diff)
+            }
+        })
+    }
+}
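The new helper's unit test lives in the package added as the utilpod import earlier, so it could presumably be run with:

go test ./pkg/util/pod/... -run TestReplaceOrAppendPodCondition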