Mirror of https://github.com/k3s-io/kubernetes.git
kubelet: Preserve reason/message when phase changes
The Kubelet always clears reason and message in generateAPIPodStatus, even when the phase is unchanged. Instead, we should preserve the previous values when the phase does not change, and clear them when the phase does change. When a pod is evicted, this ensures that the eviction reason and message are propagated even in the face of subsequent updates. It also preserves the reason and message if components beyond the Kubelet choose to set them. To preserve the values we need to know the old phase, which requires a change to convertStatusToAPIStatus so that both methods have access to it.
commit 9efd40d72a
parent 33aba7ee02
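In essence, the change computes the pod's next phase first, then carries the previous reason and message forward only when the phase is unchanged; an eviction requested by an internal module still overrides phase, reason, and message afterwards. A minimal, self-contained Go sketch of the carry-forward rule (the type and helper names below are illustrative, not the Kubelet's actual ones):

package main

import "fmt"

// PodStatus stands in for the relevant fields of v1.PodStatus.
type PodStatus struct {
	Phase, Reason, Message string
}

// carryForward applies the rule this commit introduces: if the newly
// computed phase matches the previous phase, keep the previous reason and
// message, falling back to whatever the API server last reported; if the
// phase changed, the freshly cleared values stay cleared.
func carryForward(next *PodStatus, previous, apiServer PodStatus) {
	if next.Phase != previous.Phase {
		return // phase changed: reason/message remain cleared
	}
	next.Reason, next.Message = previous.Reason, previous.Message
	if len(next.Reason) == 0 {
		next.Reason = apiServer.Reason
	}
	if len(next.Message) == 0 {
		next.Message = apiServer.Message
	}
}

func main() {
	previous := PodStatus{Phase: "Failed", Reason: "Evicted", Message: "node was low on memory"}
	next := PodStatus{Phase: "Failed"} // freshly generated; reason/message start cleared
	carryForward(&next, previous, PodStatus{})
	fmt.Println(next.Reason, "/", next.Message) // Evicted / node was low on memory
}

This mirrors the "if s.Phase == oldPodStatus.Phase" block in the diff below; in the real code the fallback source is pod.Status, the status last written to the API server.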
pkg/kubelet/kubelet_pods.go

@@ -1378,24 +1378,40 @@ func getPhase(spec *v1.PodSpec, info []v1.ContainerStatus) v1.PodPhase {
 func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.PodStatus) v1.PodStatus {
 	klog.V(3).InfoS("Generating pod status", "pod", klog.KObj(pod))
-	s := kl.convertStatusToAPIStatus(pod, podStatus)
+	// use the previous pod status, or the api status, as the basis for this pod
+	oldPodStatus, found := kl.statusManager.GetPodStatus(pod.UID)
+	if !found {
+		oldPodStatus = pod.Status
+	}
+	s := kl.convertStatusToAPIStatus(pod, podStatus, oldPodStatus)
+	// calculate the next phase and preserve reason
+	allStatus := append(append([]v1.ContainerStatus{}, s.ContainerStatuses...), s.InitContainerStatuses...)
+	s.Phase = getPhase(&pod.Spec, allStatus)
+	klog.V(4).InfoS("Got phase for pod", "pod", klog.KObj(pod), "oldPhase", oldPodStatus.Phase, "phase", s.Phase)
+	if s.Phase == oldPodStatus.Phase {
+		// preserve the reason and message which is associated with the phase
+		s.Reason = oldPodStatus.Reason
+		s.Message = oldPodStatus.Message
+		if len(s.Reason) == 0 {
+			s.Reason = pod.Status.Reason
+		}
+		if len(s.Message) == 0 {
+			s.Message = pod.Status.Message
+		}
+	}
 
-	// check if an internal module has requested the pod is evicted.
+	// check if an internal module has requested the pod is evicted and override the reason and message
 	for _, podSyncHandler := range kl.PodSyncHandlers {
 		if result := podSyncHandler.ShouldEvict(pod); result.Evict {
 			s.Phase = v1.PodFailed
 			s.Reason = result.Reason
 			s.Message = result.Message
-			break
+			return *s
 		}
 	}
 
-	// Assume info is ready to process
-	spec := &pod.Spec
-	allStatus := append(append([]v1.ContainerStatus{}, s.ContainerStatuses...), s.InitContainerStatuses...)
-	s.Phase = getPhase(spec, allStatus)
-	klog.V(4).InfoS("Got phase for pod", "pod", klog.KObj(pod), "phase", s.Phase)
-	// Check for illegal phase transition
+	// pods are not allowed to transition out of terminal phases
 	if pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodSucceeded {
 		// API server shows terminal phase; transitions are not allowed
 		if s.Phase != pod.Status.Phase {
@@ -1404,6 +1420,10 @@ func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.PodStatus) v1.PodStatus {
 			s.Phase = pod.Status.Phase
 		}
 	}
 
+	spec := &pod.Spec
+
 	// ensure the probe managers have up to date status for containers
 	kl.probeManager.UpdatePodStatus(pod.UID, s)
 	s.Conditions = append(s.Conditions, status.GeneratePodInitializedCondition(spec, s.InitContainerStatuses, s.Phase))
 	s.Conditions = append(s.Conditions, status.GeneratePodReadyCondition(spec, s.Conditions, s.ContainerStatuses, s.Phase))
@@ -1466,10 +1486,10 @@ func (kl *Kubelet) sortPodIPs(podIPs []string) []string {
 	return ips
 }
 
-// convertStatusToAPIStatus creates an api PodStatus for the given pod from
-// the given internal pod status. It is purely transformative and does not
-// alter the kubelet state at all.
-func (kl *Kubelet) convertStatusToAPIStatus(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *v1.PodStatus {
+// convertStatusToAPIStatus initialize an api PodStatus for the given pod from
+// the given internal pod status and the previous state of the pod from the API.
+// It is purely transformative and does not alter the kubelet state at all.
+func (kl *Kubelet) convertStatusToAPIStatus(pod *v1.Pod, podStatus *kubecontainer.PodStatus, oldPodStatus v1.PodStatus) *v1.PodStatus {
 	var apiPodStatus v1.PodStatus
 
 	// copy pod status IPs to avoid race conditions with PodStatus #102806
@@ -1490,11 +1510,6 @@ func (kl *Kubelet) convertStatusToAPIStatus(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *v1.PodStatus {
 	// set status for Pods created on versions of kube older than 1.6
 	apiPodStatus.QOSClass = v1qos.GetPodQOS(pod)
 
-	oldPodStatus, found := kl.statusManager.GetPodStatus(pod.UID)
-	if !found {
-		oldPodStatus = pod.Status
-	}
-
 	apiPodStatus.ContainerStatuses = kl.convertToAPIContainerStatuses(
 		pod, podStatus,
 		oldPodStatus.ContainerStatuses,
pkg/kubelet/kubelet_pods_test.go

@@ -31,11 +31,13 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	v1 "k8s.io/api/core/v1"
+	apiequality "k8s.io/apimachinery/pkg/api/equality"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/diff"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	core "k8s.io/client-go/testing"
 	"k8s.io/client-go/tools/record"
@@ -51,6 +53,7 @@ import (
 	containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
 	"k8s.io/kubernetes/pkg/kubelet/cri/streaming/portforward"
 	"k8s.io/kubernetes/pkg/kubelet/cri/streaming/remotecommand"
+	"k8s.io/kubernetes/pkg/kubelet/prober/results"
 	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 	"k8s.io/kubernetes/pkg/volume/util/hostutil"
 	"k8s.io/kubernetes/pkg/volume/util/subpath"
@@ -1806,10 +1809,13 @@ func TestMakeEnvironmentVariables(t *testing.T) {
 }
 
 func waitingState(cName string) v1.ContainerStatus {
+	return waitingStateWithReason(cName, "")
+}
+func waitingStateWithReason(cName, reason string) v1.ContainerStatus {
 	return v1.ContainerStatus{
 		Name: cName,
 		State: v1.ContainerState{
-			Waiting: &v1.ContainerStateWaiting{},
+			Waiting: &v1.ContainerStateWaiting{Reason: reason},
 		},
 	}
 }
@@ -1847,6 +1853,14 @@ func runningState(cName string) v1.ContainerStatus {
 		},
 	}
 }
+func runningStateWithStartedAt(cName string, startedAt time.Time) v1.ContainerStatus {
+	return v1.ContainerStatus{
+		Name: cName,
+		State: v1.ContainerState{
+			Running: &v1.ContainerStateRunning{StartedAt: metav1.Time{Time: startedAt}},
+		},
+	}
+}
 func stoppedState(cName string) v1.ContainerStatus {
 	return v1.ContainerStatus{
 		Name: cName,
@@ -1891,6 +1905,14 @@ func waitingWithLastTerminationUnknown(cName string, restartCount int32) v1.ContainerStatus {
 		RestartCount: restartCount,
 	}
 }
+func ready(status v1.ContainerStatus) v1.ContainerStatus {
+	status.Ready = true
+	return status
+}
+func withID(status v1.ContainerStatus, id string) v1.ContainerStatus {
+	status.ContainerID = id
+	return status
+}
 
 func TestPodPhaseWithRestartAlways(t *testing.T) {
 	desiredState := v1.PodSpec{
@@ -2506,6 +2528,303 @@ func TestConvertToAPIContainerStatuses(t *testing.T) {
 	}
 }
 
+func Test_generateAPIPodStatus(t *testing.T) {
+	desiredState := v1.PodSpec{
+		NodeName: "machine",
+		Containers: []v1.Container{
+			{Name: "containerA"},
+			{Name: "containerB"},
+		},
+		RestartPolicy: v1.RestartPolicyAlways,
+	}
+	now := metav1.Now()
+
+	tests := []struct {
+		name             string
+		pod              *v1.Pod
+		currentStatus    *kubecontainer.PodStatus
+		unreadyContainer []string
+		previousStatus   v1.PodStatus
+		expected         v1.PodStatus
+	}{
+		{
+			name: "no current status, with previous statuses and deletion",
+			pod: &v1.Pod{
+				Spec: desiredState,
+				Status: v1.PodStatus{
+					ContainerStatuses: []v1.ContainerStatus{
+						runningState("containerA"),
+						runningState("containerB"),
+					},
+				},
+				ObjectMeta: metav1.ObjectMeta{Name: "my-pod", DeletionTimestamp: &now},
+			},
+			currentStatus: &kubecontainer.PodStatus{},
+			previousStatus: v1.PodStatus{
+				ContainerStatuses: []v1.ContainerStatus{
+					runningState("containerA"),
+					runningState("containerB"),
+				},
+			},
+			expected: v1.PodStatus{
+				Phase:    v1.PodRunning,
+				HostIP:   "127.0.0.1",
+				QOSClass: v1.PodQOSBestEffort,
+				Conditions: []v1.PodCondition{
+					{Type: v1.PodInitialized, Status: v1.ConditionTrue},
+					{Type: v1.PodReady, Status: v1.ConditionTrue},
+					{Type: v1.ContainersReady, Status: v1.ConditionTrue},
+					{Type: v1.PodScheduled, Status: v1.ConditionTrue},
+				},
+				ContainerStatuses: []v1.ContainerStatus{
+					ready(waitingWithLastTerminationUnknown("containerA", 0)),
+					ready(waitingWithLastTerminationUnknown("containerB", 0)),
+				},
+			},
+		},
+		{
+			name: "no current status, with previous statuses and no deletion",
+			pod: &v1.Pod{
+				Spec: desiredState,
+				Status: v1.PodStatus{
+					ContainerStatuses: []v1.ContainerStatus{
+						runningState("containerA"),
+						runningState("containerB"),
+					},
+				},
+			},
+			currentStatus: &kubecontainer.PodStatus{},
+			previousStatus: v1.PodStatus{
+				ContainerStatuses: []v1.ContainerStatus{
+					runningState("containerA"),
+					runningState("containerB"),
+				},
+			},
+			expected: v1.PodStatus{
+				Phase:    v1.PodRunning,
+				HostIP:   "127.0.0.1",
+				QOSClass: v1.PodQOSBestEffort,
+				Conditions: []v1.PodCondition{
+					{Type: v1.PodInitialized, Status: v1.ConditionTrue},
+					{Type: v1.PodReady, Status: v1.ConditionTrue},
+					{Type: v1.ContainersReady, Status: v1.ConditionTrue},
+					{Type: v1.PodScheduled, Status: v1.ConditionTrue},
+				},
+				ContainerStatuses: []v1.ContainerStatus{
+					ready(waitingWithLastTerminationUnknown("containerA", 1)),
+					ready(waitingWithLastTerminationUnknown("containerB", 1)),
+				},
+			},
+		},
+		{
+			name: "terminal phase cannot be changed (apiserver previous is succeeded)",
+			pod: &v1.Pod{
+				Spec: desiredState,
+				Status: v1.PodStatus{
+					Phase: v1.PodSucceeded,
+					ContainerStatuses: []v1.ContainerStatus{
+						runningState("containerA"),
+						runningState("containerB"),
+					},
+				},
+			},
+			currentStatus: &kubecontainer.PodStatus{},
+			previousStatus: v1.PodStatus{
+				ContainerStatuses: []v1.ContainerStatus{
+					runningState("containerA"),
+					runningState("containerB"),
+				},
+			},
+			expected: v1.PodStatus{
+				Phase:    v1.PodSucceeded,
+				HostIP:   "127.0.0.1",
+				QOSClass: v1.PodQOSBestEffort,
+				Conditions: []v1.PodCondition{
+					{Type: v1.PodInitialized, Status: v1.ConditionTrue, Reason: "PodCompleted"},
+					{Type: v1.PodReady, Status: v1.ConditionFalse, Reason: "PodCompleted"},
+					{Type: v1.ContainersReady, Status: v1.ConditionFalse, Reason: "PodCompleted"},
+					{Type: v1.PodScheduled, Status: v1.ConditionTrue},
+				},
+				ContainerStatuses: []v1.ContainerStatus{
+					ready(waitingWithLastTerminationUnknown("containerA", 1)),
+					ready(waitingWithLastTerminationUnknown("containerB", 1)),
+				},
+			},
+		},
+		{
+			name: "running can revert to pending",
+			pod: &v1.Pod{
+				Spec: desiredState,
+				Status: v1.PodStatus{
+					Phase: v1.PodRunning,
+					ContainerStatuses: []v1.ContainerStatus{
+						runningState("containerA"),
+						runningState("containerB"),
+					},
+				},
+			},
+			currentStatus: &kubecontainer.PodStatus{},
+			previousStatus: v1.PodStatus{
+				ContainerStatuses: []v1.ContainerStatus{
+					waitingState("containerA"),
+					waitingState("containerB"),
+				},
+			},
+			expected: v1.PodStatus{
+				Phase:    v1.PodPending,
+				HostIP:   "127.0.0.1",
+				QOSClass: v1.PodQOSBestEffort,
+				Conditions: []v1.PodCondition{
+					{Type: v1.PodInitialized, Status: v1.ConditionTrue},
+					{Type: v1.PodReady, Status: v1.ConditionTrue},
+					{Type: v1.ContainersReady, Status: v1.ConditionTrue},
+					{Type: v1.PodScheduled, Status: v1.ConditionTrue},
+				},
+				ContainerStatuses: []v1.ContainerStatus{
+					ready(waitingStateWithReason("containerA", "ContainerCreating")),
+					ready(waitingStateWithReason("containerB", "ContainerCreating")),
+				},
+			},
+		},
+		{
+			name: "reason and message are preserved when phase doesn't change",
+			pod: &v1.Pod{
+				Spec: desiredState,
+				Status: v1.PodStatus{
+					Phase: v1.PodRunning,
+					ContainerStatuses: []v1.ContainerStatus{
+						waitingState("containerA"),
+						waitingState("containerB"),
+					},
+				},
+			},
+			currentStatus: &kubecontainer.PodStatus{
+				ContainerStatuses: []*kubecontainer.Status{
+					{
+						ID:        kubecontainer.ContainerID{ID: "foo"},
+						Name:      "containerB",
+						StartedAt: time.Unix(1, 0).UTC(),
+						State:     kubecontainer.ContainerStateRunning,
+					},
+				},
+			},
+			previousStatus: v1.PodStatus{
+				Phase:   v1.PodPending,
+				Reason:  "Test",
+				Message: "test",
+				ContainerStatuses: []v1.ContainerStatus{
+					waitingState("containerA"),
+					runningState("containerB"),
+				},
+			},
+			expected: v1.PodStatus{
+				Phase:    v1.PodPending,
+				Reason:   "Test",
+				Message:  "test",
+				HostIP:   "127.0.0.1",
+				QOSClass: v1.PodQOSBestEffort,
+				Conditions: []v1.PodCondition{
+					{Type: v1.PodInitialized, Status: v1.ConditionTrue},
+					{Type: v1.PodReady, Status: v1.ConditionTrue},
+					{Type: v1.ContainersReady, Status: v1.ConditionTrue},
+					{Type: v1.PodScheduled, Status: v1.ConditionTrue},
+				},
+				ContainerStatuses: []v1.ContainerStatus{
+					ready(waitingStateWithReason("containerA", "ContainerCreating")),
+					ready(withID(runningStateWithStartedAt("containerB", time.Unix(1, 0).UTC()), "://foo")),
+				},
+			},
+		},
+		{
+			name: "reason and message are cleared when phase changes",
+			pod: &v1.Pod{
+				Spec: desiredState,
+				Status: v1.PodStatus{
+					Phase: v1.PodPending,
+					ContainerStatuses: []v1.ContainerStatus{
+						waitingState("containerA"),
+						waitingState("containerB"),
+					},
+				},
+			},
+			currentStatus: &kubecontainer.PodStatus{
+				ContainerStatuses: []*kubecontainer.Status{
+					{
+						ID:        kubecontainer.ContainerID{ID: "c1"},
+						Name:      "containerA",
+						StartedAt: time.Unix(1, 0).UTC(),
+						State:     kubecontainer.ContainerStateRunning,
+					},
+					{
+						ID:        kubecontainer.ContainerID{ID: "c2"},
+						Name:      "containerB",
+						StartedAt: time.Unix(2, 0).UTC(),
+						State:     kubecontainer.ContainerStateRunning,
+					},
+				},
+			},
+			previousStatus: v1.PodStatus{
+				Phase:   v1.PodPending,
+				Reason:  "Test",
+				Message: "test",
+				ContainerStatuses: []v1.ContainerStatus{
+					runningState("containerA"),
+					runningState("containerB"),
+				},
+			},
+			expected: v1.PodStatus{
+				Phase:    v1.PodRunning,
+				HostIP:   "127.0.0.1",
+				QOSClass: v1.PodQOSBestEffort,
+				Conditions: []v1.PodCondition{
+					{Type: v1.PodInitialized, Status: v1.ConditionTrue},
+					{Type: v1.PodReady, Status: v1.ConditionTrue},
+					{Type: v1.ContainersReady, Status: v1.ConditionTrue},
+					{Type: v1.PodScheduled, Status: v1.ConditionTrue},
+				},
+				ContainerStatuses: []v1.ContainerStatus{
+					ready(withID(runningStateWithStartedAt("containerA", time.Unix(1, 0).UTC()), "://c1")),
+					ready(withID(runningStateWithStartedAt("containerB", time.Unix(2, 0).UTC()), "://c2")),
+				},
+			},
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
+			defer testKubelet.Cleanup()
+			kl := testKubelet.kubelet
+			kl.statusManager.SetPodStatus(test.pod, test.previousStatus)
+			for _, name := range test.unreadyContainer {
+				kl.readinessManager.Set(kubecontainer.BuildContainerID("", findContainerStatusByName(test.expected, name).ContainerID), results.Failure, test.pod)
+			}
+			actual := kl.generateAPIPodStatus(test.pod, test.currentStatus)
+			if !apiequality.Semantic.DeepEqual(test.expected, actual) {
+				t.Fatalf("Unexpected status: %s", diff.ObjectReflectDiff(actual, test.expected))
+			}
+		})
+	}
+}
+
+func findContainerStatusByName(status v1.PodStatus, name string) *v1.ContainerStatus {
+	for i, c := range status.InitContainerStatuses {
+		if c.Name == name {
+			return &status.InitContainerStatuses[i]
+		}
+	}
+	for i, c := range status.ContainerStatuses {
+		if c.Name == name {
+			return &status.ContainerStatuses[i]
+		}
+	}
+	for i, c := range status.EphemeralContainerStatuses {
+		if c.Name == name {
+			return &status.EphemeralContainerStatuses[i]
+		}
+	}
+	return nil
+}
+
 func TestGetExec(t *testing.T) {
 	const (
 		podName = "podFoo"