Merge pull request #103785 from smarterclayton/preserve_reason
Ensure that Reason and Message are preserved on pod status
Commit: 9f47110aa2
--- a/pkg/kubelet/kubelet_pods.go
+++ b/pkg/kubelet/kubelet_pods.go
@@ -1378,24 +1378,40 @@ func getPhase(spec *v1.PodSpec, info []v1.ContainerStatus) v1.PodPhase {
 func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.PodStatus) v1.PodStatus {
 	klog.V(3).InfoS("Generating pod status", "pod", klog.KObj(pod))
 
-	s := kl.convertStatusToAPIStatus(pod, podStatus)
+	// use the previous pod status, or the api status, as the basis for this pod
+	oldPodStatus, found := kl.statusManager.GetPodStatus(pod.UID)
+	if !found {
+		oldPodStatus = pod.Status
+	}
+	s := kl.convertStatusToAPIStatus(pod, podStatus, oldPodStatus)
 
-	// check if an internal module has requested the pod is evicted.
+	// calculate the next phase and preserve reason
+	allStatus := append(append([]v1.ContainerStatus{}, s.ContainerStatuses...), s.InitContainerStatuses...)
+	s.Phase = getPhase(&pod.Spec, allStatus)
+	klog.V(4).InfoS("Got phase for pod", "pod", klog.KObj(pod), "oldPhase", oldPodStatus.Phase, "phase", s.Phase)
+	if s.Phase == oldPodStatus.Phase {
+		// preserve the reason and message which is associated with the phase
+		s.Reason = oldPodStatus.Reason
+		s.Message = oldPodStatus.Message
+		if len(s.Reason) == 0 {
+			s.Reason = pod.Status.Reason
+		}
+		if len(s.Message) == 0 {
+			s.Message = pod.Status.Message
+		}
+	}
+
+	// check if an internal module has requested the pod is evicted and override the reason and message
 	for _, podSyncHandler := range kl.PodSyncHandlers {
 		if result := podSyncHandler.ShouldEvict(pod); result.Evict {
 			s.Phase = v1.PodFailed
 			s.Reason = result.Reason
 			s.Message = result.Message
-			return *s
+			break
 		}
 	}
 
-	// Assume info is ready to process
-	spec := &pod.Spec
-	allStatus := append(append([]v1.ContainerStatus{}, s.ContainerStatuses...), s.InitContainerStatuses...)
-	s.Phase = getPhase(spec, allStatus)
-	klog.V(4).InfoS("Got phase for pod", "pod", klog.KObj(pod), "phase", s.Phase)
-	// Check for illegal phase transition
+	// pods are not allowed to transition out of terminal phases
 	if pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodSucceeded {
 		// API server shows terminal phase; transitions are not allowed
 		if s.Phase != pod.Status.Phase {
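The core of the change is in the hunk above: the pod's next phase is now computed before the eviction check, and when it matches the phase the status manager last recorded, the previous Reason and Message are carried forward instead of being dropped on every sync. A minimal standalone sketch of that rule (simplified types and field names, not the kubelet's own):

package main

import "fmt"

// PodStatus is a pared-down stand-in for v1.PodStatus, used only to
// illustrate the preservation rule from the hunk above.
type PodStatus struct {
	Phase   string
	Reason  string
	Message string
}

// preserveReason mirrors the new logic: when the freshly computed phase
// equals the previously observed phase, carry the old Reason/Message
// forward, falling back to the API server's last-known values if the
// cached status has none.
func preserveReason(next, old, api PodStatus) PodStatus {
	if next.Phase == old.Phase {
		next.Reason, next.Message = old.Reason, old.Message
		if next.Reason == "" {
			next.Reason = api.Reason
		}
		if next.Message == "" {
			next.Message = api.Message
		}
	}
	return next
}

func main() {
	old := PodStatus{Phase: "Pending", Reason: "Test", Message: "test"}
	// Same phase: Reason/Message survive the sync.
	fmt.Println(preserveReason(PodStatus{Phase: "Pending"}, old, PodStatus{}))
	// Phase changed: Reason/Message are left empty.
	fmt.Println(preserveReason(PodStatus{Phase: "Running"}, old, PodStatus{}))
}

The two cases in main correspond directly to the "reason and message are preserved when phase doesn't change" and "reason and message are cleared when phase changes" test cases added later in this diff.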
@@ -1404,18 +1420,27 @@ func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.Po
 			s.Phase = pod.Status.Phase
 		}
 	}
 
+	// ensure the probe managers have up to date status for containers
 	kl.probeManager.UpdatePodStatus(pod.UID, s)
-	s.Conditions = append(s.Conditions, status.GeneratePodInitializedCondition(spec, s.InitContainerStatuses, s.Phase))
-	s.Conditions = append(s.Conditions, status.GeneratePodReadyCondition(spec, s.Conditions, s.ContainerStatuses, s.Phase))
-	s.Conditions = append(s.Conditions, status.GenerateContainersReadyCondition(spec, s.ContainerStatuses, s.Phase))
-	// Status manager will take care of the LastTransitionTimestamp, either preserve
-	// the timestamp from apiserver, or set a new one. When kubelet sees the pod,
-	// `PodScheduled` condition must be true.
+
+	// preserve all conditions not owned by the kubelet
+	s.Conditions = make([]v1.PodCondition, 0, len(pod.Status.Conditions)+1)
+	for _, c := range pod.Status.Conditions {
+		if !kubetypes.PodConditionByKubelet(c.Type) {
+			s.Conditions = append(s.Conditions, c)
+		}
+	}
+	// set all Kubelet-owned conditions
+	s.Conditions = append(s.Conditions, status.GeneratePodInitializedCondition(&pod.Spec, s.InitContainerStatuses, s.Phase))
+	s.Conditions = append(s.Conditions, status.GeneratePodReadyCondition(&pod.Spec, s.Conditions, s.ContainerStatuses, s.Phase))
+	s.Conditions = append(s.Conditions, status.GenerateContainersReadyCondition(&pod.Spec, s.ContainerStatuses, s.Phase))
 	s.Conditions = append(s.Conditions, v1.PodCondition{
 		Type:   v1.PodScheduled,
 		Status: v1.ConditionTrue,
 	})
 
+	// set HostIP and initialize PodIP/PodIPs for host network pods
 	if kl.kubeClient != nil {
 		hostIPs, err := kl.getHostIPsAnyWay()
 		if err != nil {
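The other half of the change rebuilds s.Conditions on every sync: conditions the kubelet does not own are copied through from the API object first, and the kubelet-owned ones are regenerated after. A rough standalone sketch of that split; the ownership predicate below stands in for kubetypes.PodConditionByKubelet, and its hard-coded list of condition types is an illustrative assumption:

package main

import "fmt"

type PodCondition struct {
	Type   string
	Status string
}

// ownedByKubelet plays the role of kubetypes.PodConditionByKubelet; the
// concrete set of types here is assumed for illustration only.
func ownedByKubelet(conditionType string) bool {
	switch conditionType {
	case "Initialized", "Ready", "ContainersReady", "PodScheduled":
		return true
	}
	return false
}

// mergeConditions keeps third-party conditions from the API object and
// appends freshly generated kubelet-owned ones, as the hunk above does.
func mergeConditions(fromAPI, generated []PodCondition) []PodCondition {
	out := make([]PodCondition, 0, len(fromAPI)+len(generated))
	for _, c := range fromAPI {
		if !ownedByKubelet(c.Type) {
			out = append(out, c)
		}
	}
	return append(out, generated...)
}

func main() {
	fromAPI := []PodCondition{
		{Type: "example.com/custom", Status: "True"}, // preserved as-is
		{Type: "Ready", Status: "False"},             // dropped; regenerated below
	}
	generated := []PodCondition{
		{Type: "Ready", Status: "True"},
		{Type: "PodScheduled", Status: "True"},
	}
	fmt.Println(mergeConditions(fromAPI, generated))
}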
@@ -1466,10 +1491,10 @@ func (kl *Kubelet) sortPodIPs(podIPs []string) []string {
 	return ips
 }
 
-// convertStatusToAPIStatus creates an api PodStatus for the given pod from
-// the given internal pod status. It is purely transformative and does not
-// alter the kubelet state at all.
-func (kl *Kubelet) convertStatusToAPIStatus(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *v1.PodStatus {
+// convertStatusToAPIStatus initialize an api PodStatus for the given pod from
+// the given internal pod status and the previous state of the pod from the API.
+// It is purely transformative and does not alter the kubelet state at all.
+func (kl *Kubelet) convertStatusToAPIStatus(pod *v1.Pod, podStatus *kubecontainer.PodStatus, oldPodStatus v1.PodStatus) *v1.PodStatus {
 	var apiPodStatus v1.PodStatus
 
 	// copy pod status IPs to avoid race conditions with PodStatus #102806
@@ -1490,11 +1515,6 @@ func (kl *Kubelet) convertStatusToAPIStatus(pod *v1.Pod, podStatus *kubecontaine
 	// set status for Pods created on versions of kube older than 1.6
 	apiPodStatus.QOSClass = v1qos.GetPodQOS(pod)
 
-	oldPodStatus, found := kl.statusManager.GetPodStatus(pod.UID)
-	if !found {
-		oldPodStatus = pod.Status
-	}
-
 	apiPodStatus.ContainerStatuses = kl.convertToAPIContainerStatuses(
 		pod, podStatus,
 		oldPodStatus.ContainerStatuses,
@@ -1526,13 +1546,6 @@ func (kl *Kubelet) convertStatusToAPIStatus(pod *v1.Pod, podStatus *kubecontaine
 		)
 	}
 
-	// Preserves conditions not controlled by kubelet
-	for _, c := range pod.Status.Conditions {
-		if !kubetypes.PodConditionByKubelet(c.Type) {
-			apiPodStatus.Conditions = append(apiPodStatus.Conditions, c)
-		}
-	}
-
 	return &apiPodStatus
 }
 
@@ -1767,7 +1780,7 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecon
 	if isInitContainer {
 		return kubetypes.SortStatusesOfInitContainers(pod, statuses)
 	}
-	var containerStatuses []v1.ContainerStatus
+	containerStatuses := make([]v1.ContainerStatus, 0, len(statuses))
 	for _, status := range statuses {
 		containerStatuses = append(containerStatuses, *status)
 	}
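The last hunk in this file is a small allocation tweak: replacing var containerStatuses []v1.ContainerStatus with make([]v1.ContainerStatus, 0, len(statuses)) reserves capacity up front, so the appends in the loop never reallocate, while the zero length keeps the slice free of zero-value placeholders. A tiny self-contained illustration:

package main

import "fmt"

func main() {
	statuses := []string{"a", "b", "c"}
	// Length 0, capacity 3: the appends below never reallocate, and
	// the slice holds only the elements actually appended.
	out := make([]string, 0, len(statuses))
	for _, s := range statuses {
		out = append(out, s)
	}
	fmt.Println(out, len(out), cap(out)) // [a b c] 3 3
}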
--- a/pkg/kubelet/kubelet_pods_test.go
+++ b/pkg/kubelet/kubelet_pods_test.go
@@ -31,11 +31,13 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	v1 "k8s.io/api/core/v1"
+	apiequality "k8s.io/apimachinery/pkg/api/equality"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/diff"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	core "k8s.io/client-go/testing"
 	"k8s.io/client-go/tools/record"
@@ -51,6 +53,7 @@ import (
 	containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
 	"k8s.io/kubernetes/pkg/kubelet/cri/streaming/portforward"
 	"k8s.io/kubernetes/pkg/kubelet/cri/streaming/remotecommand"
+	"k8s.io/kubernetes/pkg/kubelet/prober/results"
 	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 	"k8s.io/kubernetes/pkg/volume/util/hostutil"
 	"k8s.io/kubernetes/pkg/volume/util/subpath"
@@ -1806,10 +1809,13 @@ func TestMakeEnvironmentVariables(t *testing.T) {
 }
 
 func waitingState(cName string) v1.ContainerStatus {
+	return waitingStateWithReason(cName, "")
+}
+func waitingStateWithReason(cName, reason string) v1.ContainerStatus {
 	return v1.ContainerStatus{
 		Name: cName,
 		State: v1.ContainerState{
-			Waiting: &v1.ContainerStateWaiting{},
+			Waiting: &v1.ContainerStateWaiting{Reason: reason},
 		},
 	}
 }
@@ -1847,6 +1853,14 @@ func runningState(cName string) v1.ContainerStatus {
 		},
 	}
 }
+func runningStateWithStartedAt(cName string, startedAt time.Time) v1.ContainerStatus {
+	return v1.ContainerStatus{
+		Name: cName,
+		State: v1.ContainerState{
+			Running: &v1.ContainerStateRunning{StartedAt: metav1.Time{Time: startedAt}},
+		},
+	}
+}
 func stoppedState(cName string) v1.ContainerStatus {
 	return v1.ContainerStatus{
 		Name: cName,
@@ -1891,6 +1905,14 @@ func waitingWithLastTerminationUnknown(cName string, restartCount int32) v1.Cont
 		RestartCount: restartCount,
 	}
 }
+func ready(status v1.ContainerStatus) v1.ContainerStatus {
+	status.Ready = true
+	return status
+}
+func withID(status v1.ContainerStatus, id string) v1.ContainerStatus {
+	status.ContainerID = id
+	return status
+}
 
 func TestPodPhaseWithRestartAlways(t *testing.T) {
 	desiredState := v1.PodSpec{
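The helpers added above and earlier (waitingStateWithReason, runningStateWithStartedAt, ready, withID) are designed to chain, which keeps each expectation in the new test below down to a single expression per container. A small standalone program, with the helpers copied from this diff, shows the composition:

package main

import (
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Copies of the diff's test helpers, reproduced here to show how they chain.
func runningStateWithStartedAt(cName string, startedAt time.Time) v1.ContainerStatus {
	return v1.ContainerStatus{
		Name: cName,
		State: v1.ContainerState{
			Running: &v1.ContainerStateRunning{StartedAt: metav1.Time{Time: startedAt}},
		},
	}
}

func ready(status v1.ContainerStatus) v1.ContainerStatus {
	status.Ready = true
	return status
}

func withID(status v1.ContainerStatus, id string) v1.ContainerStatus {
	status.ContainerID = id
	return status
}

func main() {
	// A ready, running container with a known start time and container ID.
	s := ready(withID(runningStateWithStartedAt("containerB", time.Unix(1, 0).UTC()), "://c1"))
	fmt.Println(s.Name, s.Ready, s.ContainerID)
}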
@@ -2506,6 +2528,303 @@ func TestConvertToAPIContainerStatuses(t *testing.T) {
 	}
 }
 
+func Test_generateAPIPodStatus(t *testing.T) {
+	desiredState := v1.PodSpec{
+		NodeName: "machine",
+		Containers: []v1.Container{
+			{Name: "containerA"},
+			{Name: "containerB"},
+		},
+		RestartPolicy: v1.RestartPolicyAlways,
+	}
+	now := metav1.Now()
+
+	tests := []struct {
+		name             string
+		pod              *v1.Pod
+		currentStatus    *kubecontainer.PodStatus
+		unreadyContainer []string
+		previousStatus   v1.PodStatus
+		expected         v1.PodStatus
+	}{
+		{
+			name: "no current status, with previous statuses and deletion",
+			pod: &v1.Pod{
+				Spec: desiredState,
+				Status: v1.PodStatus{
+					ContainerStatuses: []v1.ContainerStatus{
+						runningState("containerA"),
+						runningState("containerB"),
+					},
+				},
+				ObjectMeta: metav1.ObjectMeta{Name: "my-pod", DeletionTimestamp: &now},
+			},
+			currentStatus: &kubecontainer.PodStatus{},
+			previousStatus: v1.PodStatus{
+				ContainerStatuses: []v1.ContainerStatus{
+					runningState("containerA"),
+					runningState("containerB"),
+				},
+			},
+			expected: v1.PodStatus{
+				Phase:    v1.PodRunning,
+				HostIP:   "127.0.0.1",
+				QOSClass: v1.PodQOSBestEffort,
+				Conditions: []v1.PodCondition{
+					{Type: v1.PodInitialized, Status: v1.ConditionTrue},
+					{Type: v1.PodReady, Status: v1.ConditionTrue},
+					{Type: v1.ContainersReady, Status: v1.ConditionTrue},
+					{Type: v1.PodScheduled, Status: v1.ConditionTrue},
+				},
+				ContainerStatuses: []v1.ContainerStatus{
+					ready(waitingWithLastTerminationUnknown("containerA", 0)),
+					ready(waitingWithLastTerminationUnknown("containerB", 0)),
+				},
+			},
+		},
+		{
+			name: "no current status, with previous statuses and no deletion",
+			pod: &v1.Pod{
+				Spec: desiredState,
+				Status: v1.PodStatus{
+					ContainerStatuses: []v1.ContainerStatus{
+						runningState("containerA"),
+						runningState("containerB"),
+					},
+				},
+			},
+			currentStatus: &kubecontainer.PodStatus{},
+			previousStatus: v1.PodStatus{
+				ContainerStatuses: []v1.ContainerStatus{
+					runningState("containerA"),
+					runningState("containerB"),
+				},
+			},
+			expected: v1.PodStatus{
+				Phase:    v1.PodRunning,
+				HostIP:   "127.0.0.1",
+				QOSClass: v1.PodQOSBestEffort,
+				Conditions: []v1.PodCondition{
+					{Type: v1.PodInitialized, Status: v1.ConditionTrue},
+					{Type: v1.PodReady, Status: v1.ConditionTrue},
+					{Type: v1.ContainersReady, Status: v1.ConditionTrue},
+					{Type: v1.PodScheduled, Status: v1.ConditionTrue},
+				},
+				ContainerStatuses: []v1.ContainerStatus{
+					ready(waitingWithLastTerminationUnknown("containerA", 1)),
+					ready(waitingWithLastTerminationUnknown("containerB", 1)),
+				},
+			},
+		},
+		{
+			name: "terminal phase cannot be changed (apiserver previous is succeeded)",
+			pod: &v1.Pod{
+				Spec: desiredState,
+				Status: v1.PodStatus{
+					Phase: v1.PodSucceeded,
+					ContainerStatuses: []v1.ContainerStatus{
+						runningState("containerA"),
+						runningState("containerB"),
+					},
+				},
+			},
+			currentStatus: &kubecontainer.PodStatus{},
+			previousStatus: v1.PodStatus{
+				ContainerStatuses: []v1.ContainerStatus{
+					runningState("containerA"),
+					runningState("containerB"),
+				},
+			},
+			expected: v1.PodStatus{
+				Phase:    v1.PodSucceeded,
+				HostIP:   "127.0.0.1",
+				QOSClass: v1.PodQOSBestEffort,
+				Conditions: []v1.PodCondition{
+					{Type: v1.PodInitialized, Status: v1.ConditionTrue, Reason: "PodCompleted"},
+					{Type: v1.PodReady, Status: v1.ConditionFalse, Reason: "PodCompleted"},
+					{Type: v1.ContainersReady, Status: v1.ConditionFalse, Reason: "PodCompleted"},
+					{Type: v1.PodScheduled, Status: v1.ConditionTrue},
+				},
+				ContainerStatuses: []v1.ContainerStatus{
+					ready(waitingWithLastTerminationUnknown("containerA", 1)),
+					ready(waitingWithLastTerminationUnknown("containerB", 1)),
+				},
+			},
+		},
+		{
+			name: "running can revert to pending",
+			pod: &v1.Pod{
+				Spec: desiredState,
+				Status: v1.PodStatus{
+					Phase: v1.PodRunning,
+					ContainerStatuses: []v1.ContainerStatus{
+						runningState("containerA"),
+						runningState("containerB"),
+					},
+				},
+			},
+			currentStatus: &kubecontainer.PodStatus{},
+			previousStatus: v1.PodStatus{
+				ContainerStatuses: []v1.ContainerStatus{
+					waitingState("containerA"),
+					waitingState("containerB"),
+				},
+			},
+			expected: v1.PodStatus{
+				Phase:    v1.PodPending,
+				HostIP:   "127.0.0.1",
+				QOSClass: v1.PodQOSBestEffort,
+				Conditions: []v1.PodCondition{
+					{Type: v1.PodInitialized, Status: v1.ConditionTrue},
+					{Type: v1.PodReady, Status: v1.ConditionTrue},
+					{Type: v1.ContainersReady, Status: v1.ConditionTrue},
+					{Type: v1.PodScheduled, Status: v1.ConditionTrue},
+				},
+				ContainerStatuses: []v1.ContainerStatus{
+					ready(waitingStateWithReason("containerA", "ContainerCreating")),
+					ready(waitingStateWithReason("containerB", "ContainerCreating")),
+				},
+			},
+		},
+		{
+			name: "reason and message are preserved when phase doesn't change",
+			pod: &v1.Pod{
+				Spec: desiredState,
+				Status: v1.PodStatus{
+					Phase: v1.PodRunning,
+					ContainerStatuses: []v1.ContainerStatus{
+						waitingState("containerA"),
+						waitingState("containerB"),
+					},
+				},
+			},
+			currentStatus: &kubecontainer.PodStatus{
+				ContainerStatuses: []*kubecontainer.Status{
+					{
+						ID:        kubecontainer.ContainerID{ID: "foo"},
+						Name:      "containerB",
+						StartedAt: time.Unix(1, 0).UTC(),
+						State:     kubecontainer.ContainerStateRunning,
+					},
+				},
+			},
+			previousStatus: v1.PodStatus{
+				Phase:   v1.PodPending,
+				Reason:  "Test",
+				Message: "test",
+				ContainerStatuses: []v1.ContainerStatus{
+					waitingState("containerA"),
+					runningState("containerB"),
+				},
+			},
+			expected: v1.PodStatus{
+				Phase:    v1.PodPending,
+				Reason:   "Test",
+				Message:  "test",
+				HostIP:   "127.0.0.1",
+				QOSClass: v1.PodQOSBestEffort,
+				Conditions: []v1.PodCondition{
+					{Type: v1.PodInitialized, Status: v1.ConditionTrue},
+					{Type: v1.PodReady, Status: v1.ConditionTrue},
+					{Type: v1.ContainersReady, Status: v1.ConditionTrue},
+					{Type: v1.PodScheduled, Status: v1.ConditionTrue},
+				},
+				ContainerStatuses: []v1.ContainerStatus{
+					ready(waitingStateWithReason("containerA", "ContainerCreating")),
+					ready(withID(runningStateWithStartedAt("containerB", time.Unix(1, 0).UTC()), "://foo")),
+				},
+			},
+		},
+		{
+			name: "reason and message are cleared when phase changes",
+			pod: &v1.Pod{
+				Spec: desiredState,
+				Status: v1.PodStatus{
+					Phase: v1.PodPending,
+					ContainerStatuses: []v1.ContainerStatus{
+						waitingState("containerA"),
+						waitingState("containerB"),
+					},
+				},
+			},
+			currentStatus: &kubecontainer.PodStatus{
+				ContainerStatuses: []*kubecontainer.Status{
+					{
+						ID:        kubecontainer.ContainerID{ID: "c1"},
+						Name:      "containerA",
+						StartedAt: time.Unix(1, 0).UTC(),
+						State:     kubecontainer.ContainerStateRunning,
+					},
+					{
+						ID:        kubecontainer.ContainerID{ID: "c2"},
+						Name:      "containerB",
+						StartedAt: time.Unix(2, 0).UTC(),
+						State:     kubecontainer.ContainerStateRunning,
+					},
+				},
+			},
+			previousStatus: v1.PodStatus{
+				Phase:   v1.PodPending,
+				Reason:  "Test",
+				Message: "test",
+				ContainerStatuses: []v1.ContainerStatus{
+					runningState("containerA"),
+					runningState("containerB"),
+				},
+			},
+			expected: v1.PodStatus{
+				Phase:    v1.PodRunning,
+				HostIP:   "127.0.0.1",
+				QOSClass: v1.PodQOSBestEffort,
+				Conditions: []v1.PodCondition{
+					{Type: v1.PodInitialized, Status: v1.ConditionTrue},
+					{Type: v1.PodReady, Status: v1.ConditionTrue},
+					{Type: v1.ContainersReady, Status: v1.ConditionTrue},
+					{Type: v1.PodScheduled, Status: v1.ConditionTrue},
+				},
+				ContainerStatuses: []v1.ContainerStatus{
+					ready(withID(runningStateWithStartedAt("containerA", time.Unix(1, 0).UTC()), "://c1")),
+					ready(withID(runningStateWithStartedAt("containerB", time.Unix(2, 0).UTC()), "://c2")),
+				},
+			},
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
+			defer testKubelet.Cleanup()
+			kl := testKubelet.kubelet
+			kl.statusManager.SetPodStatus(test.pod, test.previousStatus)
+			for _, name := range test.unreadyContainer {
+				kl.readinessManager.Set(kubecontainer.BuildContainerID("", findContainerStatusByName(test.expected, name).ContainerID), results.Failure, test.pod)
+			}
+			actual := kl.generateAPIPodStatus(test.pod, test.currentStatus)
+			if !apiequality.Semantic.DeepEqual(test.expected, actual) {
+				t.Fatalf("Unexpected status: %s", diff.ObjectReflectDiff(actual, test.expected))
+			}
+		})
+	}
+}
+
+func findContainerStatusByName(status v1.PodStatus, name string) *v1.ContainerStatus {
+	for i, c := range status.InitContainerStatuses {
+		if c.Name == name {
+			return &status.InitContainerStatuses[i]
+		}
+	}
+	for i, c := range status.ContainerStatuses {
+		if c.Name == name {
+			return &status.ContainerStatuses[i]
+		}
+	}
+	for i, c := range status.EphemeralContainerStatuses {
+		if c.Name == name {
+			return &status.EphemeralContainerStatuses[i]
+		}
+	}
+	return nil
+}
+
 func TestGetExec(t *testing.T) {
 	const (
 		podName = "podFoo"