From 101739a6bee61b0a09edfad7a27fbf1f497b466b Mon Sep 17 00:00:00 2001
From: Anastasis Andronidis
Date: Tue, 9 Jun 2015 17:58:16 +0200
Subject: [PATCH] Added Reason in PodStatus

---
 api/swagger-spec/v1.json                |  4 +++
 api/swagger-spec/v1beta3.json           |  4 +++
 pkg/api/deep_copy_generated.go          |  1 +
 pkg/api/types.go                        |  2 ++
 pkg/api/v1/conversion_generated.go      |  2 ++
 pkg/api/v1/deep_copy_generated.go       |  1 +
 pkg/api/v1/types.go                     |  2 ++
 pkg/api/v1beta3/conversion_generated.go |  2 ++
 pkg/api/v1beta3/deep_copy_generated.go  |  1 +
 pkg/api/v1beta3/types.go                |  2 ++
 pkg/kubectl/resource_printer.go         |  4 +++
 pkg/kubectl/resource_printer_test.go    | 26 +++++++++++++++----
 pkg/kubelet/kubelet.go                  | 33 ++++++++++++++++---------
 13 files changed, 68 insertions(+), 16 deletions(-)

diff --git a/api/swagger-spec/v1.json b/api/swagger-spec/v1.json
index 35391f1bf7b..39081e2b52d 100644
--- a/api/swagger-spec/v1.json
+++ b/api/swagger-spec/v1.json
@@ -12663,6 +12663,10 @@
      "type": "string",
      "description": "human readable message indicating details about why the pod is in this condition"
     },
+    "reason": {
+     "type": "string",
+     "description": "(brief-CamelCase) reason indicating details about why the pod is in this condition"
+    },
     "hostIP": {
      "type": "string",
      "description": "IP address of the host to which the pod is assigned; empty if not yet scheduled"
diff --git a/api/swagger-spec/v1beta3.json b/api/swagger-spec/v1beta3.json
index 42c02827882..5c43aef8c10 100644
--- a/api/swagger-spec/v1beta3.json
+++ b/api/swagger-spec/v1beta3.json
@@ -12665,6 +12665,10 @@
      "type": "string",
      "description": "human readable message indicating details about why the pod is in this condition"
     },
+    "reason": {
+     "type": "string",
+     "description": "(brief-CamelCase) reason indicating details about why the pod is in this condition"
+    },
     "hostIP": {
      "type": "string",
      "description": "IP address of the host to which the pod is assigned; empty if not yet scheduled"
diff --git a/pkg/api/deep_copy_generated.go b/pkg/api/deep_copy_generated.go
index 1d45137a1de..26fba10dfd2 100644
--- a/pkg/api/deep_copy_generated.go
+++ b/pkg/api/deep_copy_generated.go
@@ -1407,6 +1407,7 @@ func deepCopy_api_PodStatus(in PodStatus, out *PodStatus, c *conversion.Cloner)
 		out.Conditions = nil
 	}
 	out.Message = in.Message
+	out.Reason = in.Reason
 	out.HostIP = in.HostIP
 	out.PodIP = in.PodIP
 	if in.StartTime != nil {
diff --git a/pkg/api/types.go b/pkg/api/types.go
index 81e4c897217..da9f5982c36 100644
--- a/pkg/api/types.go
+++ b/pkg/api/types.go
@@ -911,6 +911,8 @@ type PodStatus struct {
 	Conditions []PodCondition `json:"conditions,omitempty"`
 	// A human readable message indicating details about why the pod is in this state.
 	Message string `json:"message,omitempty"`
+	// A brief CamelCase message indicating details about why the pod is in this state. e.g. 'OutOfDisk'
+	Reason string `json:"reason,omitempty" description:"(brief-CamelCase) reason indicating details about why the pod is in this condition"`
 
 	HostIP string `json:"hostIP,omitempty"`
 	PodIP  string `json:"podIP,omitempty"`
diff --git a/pkg/api/v1/conversion_generated.go b/pkg/api/v1/conversion_generated.go
index f97d15032ea..8c07d4e26ab 100644
--- a/pkg/api/v1/conversion_generated.go
+++ b/pkg/api/v1/conversion_generated.go
@@ -1549,6 +1549,7 @@ func convert_api_PodStatus_To_v1_PodStatus(in *api.PodStatus, out *PodStatus, s
 		out.Conditions = nil
 	}
 	out.Message = in.Message
+	out.Reason = in.Reason
 	out.HostIP = in.HostIP
 	out.PodIP = in.PodIP
 	if in.StartTime != nil {
@@ -3859,6 +3860,7 @@ func convert_v1_PodStatus_To_api_PodStatus(in *PodStatus, out *api.PodStatus, s
 		out.Conditions = nil
 	}
 	out.Message = in.Message
+	out.Reason = in.Reason
 	out.HostIP = in.HostIP
 	out.PodIP = in.PodIP
 	if in.StartTime != nil {
diff --git a/pkg/api/v1/deep_copy_generated.go b/pkg/api/v1/deep_copy_generated.go
index 8160172a712..c5b4c5a52ad 100644
--- a/pkg/api/v1/deep_copy_generated.go
+++ b/pkg/api/v1/deep_copy_generated.go
@@ -1338,6 +1338,7 @@ func deepCopy_v1_PodStatus(in PodStatus, out *PodStatus, c *conversion.Cloner) e
 		out.Conditions = nil
 	}
 	out.Message = in.Message
+	out.Reason = in.Reason
 	out.HostIP = in.HostIP
 	out.PodIP = in.PodIP
 	if in.StartTime != nil {
diff --git a/pkg/api/v1/types.go b/pkg/api/v1/types.go
index e39456d7054..a44781d514d 100644
--- a/pkg/api/v1/types.go
+++ b/pkg/api/v1/types.go
@@ -913,6 +913,8 @@ type PodStatus struct {
 	Conditions []PodCondition `json:"conditions,omitempty" description:"current service state of pod" patchStrategy:"merge" patchMergeKey:"type"`
 	// A human readable message indicating details about why the pod is in this state.
 	Message string `json:"message,omitempty" description:"human readable message indicating details about why the pod is in this condition"`
+	// A brief CamelCase message indicating details about why the pod is in this state. e.g. 'OutOfDisk'
+	Reason string `json:"reason,omitempty" description:"(brief-CamelCase) reason indicating details about why the pod is in this condition"`
 
 	HostIP string `json:"hostIP,omitempty" description:"IP address of the host to which the pod is assigned; empty if not yet scheduled"`
 	PodIP  string `json:"podIP,omitempty" description:"IP address allocated to the pod; routable at least within the cluster; empty if not yet allocated"`
diff --git a/pkg/api/v1beta3/conversion_generated.go b/pkg/api/v1beta3/conversion_generated.go
index 4bd9fbc59bb..73be8ca7618 100644
--- a/pkg/api/v1beta3/conversion_generated.go
+++ b/pkg/api/v1beta3/conversion_generated.go
@@ -1345,6 +1345,7 @@ func convert_api_PodStatus_To_v1beta3_PodStatus(in *api.PodStatus, out *PodStatu
 		out.Conditions = nil
 	}
 	out.Message = in.Message
+	out.Reason = in.Reason
 	out.HostIP = in.HostIP
 	out.PodIP = in.PodIP
 	if in.StartTime != nil {
@@ -3409,6 +3410,7 @@ func convert_v1beta3_PodStatus_To_api_PodStatus(in *PodStatus, out *api.PodStatu
 		out.Conditions = nil
 	}
 	out.Message = in.Message
+	out.Reason = in.Reason
 	out.HostIP = in.HostIP
 	out.PodIP = in.PodIP
 	if in.StartTime != nil {
diff --git a/pkg/api/v1beta3/deep_copy_generated.go b/pkg/api/v1beta3/deep_copy_generated.go
index 1394bfa061e..ef17355939f 100644
--- a/pkg/api/v1beta3/deep_copy_generated.go
+++ b/pkg/api/v1beta3/deep_copy_generated.go
@@ -1342,6 +1342,7 @@ func deepCopy_v1beta3_PodStatus(in PodStatus, out *PodStatus, c *conversion.Clon
 		out.Conditions = nil
 	}
 	out.Message = in.Message
+	out.Reason = in.Reason
 	out.HostIP = in.HostIP
 	out.PodIP = in.PodIP
 	if in.StartTime != nil {
diff --git a/pkg/api/v1beta3/types.go b/pkg/api/v1beta3/types.go
index 63323dad7df..3364bc30d1a 100644
--- a/pkg/api/v1beta3/types.go
+++ b/pkg/api/v1beta3/types.go
@@ -917,6 +917,8 @@ type PodStatus struct {
 	Conditions []PodCondition `json:"Condition,omitempty" description:"current service state of pod" patchStrategy:"merge" patchMergeKey:"type"`
 	// A human readable message indicating details about why the pod is in this state.
 	Message string `json:"message,omitempty" description:"human readable message indicating details about why the pod is in this condition"`
+	// A brief CamelCase message indicating details about why the pod is in this state. e.g. 'OutOfDisk'
+	Reason string `json:"reason,omitempty" description:"(brief-CamelCase) reason indicating details about why the pod is in this condition"`
 
 	HostIP string `json:"hostIP,omitempty" description:"IP address of the host to which the pod is assigned; empty if not yet scheduled"`
 	PodIP  string `json:"podIP,omitempty" description:"IP address allocated to the pod; routable at least within the cluster; empty if not yet allocated"`
diff --git a/pkg/kubectl/resource_printer.go b/pkg/kubectl/resource_printer.go
index 03af00ab402..c51c70981d8 100644
--- a/pkg/kubectl/resource_printer.go
+++ b/pkg/kubectl/resource_printer.go
@@ -377,7 +377,11 @@ func printPod(pod *api.Pod, w io.Writer, withNamespace bool) error {
 	restarts := 0
 	totalContainers := len(pod.Spec.Containers)
 	readyContainers := 0
+
 	reason := string(pod.Status.Phase)
+	if pod.Status.Reason != "" {
+		reason = pod.Status.Reason
+	}
 
 	for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- {
 		container := pod.Status.ContainerStatuses[i]
diff --git a/pkg/kubectl/resource_printer_test.go b/pkg/kubectl/resource_printer_test.go
index 0c89ab5d558..07f8186fdbb 100644
--- a/pkg/kubectl/resource_printer_test.go
+++ b/pkg/kubectl/resource_printer_test.go
@@ -979,11 +979,11 @@ func TestPrintPod(t *testing.T) {
 					Phase: "podPhase",
 					ContainerStatuses: []api.ContainerStatus{
 						{Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}},
-						{State: api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "containerWaitingReason"}}, RestartCount: 3},
+						{State: api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "ContainerWaitingReason"}}, RestartCount: 3},
 					},
 				},
 			},
-			"test2\t1/2\tcontainerWaitingReason\t6\t",
+			"test2\t1/2\tContainerWaitingReason\t6\t",
 		},
 		{
 			// Test the same as the above but with Terminated state and the first container overwrites the rest
@@ -993,12 +993,12 @@ func TestPrintPod(t *testing.T) {
 				Status: api.PodStatus{
 					Phase: "podPhase",
 					ContainerStatuses: []api.ContainerStatus{
-						{State: api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "containerWaitingReason"}}, RestartCount: 3},
-						{State: api.ContainerState{Terminated: &api.ContainerStateTerminated{Reason: "containerTerminatedReason"}}, RestartCount: 3},
+						{State: api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "ContainerWaitingReason"}}, RestartCount: 3},
+						{State: api.ContainerState{Terminated: &api.ContainerStateTerminated{Reason: "ContainerTerminatedReason"}}, RestartCount: 3},
 					},
 				},
 			},
-			"test3\t0/2\tcontainerWaitingReason\t6\t",
+			"test3\t0/2\tContainerWaitingReason\t6\t",
 		},
 		{
 			// Test ready is not enough for reporting running
@@ -1015,6 +1015,22 @@ func TestPrintPod(t *testing.T) {
 			},
 			"test4\t1/2\tpodPhase\t6\t",
 		},
+		{
+			// Test that a pod-level Reason (e.g. OutOfDisk) overrides the pod phase in the status column
+			api.Pod{
+				ObjectMeta: api.ObjectMeta{Name: "test5"},
+				Spec:       api.PodSpec{Containers: make([]api.Container, 2)},
+				Status: api.PodStatus{
+					Reason: "OutOfDisk",
+					Phase:  "podPhase",
+					ContainerStatuses: []api.ContainerStatus{
+						{Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}},
+						{Ready: true, RestartCount: 3},
+					},
+				},
+			},
+			"test5\t1/2\tOutOfDisk\t6\t",
+		},
 	}
 
 	buf := bytes.NewBuffer([]byte{})
diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go
index 4527af33a88..5aa2dec1ca4 100644
--- a/pkg/kubelet/kubelet.go
+++ b/pkg/kubelet/kubelet.go
@@ -1584,9 +1584,11 @@ func (kl *Kubelet) handleOutOfDisk(pods []*api.Pod, podSyncTypes map[types.UID]S
 		pod := pods[i]
 		// Only reject pods that didn't start yet.
 		if podSyncTypes[pod.UID] == SyncPodCreate {
-			kl.recorder.Eventf(pod, "OutOfDisk", "Cannot start the pod due to lack of disk space.")
+			reason := "OutOfDisk"
+			kl.recorder.Eventf(pod, reason, "Cannot start the pod due to lack of disk space.")
 			kl.statusManager.SetPodStatus(pod, api.PodStatus{
 				Phase:   api.PodFailed,
+				Reason:  reason,
 				Message: "Pod cannot be started due to lack of disk space."})
 			continue
 		}
@@ -1621,23 +1623,29 @@ func (kl *Kubelet) checkNodeSelectorMatching(pods []*api.Pod) (fitting []*api.Po
 func (kl *Kubelet) handleNotFittingPods(pods []*api.Pod) []*api.Pod {
 	fitting, notFitting := checkHostPortConflicts(pods)
 	for _, pod := range notFitting {
-		kl.recorder.Eventf(pod, "hostPortConflict", "Cannot start the pod due to host port conflict.")
+		reason := "HostPortConflict"
+		kl.recorder.Eventf(pod, reason, "Cannot start the pod due to host port conflict.")
 		kl.statusManager.SetPodStatus(pod, api.PodStatus{
 			Phase:   api.PodFailed,
+			Reason:  reason,
 			Message: "Pod cannot be started due to host port conflict"})
 	}
 	fitting, notFitting = kl.checkNodeSelectorMatching(fitting)
 	for _, pod := range notFitting {
-		kl.recorder.Eventf(pod, "nodeSelectorMismatching", "Cannot start the pod due to node selector mismatch.")
+		reason := "NodeSelectorMismatching"
+		kl.recorder.Eventf(pod, reason, "Cannot start the pod due to node selector mismatch.")
 		kl.statusManager.SetPodStatus(pod, api.PodStatus{
 			Phase:   api.PodFailed,
+			Reason:  reason,
 			Message: "Pod cannot be started due to node selector mismatch"})
 	}
 	fitting, notFitting = kl.checkCapacityExceeded(fitting)
 	for _, pod := range notFitting {
-		kl.recorder.Eventf(pod, "capacityExceeded", "Cannot start the pod due to exceeded capacity.")
+		reason := "CapacityExceeded"
+		kl.recorder.Eventf(pod, reason, "Cannot start the pod due to exceeded capacity.")
 		kl.statusManager.SetPodStatus(pod, api.PodStatus{
 			Phase:   api.PodFailed,
+			Reason:  reason,
 			Message: "Pod cannot be started due to exceeded capacity"})
 	}
 	return fitting
@@ -2138,9 +2146,11 @@ func (kl *Kubelet) generatePodStatus(pod *api.Pod) (api.PodStatus, error) {
 
 	// TODO: Consider include the container information.
 	if kl.pastActiveDeadline(pod) {
-		kl.recorder.Eventf(pod, "deadline", "Pod was active on the node longer than specified deadline")
+		reason := "DeadlineExceeded"
+		kl.recorder.Eventf(pod, reason, "Pod was active on the node longer than specified deadline")
 		return api.PodStatus{
 			Phase:   api.PodFailed,
+			Reason:  reason,
 			Message: "Pod was active on the node longer than specified deadline"}, nil
 	}
 
@@ -2153,13 +2163,14 @@ func (kl *Kubelet) generatePodStatus(pod *api.Pod) (api.PodStatus, error) {
 		if strings.Contains(err.Error(), "resource temporarily unavailable") {
 			// Leave upstream layer to decide what to do
 			return api.PodStatus{}, err
-		} else {
-			pendingStatus := api.PodStatus{
-				Phase:   api.PodPending,
-				Message: fmt.Sprintf("Query container info failed with error (%v)", err),
-			}
-			return pendingStatus, nil
 		}
+
+		pendingStatus := api.PodStatus{
+			Phase:   api.PodPending,
+			Reason:  "GeneralError",
+			Message: fmt.Sprintf("Query container info failed with error (%v)", err),
+		}
+		return pendingStatus, nil
 	}
 
 	// Assume info is ready to process
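
Usage sketch (not part of the patch above): the resource_printer.go change means a non-empty PodStatus.Reason, such as "OutOfDisk", takes precedence over the pod phase in kubectl's STATUS column. The standalone Go snippet below only mirrors that precedence rule with local, illustrative types; it does not import the real Kubernetes packages.

package main

import "fmt"

// podStatus mirrors just the two fields this patch cares about.
type podStatus struct {
	Phase  string
	Reason string
}

// statusColumn reproduces the precedence added to printPod:
// a non-empty Reason overrides the phase.
func statusColumn(s podStatus) string {
	if s.Reason != "" {
		return s.Reason
	}
	return s.Phase
}

func main() {
	fmt.Println(statusColumn(podStatus{Phase: "Failed", Reason: "OutOfDisk"})) // prints "OutOfDisk"
	fmt.Println(statusColumn(podStatus{Phase: "Running"}))                     // prints "Running"
}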