Added Reason in PodStatus
commit 101739a6be
parent ff0546da4f
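
Adds a machine-readable Reason field (a brief CamelCase token such as 'OutOfDisk') alongside the free-form Message in PodStatus, across the internal API types, the v1 and v1beta3 types, the JSON API specs, and the generated deep-copy and conversion functions. kubectl's printPod now prefers Status.Reason over the bare phase when one is set, and the kubelet fills the field in with the same reason it records as an event: OutOfDisk, HostPortConflict, NodeSelectorMismatching, CapacityExceeded, DeadlineExceeded, and GeneralError.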
@@ -12663,6 +12663,10 @@
    "type": "string",
    "description": "human readable message indicating details about why the pod is in this condition"
   },
+  "reason": {
+   "type": "string",
+   "description": "(brief-CamelCase) reason indicating details about why the pod is in this condition"
+  },
   "hostIP": {
    "type": "string",
    "description": "IP address of the host to which the pod is assigned; empty if not yet scheduled"

@@ -12665,6 +12665,10 @@
    "type": "string",
    "description": "human readable message indicating details about why the pod is in this condition"
   },
+  "reason": {
+   "type": "string",
+   "description": "(brief-CamelCase) reason indicating details about why the pod is in this condition"
+  },
   "hostIP": {
    "type": "string",
    "description": "IP address of the host to which the pod is assigned; empty if not yet scheduled"

@@ -1407,6 +1407,7 @@ func deepCopy_api_PodStatus(in PodStatus, out *PodStatus, c *conversion.Cloner)
         out.Conditions = nil
     }
     out.Message = in.Message
+    out.Reason = in.Reason
     out.HostIP = in.HostIP
     out.PodIP = in.PodIP
     if in.StartTime != nil {

@@ -911,6 +911,8 @@ type PodStatus struct {
     Conditions []PodCondition `json:"conditions,omitempty"`
     // A human readable message indicating details about why the pod is in this state.
     Message string `json:"message,omitempty"`
+    // A brief CamelCase message indicating details about why the pod is in this state. e.g. 'OutOfDisk'
+    Reason string `json:"reason,omitempty" description:"(brief-CamelCase) reason indicating details about why the pod is in this condition"`
 
     HostIP string `json:"hostIP,omitempty"`
     PodIP string `json:"podIP,omitempty"`

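For illustration only (not part of the commit): because the new field carries json:"reason,omitempty", a serialized PodStatus is unchanged whenever Reason is unset, so existing clients keep working. A minimal sketch under that assumption, using a trimmed stand-in struct rather than the real api package:

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed stand-in for api.PodStatus, keeping only the fields relevant here.
type PodStatus struct {
	Message string `json:"message,omitempty"`
	Reason  string `json:"reason,omitempty"`
	HostIP  string `json:"hostIP,omitempty"`
}

func main() {
	// An unset Reason is omitted entirely, so pre-existing payloads
	// round-trip unchanged.
	old, _ := json.Marshal(PodStatus{Message: "pulling image"})
	fmt.Println(string(old)) // {"message":"pulling image"}

	// When set, Reason appears as a brief CamelCase token next to Message.
	cur, _ := json.Marshal(PodStatus{
		Reason:  "OutOfDisk",
		Message: "Pod cannot be started due to lack of disk space.",
	})
	fmt.Println(string(cur))
}
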
@@ -1549,6 +1549,7 @@ func convert_api_PodStatus_To_v1_PodStatus(in *api.PodStatus, out *PodStatus, s
         out.Conditions = nil
     }
     out.Message = in.Message
+    out.Reason = in.Reason
     out.HostIP = in.HostIP
     out.PodIP = in.PodIP
     if in.StartTime != nil {

@@ -3859,6 +3860,7 @@ func convert_v1_PodStatus_To_api_PodStatus(in *PodStatus, out *api.PodStatus, s
         out.Conditions = nil
     }
     out.Message = in.Message
+    out.Reason = in.Reason
     out.HostIP = in.HostIP
     out.PodIP = in.PodIP
     if in.StartTime != nil {

@@ -1338,6 +1338,7 @@ func deepCopy_v1_PodStatus(in PodStatus, out *PodStatus, c *conversion.Cloner) e
         out.Conditions = nil
     }
     out.Message = in.Message
+    out.Reason = in.Reason
     out.HostIP = in.HostIP
     out.PodIP = in.PodIP
     if in.StartTime != nil {

@@ -913,6 +913,8 @@ type PodStatus struct {
     Conditions []PodCondition `json:"conditions,omitempty" description:"current service state of pod" patchStrategy:"merge" patchMergeKey:"type"`
     // A human readable message indicating details about why the pod is in this state.
     Message string `json:"message,omitempty" description:"human readable message indicating details about why the pod is in this condition"`
+    // A brief CamelCase message indicating details about why the pod is in this state. e.g. 'OutOfDisk'
+    Reason string `json:"reason,omitempty" description:"(brief-CamelCase) reason indicating details about why the pod is in this condition"`
 
     HostIP string `json:"hostIP,omitempty" description:"IP address of the host to which the pod is assigned; empty if not yet scheduled"`
     PodIP string `json:"podIP,omitempty" description:"IP address allocated to the pod; routable at least within the cluster; empty if not yet allocated"`

@@ -1345,6 +1345,7 @@ func convert_api_PodStatus_To_v1beta3_PodStatus(in *api.PodStatus, out *PodStatu
         out.Conditions = nil
     }
     out.Message = in.Message
+    out.Reason = in.Reason
     out.HostIP = in.HostIP
     out.PodIP = in.PodIP
     if in.StartTime != nil {

@@ -3409,6 +3410,7 @@ func convert_v1beta3_PodStatus_To_api_PodStatu
         out.Conditions = nil
     }
     out.Message = in.Message
+    out.Reason = in.Reason
     out.HostIP = in.HostIP
     out.PodIP = in.PodIP
     if in.StartTime != nil {

@@ -1342,6 +1342,7 @@ func deepCopy_v1beta3_PodStatus(in PodStatus, out *PodStatus, c *conversion.Clon
         out.Conditions = nil
     }
     out.Message = in.Message
+    out.Reason = in.Reason
     out.HostIP = in.HostIP
     out.PodIP = in.PodIP
     if in.StartTime != nil {

@@ -917,6 +917,8 @@ type PodStatus struct {
     Conditions []PodCondition `json:"Condition,omitempty" description:"current service state of pod" patchStrategy:"merge" patchMergeKey:"type"`
     // A human readable message indicating details about why the pod is in this state.
     Message string `json:"message,omitempty" description:"human readable message indicating details about why the pod is in this condition"`
+    // A brief CamelCase message indicating details about why the pod is in this state. e.g. 'OutOfDisk'
+    Reason string `json:"reason,omitempty" description:"(brief-CamelCase) reason indicating details about why the pod is in this condition"`
 
     HostIP string `json:"hostIP,omitempty" description:"IP address of the host to which the pod is assigned; empty if not yet scheduled"`
     PodIP string `json:"podIP,omitempty" description:"IP address allocated to the pod; routable at least within the cluster; empty if not yet allocated"`

@@ -377,7 +377,11 @@ func printPod(pod *api.Pod, w io.Writer, withNamespace bool) error {
     restarts := 0
     totalContainers := len(pod.Spec.Containers)
     readyContainers := 0
 
     reason := string(pod.Status.Phase)
+    if pod.Status.Reason != "" {
+        reason = pod.Status.Reason
+    }
+
     for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- {
         container := pod.Status.ContainerStatuses[i]

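For illustration only (stand-in types, not the real api package): the lines added to printPod make a non-empty pod-level Reason take precedence over the phase string in kubectl's STATUS column. A minimal runnable sketch of that precedence:

package main

import "fmt"

// Stand-in for api.PodStatus, reduced to the two fields printPod consults here.
type PodStatus struct {
	Phase  string
	Reason string
}

// displayReason mirrors the lines added to printPod above: start from the
// phase and let a non-empty pod-level Reason take precedence. (In the real
// function, container waiting/terminated reasons can override it further,
// as the TestPrintPod cases below exercise.)
func displayReason(s PodStatus) string {
	reason := s.Phase
	if s.Reason != "" {
		reason = s.Reason
	}
	return reason
}

func main() {
	fmt.Println(displayReason(PodStatus{Phase: "Failed"}))                      // Failed
	fmt.Println(displayReason(PodStatus{Phase: "Failed", Reason: "OutOfDisk"})) // OutOfDisk
}
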
@@ -979,11 +979,11 @@ func TestPrintPod(t *testing.T) {
                 Phase: "podPhase",
                 ContainerStatuses: []api.ContainerStatus{
                     {Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}},
-                    {State: api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "containerWaitingReason"}}, RestartCount: 3},
+                    {State: api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "ContainerWaitingReason"}}, RestartCount: 3},
                 },
             },
         },
-        "test2\t1/2\tcontainerWaitingReason\t6\t",
+        "test2\t1/2\tContainerWaitingReason\t6\t",
     },
     {
         // Test the same as the above but with Terminated state and the first container overwrites the rest

@@ -993,12 +993,12 @@ func TestPrintPod(t *testing.T) {
             Status: api.PodStatus{
                 Phase: "podPhase",
                 ContainerStatuses: []api.ContainerStatus{
-                    {State: api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "containerWaitingReason"}}, RestartCount: 3},
-                    {State: api.ContainerState{Terminated: &api.ContainerStateTerminated{Reason: "containerTerminatedReason"}}, RestartCount: 3},
+                    {State: api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "ContainerWaitingReason"}}, RestartCount: 3},
+                    {State: api.ContainerState{Terminated: &api.ContainerStateTerminated{Reason: "ContainerTerminatedReason"}}, RestartCount: 3},
                 },
             },
         },
-        "test3\t0/2\tcontainerWaitingReason\t6\t",
+        "test3\t0/2\tContainerWaitingReason\t6\t",
     },
     {
         // Test ready is not enough for reporting running

@@ -1015,6 +1015,22 @@ func TestPrintPod(t *testing.T) {
         },
         "test4\t1/2\tpodPhase\t6\t",
     },
+    {
+        // Test ready is not enough for reporting running
+        api.Pod{
+            ObjectMeta: api.ObjectMeta{Name: "test5"},
+            Spec: api.PodSpec{Containers: make([]api.Container, 2)},
+            Status: api.PodStatus{
+                Reason: "OutOfDisk",
+                Phase: "podPhase",
+                ContainerStatuses: []api.ContainerStatus{
+                    {Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}},
+                    {Ready: true, RestartCount: 3},
+                },
+            },
+        },
+        "test5\t1/2\tOutOfDisk\t6\t",
+    },
 }
 
 buf := bytes.NewBuffer([]byte{})

@@ -1584,9 +1584,11 @@ func (kl *Kubelet) handleOutOfDisk(pods []*api.Pod, podSyncTypes map[types.UID]S
         pod := pods[i]
         // Only reject pods that didn't start yet.
         if podSyncTypes[pod.UID] == SyncPodCreate {
-            kl.recorder.Eventf(pod, "OutOfDisk", "Cannot start the pod due to lack of disk space.")
+            reason := "OutOfDisk"
+            kl.recorder.Eventf(pod, reason, "Cannot start the pod due to lack of disk space.")
             kl.statusManager.SetPodStatus(pod, api.PodStatus{
                 Phase: api.PodFailed,
+                Reason: reason,
                 Message: "Pod cannot be started due to lack of disk space."})
             continue
         }

@@ -1621,23 +1623,29 @@ func (kl *Kubelet) checkNodeSelectorMatching(pods []*api.Pod) (fitting []*api.Po
 func (kl *Kubelet) handleNotFittingPods(pods []*api.Pod) []*api.Pod {
     fitting, notFitting := checkHostPortConflicts(pods)
     for _, pod := range notFitting {
-        kl.recorder.Eventf(pod, "hostPortConflict", "Cannot start the pod due to host port conflict.")
+        reason := "HostPortConflict"
+        kl.recorder.Eventf(pod, reason, "Cannot start the pod due to host port conflict.")
         kl.statusManager.SetPodStatus(pod, api.PodStatus{
             Phase: api.PodFailed,
+            Reason: reason,
             Message: "Pod cannot be started due to host port conflict"})
     }
     fitting, notFitting = kl.checkNodeSelectorMatching(fitting)
     for _, pod := range notFitting {
-        kl.recorder.Eventf(pod, "nodeSelectorMismatching", "Cannot start the pod due to node selector mismatch.")
+        reason := "NodeSelectorMismatching"
+        kl.recorder.Eventf(pod, reason, "Cannot start the pod due to node selector mismatch.")
         kl.statusManager.SetPodStatus(pod, api.PodStatus{
             Phase: api.PodFailed,
+            Reason: reason,
             Message: "Pod cannot be started due to node selector mismatch"})
     }
     fitting, notFitting = kl.checkCapacityExceeded(fitting)
     for _, pod := range notFitting {
-        kl.recorder.Eventf(pod, "capacityExceeded", "Cannot start the pod due to exceeded capacity.")
+        reason := "CapacityExceeded"
+        kl.recorder.Eventf(pod, reason, "Cannot start the pod due to exceeded capacity.")
         kl.statusManager.SetPodStatus(pod, api.PodStatus{
             Phase: api.PodFailed,
+            Reason: reason,
             Message: "Pod cannot be started due to exceeded capacity"})
     }
     return fitting

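The refactor above applies one pattern three times: hoist the CamelCase reason into a local so the recorded event and the failed PodStatus can never drift apart. A minimal sketch of that pattern, where recordEvent and setStatus are hypothetical stand-ins for kl.recorder.Eventf and kl.statusManager.SetPodStatus:

package main

import "fmt"

type PodStatus struct {
	Phase   string
	Reason  string
	Message string
}

// recordEvent and setStatus are hypothetical stand-ins for
// kl.recorder.Eventf and kl.statusManager.SetPodStatus.
func recordEvent(reason, message string) { fmt.Printf("event:  %s: %s\n", reason, message) }
func setStatus(s PodStatus)              { fmt.Printf("status: %+v\n", s) }

// rejectPod shows the shape of each loop body in handleNotFittingPods:
// one reason string feeds both the event stream and the pod's status.
func rejectPod(reason, message string) {
	recordEvent(reason, message)
	setStatus(PodStatus{Phase: "Failed", Reason: reason, Message: message})
}

func main() {
	rejectPod("HostPortConflict", "Cannot start the pod due to host port conflict.")
	rejectPod("NodeSelectorMismatching", "Cannot start the pod due to node selector mismatch.")
	rejectPod("CapacityExceeded", "Cannot start the pod due to exceeded capacity.")
}
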
@@ -2138,9 +2146,11 @@ func (kl *Kubelet) generatePodStatus(pod *api.Pod) (api.PodStatus, error) {
 
     // TODO: Consider include the container information.
     if kl.pastActiveDeadline(pod) {
-        kl.recorder.Eventf(pod, "deadline", "Pod was active on the node longer than specified deadline")
+        reason := "DeadlineExceeded"
+        kl.recorder.Eventf(pod, reason, "Pod was active on the node longer than specified deadline")
         return api.PodStatus{
             Phase: api.PodFailed,
+            Reason: reason,
             Message: "Pod was active on the node longer than specified deadline"}, nil
     }
 

@@ -2153,13 +2163,14 @@ func (kl *Kubelet) generatePodStatus(pod *api.Pod) (api.PodStatus, error) {
         if strings.Contains(err.Error(), "resource temporarily unavailable") {
             // Leave upstream layer to decide what to do
             return api.PodStatus{}, err
-        } else {
-            pendingStatus := api.PodStatus{
-                Phase: api.PodPending,
-                Message: fmt.Sprintf("Query container info failed with error (%v)", err),
-            }
-            return pendingStatus, nil
         }
+
+        pendingStatus := api.PodStatus{
+            Phase: api.PodPending,
+            Reason: "GeneralError",
+            Message: fmt.Sprintf("Query container info failed with error (%v)", err),
+        }
+        return pendingStatus, nil
     }
 
     // Assume info is ready to process

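Taken together, the two generatePodStatus hunks give each early-exit path a Reason: DeadlineExceeded for pods past their active deadline (PodFailed), and GeneralError when the container-info query fails for anything other than a temporary resource shortage (PodPending). A condensed sketch of that control flow, with stand-in types; generateStatus, pastDeadline, and queryErr are illustrative names, and the final "Running" return is a placeholder for logic not shown in this diff:

package main

import (
	"errors"
	"fmt"
	"strings"
)

type PodStatus struct {
	Phase   string
	Reason  string
	Message string
}

// generateStatus condenses the error handling shown above. pastDeadline
// stands in for kl.pastActiveDeadline(pod); queryErr for the result of
// querying container info.
func generateStatus(pastDeadline bool, queryErr error) (PodStatus, error) {
	if pastDeadline {
		return PodStatus{
			Phase:   "Failed",
			Reason:  "DeadlineExceeded",
			Message: "Pod was active on the node longer than specified deadline",
		}, nil
	}
	if queryErr != nil {
		if strings.Contains(queryErr.Error(), "resource temporarily unavailable") {
			// Leave upstream layer to decide what to do.
			return PodStatus{}, queryErr
		}
		return PodStatus{
			Phase:   "Pending",
			Reason:  "GeneralError",
			Message: fmt.Sprintf("Query container info failed with error (%v)", queryErr),
		}, nil
	}
	// Happy path elided in the diff; placeholder only.
	return PodStatus{Phase: "Running"}, nil
}

func main() {
	s, _ := generateStatus(true, nil)
	fmt.Println(s.Reason) // DeadlineExceeded
	s, _ = generateStatus(false, errors.New("no such container"))
	fmt.Println(s.Reason) // GeneralError
}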