diff --git a/api/swagger-spec/v1.json b/api/swagger-spec/v1.json
index 2a74f5eda0c..574fab038d0 100644
--- a/api/swagger-spec/v1.json
+++ b/api/swagger-spec/v1.json
@@ -13171,7 +13171,11 @@
       "properties": {
        "reason": {
         "type": "string",
-        "description": "(brief) reason the container is not yet running, such as pulling its image."
+        "description": "(brief) reason the container is not yet running."
+       },
+       "message": {
+        "type": "string",
+        "description": "Message regarding why the container is not yet running."
        }
       }
      },
diff --git a/pkg/api/deep_copy_generated.go b/pkg/api/deep_copy_generated.go
index b7d8f958a7a..c4fe1b3e8c4 100644
--- a/pkg/api/deep_copy_generated.go
+++ b/pkg/api/deep_copy_generated.go
@@ -302,6 +302,7 @@ func deepCopy_api_ContainerStateTerminated(in ContainerStateTerminated, out *Con
 
 func deepCopy_api_ContainerStateWaiting(in ContainerStateWaiting, out *ContainerStateWaiting, c *conversion.Cloner) error {
 	out.Reason = in.Reason
+	out.Message = in.Message
 	return nil
 }
 
diff --git a/pkg/api/types.go b/pkg/api/types.go
index cd36da099c3..34bb9029944 100644
--- a/pkg/api/types.go
+++ b/pkg/api/types.go
@@ -835,8 +835,10 @@ const (
 )
 
 type ContainerStateWaiting struct {
-	// Reason could be pulling image,
+	// A brief CamelCase string indicating details about why the container is in waiting state.
 	Reason string `json:"reason,omitempty"`
+	// A human-readable message indicating details about why the container is in waiting state.
+	Message string `json:"message,omitempty"`
 }
 
 type ContainerStateRunning struct {
diff --git a/pkg/api/v1/conversion_generated.go b/pkg/api/v1/conversion_generated.go
index 70a047e8695..2366753cd10 100644
--- a/pkg/api/v1/conversion_generated.go
+++ b/pkg/api/v1/conversion_generated.go
@@ -340,6 +340,7 @@ func convert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *api.Conta
 		defaulting.(func(*api.ContainerStateWaiting))(in)
 	}
 	out.Reason = in.Reason
+	out.Message = in.Message
 	return nil
 }
 
@@ -2742,6 +2743,7 @@ func convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in *Container
 		defaulting.(func(*ContainerStateWaiting))(in)
 	}
 	out.Reason = in.Reason
+	out.Message = in.Message
 	return nil
 }
 
diff --git a/pkg/api/v1/deep_copy_generated.go b/pkg/api/v1/deep_copy_generated.go
index e5db68e2478..81010a29753 100644
--- a/pkg/api/v1/deep_copy_generated.go
+++ b/pkg/api/v1/deep_copy_generated.go
@@ -317,6 +317,7 @@ func deepCopy_v1_ContainerStateTerminated(in ContainerStateTerminated, out *Cont
 
 func deepCopy_v1_ContainerStateWaiting(in ContainerStateWaiting, out *ContainerStateWaiting, c *conversion.Cloner) error {
 	out.Reason = in.Reason
+	out.Message = in.Message
 	return nil
 }
 
diff --git a/pkg/api/v1/types.go b/pkg/api/v1/types.go
index d70760b9e2a..9c475331592 100644
--- a/pkg/api/v1/types.go
+++ b/pkg/api/v1/types.go
@@ -1036,8 +1036,10 @@ const (
 
 // ContainerStateWaiting is a waiting state of a container.
 type ContainerStateWaiting struct {
-	// (brief) reason the container is not yet running, such as pulling its image.
+	// (brief) reason the container is not yet running.
 	Reason string `json:"reason,omitempty"`
+	// Message regarding why the container is not yet running.
+	Message string `json:"message,omitempty"`
 }
 
 // ContainerStateRunning is a running state of a container.
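The API change above replaces the single free-form `reason` with a machine-readable CamelCase `Reason` plus a human-readable `Message`. A minimal sketch of how a status consumer might use the split, assuming a trimmed-down local copy of the struct rather than the real `pkg/api` types (the `describeWaiting` helper and the reason strings it matches are illustrative only):

```go
package main

import "fmt"

// Trimmed-down local stand-in for the new ContainerStateWaiting shape,
// so the sketch runs without importing pkg/api.
type ContainerStateWaiting struct {
	Reason  string `json:"reason,omitempty"`
	Message string `json:"message,omitempty"`
}

// describeWaiting branches on the machine-readable Reason and falls
// back to the free-form Message for anything it does not recognize.
func describeWaiting(w ContainerStateWaiting) string {
	switch w.Reason {
	case "ContainerCreating":
		return "container is being created"
	case "ImageNotReady":
		return "image is not yet available on the node"
	default:
		if w.Message != "" {
			return w.Message
		}
		return "waiting"
	}
}

func main() {
	w := ContainerStateWaiting{
		Reason:  "ImageNotReady",
		Message: "Image: nginx is not ready on the node",
	}
	fmt.Println(describeWaiting(w))
}
```

Before the split, tooling had to pattern-match prose like "pulling its image", which broke whenever the wording changed; a fixed CamelCase vocabulary keeps the switch above stable.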
diff --git a/pkg/api/v1/types_swagger_doc_generated.go b/pkg/api/v1/types_swagger_doc_generated.go
index ef56378a642..38b51d0cbe9 100644
--- a/pkg/api/v1/types_swagger_doc_generated.go
+++ b/pkg/api/v1/types_swagger_doc_generated.go
@@ -199,8 +199,9 @@ func (ContainerStateTerminated) SwaggerDoc() map[string]string {
 }
 
 var map_ContainerStateWaiting = map[string]string{
-	"":       "ContainerStateWaiting is a waiting state of a container.",
-	"reason": "(brief) reason the container is not yet running, such as pulling its image.",
+	"":        "ContainerStateWaiting is a waiting state of a container.",
+	"reason":  "(brief) reason the container is not yet running.",
+	"message": "Message regarding why the container is not yet running.",
 }
 
 func (ContainerStateWaiting) SwaggerDoc() map[string]string {
diff --git a/pkg/kubelet/container/image_puller.go b/pkg/kubelet/container/image_puller.go
index 95c0ae04c54..7b2fbc614ff 100644
--- a/pkg/kubelet/container/image_puller.go
+++ b/pkg/kubelet/container/image_puller.go
@@ -64,11 +64,11 @@ func (puller *imagePuller) reportImagePull(ref *api.ObjectReference, event strin
 
 	switch event {
 	case "pulling":
-		puller.recorder.Eventf(ref, "pulling", "Pulling image %q", image)
+		puller.recorder.Eventf(ref, "Pulling", "Pulling image %q", image)
 	case "pulled":
-		puller.recorder.Eventf(ref, "pulled", "Successfully pulled image %q", image)
+		puller.recorder.Eventf(ref, "Pulled", "Successfully pulled image %q", image)
 	case "failed":
-		puller.recorder.Eventf(ref, "failed", "Failed to pull image %q: %v", image, pullError)
+		puller.recorder.Eventf(ref, "Failed", "Failed to pull image %q: %v", image, pullError)
 	}
 }
 
@@ -82,14 +82,14 @@ func (puller *imagePuller) PullImage(pod *api.Pod, container *api.Container, pul
 	present, err := puller.runtime.IsImagePresent(spec)
 	if err != nil {
 		if ref != nil {
-			puller.recorder.Eventf(ref, "failed", "Failed to inspect image %q: %v", container.Image, err)
+			puller.recorder.Eventf(ref, "Failed", "Failed to inspect image %q: %v", container.Image, err)
 		}
 		return fmt.Errorf("failed to inspect image %q: %v", container.Image, err)
 	}
 
 	if !shouldPullImage(container, present) {
 		if present && ref != nil {
-			puller.recorder.Eventf(ref, "pulled", "Container image %q already present on machine", container.Image)
+			puller.recorder.Eventf(ref, "Pulled", "Container image %q already present on machine", container.Image)
 		}
 		return nil
 	}
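The same principle applies to the event reasons above: once they are fixed CamelCase identifiers ("Pulling", "Pulled", "Failed"), consumers can match them exactly rather than by prefix or substring. A rough sketch, where `recordedEvent` and `pullEvents` are hypothetical stand-ins for illustration, not real Kubernetes types:

```go
package main

import "fmt"

// recordedEvent is a minimal stand-in for a recorded event: a CamelCase
// reason plus a human-readable message, mirroring the (reason, message)
// pairs passed to recorder.Eventf above.
type recordedEvent struct {
	Reason  string
	Message string
}

// pullEvents picks out image-pull activity by exact reason match, which
// the fixed CamelCase vocabulary makes safe to do programmatically.
func pullEvents(events []recordedEvent) []recordedEvent {
	var out []recordedEvent
	for _, ev := range events {
		switch ev.Reason {
		case "Pulling", "Pulled":
			out = append(out, ev)
		}
	}
	return out
}

func main() {
	evs := []recordedEvent{
		{Reason: "Pulling", Message: `Pulling image "nginx"`},
		{Reason: "Failed", Message: `Failed to pull image "nginx": timeout`},
		{Reason: "Pulled", Message: `Successfully pulled image "nginx"`},
	}
	for _, ev := range pullEvents(evs) {
		fmt.Printf("%s: %s\n", ev.Reason, ev.Message)
	}
}
```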
diff --git a/pkg/kubelet/dockertools/manager.go b/pkg/kubelet/dockertools/manager.go
index f1b8a6da4d0..da238810bd9 100644
--- a/pkg/kubelet/dockertools/manager.go
+++ b/pkg/kubelet/dockertools/manager.go
@@ -274,13 +274,13 @@ func (dm *DockerManager) GetContainerLogs(pod *api.Pod, containerID, tail string
 
 var (
 	// ErrNoContainersInPod is returned when there are no containers for a given pod
-	ErrNoContainersInPod = errors.New("no containers exist for this pod")
+	ErrNoContainersInPod = errors.New("NoContainersInPod")
 
 	// ErrNoPodInfraContainerInPod is returned when there is no pod infra container for a given pod
-	ErrNoPodInfraContainerInPod = errors.New("No pod infra container exists for this pod")
+	ErrNoPodInfraContainerInPod = errors.New("NoPodInfraContainerInPod")
 
 	// ErrContainerCannotRun is returned when a container is created, but cannot run properly
-	ErrContainerCannotRun = errors.New("Container cannot run")
+	ErrContainerCannotRun = errors.New("ContainerCannotRun")
 )
 
 // Internal information kept for containers from inspection
@@ -332,17 +332,21 @@ func (dm *DockerManager) inspectContainer(dockerID, containerName, tPath string,
 		}
 	} else if !inspectResult.State.FinishedAt.IsZero() {
 		reason := ""
+		message := ""
 		// Note: An application might handle OOMKilled gracefully.
 		// In that case, the container is oom killed, but the exit
 		// code could be 0.
 		if inspectResult.State.OOMKilled {
-			reason = "OOM Killed"
+			reason = "OOMKilled"
 		} else {
-			reason = inspectResult.State.Error
+			reason = "Error"
+			message = inspectResult.State.Error
 		}
 		result.status.State.Terminated = &api.ContainerStateTerminated{
-			ExitCode: inspectResult.State.ExitCode,
-			Reason:   reason,
+			ExitCode: inspectResult.State.ExitCode,
+			Message:  message,
+			Reason:   reason,
+
 			StartedAt:   util.NewTime(inspectResult.State.StartedAt),
 			FinishedAt:  util.NewTime(inspectResult.State.FinishedAt),
 			ContainerID: DockerPrefix + dockerID,
@@ -502,11 +506,13 @@ func (dm *DockerManager) GetPodStatus(pod *api.Pod) (*api.PodStatus, error) {
 			_, err := dm.client.InspectImage(image)
 			if err == nil {
 				containerStatus.State.Waiting = &api.ContainerStateWaiting{
-					Reason: fmt.Sprintf("Image: %s is ready, container is creating", image),
+					Message: fmt.Sprintf("Image: %s is ready, container is creating", image),
+					Reason:  "ContainerCreating",
 				}
 			} else if err == docker.ErrNoSuchImage {
 				containerStatus.State.Waiting = &api.ContainerStateWaiting{
-					Reason: fmt.Sprintf("Image: %s is not ready on the node", image),
+					Message: fmt.Sprintf("Image: %s is not ready on the node", image),
+					Reason:  "ImageNotReady",
 				}
 			}
 			statuses[container.Name] = &containerStatus
diff --git a/pkg/kubelet/dockertools/manager_test.go b/pkg/kubelet/dockertools/manager_test.go
index ef289f4e309..90f1874d65f 100644
--- a/pkg/kubelet/dockertools/manager_test.go
+++ b/pkg/kubelet/dockertools/manager_test.go
@@ -1300,21 +1300,21 @@ func TestSyncPodWithPullPolicy(t *testing.T) {
 	fakeDocker.Lock()
 
 	eventSet := []string{
-		`pulling Pulling image "pod_infra_image"`,
-		`pulled Successfully pulled image "pod_infra_image"`,
-		`pulling Pulling image "pull_always_image"`,
-		`pulled Successfully pulled image "pull_always_image"`,
-		`pulling Pulling image "pull_if_not_present_image"`,
-		`pulled Successfully pulled image "pull_if_not_present_image"`,
-		`pulled Container image "existing_one" already present on machine`,
-		`pulled Container image "want:latest" already present on machine`,
+		`Pulling Pulling image "pod_infra_image"`,
+		`Pulled Successfully pulled image "pod_infra_image"`,
+		`Pulling Pulling image "pull_always_image"`,
+		`Pulled Successfully pulled image "pull_always_image"`,
+		`Pulling Pulling image "pull_if_not_present_image"`,
+		`Pulled Successfully pulled image "pull_if_not_present_image"`,
+		`Pulled Container image "existing_one" already present on machine`,
+		`Pulled Container image "want:latest" already present on machine`,
 	}
 
 	recorder := dm.recorder.(*record.FakeRecorder)
 
 	var actualEvents []string
 	for _, ev := range recorder.Events {
-		if strings.HasPrefix(ev, "pull") {
+		if strings.HasPrefix(ev, "Pull") {
 			actualEvents = append(actualEvents, ev)
 		}
 	}
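The `inspectContainer` hunk above keeps the fixed reasons ("OOMKilled", "Error") separate from docker's free-form error text, which now travels in `Message`. A small sketch of that mapping in isolation, with `dockerState` and `terminated` as local stand-ins for the real docker inspect result and `api.ContainerStateTerminated`:

```go
package main

import "fmt"

// dockerState is a local stand-in for the relevant slice of docker's
// inspect result; an assumption for illustration, not the real type.
type dockerState struct {
	OOMKilled bool
	ExitCode  int
	Error     string
}

// terminated is a local stand-in for the terminated container state.
type terminated struct {
	ExitCode int
	Reason   string
	Message  string
}

// terminatedFrom mirrors the mapping in inspectContainer above: OOM kills
// get the fixed CamelCase reason "OOMKilled", while runtime errors keep
// the generic reason "Error" and move the free-form text into Message.
func terminatedFrom(s dockerState) terminated {
	t := terminated{ExitCode: s.ExitCode}
	if s.OOMKilled {
		t.Reason = "OOMKilled"
	} else {
		t.Reason = "Error"
		t.Message = s.Error
	}
	return t
}

func main() {
	// An app may handle the OOM kill gracefully, so ExitCode can be 0.
	fmt.Printf("%+v\n", terminatedFrom(dockerState{OOMKilled: true, ExitCode: 0}))
	fmt.Printf("%+v\n", terminatedFrom(dockerState{ExitCode: 1, Error: "exec format error"}))
}
```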
diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go
index 482564b4729..774b38f1de2 100644
--- a/pkg/kubelet/kubelet.go
+++ b/pkg/kubelet/kubelet.go
@@ -1336,7 +1336,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont
 	}
 	if egress != nil || ingress != nil {
 		if pod.Spec.HostNetwork {
-			kl.recorder.Event(pod, "host network not supported", "Bandwidth shaping is not currently supported on the host network")
+			kl.recorder.Event(pod, "HostNetworkNotSupported", "Bandwidth shaping is not currently supported on the host network")
 		} else if kl.shaper != nil {
 			status, found := kl.statusManager.GetPodStatus(pod.UID)
 			if !found {
@@ -1351,7 +1351,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont
 				err = kl.shaper.ReconcileCIDR(fmt.Sprintf("%s/32", status.PodIP), egress, ingress)
 			}
 		} else {
-			kl.recorder.Event(pod, "nil shaper", "Pod requests bandwidth shaping, but the shaper is undefined")
+			kl.recorder.Event(pod, "NilShaper", "Pod requests bandwidth shaping, but the shaper is undefined")
 		}
 	}
 
@@ -2343,21 +2343,23 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
 		newNodeReadyCondition = api.NodeCondition{
 			Type:              api.NodeReady,
 			Status:            api.ConditionTrue,
-			Reason:            "kubelet is posting ready status",
+			Reason:            "KubeletReady",
+			Message:           "kubelet is posting ready status",
 			LastHeartbeatTime: currentTime,
 		}
 	} else {
-		var reasons []string
+		var messages []string
 		if !containerRuntimeUp {
-			reasons = append(reasons, "container runtime is down")
+			messages = append(messages, "container runtime is down")
 		}
 		if !networkConfigured {
-			reasons = append(reasons, "network not configured correctly")
+			messages = append(messages, "network not configured correctly")
 		}
 		newNodeReadyCondition = api.NodeCondition{
 			Type:              api.NodeReady,
 			Status:            api.ConditionFalse,
-			Reason:            strings.Join(reasons, ","),
+			Reason:            "KubeletNotReady",
+			Message:           strings.Join(messages, ","),
 			LastHeartbeatTime: currentTime,
 		}
 	}
diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go
index 87fba50f571..6e83c7c70ab 100644
--- a/pkg/kubelet/kubelet_test.go
+++ b/pkg/kubelet/kubelet_test.go
@@ -2380,7 +2380,8 @@ func TestUpdateNewNodeStatus(t *testing.T) {
 			{
 				Type:               api.NodeReady,
 				Status:             api.ConditionTrue,
-				Reason:             fmt.Sprintf("kubelet is posting ready status"),
+				Reason:             "KubeletReady",
+				Message:            fmt.Sprintf("kubelet is posting ready status"),
 				LastHeartbeatTime:  util.Time{},
 				LastTransitionTime: util.Time{},
 			},
@@ -2448,7 +2449,8 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
 			{
 				Type:               api.NodeReady,
 				Status:             api.ConditionTrue,
-				Reason:             fmt.Sprintf("kubelet is posting ready status"),
+				Reason:             "KubeletReady",
+				Message:            fmt.Sprintf("kubelet is posting ready status"),
 				LastHeartbeatTime:  util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
 				LastTransitionTime: util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
 			},
@@ -2484,7 +2486,8 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
 			{
 				Type:               api.NodeReady,
 				Status:             api.ConditionTrue,
-				Reason:             fmt.Sprintf("kubelet is posting ready status"),
+				Reason:             "KubeletReady",
+				Message:            fmt.Sprintf("kubelet is posting ready status"),
 				LastHeartbeatTime:  util.Time{}, // placeholder
 				LastTransitionTime: util.Time{}, // placeholder
 			},
@@ -2578,7 +2581,8 @@ func TestUpdateNodeStatusWithoutContainerRuntime(t *testing.T) {
 			{
 				Type:               api.NodeReady,
 				Status:             api.ConditionFalse,
-				Reason:             fmt.Sprintf("container runtime is down"),
+				Reason:             "KubeletNotReady",
+				Message:            fmt.Sprintf("container runtime is down"),
 				LastHeartbeatTime:  util.Time{},
 				LastTransitionTime: util.Time{},
 			},
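The node-status hunks follow the same pattern: a fixed `Reason` ("KubeletReady" / "KubeletNotReady") identifies the condition, and the joined, human-readable detail moves into `Message`. A self-contained sketch of that logic, with `nodeReadyCondition` as a hypothetical helper returning plain strings rather than a real `api.NodeCondition`:

```go
package main

import (
	"fmt"
	"strings"
)

// nodeReadyCondition sketches the setNodeStatus logic above with plain
// values: the fixed CamelCase reason carries the condition's identity,
// and the joined messages carry the human-readable detail.
func nodeReadyCondition(runtimeUp, networkConfigured bool) (status, reason, message string) {
	if runtimeUp && networkConfigured {
		return "True", "KubeletReady", "kubelet is posting ready status"
	}
	var messages []string
	if !runtimeUp {
		messages = append(messages, "container runtime is down")
	}
	if !networkConfigured {
		messages = append(messages, "network not configured correctly")
	}
	return "False", "KubeletNotReady", strings.Join(messages, ",")
}

func main() {
	s, r, m := nodeReadyCondition(false, false)
	fmt.Printf("status=%s reason=%s message=%q\n", s, r, m)
}
```

Note that every failure is appended to the same `messages` slice, so when both the runtime and the network are unhealthy, both problems survive into the joined `Message` string.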