Merge pull request #13727 from jiangyaoguo/fix-reason-reporting-in-kubelet
Auto commit by PR queue bot
commit 6f01200188
@@ -13171,7 +13171,11 @@
     "properties": {
      "reason": {
       "type": "string",
-      "description": "(brief) reason the container is not yet running, such as pulling its image."
+      "description": "(brief) reason the container is not yet running."
+     },
+     "message": {
+      "type": "string",
+      "description": "Message regarding why the container is not yet running."
      }
     }
    },
@@ -302,6 +302,7 @@ func deepCopy_api_ContainerStateTerminated(in ContainerStateTerminated, out *Con
 
 func deepCopy_api_ContainerStateWaiting(in ContainerStateWaiting, out *ContainerStateWaiting, c *conversion.Cloner) error {
     out.Reason = in.Reason
+    out.Message = in.Message
     return nil
 }
 
@@ -835,8 +835,10 @@ const (
 )
 
 type ContainerStateWaiting struct {
-    // Reason could be pulling image,
+    // A brief CamelCase string indicating details about why the container is in waiting state.
     Reason string `json:"reason,omitempty"`
+    // A human-readable message indicating details about why the container is in waiting state.
+    Message string `json:"message,omitempty"`
 }
 
 type ContainerStateRunning struct {
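For context (not part of the diff): both fields carry the `omitempty` tag, so a waiting state serializes with only what is set. A minimal, self-contained sketch of the struct as defined above, with an illustrative reason value:

package main

import (
    "encoding/json"
    "fmt"
)

// ContainerStateWaiting mirrors the struct above: a brief CamelCase Reason
// plus a human-readable Message, both optional on the wire.
type ContainerStateWaiting struct {
    Reason  string `json:"reason,omitempty"`
    Message string `json:"message,omitempty"`
}

func main() {
    w := ContainerStateWaiting{
        Reason:  "ImageNotReady",
        Message: "Image: nginx is not ready on the node",
    }
    b, _ := json.Marshal(w)
    fmt.Println(string(b)) // {"reason":"ImageNotReady","message":"Image: nginx is not ready on the node"}
}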
@@ -340,6 +340,7 @@ func convert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *api.Conta
         defaulting.(func(*api.ContainerStateWaiting))(in)
     }
     out.Reason = in.Reason
+    out.Message = in.Message
     return nil
 }
 
@@ -2743,6 +2744,7 @@ func convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in *Container
         defaulting.(func(*ContainerStateWaiting))(in)
     }
     out.Reason = in.Reason
+    out.Message = in.Message
     return nil
 }
 
@@ -317,6 +317,7 @@ func deepCopy_v1_ContainerStateTerminated(in ContainerStateTerminated, out *Cont
 
 func deepCopy_v1_ContainerStateWaiting(in ContainerStateWaiting, out *ContainerStateWaiting, c *conversion.Cloner) error {
     out.Reason = in.Reason
+    out.Message = in.Message
     return nil
 }
 
@@ -1036,8 +1036,10 @@ const (
 
 // ContainerStateWaiting is a waiting state of a container.
 type ContainerStateWaiting struct {
-    // (brief) reason the container is not yet running, such as pulling its image.
+    // (brief) reason the container is not yet running.
     Reason string `json:"reason,omitempty"`
+    // Message regarding why the container is not yet running.
+    Message string `json:"message,omitempty"`
 }
 
 // ContainerStateRunning is a running state of a container.
@@ -199,8 +199,9 @@ func (ContainerStateTerminated) SwaggerDoc() map[string]string {
 }
 
 var map_ContainerStateWaiting = map[string]string{
-    "":        "ContainerStateWaiting is a waiting state of a container.",
-    "reason":  "(brief) reason the container is not yet running, such as pulling its image.",
+    "":        "ContainerStateWaiting is a waiting state of a container.",
+    "reason":  "(brief) reason the container is not yet running.",
+    "message": "Message regarding why the container is not yet running.",
 }
 
 func (ContainerStateWaiting) SwaggerDoc() map[string]string {
@@ -64,11 +64,11 @@ func (puller *imagePuller) reportImagePull(ref *api.ObjectReference, event strin
 
     switch event {
     case "pulling":
-        puller.recorder.Eventf(ref, "pulling", "Pulling image %q", image)
+        puller.recorder.Eventf(ref, "Pulling", "Pulling image %q", image)
     case "pulled":
-        puller.recorder.Eventf(ref, "pulled", "Successfully pulled image %q", image)
+        puller.recorder.Eventf(ref, "Pulled", "Successfully pulled image %q", image)
     case "failed":
-        puller.recorder.Eventf(ref, "failed", "Failed to pull image %q: %v", image, pullError)
+        puller.recorder.Eventf(ref, "Failed", "Failed to pull image %q: %v", image, pullError)
     }
 }
 
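For illustration only: the event reasons above move from lowercase to CamelCase so they read consistently in event listings. A minimal sketch with a stand-in recorder (the interface and print-based test double here are assumptions, not the kubelet's real event recorder):

package main

import "fmt"

// eventRecorder is a stand-in for the kubelet's event recorder interface.
type eventRecorder interface {
    Eventf(reason, messageFmt string, args ...interface{})
}

// printRecorder emits "<Reason> <message>", the shape the test below matches with HasPrefix.
type printRecorder struct{}

func (printRecorder) Eventf(reason, messageFmt string, args ...interface{}) {
    fmt.Printf("%s %s\n", reason, fmt.Sprintf(messageFmt, args...))
}

// reportImagePull mirrors the switch above with the CamelCase reasons.
func reportImagePull(rec eventRecorder, event, image string, pullErr error) {
    switch event {
    case "pulling":
        rec.Eventf("Pulling", "Pulling image %q", image)
    case "pulled":
        rec.Eventf("Pulled", "Successfully pulled image %q", image)
    case "failed":
        rec.Eventf("Failed", "Failed to pull image %q: %v", image, pullErr)
    }
}

func main() {
    reportImagePull(printRecorder{}, "pulled", "nginx:latest", nil)
    // Output: Pulled Successfully pulled image "nginx:latest"
}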
@@ -82,14 +82,14 @@ func (puller *imagePuller) PullImage(pod *api.Pod, container *api.Container, pul
     present, err := puller.runtime.IsImagePresent(spec)
     if err != nil {
         if ref != nil {
-            puller.recorder.Eventf(ref, "failed", "Failed to inspect image %q: %v", container.Image, err)
+            puller.recorder.Eventf(ref, "Failed", "Failed to inspect image %q: %v", container.Image, err)
         }
         return fmt.Errorf("failed to inspect image %q: %v", container.Image, err)
     }
 
     if !shouldPullImage(container, present) {
         if present && ref != nil {
-            puller.recorder.Eventf(ref, "pulled", "Container image %q already present on machine", container.Image)
+            puller.recorder.Eventf(ref, "Pulled", "Container image %q already present on machine", container.Image)
         }
         return nil
     }
@@ -275,13 +275,13 @@ func (dm *DockerManager) GetContainerLogs(pod *api.Pod, containerID, tail string
 
 var (
     // ErrNoContainersInPod is returned when there are no containers for a given pod
-    ErrNoContainersInPod = errors.New("no containers exist for this pod")
+    ErrNoContainersInPod = errors.New("NoContainersInPod")
 
     // ErrNoPodInfraContainerInPod is returned when there is no pod infra container for a given pod
-    ErrNoPodInfraContainerInPod = errors.New("No pod infra container exists for this pod")
+    ErrNoPodInfraContainerInPod = errors.New("NoPodInfraContainerInPod")
 
     // ErrContainerCannotRun is returned when a container is created, but cannot run properly
-    ErrContainerCannotRun = errors.New("Container cannot run")
+    ErrContainerCannotRun = errors.New("ContainerCannotRun")
 )
 
 // Internal information kept for containers from inspection
@@ -333,17 +333,21 @@ func (dm *DockerManager) inspectContainer(dockerID, containerName, tPath string,
         }
     } else if !inspectResult.State.FinishedAt.IsZero() {
         reason := ""
+        message := ""
         // Note: An application might handle OOMKilled gracefully.
         // In that case, the container is oom killed, but the exit
         // code could be 0.
         if inspectResult.State.OOMKilled {
-            reason = "OOM Killed"
+            reason = "OOMKilled"
         } else {
-            reason = inspectResult.State.Error
+            reason = "Error"
+            message = inspectResult.State.Error
         }
         result.status.State.Terminated = &api.ContainerStateTerminated{
-            ExitCode: inspectResult.State.ExitCode,
-            Reason:   reason,
+            ExitCode: inspectResult.State.ExitCode,
+            Message:  message,
+            Reason:   reason,
+
             StartedAt:   util.NewTime(inspectResult.State.StartedAt),
             FinishedAt:  util.NewTime(inspectResult.State.FinishedAt),
             ContainerID: DockerPrefix + dockerID,
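For illustration: the terminated-state logic now keeps Reason as a short CamelCase token (OOMKilled / Error) and moves the raw Docker error string into Message. A sketch with trimmed stand-in types, not the real DockerManager code:

package main

import "fmt"

// dockerState is a trimmed stand-in for the inspected Docker state fields used above.
type dockerState struct {
    OOMKilled bool
    Error     string
    ExitCode  int
}

// terminationReason splits the Docker state into a brief CamelCase reason
// and a human-readable message, mirroring the change above.
func terminationReason(s dockerState) (reason, message string) {
    if s.OOMKilled {
        return "OOMKilled", ""
    }
    return "Error", s.Error
}

func main() {
    r, m := terminationReason(dockerState{Error: "no such file or directory", ExitCode: 127})
    fmt.Printf("Reason=%s Message=%q\n", r, m) // Reason=Error Message="no such file or directory"
}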
@@ -503,11 +507,13 @@ func (dm *DockerManager) GetPodStatus(pod *api.Pod) (*api.PodStatus, error) {
             _, err := dm.client.InspectImage(image)
             if err == nil {
                 containerStatus.State.Waiting = &api.ContainerStateWaiting{
-                    Reason: fmt.Sprintf("Image: %s is ready, container is creating", image),
+                    Message: fmt.Sprintf("Image: %s is ready, container is creating", image),
+                    Reason:  "ContainerCreating",
                 }
             } else if err == docker.ErrNoSuchImage {
                 containerStatus.State.Waiting = &api.ContainerStateWaiting{
-                    Reason: fmt.Sprintf("Image: %s is not ready on the node", image),
+                    Message: fmt.Sprintf("Image: %s is not ready on the node", image),
+                    Reason:  "ImageNotReady",
                 }
             }
             statuses[container.Name] = &containerStatus
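For illustration: the waiting-state reasons become fixed tokens (ContainerCreating, ImageNotReady) while the image-specific detail moves to Message. A sketch with assumed local types and a hypothetical helper, not the real GetPodStatus:

package main

import (
    "errors"
    "fmt"
)

// errNoSuchImage stands in for docker.ErrNoSuchImage.
var errNoSuchImage = errors.New("no such image")

type containerStateWaiting struct {
    Reason  string
    Message string
}

// waitingState returns a fixed CamelCase Reason plus an image-specific Message,
// mirroring the two branches above.
func waitingState(image string, inspectErr error) *containerStateWaiting {
    if inspectErr == nil {
        return &containerStateWaiting{
            Reason:  "ContainerCreating",
            Message: fmt.Sprintf("Image: %s is ready, container is creating", image),
        }
    }
    if errors.Is(inspectErr, errNoSuchImage) {
        return &containerStateWaiting{
            Reason:  "ImageNotReady",
            Message: fmt.Sprintf("Image: %s is not ready on the node", image),
        }
    }
    return nil
}

func main() {
    fmt.Printf("%+v\n", *waitingState("nginx", errNoSuchImage))
}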
@@ -1301,21 +1301,21 @@ func TestSyncPodWithPullPolicy(t *testing.T) {
     fakeDocker.Lock()
 
     eventSet := []string{
-        `pulling Pulling image "pod_infra_image"`,
-        `pulled Successfully pulled image "pod_infra_image"`,
-        `pulling Pulling image "pull_always_image"`,
-        `pulled Successfully pulled image "pull_always_image"`,
-        `pulling Pulling image "pull_if_not_present_image"`,
-        `pulled Successfully pulled image "pull_if_not_present_image"`,
-        `pulled Container image "existing_one" already present on machine`,
-        `pulled Container image "want:latest" already present on machine`,
+        `Pulling Pulling image "pod_infra_image"`,
+        `Pulled Successfully pulled image "pod_infra_image"`,
+        `Pulling Pulling image "pull_always_image"`,
+        `Pulled Successfully pulled image "pull_always_image"`,
+        `Pulling Pulling image "pull_if_not_present_image"`,
+        `Pulled Successfully pulled image "pull_if_not_present_image"`,
+        `Pulled Container image "existing_one" already present on machine`,
+        `Pulled Container image "want:latest" already present on machine`,
     }
 
     recorder := dm.recorder.(*record.FakeRecorder)
 
     var actualEvents []string
     for _, ev := range recorder.Events {
-        if strings.HasPrefix(ev, "pull") {
+        if strings.HasPrefix(ev, "Pull") {
             actualEvents = append(actualEvents, ev)
         }
     }
@@ -1333,7 +1333,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont
     }
     if egress != nil || ingress != nil {
         if pod.Spec.HostNetwork {
-            kl.recorder.Event(pod, "host network not supported", "Bandwidth shaping is not currently supported on the host network")
+            kl.recorder.Event(pod, "HostNetworkNotSupported", "Bandwidth shaping is not currently supported on the host network")
         } else if kl.shaper != nil {
             status, found := kl.statusManager.GetPodStatus(pod.UID)
             if !found {
@@ -1348,7 +1348,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont
                 err = kl.shaper.ReconcileCIDR(fmt.Sprintf("%s/32", status.PodIP), egress, ingress)
             }
         } else {
-            kl.recorder.Event(pod, "nil shaper", "Pod requests bandwidth shaping, but the shaper is undefined")
+            kl.recorder.Event(pod, "NilShaper", "Pod requests bandwidth shaping, but the shaper is undefined")
         }
     }
 
@@ -2340,21 +2340,24 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
         newNodeReadyCondition = api.NodeCondition{
             Type:              api.NodeReady,
             Status:            api.ConditionTrue,
-            Reason:            "kubelet is posting ready status",
+            Reason:            "KubeletReady",
+            Message:           "kubelet is posting ready status",
             LastHeartbeatTime: currentTime,
         }
     } else {
         var reasons []string
+        var messages []string
         if !containerRuntimeUp {
-            reasons = append(reasons, "container runtime is down")
+            messages = append(messages, "container runtime is down")
         }
         if !networkConfigured {
-            reasons = append(reasons, "network not configured correctly")
+            messages = append(reasons, "network not configured correctly")
         }
         newNodeReadyCondition = api.NodeCondition{
             Type:              api.NodeReady,
             Status:            api.ConditionFalse,
-            Reason:            strings.Join(reasons, ","),
+            Reason:            "KubeletNotReady",
+            Message:           strings.Join(messages, ","),
             LastHeartbeatTime: currentTime,
         }
     }
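For illustration: node readiness now reports a fixed CamelCase Reason (KubeletReady / KubeletNotReady) and folds the individual failure strings into Message. A sketch with trimmed stand-in types rather than the real api.NodeCondition:

package main

import (
    "fmt"
    "strings"
)

// nodeCondition is a trimmed stand-in for the node ready condition fields used above.
type nodeCondition struct {
    Status  string
    Reason  string
    Message string
}

// readyCondition mirrors the setNodeStatus change: Reason stays a stable token
// while the per-cause strings are joined into Message.
func readyCondition(runtimeUp, networkConfigured bool) nodeCondition {
    if runtimeUp && networkConfigured {
        return nodeCondition{Status: "True", Reason: "KubeletReady", Message: "kubelet is posting ready status"}
    }
    var messages []string
    if !runtimeUp {
        messages = append(messages, "container runtime is down")
    }
    if !networkConfigured {
        messages = append(messages, "network not configured correctly")
    }
    return nodeCondition{Status: "False", Reason: "KubeletNotReady", Message: strings.Join(messages, ",")}
}

func main() {
    fmt.Printf("%+v\n", readyCondition(false, true))
    // {Status:False Reason:KubeletNotReady Message:container runtime is down}
}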
@@ -2384,7 +2384,8 @@ func TestUpdateNewNodeStatus(t *testing.T) {
             {
                 Type:               api.NodeReady,
                 Status:             api.ConditionTrue,
-                Reason:             fmt.Sprintf("kubelet is posting ready status"),
+                Reason:             "KubeletReady",
+                Message:            fmt.Sprintf("kubelet is posting ready status"),
                 LastHeartbeatTime:  util.Time{},
                 LastTransitionTime: util.Time{},
             },
@@ -2452,7 +2453,8 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
             {
                 Type:               api.NodeReady,
                 Status:             api.ConditionTrue,
-                Reason:             fmt.Sprintf("kubelet is posting ready status"),
+                Reason:             "KubeletReady",
+                Message:            fmt.Sprintf("kubelet is posting ready status"),
                 LastHeartbeatTime:  util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
                 LastTransitionTime: util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
             },
@@ -2488,7 +2490,8 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
             {
                 Type:               api.NodeReady,
                 Status:             api.ConditionTrue,
-                Reason:             fmt.Sprintf("kubelet is posting ready status"),
+                Reason:             "KubeletReady",
+                Message:            fmt.Sprintf("kubelet is posting ready status"),
                 LastHeartbeatTime:  util.Time{}, // placeholder
                 LastTransitionTime: util.Time{}, // placeholder
             },
@@ -2582,7 +2585,8 @@ func TestUpdateNodeStatusWithoutContainerRuntime(t *testing.T) {
             {
                 Type:               api.NodeReady,
                 Status:             api.ConditionFalse,
-                Reason:             fmt.Sprintf("container runtime is down"),
+                Reason:             "KubeletNotReady",
+                Message:            fmt.Sprintf("container runtime is down"),
                 LastHeartbeatTime:  util.Time{},
                 LastTransitionTime: util.Time{},
             },