Fix reporting reason in kubelet
1. Make the reason field of status objects reported by the kubelet CamelCase.
2. Add a Message field to ContainerStateWaiting to describe detail about the Reason.
3. Make the reason field of events in the kubelet CamelCase.
4. Update the swagger spec, deep-copy, conversion, and swagger-doc generated code accordingly.
parent: 6a5049f09d
commit: 62c0c35307
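The heart of the change is splitting each status into a machine-readable, CamelCase Reason and a free-form, human-readable Message. The following minimal sketch (stand-in types only, not the real Kubernetes API; the image name is made up) shows the shape a waiting container status takes after this commit:

package main

import "fmt"

// containerStateWaiting is a stand-in for the updated API struct: a stable,
// CamelCase Reason for programs to match on, plus a human-readable Message.
type containerStateWaiting struct {
	Reason  string
	Message string
}

func main() {
	// Values mirror the ones introduced in the diff below;
	// "busybox" is just an illustrative image name.
	w := containerStateWaiting{
		Reason:  "ImageNotReady",
		Message: fmt.Sprintf("Image: %s is not ready on the node", "busybox"),
	}
	fmt.Printf("reason=%s message=%q\n", w.Reason, w.Message)
}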
@@ -13171,7 +13171,11 @@
      "properties": {
       "reason": {
        "type": "string",
-       "description": "(brief) reason the container is not yet running, such as pulling its image."
+       "description": "(brief) reason the container is not yet running."
+      },
+      "message": {
+       "type": "string",
+       "description": "Message regarding why the container is not yet running."
       }
      }
     },
@@ -302,6 +302,7 @@ func deepCopy_api_ContainerStateTerminated(in ContainerStateTerminated, out *Con
 
 func deepCopy_api_ContainerStateWaiting(in ContainerStateWaiting, out *ContainerStateWaiting, c *conversion.Cloner) error {
 	out.Reason = in.Reason
+	out.Message = in.Message
 	return nil
 }
 
@@ -835,8 +835,10 @@ const (
 )
 
 type ContainerStateWaiting struct {
-	// Reason could be pulling image,
+	// A brief CamelCase string indicating details about why the container is in waiting state.
 	Reason string `json:"reason,omitempty"`
+	// A human-readable message indicating details about why the container is in waiting state.
+	Message string `json:"message,omitempty"`
 }
 
 type ContainerStateRunning struct {
@@ -340,6 +340,7 @@ func convert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *api.Conta
 		defaulting.(func(*api.ContainerStateWaiting))(in)
 	}
 	out.Reason = in.Reason
+	out.Message = in.Message
 	return nil
 }
 
@@ -2742,6 +2743,7 @@ func convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in *Container
 		defaulting.(func(*ContainerStateWaiting))(in)
 	}
 	out.Reason = in.Reason
+	out.Message = in.Message
 	return nil
 }
 
@@ -317,6 +317,7 @@ func deepCopy_v1_ContainerStateTerminated(in ContainerStateTerminated, out *Cont
 
 func deepCopy_v1_ContainerStateWaiting(in ContainerStateWaiting, out *ContainerStateWaiting, c *conversion.Cloner) error {
 	out.Reason = in.Reason
+	out.Message = in.Message
 	return nil
 }
 
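The deep-copy and conversion functions above are generated, field-by-field assignments, so every new API field has to be wired through each of them (or the files regenerated). A small self-contained sketch of that pattern, using stand-in types rather than the real generated code:

package main

import "fmt"

// Stand-ins for the internal and versioned structs; not the real types.
type internalWaiting struct{ Reason, Message string }
type v1Waiting struct{ Reason, Message string }

// convertWaiting mirrors the generated conversion pattern: plain field
// assignments. Forgetting the new Message line would silently drop the field.
func convertWaiting(in *internalWaiting, out *v1Waiting) error {
	out.Reason = in.Reason
	out.Message = in.Message
	return nil
}

func main() {
	in := internalWaiting{Reason: "ContainerCreating", Message: "image is ready, container is creating"}
	var out v1Waiting
	if err := convertWaiting(&in, &out); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out)
}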
@@ -1036,8 +1036,10 @@ const (
 
 // ContainerStateWaiting is a waiting state of a container.
 type ContainerStateWaiting struct {
-	// (brief) reason the container is not yet running, such as pulling its image.
+	// (brief) reason the container is not yet running.
 	Reason string `json:"reason,omitempty"`
+	// Message regarding why the container is not yet running.
+	Message string `json:"message,omitempty"`
 }
 
 // ContainerStateRunning is a running state of a container.
@@ -199,8 +199,9 @@ func (ContainerStateTerminated) SwaggerDoc() map[string]string {
 }
 
 var map_ContainerStateWaiting = map[string]string{
 	"":        "ContainerStateWaiting is a waiting state of a container.",
-	"reason":  "(brief) reason the container is not yet running, such as pulling its image.",
+	"reason":  "(brief) reason the container is not yet running.",
+	"message": "Message regarding why the container is not yet running.",
 }
 
 func (ContainerStateWaiting) SwaggerDoc() map[string]string {
@@ -64,11 +64,11 @@ func (puller *imagePuller) reportImagePull(ref *api.ObjectReference, event strin
 
 	switch event {
 	case "pulling":
-		puller.recorder.Eventf(ref, "pulling", "Pulling image %q", image)
+		puller.recorder.Eventf(ref, "Pulling", "Pulling image %q", image)
 	case "pulled":
-		puller.recorder.Eventf(ref, "pulled", "Successfully pulled image %q", image)
+		puller.recorder.Eventf(ref, "Pulled", "Successfully pulled image %q", image)
 	case "failed":
-		puller.recorder.Eventf(ref, "failed", "Failed to pull image %q: %v", image, pullError)
+		puller.recorder.Eventf(ref, "Failed", "Failed to pull image %q: %v", image, pullError)
 	}
 }
 
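The recorder's Eventf call takes the reason as an argument separate from the formatted message, and the test further below filters recorded events by the "Pull" prefix, which is why these identifiers need to be stable CamelCase tokens. A rough sketch with a toy recorder (not the real record package; the ObjectReference argument is omitted here):

package main

import (
	"fmt"
	"strings"
)

// fakeRecorder is a toy stand-in for the kubelet's event recorder: it stores
// each event as "<reason> <message>", much like the FakeRecorder used in the
// tests further below.
type fakeRecorder struct{ Events []string }

func (r *fakeRecorder) Eventf(reason, format string, args ...interface{}) {
	r.Events = append(r.Events, reason+" "+fmt.Sprintf(format, args...))
}

func main() {
	rec := &fakeRecorder{}
	rec.Eventf("Pulling", "Pulling image %q", "pod_infra_image")
	rec.Eventf("Pulled", "Successfully pulled image %q", "pod_infra_image")

	// With CamelCase reasons, pull-related events share a stable "Pull" prefix.
	for _, ev := range rec.Events {
		if strings.HasPrefix(ev, "Pull") {
			fmt.Println(ev)
		}
	}
}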
@@ -82,14 +82,14 @@ func (puller *imagePuller) PullImage(pod *api.Pod, container *api.Container, pul
 	present, err := puller.runtime.IsImagePresent(spec)
 	if err != nil {
 		if ref != nil {
-			puller.recorder.Eventf(ref, "failed", "Failed to inspect image %q: %v", container.Image, err)
+			puller.recorder.Eventf(ref, "Failed", "Failed to inspect image %q: %v", container.Image, err)
 		}
 		return fmt.Errorf("failed to inspect image %q: %v", container.Image, err)
 	}
 
 	if !shouldPullImage(container, present) {
 		if present && ref != nil {
-			puller.recorder.Eventf(ref, "pulled", "Container image %q already present on machine", container.Image)
+			puller.recorder.Eventf(ref, "Pulled", "Container image %q already present on machine", container.Image)
 		}
 		return nil
 	}
@@ -274,13 +274,13 @@ func (dm *DockerManager) GetContainerLogs(pod *api.Pod, containerID, tail string
 
 var (
 	// ErrNoContainersInPod is returned when there are no containers for a given pod
-	ErrNoContainersInPod = errors.New("no containers exist for this pod")
+	ErrNoContainersInPod = errors.New("NoContainersInPod")
 
 	// ErrNoPodInfraContainerInPod is returned when there is no pod infra container for a given pod
-	ErrNoPodInfraContainerInPod = errors.New("No pod infra container exists for this pod")
+	ErrNoPodInfraContainerInPod = errors.New("NoPodInfraContainerInPod")
 
 	// ErrContainerCannotRun is returned when a container is created, but cannot run properly
-	ErrContainerCannotRun = errors.New("Container cannot run")
+	ErrContainerCannotRun = errors.New("ContainerCannotRun")
 )
 
 // Internal information kept for containers from inspection
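The sentinel error strings become CamelCase tokens as well, presumably so that err.Error() can be surfaced directly as a reason; that rationale is an inference rather than something stated in the diff, and the sketch below only illustrates it with local stand-ins:

package main

import (
	"errors"
	"fmt"
)

// Stand-ins mirroring the manager's sentinel errors after this change: the
// error text itself is a CamelCase token, so err.Error() reads like a reason.
var (
	errContainerCannotRun = errors.New("ContainerCannotRun")
	errNoContainersInPod  = errors.New("NoContainersInPod")
)

func main() {
	err := errContainerCannotRun
	fmt.Println("reason:", err.Error()) // prints "ContainerCannotRun"
	_ = errNoContainersInPod
}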
@@ -332,17 +332,21 @@ func (dm *DockerManager) inspectContainer(dockerID, containerName, tPath string,
 		}
 	} else if !inspectResult.State.FinishedAt.IsZero() {
 		reason := ""
+		message := ""
 		// Note: An application might handle OOMKilled gracefully.
 		// In that case, the container is oom killed, but the exit
 		// code could be 0.
 		if inspectResult.State.OOMKilled {
-			reason = "OOM Killed"
+			reason = "OOMKilled"
 		} else {
-			reason = inspectResult.State.Error
+			reason = "Error"
+			message = inspectResult.State.Error
 		}
 		result.status.State.Terminated = &api.ContainerStateTerminated{
 			ExitCode: inspectResult.State.ExitCode,
-			Reason:   reason,
+			Message:  message,
+			Reason:   reason,
+
 			StartedAt:   util.NewTime(inspectResult.State.StartedAt),
 			FinishedAt:  util.NewTime(inspectResult.State.FinishedAt),
 			ContainerID: DockerPrefix + dockerID,
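The terminated-state handling above now classifies the exit into a fixed Reason and moves the raw Docker error text into Message. A minimal sketch of that decision, with stand-in types and an invented error string:

package main

import "fmt"

// inspectState is a stand-in for the subset of Docker's inspect result used here.
type inspectState struct {
	OOMKilled bool
	Error     string
}

// terminationReasonAndMessage mirrors the logic in inspectContainer above:
// the Reason is a fixed CamelCase token, the raw error text goes to Message.
func terminationReasonAndMessage(s inspectState) (reason, message string) {
	if s.OOMKilled {
		return "OOMKilled", ""
	}
	return "Error", s.Error
}

func main() {
	// "exec format error" is only an illustrative error string.
	r, m := terminationReasonAndMessage(inspectState{Error: "exec format error"})
	fmt.Printf("reason=%s message=%q\n", r, m)
}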
@@ -502,11 +506,13 @@ func (dm *DockerManager) GetPodStatus(pod *api.Pod) (*api.PodStatus, error) {
 			_, err := dm.client.InspectImage(image)
 			if err == nil {
 				containerStatus.State.Waiting = &api.ContainerStateWaiting{
-					Reason: fmt.Sprintf("Image: %s is ready, container is creating", image),
+					Message: fmt.Sprintf("Image: %s is ready, container is creating", image),
+					Reason:  "ContainerCreating",
 				}
 			} else if err == docker.ErrNoSuchImage {
 				containerStatus.State.Waiting = &api.ContainerStateWaiting{
-					Reason: fmt.Sprintf("Image: %s is not ready on the node", image),
+					Message: fmt.Sprintf("Image: %s is not ready on the node", image),
+					Reason:  "ImageNotReady",
 				}
 			}
 			statuses[container.Name] = &containerStatus
@@ -1300,21 +1300,21 @@ func TestSyncPodWithPullPolicy(t *testing.T) {
 	fakeDocker.Lock()
 
 	eventSet := []string{
-		`pulling Pulling image "pod_infra_image"`,
-		`pulled Successfully pulled image "pod_infra_image"`,
-		`pulling Pulling image "pull_always_image"`,
-		`pulled Successfully pulled image "pull_always_image"`,
-		`pulling Pulling image "pull_if_not_present_image"`,
-		`pulled Successfully pulled image "pull_if_not_present_image"`,
-		`pulled Container image "existing_one" already present on machine`,
-		`pulled Container image "want:latest" already present on machine`,
+		`Pulling Pulling image "pod_infra_image"`,
+		`Pulled Successfully pulled image "pod_infra_image"`,
+		`Pulling Pulling image "pull_always_image"`,
+		`Pulled Successfully pulled image "pull_always_image"`,
+		`Pulling Pulling image "pull_if_not_present_image"`,
+		`Pulled Successfully pulled image "pull_if_not_present_image"`,
+		`Pulled Container image "existing_one" already present on machine`,
+		`Pulled Container image "want:latest" already present on machine`,
 	}
 
 	recorder := dm.recorder.(*record.FakeRecorder)
 
 	var actualEvents []string
 	for _, ev := range recorder.Events {
-		if strings.HasPrefix(ev, "pull") {
+		if strings.HasPrefix(ev, "Pull") {
 			actualEvents = append(actualEvents, ev)
 		}
 	}
@@ -1336,7 +1336,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont
 	}
 	if egress != nil || ingress != nil {
 		if pod.Spec.HostNetwork {
-			kl.recorder.Event(pod, "host network not supported", "Bandwidth shaping is not currently supported on the host network")
+			kl.recorder.Event(pod, "HostNetworkNotSupported", "Bandwidth shaping is not currently supported on the host network")
 		} else if kl.shaper != nil {
 			status, found := kl.statusManager.GetPodStatus(pod.UID)
 			if !found {
@@ -1351,7 +1351,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont
 				err = kl.shaper.ReconcileCIDR(fmt.Sprintf("%s/32", status.PodIP), egress, ingress)
 			}
 		} else {
-			kl.recorder.Event(pod, "nil shaper", "Pod requests bandwidth shaping, but the shaper is undefined")
+			kl.recorder.Event(pod, "NilShaper", "Pod requests bandwidth shaping, but the shaper is undefined")
 		}
 	}
 
@@ -2343,21 +2343,24 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
 		newNodeReadyCondition = api.NodeCondition{
 			Type:              api.NodeReady,
 			Status:            api.ConditionTrue,
-			Reason:            "kubelet is posting ready status",
+			Reason:            "KubeletReady",
+			Message:           "kubelet is posting ready status",
 			LastHeartbeatTime: currentTime,
 		}
 	} else {
 		var reasons []string
+		var messages []string
 		if !containerRuntimeUp {
-			reasons = append(reasons, "container runtime is down")
+			messages = append(messages, "container runtime is down")
 		}
 		if !networkConfigured {
-			reasons = append(reasons, "network not configured correctly")
+			messages = append(reasons, "network not configured correctly")
 		}
 		newNodeReadyCondition = api.NodeCondition{
 			Type:              api.NodeReady,
 			Status:            api.ConditionFalse,
-			Reason:            strings.Join(reasons, ","),
+			Reason:            "KubeletNotReady",
+			Message:           strings.Join(messages, ","),
 			LastHeartbeatTime: currentTime,
 		}
 	}
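Node readiness follows the same pattern: the Reason collapses to a fixed KubeletReady/KubeletNotReady token and the individual problems are carried in the Message. A self-contained sketch of the intended logic (stand-in condition type, with both problem strings appended to the message list):

package main

import (
	"fmt"
	"strings"
)

// nodeCondition is a stand-in for api.NodeCondition with just the fields
// relevant to this change.
type nodeCondition struct {
	Status  string
	Reason  string
	Message string
}

// readyCondition mirrors setNodeStatus above: a stable Reason plus a Message
// that joins the individual failure descriptions.
func readyCondition(runtimeUp, networkConfigured bool) nodeCondition {
	var messages []string
	if !runtimeUp {
		messages = append(messages, "container runtime is down")
	}
	if !networkConfigured {
		messages = append(messages, "network not configured correctly")
	}
	if len(messages) == 0 {
		return nodeCondition{Status: "True", Reason: "KubeletReady", Message: "kubelet is posting ready status"}
	}
	return nodeCondition{Status: "False", Reason: "KubeletNotReady", Message: strings.Join(messages, ",")}
}

func main() {
	fmt.Printf("%+v\n", readyCondition(false, false))
}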
@@ -2380,7 +2380,8 @@ func TestUpdateNewNodeStatus(t *testing.T) {
 			{
 				Type:               api.NodeReady,
 				Status:             api.ConditionTrue,
-				Reason:             fmt.Sprintf("kubelet is posting ready status"),
+				Reason:             "KubeletReady",
+				Message:            fmt.Sprintf("kubelet is posting ready status"),
 				LastHeartbeatTime:  util.Time{},
 				LastTransitionTime: util.Time{},
 			},
@@ -2448,7 +2449,8 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
 			{
 				Type:               api.NodeReady,
 				Status:             api.ConditionTrue,
-				Reason:             fmt.Sprintf("kubelet is posting ready status"),
+				Reason:             "KubeletReady",
+				Message:            fmt.Sprintf("kubelet is posting ready status"),
 				LastHeartbeatTime:  util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
 				LastTransitionTime: util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
 			},
@@ -2484,7 +2486,8 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
 			{
 				Type:               api.NodeReady,
 				Status:             api.ConditionTrue,
-				Reason:             fmt.Sprintf("kubelet is posting ready status"),
+				Reason:             "KubeletReady",
+				Message:            fmt.Sprintf("kubelet is posting ready status"),
 				LastHeartbeatTime:  util.Time{}, // placeholder
 				LastTransitionTime: util.Time{}, // placeholder
 			},
@@ -2578,7 +2581,8 @@ func TestUpdateNodeStatusWithoutContainerRuntime(t *testing.T) {
 			{
 				Type:               api.NodeReady,
 				Status:             api.ConditionFalse,
-				Reason:             fmt.Sprintf("container runtime is down"),
+				Reason:             "KubeletNotReady",
+				Message:            fmt.Sprintf("container runtime is down"),
 				LastHeartbeatTime:  util.Time{},
 				LastTransitionTime: util.Time{},
 			},