Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-14 14:23:37 +00:00)
Keep event reason in kubelet consistent with others
commit 9ab4a46b9d
parent 0ded91c521
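
The change is mechanical: every event reason string passed to the kubelet's recorder moves from lowerCamelCase (e.g. "failedSync") to UpperCamelCase ("FailedSync"), matching the reasons emitted by other components. A minimal sketch of the calling convention follows; demoRecorder is a hypothetical stand-in for the real record.EventRecorder, used only to illustrate the (object, reason, message format, args) shape seen throughout the diff below:

package main

import "fmt"

// Event reasons are short, UpperCamelCase, machine-readable strings;
// the human-readable detail goes into the formatted message.
const (
	reasonFailedSync = "FailedSync"
	reasonPulled     = "Pulled"
)

// demoRecorder mimics the argument shape of the kubelet's recorder calls.
// It is illustration only, not the real record.EventRecorder.
type demoRecorder struct{}

func (demoRecorder) Eventf(object interface{}, reason, messageFmt string, args ...interface{}) {
	fmt.Printf("reason=%s message=%q\n", reason, fmt.Sprintf(messageFmt, args...))
}

func main() {
	var r demoRecorder
	// Before this commit the reason would have been "pulled"; after it,
	// reasons are capitalized consistently.
	r.Eventf(nil, reasonPulled, "Container image %q already present on machine", "want:latest")
}
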
@@ -327,7 +327,7 @@ func filterInvalidPods(pods []*api.Pod, source string, recorder record.EventReco
             name := bestPodIdentString(pod)
             err := utilerrors.NewAggregate(errlist)
             glog.Warningf("Pod[%d] (%s) from %s failed validation, ignoring: %v", i+1, name, source, err)
-            recorder.Eventf(pod, "failedValidation", "Error validating pod %s from %s, ignoring: %v", name, source, err)
+            recorder.Eventf(pod, "FailedValidation", "Error validating pod %s from %s, ignoring: %v", name, source, err)
             continue
         }
         filtered = append(filtered, pod)
@@ -645,13 +645,13 @@ func (dm *DockerManager) runContainer(
     dockerContainer, err := dm.client.CreateContainer(dockerOpts)
     if err != nil {
         if ref != nil {
-            dm.recorder.Eventf(ref, "failed", "Failed to create docker container with error: %v", err)
+            dm.recorder.Eventf(ref, "Failed", "Failed to create docker container with error: %v", err)
         }
         return "", err
     }

     if ref != nil {
-        dm.recorder.Eventf(ref, "created", "Created with docker id %v", util.ShortenString(dockerContainer.ID, 12))
+        dm.recorder.Eventf(ref, "Created", "Created with docker id %v", util.ShortenString(dockerContainer.ID, 12))
     }

     binds := makeMountBindings(opts.Mounts)
@@ -697,13 +697,13 @@ func (dm *DockerManager) runContainer(

     if err = dm.client.StartContainer(dockerContainer.ID, hc); err != nil {
         if ref != nil {
-            dm.recorder.Eventf(ref, "failed",
+            dm.recorder.Eventf(ref, "Failed",
                 "Failed to start with docker id %v with error: %v", util.ShortenString(dockerContainer.ID, 12), err)
         }
         return "", err
     }
     if ref != nil {
-        dm.recorder.Eventf(ref, "started", "Started with docker id %v", util.ShortenString(dockerContainer.ID, 12))
+        dm.recorder.Eventf(ref, "Started", "Started with docker id %v", util.ShortenString(dockerContainer.ID, 12))
     }
     return dockerContainer.ID, nil
 }
@@ -1218,7 +1218,7 @@ func (dm *DockerManager) killContainer(containerID types.UID) error {
         glog.Warningf("No ref for pod '%v'", ID)
     } else {
         // TODO: pass reason down here, and state, or move this call up the stack.
-        dm.recorder.Eventf(ref, "killing", "Killing with docker id %v", util.ShortenString(ID, 12))
+        dm.recorder.Eventf(ref, "Killing", "Killing with docker id %v", util.ShortenString(ID, 12))
     }
     return err
 }
@@ -1535,13 +1535,13 @@ func (dm *DockerManager) pullImage(pod *api.Pod, container *api.Container, pullS
     present, err := dm.IsImagePresent(spec)
     if err != nil {
         if ref != nil {
-            dm.recorder.Eventf(ref, "failed", "Failed to inspect image %q: %v", container.Image, err)
+            dm.recorder.Eventf(ref, "Failed", "Failed to inspect image %q: %v", container.Image, err)
         }
         return fmt.Errorf("failed to inspect image %q: %v", container.Image, err)
     }
     if !dm.runtimeHooks.ShouldPullImage(pod, container, present) {
         if present && ref != nil {
-            dm.recorder.Eventf(ref, "pulled", "Container image %q already present on machine", container.Image)
+            dm.recorder.Eventf(ref, "Pulled", "Container image %q already present on machine", container.Image)
         }
         return nil
     }
@@ -81,11 +81,11 @@ func (fr *fakeRuntimeHooks) ShouldPullImage(pod *api.Pod, container *api.Contain
 }

 func (fr *fakeRuntimeHooks) ReportImagePulling(pod *api.Pod, container *api.Container) {
-    fr.recorder.Eventf(nil, "pulling", fmt.Sprintf("%s:%s:%s", pod.Name, container.Name, container.Image))
+    fr.recorder.Eventf(nil, "Pulling", fmt.Sprintf("%s:%s:%s", pod.Name, container.Name, container.Image))
 }

 func (fr *fakeRuntimeHooks) ReportImagePulled(pod *api.Pod, container *api.Container, pullError error) {
-    fr.recorder.Eventf(nil, "pulled", fmt.Sprintf("%s:%s:%s", pod.Name, container.Name, container.Image))
+    fr.recorder.Eventf(nil, "Pulled", fmt.Sprintf("%s:%s:%s", pod.Name, container.Name, container.Image))
 }

 type fakeOptionGenerator struct{}
@@ -1333,14 +1333,14 @@ func TestSyncPodWithPullPolicy(t *testing.T) {
     fakeDocker.Lock()

     eventSet := []string{
-        "pulling foo:POD:pod_infra_image",
-        "pulled foo:POD:pod_infra_image",
-        "pulling foo:bar:pull_always_image",
-        "pulled foo:bar:pull_always_image",
-        "pulling foo:bar2:pull_if_not_present_image",
-        "pulled foo:bar2:pull_if_not_present_image",
-        `pulled Container image "existing_one" already present on machine`,
-        `pulled Container image "want:latest" already present on machine`,
+        "Pulling foo:POD:pod_infra_image",
+        "Pulled foo:POD:pod_infra_image",
+        "Pulling foo:bar:pull_always_image",
+        "Pulled foo:bar:pull_always_image",
+        "Pulling foo:bar2:pull_if_not_present_image",
+        "Pulled foo:bar2:pull_if_not_present_image",
+        `Pulled Container image "existing_one" already present on machine`,
+        `Pulled Container image "want:latest" already present on machine`,
     }

     runtimeHooks := dm.runtimeHooks.(*fakeRuntimeHooks)
@@ -1348,7 +1348,7 @@ func TestSyncPodWithPullPolicy(t *testing.T) {

     var actualEvents []string
     for _, ev := range recorder.Events {
-        if strings.HasPrefix(ev, "pull") {
+        if strings.HasPrefix(ev, "Pull") {
             actualEvents = append(actualEvents, ev)
         }
     }
@@ -191,7 +191,7 @@ func (im *realImageManager) GarbageCollect() error {
     // Check valid capacity.
     if capacity == 0 {
         err := fmt.Errorf("invalid capacity %d on device %q at mount point %q", capacity, fsInfo.Device, fsInfo.Mountpoint)
-        im.recorder.Eventf(im.nodeRef, "invalidDiskCapacity", err.Error())
+        im.recorder.Eventf(im.nodeRef, "InvalidDiskCapacity", err.Error())
         return err
     }

@@ -207,7 +207,7 @@ func (im *realImageManager) GarbageCollect() error {

     if freed < amountToFree {
         err := fmt.Errorf("failed to garbage collect required amount of images. Wanted to free %d, but freed %d", amountToFree, freed)
-        im.recorder.Eventf(im.nodeRef, "freeDiskSpaceFailed", err.Error())
+        im.recorder.Eventf(im.nodeRef, "FreeDiskSpaceFailed", err.Error())
         return err
     }
 }
@@ -714,22 +714,22 @@ func (kl *Kubelet) Run(updates <-chan PodUpdate) {
     }

     if err := kl.imageManager.Start(); err != nil {
-        kl.recorder.Eventf(kl.nodeRef, "kubeletSetupFailed", "Failed to start ImageManager %v", err)
+        kl.recorder.Eventf(kl.nodeRef, "KubeletSetupFailed", "Failed to start ImageManager %v", err)
         glog.Errorf("Failed to start ImageManager, images may not be garbage collected: %v", err)
     }

     if err := kl.cadvisor.Start(); err != nil {
-        kl.recorder.Eventf(kl.nodeRef, "kubeletSetupFailed", "Failed to start CAdvisor %v", err)
+        kl.recorder.Eventf(kl.nodeRef, "KubeletSetupFailed", "Failed to start CAdvisor %v", err)
         glog.Errorf("Failed to start CAdvisor, system may not be properly monitored: %v", err)
     }

     if err := kl.containerManager.Start(); err != nil {
-        kl.recorder.Eventf(kl.nodeRef, "kubeletSetupFailed", "Failed to start ContainerManager %v", err)
+        kl.recorder.Eventf(kl.nodeRef, "KubeletSetupFailed", "Failed to start ContainerManager %v", err)
         glog.Errorf("Failed to start ContainerManager, system may not be properly isolated: %v", err)
     }

     if err := kl.oomWatcher.Start(kl.nodeRef); err != nil {
-        kl.recorder.Eventf(kl.nodeRef, "kubeletSetupFailed", "Failed to start OOM watcher %v", err)
+        kl.recorder.Eventf(kl.nodeRef, "KubeletSetupFailed", "Failed to start OOM watcher %v", err)
         glog.Errorf("Failed to start OOM watching: %v", err)
     }

@@ -1196,7 +1196,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont
     podVolumes, err := kl.mountExternalVolumes(pod)
     if err != nil {
         if ref != nil {
-            kl.recorder.Eventf(ref, "failedMount", "Unable to mount volumes for pod %q: %v", podFullName, err)
+            kl.recorder.Eventf(ref, "FailedMount", "Unable to mount volumes for pod %q: %v", podFullName, err)
         }
         glog.Errorf("Unable to mount volumes for pod %q: %v; skipping pod", podFullName, err)
         return err
@@ -2092,7 +2092,7 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
         node.Status.NodeInfo.BootID != info.BootID {
         // TODO: This requires a transaction, either both node status is updated
         // and event is recorded or neither should happen, see issue #6055.
-        kl.recorder.Eventf(kl.nodeRef, "rebooted",
+        kl.recorder.Eventf(kl.nodeRef, "Rebooted",
             "Node %s has been rebooted, boot id: %s", kl.nodeName, info.BootID)
     }
     node.Status.NodeInfo.BootID = info.BootID
@@ -2448,7 +2448,7 @@ func (kl *Kubelet) PortForward(podFullName string, podUID types.UID, port uint16
 // BirthCry sends an event that the kubelet has started up.
 func (kl *Kubelet) BirthCry() {
     // Make an event that kubelet restarted.
-    kl.recorder.Eventf(kl.nodeRef, "starting", "Starting kubelet.")
+    kl.recorder.Eventf(kl.nodeRef, "Starting", "Starting kubelet.")
 }

 func (kl *Kubelet) StreamingConnectionIdleTimeout() time.Duration {
@@ -109,7 +109,7 @@ func (p *podWorkers) managePodLoop(podUpdates <-chan workUpdate) {
             kubecontainer.Pods(pods).FindPodByID(newWork.pod.UID), newWork.updateType)
         if err != nil {
             glog.Errorf("Error syncing pod %s, skipping: %v", newWork.pod.UID, err)
-            p.recorder.Eventf(newWork.pod, "failedSync", "Error syncing pod, skipping: %v", err)
+            p.recorder.Eventf(newWork.pod, "FailedSync", "Error syncing pod, skipping: %v", err)
             return
         }
         minRuntimeCacheTime = time.Now()
@@ -122,13 +122,13 @@ func (pb *prober) probeLiveness(pod *api.Pod, status api.PodStatus, container ap
     if err != nil {
         glog.V(1).Infof("Liveness probe for %q errored: %v", ctrName, err)
         if ok {
-            pb.recorder.Eventf(ref, "unhealthy", "Liveness probe errored: %v", err)
+            pb.recorder.Eventf(ref, "Unhealthy", "Liveness probe errored: %v", err)
         }
         return probe.Unknown, err
     } else { // live != probe.Success
         glog.V(1).Infof("Liveness probe for %q failed (%v): %s", ctrName, live, output)
         if ok {
-            pb.recorder.Eventf(ref, "unhealthy", "Liveness probe failed: %s", output)
+            pb.recorder.Eventf(ref, "Unhealthy", "Liveness probe failed: %s", output)
         }
         return live, nil
     }
@@ -162,13 +162,13 @@ func (pb *prober) probeReadiness(pod *api.Pod, status api.PodStatus, container a
     if err != nil {
         glog.V(1).Infof("readiness probe for %q errored: %v", ctrName, err)
         if ok {
-            pb.recorder.Eventf(ref, "unhealthy", "Readiness probe errored: %v", err)
+            pb.recorder.Eventf(ref, "Unhealthy", "Readiness probe errored: %v", err)
         }
         return
     } else { // ready != probe.Success
         glog.V(1).Infof("Readiness probe for %q failed (%v): %s", ctrName, ready, output)
         if ok {
-            pb.recorder.Eventf(ref, "unhealthy", "Readiness probe failed: %s", output)
+            pb.recorder.Eventf(ref, "Unhealthy", "Readiness probe failed: %s", output)
         }
         return
     }
@@ -57,9 +57,9 @@ func (kr *kubeletRuntimeHooks) ReportImagePulled(pod *api.Pod, container *api.Co
     }

     if pullError != nil {
-        kr.recorder.Eventf(ref, "failed", "Failed to pull image %q: %v", container.Image, pullError)
+        kr.recorder.Eventf(ref, "Failed", "Failed to pull image %q: %v", container.Image, pullError)
     } else {
-        kr.recorder.Eventf(ref, "pulled", "Successfully pulled image %q", container.Image)
+        kr.recorder.Eventf(ref, "Pulled", "Successfully pulled image %q", container.Image)
     }
 }

@@ -69,5 +69,5 @@ func (kr *kubeletRuntimeHooks) ReportImagePulling(pod *api.Pod, container *api.C
         glog.Errorf("Couldn't make a ref to pod %q, container %q: '%v'", pod.Name, container.Name, err)
         return
     }
-    kr.recorder.Eventf(ref, "pulling", "Pulling image %q", container.Image)
+    kr.recorder.Eventf(ref, "Pulling", "Pulling image %q", container.Image)
 }