make event reasons constants for clarity

jiangyaoguo 2015-10-27 16:50:18 +08:00
parent 55bf786216
commit b0f0c294d9
10 changed files with 105 additions and 40 deletions

View File

@@ -327,7 +327,7 @@ func filterInvalidPods(pods []*api.Pod, source string, recorder record.EventReco
name := bestPodIdentString(pod)
err := utilerrors.NewAggregate(errlist)
glog.Warningf("Pod[%d] (%s) from %s failed validation, ignoring: %v", i+1, name, source, err)
recorder.Eventf(pod, "FailedValidation", "Error validating pod %s from %s, ignoring: %v", name, source, err)
recorder.Eventf(pod, kubecontainer.FailedValidation, "Error validating pod %s from %s, ignoring: %v", name, source, err)
continue
}
filtered = append(filtered, pod)
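
Note on the hunk above: the filter loop aggregates the validation errors, logs a warning, emits a FailedValidation event on the pod, and skips it; valid pods pass through unchanged. A minimal, self-contained sketch of that shape — the pod type, validate, and recordEvent below are stand-ins for the real api.Pod, validation package, and record.EventRecorder, not the kubelet's code:

package main

import (
	"errors"
	"fmt"
	"log"
)

// FailedValidation mirrors the new constant in pkg/kubelet/container.
const FailedValidation = "FailedValidation"

type pod struct {
	Name string
	Spec string
}

// recordEvent stands in for recorder.Eventf(pod, reason, format, args...).
func recordEvent(p *pod, reason, format string, args ...interface{}) {
	fmt.Printf("event %s for %s: %s\n", reason, p.Name, fmt.Sprintf(format, args...))
}

// validate stands in for real pod validation and returns an aggregated error.
func validate(p *pod) error {
	if p.Spec == "" {
		return errors.New("spec must not be empty")
	}
	return nil
}

func filterInvalidPods(pods []*pod, source string) []*pod {
	var filtered []*pod
	for i, p := range pods {
		if err := validate(p); err != nil {
			log.Printf("Pod[%d] (%s) from %s failed validation, ignoring: %v", i+1, p.Name, source, err)
			recordEvent(p, FailedValidation, "Error validating pod %s from %s, ignoring: %v", p.Name, source, err)
			continue
		}
		filtered = append(filtered, p)
	}
	return filtered
}

func main() {
	pods := []*pod{{Name: "good", Spec: "nginx"}, {Name: "bad"}}
	fmt.Println(len(filterInvalidPods(pods, "file")), "pod(s) kept")
}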

View File

@@ -0,0 +1,65 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package container

const (
	// Container event reason list
	CreatedContainer        = "Created"
	StartedContainer        = "Started"
	FailedToCreateContainer = "Failed"
	FailedToStartContainer  = "Failed"
	KillingContainer        = "Killing"
	BackOffStartContainer   = "BackOff"

	// Image event reason list
	PullingImage            = "Pulling"
	PulledImage             = "Pulled"
	FailedToPullImage       = "Failed"
	FailedToInspectImage    = "InspectFailed"
	ErrImageNeverPullPolicy = "ErrImageNeverPull"
	BackOffPullImage        = "BackOff"

	// kubelet event reason list
	NodeReady               = "NodeReady"
	NodeNotReady            = "NodeNotReady"
	NodeSchedulable         = "NodeSchedulable"
	NodeNotSchedulable      = "NodeNotSchedulable"
	StartingKubelet         = "Starting"
	KubeletSetupFailed      = "KubeletSetupFailed"
	FailedMountVolume       = "FailedMount"
	HostPortConflict        = "HostPortConflict"
	NodeSelectorMismatching = "NodeSelectorMismatching"
	InsufficientFreeCPU     = "InsufficientFreeCPU"
	InsufficientFreeMemory  = "InsufficientFreeMemory"
	OutOfDisk               = "OutOfDisk"
	HostNetworkNotSupported = "HostNetworkNotSupported"
	UndefinedShaper         = "NilShaper"
	NodeRebooted            = "Rebooted"

	// Image manager event reason list
	InvalidDiskCapacity = "InvalidDiskCapacity"
	FreeDiskSpaceFailed = "FreeDiskSpaceFailed"

	// Probe event reason list
	ContainerUnhealthy = "Unhealthy"

	// Pod worker event reason list
	FailedSync = "FailedSync"

	// Config event reason list
	FailedValidation = "FailedValidation"
)
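
Two properties of the new constant block are worth calling out, and the small standalone program below illustrates them (it copies a handful of the constants; eventf is a stand-in for recorder.Eventf, not the real record.EventRecorder): callers now name the reason they mean, and several distinct names keep the legacy string values the old call sites emitted, so FailedToCreateContainer, FailedToStartContainer, and FailedToPullImage all compare equal to "Failed", as do BackOffStartContainer and BackOffPullImage to "BackOff".

package main

import "fmt"

// A few of the reasons from pkg/kubelet/container, copied for illustration.
const (
	CreatedContainer        = "Created"
	StartedContainer        = "Started"
	FailedToCreateContainer = "Failed"
	FailedToStartContainer  = "Failed" // same event string, distinct constant
	BackOffStartContainer   = "BackOff"
	BackOffPullImage        = "BackOff"
)

// eventf stands in for recorder.Eventf(object, reason, format, args...).
func eventf(object, reason, format string, args ...interface{}) {
	fmt.Printf("%s %s: %s\n", object, reason, fmt.Sprintf(format, args...))
}

func main() {
	eventf("pod/web-1", CreatedContainer, "Created container with docker id %v", "3f2a9c1b0d4e")
	eventf("pod/web-1", FailedToStartContainer, "Failed to start container: %v", "exit status 1")

	// Distinct names can still compare equal because they keep the legacy strings.
	fmt.Println(FailedToCreateContainer == FailedToStartContainer) // true
	fmt.Println(BackOffStartContainer == BackOffPullImage)         // true
}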

View File

@@ -83,7 +83,7 @@ func (puller *imagePuller) PullImage(pod *api.Pod, container *api.Container, pul
present, err := puller.runtime.IsImagePresent(spec)
if err != nil {
msg := fmt.Sprintf("Failed to inspect image %q: %v", container.Image, err)
puller.logIt(ref, "Failed", logPrefix, msg, glog.Warning)
puller.logIt(ref, FailedToInspectImage, logPrefix, msg, glog.Warning)
return ErrImageInspect, msg
}
@@ -94,7 +94,7 @@ func (puller *imagePuller) PullImage(pod *api.Pod, container *api.Container, pul
return nil, ""
} else {
msg := fmt.Sprintf("Container image %q is not present with pull policy of Never", container.Image)
puller.logIt(ref, "ErrImageNeverPull", logPrefix, msg, glog.Warning)
puller.logIt(ref, ErrImageNeverPullPolicy, logPrefix, msg, glog.Warning)
return ErrImageNeverPull, msg
}
}
@@ -102,7 +102,7 @@ func (puller *imagePuller) PullImage(pod *api.Pod, container *api.Container, pul
backOffKey := fmt.Sprintf("%s_%s", pod.Name, container.Image)
if puller.backOff.IsInBackOffSinceUpdate(backOffKey, puller.backOff.Clock.Now()) {
msg := fmt.Sprintf("Back-off pulling image %q", container.Image)
puller.logIt(ref, "Back-off", logPrefix, msg, glog.Info)
puller.logIt(ref, BackOffPullImage, logPrefix, msg, glog.Info)
return ErrImagePullBackOff, msg
}
puller.logIt(ref, "Pulling", logPrefix, fmt.Sprintf("pulling image %q", container.Image), glog.Info)

View File

@@ -84,18 +84,18 @@ func (puller *serializedImagePuller) PullImage(pod *api.Pod, container *api.Cont
present, err := puller.runtime.IsImagePresent(spec)
if err != nil {
msg := fmt.Sprintf("Failed to inspect image %q: %v", container.Image, err)
puller.logIt(ref, "Failed", logPrefix, msg, glog.Warning)
puller.logIt(ref, FailedToInspectImage, logPrefix, msg, glog.Warning)
return ErrImageInspect, msg
}
if !shouldPullImage(container, present) {
if present {
msg := fmt.Sprintf("Container image %q already present on machine", container.Image)
puller.logIt(ref, "Pulled", logPrefix, msg, glog.Info)
puller.logIt(ref, PulledImage, logPrefix, msg, glog.Info)
return nil, ""
} else {
msg := fmt.Sprintf("Container image %q is not present with pull policy of Never", container.Image)
puller.logIt(ref, "ErrImageNeverPull", logPrefix, msg, glog.Warning)
puller.logIt(ref, ErrImageNeverPullPolicy, logPrefix, msg, glog.Warning)
return ErrImageNeverPull, msg
}
}
@@ -103,7 +103,7 @@ func (puller *serializedImagePuller) PullImage(pod *api.Pod, container *api.Cont
backOffKey := fmt.Sprintf("%s_%s", pod.Name, container.Image)
if puller.backOff.IsInBackOffSinceUpdate(backOffKey, puller.backOff.Clock.Now()) {
msg := fmt.Sprintf("Back-off pulling image %q", container.Image)
puller.logIt(ref, "Back-off", logPrefix, msg, glog.Info)
puller.logIt(ref, BackOffPullImage, logPrefix, msg, glog.Info)
return ErrImagePullBackOff, msg
}
@@ -118,7 +118,7 @@ func (puller *serializedImagePuller) PullImage(pod *api.Pod, container *api.Cont
returnChan: returnChan,
}
if err = <-returnChan; err != nil {
puller.logIt(ref, "Failed", logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, err), glog.Warning)
puller.logIt(ref, FailedToPullImage, logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, err), glog.Warning)
puller.backOff.Next(backOffKey, puller.backOff.Clock.Now())
if err == RegistryUnavailable {
msg := fmt.Sprintf("image pull failed for %s because the registry is temporarily unavailable.", container.Image)
@@ -127,14 +127,14 @@ func (puller *serializedImagePuller) PullImage(pod *api.Pod, container *api.Cont
return ErrImagePull, err.Error()
}
}
puller.logIt(ref, "Pulled", logPrefix, fmt.Sprintf("Successfully pulled image %q", container.Image), glog.Info)
puller.logIt(ref, PulledImage, logPrefix, fmt.Sprintf("Successfully pulled image %q", container.Image), glog.Info)
puller.backOff.GC()
return nil, ""
}
func (puller *serializedImagePuller) pullImages() {
for pullRequest := range puller.pullRequests {
puller.logIt(pullRequest.ref, "Pulling", pullRequest.logPrefix, fmt.Sprintf("pulling image %q", pullRequest.container.Image), glog.Info)
puller.logIt(pullRequest.ref, PullingImage, pullRequest.logPrefix, fmt.Sprintf("pulling image %q", pullRequest.container.Image), glog.Info)
pullRequest.returnChan <- puller.runtime.PullImage(pullRequest.spec, pullRequest.pullSecrets)
}
}
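
Both pullers key their back-off on pod name plus image ("%s_%s"), so repeated failures for one image on one pod do not throttle other pulls; a failure calls backOff.Next and a success lets backOff.GC reclaim stale entries. The toy fixed-window back-off below only illustrates the keying scheme — it is not the exponential back-off from the kubelet's util package:

package main

import (
	"fmt"
	"time"
)

const BackOffPullImage = "BackOff"

// imageBackOff is a toy per-key back-off: after a failure, the key is rejected
// until the window expires. The real kubelet back-off is exponential.
type imageBackOff struct {
	window time.Duration
	until  map[string]time.Time
}

func (b *imageBackOff) inBackOff(key string, now time.Time) bool {
	return now.Before(b.until[key])
}

func (b *imageBackOff) next(key string, now time.Time) {
	b.until[key] = now.Add(b.window)
}

func main() {
	b := &imageBackOff{window: 10 * time.Second, until: map[string]time.Time{}}
	key := fmt.Sprintf("%s_%s", "web-1", "nginx:1.7")

	now := time.Now()
	b.next(key, now) // a pull just failed for this pod/image pair
	fmt.Println(BackOffPullImage, "for", key, "after 1s:", b.inBackOff(key, now.Add(time.Second)))
	fmt.Println(BackOffPullImage, "for", key, "after 1m:", b.inBackOff(key, now.Add(time.Minute)))
}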

View File

@@ -766,11 +766,11 @@ func (dm *DockerManager) runContainer(
securityContextProvider.ModifyContainerConfig(pod, container, dockerOpts.Config)
dockerContainer, err := dm.client.CreateContainer(dockerOpts)
if err != nil {
dm.recorder.Eventf(ref, "Failed", "Failed to create docker container with error: %v", err)
dm.recorder.Eventf(ref, kubecontainer.FailedToCreateContainer, "Failed to create docker container with error: %v", err)
return kubecontainer.ContainerID{}, err
}
dm.recorder.Eventf(ref, "Created", "Created container with docker id %v", util.ShortenString(dockerContainer.ID, 12))
dm.recorder.Eventf(ref, kubecontainer.CreatedContainer, "Created container with docker id %v", util.ShortenString(dockerContainer.ID, 12))
podHasSELinuxLabel := pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.SELinuxOptions != nil
binds := makeMountBindings(opts.Mounts, podHasSELinuxLabel)
@@ -826,11 +826,11 @@ func (dm *DockerManager) runContainer(
securityContextProvider.ModifyHostConfig(pod, container, hc)
if err = dm.client.StartContainer(dockerContainer.ID, hc); err != nil {
dm.recorder.Eventf(ref, "Failed",
dm.recorder.Eventf(ref, kubecontainer.FailedToStartContainer,
"Failed to start container with docker id %v with error: %v", util.ShortenString(dockerContainer.ID, 12), err)
return kubecontainer.ContainerID{}, err
}
dm.recorder.Eventf(ref, "Started", "Started container with docker id %v", util.ShortenString(dockerContainer.ID, 12))
dm.recorder.Eventf(ref, kubecontainer.StartedContainer, "Started container with docker id %v", util.ShortenString(dockerContainer.ID, 12))
return kubetypes.DockerID(dockerContainer.ID).ContainerID(), nil
}
@@ -1446,7 +1446,7 @@ func (dm *DockerManager) killContainer(containerID kubecontainer.ContainerID, co
if reason != "" {
message = fmt.Sprint(message, ": ", reason)
}
dm.recorder.Event(ref, "Killing", message)
dm.recorder.Event(ref, kubecontainer.KillingContainer, message)
dm.containerRefManager.ClearRef(containerID)
}
return err
@@ -2058,7 +2058,7 @@ func (dm *DockerManager) doBackOff(pod *api.Pod, container *api.Container, podSt
stableName, _ := BuildDockerName(dockerName, container)
if backOff.IsInBackOffSince(stableName, ts.Time) {
if ref, err := kubecontainer.GenerateContainerRef(pod, container); err == nil {
dm.recorder.Eventf(ref, "Backoff", "Back-off restarting failed docker container")
dm.recorder.Eventf(ref, kubecontainer.BackOffStartContainer, "Back-off restarting failed docker container")
}
err := fmt.Errorf("Back-off %s restarting failed container=%s pod=%s", backOff.Get(stableName), container.Name, kubecontainer.GetPodFullName(pod))
dm.updateReasonCache(pod, container, kubecontainer.ErrCrashLoopBackOff.Error(), err)
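
runContainer records a constant at each step, so create failures and start failures are named differently at the call site even though both surface as a "Failed" event. A compact sketch of that create-then-start shape; createContainer, startContainer, and eventf are invented stand-ins for the Docker client and recorder, and id[:12] plays the role of util.ShortenString:

package main

import (
	"errors"
	"fmt"
)

const (
	CreatedContainer        = "Created"
	StartedContainer        = "Started"
	FailedToCreateContainer = "Failed"
	FailedToStartContainer  = "Failed"
)

func eventf(reason, format string, args ...interface{}) {
	fmt.Printf("%s: %s\n", reason, fmt.Sprintf(format, args...))
}

// createContainer and startContainer stand in for the Docker client calls.
func createContainer(image string) (string, error) { return "3f2a9c1b0d4e", nil }
func startContainer(id string) error               { return errors.New("port is already allocated") }

func runContainer(image string) (string, error) {
	id, err := createContainer(image)
	if err != nil {
		eventf(FailedToCreateContainer, "Failed to create docker container with error: %v", err)
		return "", err
	}
	eventf(CreatedContainer, "Created container with docker id %v", id[:12])

	if err := startContainer(id); err != nil {
		eventf(FailedToStartContainer, "Failed to start container with docker id %v with error: %v", id[:12], err)
		return "", err
	}
	eventf(StartedContainer, "Started container with docker id %v", id[:12])
	return id, nil
}

func main() {
	if _, err := runContainer("nginx:1.7"); err != nil {
		fmt.Println("runContainer failed:", err)
	}
}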

View File

@@ -191,7 +191,7 @@ func (im *realImageManager) GarbageCollect() error {
// Check valid capacity.
if capacity == 0 {
err := fmt.Errorf("invalid capacity %d on device %q at mount point %q", capacity, fsInfo.Device, fsInfo.Mountpoint)
im.recorder.Eventf(im.nodeRef, "InvalidDiskCapacity", err.Error())
im.recorder.Eventf(im.nodeRef, container.InvalidDiskCapacity, err.Error())
return err
}
@@ -207,7 +207,7 @@ func (im *realImageManager) GarbageCollect() error {
if freed < amountToFree {
err := fmt.Errorf("failed to garbage collect required amount of images. Wanted to free %d, but freed %d", amountToFree, freed)
im.recorder.Eventf(im.nodeRef, "FreeDiskSpaceFailed", err.Error())
im.recorder.Eventf(im.nodeRef, container.FreeDiskSpaceFailed, err.Error())
return err
}
}
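
The garbage collector has two event-producing failure paths: a zero capacity reported for the image filesystem (InvalidDiskCapacity) and an incomplete reclaim (FreeDiskSpaceFailed). A sketch of just that decision, with the actual image removal stubbed out as freeImages:

package main

import "fmt"

const (
	InvalidDiskCapacity = "InvalidDiskCapacity"
	FreeDiskSpaceFailed = "FreeDiskSpaceFailed"
)

func nodeEvent(reason, msg string) { fmt.Printf("node event %s: %s\n", reason, msg) }

// garbageCollect frees amountToFree bytes of images if it can; freeImages is a
// stub standing in for the real image removal loop.
func garbageCollect(capacity, amountToFree uint64, freeImages func(uint64) uint64) error {
	if capacity == 0 {
		err := fmt.Errorf("invalid capacity %d on image filesystem", capacity)
		nodeEvent(InvalidDiskCapacity, err.Error())
		return err
	}
	if freed := freeImages(amountToFree); freed < amountToFree {
		err := fmt.Errorf("failed to garbage collect required amount of images. Wanted to free %d, but freed %d", amountToFree, freed)
		nodeEvent(FreeDiskSpaceFailed, err.Error())
		return err
	}
	return nil
}

func main() {
	freeHalf := func(want uint64) uint64 { return want / 2 }
	_ = garbageCollect(100<<30, 5<<30, freeHalf) // reclaims too little: FreeDiskSpaceFailed
	_ = garbageCollect(0, 5<<30, freeHalf)       // zero capacity: InvalidDiskCapacity
}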

View File

@@ -838,22 +838,22 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
}
if err := kl.imageManager.Start(); err != nil {
kl.recorder.Eventf(kl.nodeRef, "KubeletSetupFailed", "Failed to start ImageManager %v", err)
kl.recorder.Eventf(kl.nodeRef, kubecontainer.KubeletSetupFailed, "Failed to start ImageManager %v", err)
glog.Errorf("Failed to start ImageManager, images may not be garbage collected: %v", err)
}
if err := kl.cadvisor.Start(); err != nil {
kl.recorder.Eventf(kl.nodeRef, "KubeletSetupFailed", "Failed to start CAdvisor %v", err)
kl.recorder.Eventf(kl.nodeRef, kubecontainer.KubeletSetupFailed, "Failed to start CAdvisor %v", err)
glog.Errorf("Failed to start CAdvisor, system may not be properly monitored: %v", err)
}
if err := kl.containerManager.Start(); err != nil {
kl.recorder.Eventf(kl.nodeRef, "KubeletSetupFailed", "Failed to start ContainerManager %v", err)
kl.recorder.Eventf(kl.nodeRef, kubecontainer.KubeletSetupFailed, "Failed to start ContainerManager %v", err)
glog.Errorf("Failed to start ContainerManager, system may not be properly isolated: %v", err)
}
if err := kl.oomWatcher.Start(kl.nodeRef); err != nil {
kl.recorder.Eventf(kl.nodeRef, "KubeletSetupFailed", "Failed to start OOM watcher %v", err)
kl.recorder.Eventf(kl.nodeRef, kubecontainer.KubeletSetupFailed, "Failed to start OOM watcher %v", err)
glog.Errorf("Failed to start OOM watching: %v", err)
}
@@ -1471,7 +1471,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont
// Mount volumes.
podVolumes, err := kl.mountExternalVolumes(pod)
if err != nil {
kl.recorder.Eventf(ref, "FailedMount", "Unable to mount volumes for pod %q: %v", podFullName, err)
kl.recorder.Eventf(ref, kubecontainer.FailedMountVolume, "Unable to mount volumes for pod %q: %v", podFullName, err)
glog.Errorf("Unable to mount volumes for pod %q: %v; skipping pod", podFullName, err)
return err
}
@@ -1533,7 +1533,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont
}
if egress != nil || ingress != nil {
if podUsesHostNetwork(pod) {
kl.recorder.Event(pod, "HostNetworkNotSupported", "Bandwidth shaping is not currently supported on the host network")
kl.recorder.Event(pod, kubecontainer.HostNetworkNotSupported, "Bandwidth shaping is not currently supported on the host network")
} else if kl.shaper != nil {
status, found := kl.statusManager.GetPodStatus(pod.UID)
if !found {
@@ -1548,7 +1548,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont
err = kl.shaper.ReconcileCIDR(fmt.Sprintf("%s/32", status.PodIP), egress, ingress)
}
} else {
kl.recorder.Event(pod, "NilShaper", "Pod requests bandwidth shaping, but the shaper is undefined")
kl.recorder.Event(pod, kubecontainer.UndefinedShaper, "Pod requests bandwidth shaping, but the shaper is undefined")
}
}
@@ -2537,7 +2537,7 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
node.Status.NodeInfo.BootID != info.BootID {
// TODO: This requires a transaction, either both node status is updated
// and event is recorded or neither should happen, see issue #6055.
kl.recorder.Eventf(kl.nodeRef, "Rebooted",
kl.recorder.Eventf(kl.nodeRef, kubecontainer.NodeRebooted,
"Node %s has been rebooted, boot id: %s", kl.nodeName, info.BootID)
}
node.Status.NodeInfo.BootID = info.BootID
@@ -2615,9 +2615,9 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
}
if !updated || oldNodeReadyConditionStatus != newNodeReadyCondition.Status {
if newNodeReadyCondition.Status == api.ConditionTrue {
kl.recordNodeStatusEvent("NodeReady")
kl.recordNodeStatusEvent(kubecontainer.NodeReady)
} else {
kl.recordNodeStatusEvent("NodeNotReady")
kl.recordNodeStatusEvent(kubecontainer.NodeNotReady)
}
}
@@ -2677,9 +2677,9 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
if oldNodeUnschedulable != node.Spec.Unschedulable {
if node.Spec.Unschedulable {
kl.recordNodeStatusEvent("NodeNotSchedulable")
kl.recordNodeStatusEvent(kubecontainer.NodeNotSchedulable)
} else {
kl.recordNodeStatusEvent("NodeSchedulable")
kl.recordNodeStatusEvent(kubecontainer.NodeSchedulable)
}
oldNodeUnschedulable = node.Spec.Unschedulable
}
@@ -3000,7 +3000,7 @@ func (kl *Kubelet) PortForward(podFullName string, podUID types.UID, port uint16
// BirthCry sends an event that the kubelet has started up.
func (kl *Kubelet) BirthCry() {
// Make an event that kubelet restarted.
kl.recorder.Eventf(kl.nodeRef, "Starting", "Starting kubelet.")
kl.recorder.Eventf(kl.nodeRef, kubecontainer.StartingKubelet, "Starting kubelet.")
}
func (kl *Kubelet) StreamingConnectionIdleTimeout() time.Duration {
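
Kubelet startup above is best effort: each helper is started in turn, and a failure only records KubeletSetupFailed and an error log before moving on, so one broken subsystem does not prevent the rest from starting. A compact sketch of that loop shape with invented subsystem stubs:

package main

import (
	"errors"
	"fmt"
	"log"
)

const KubeletSetupFailed = "KubeletSetupFailed"

func nodeEventf(reason, format string, args ...interface{}) {
	fmt.Printf("node event %s: %s\n", reason, fmt.Sprintf(format, args...))
}

func main() {
	// Each entry pairs a subsystem name with its Start function.
	subsystems := []struct {
		name  string
		start func() error
	}{
		{"ImageManager", func() error { return nil }},
		{"CAdvisor", func() error { return errors.New("port 4194 already in use") }},
		{"ContainerManager", func() error { return nil }},
	}

	for _, s := range subsystems {
		if err := s.start(); err != nil {
			nodeEventf(KubeletSetupFailed, "Failed to start %s %v", s.name, err)
			log.Printf("Failed to start %s, continuing anyway: %v", s.name, err)
			continue // keep starting the rest, as Kubelet.Run does
		}
	}
	fmt.Println("kubelet setup finished")
}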

View File

@@ -123,7 +123,7 @@ func (p *podWorkers) managePodLoop(podUpdates <-chan workUpdate) {
minRuntimeCacheTime = time.Now()
if err != nil {
glog.Errorf("Error syncing pod %s, skipping: %v", newWork.pod.UID, err)
p.recorder.Eventf(newWork.pod, "FailedSync", "Error syncing pod, skipping: %v", err)
p.recorder.Eventf(newWork.pod, kubecontainer.FailedSync, "Error syncing pod, skipping: %v", err)
return err
}
newWork.updateCompleteFn()
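
The pod worker applies the same convention: a sync error is logged, surfaced as a FailedSync event on the pod, and the update is abandoned. A tiny sketch of that error path; syncPod and podEventf are stubs, not the kubelet's sync or recorder:

package main

import (
	"errors"
	"fmt"
	"log"
)

const FailedSync = "FailedSync"

func podEventf(pod, reason, format string, args ...interface{}) {
	fmt.Printf("event %s for %s: %s\n", reason, pod, fmt.Sprintf(format, args...))
}

// syncPod stands in for the kubelet's real pod sync.
func syncPod(pod string) error { return errors.New("image pull backoff") }

func managePod(pod string) error {
	if err := syncPod(pod); err != nil {
		log.Printf("Error syncing pod %s, skipping: %v", pod, err)
		podEventf(pod, FailedSync, "Error syncing pod, skipping: %v", err)
		return err
	}
	return nil
}

func main() { _ = managePod("web-1") }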

View File

@@ -96,12 +96,12 @@ func (pb *prober) probe(probeType probeType, pod *api.Pod, status api.PodStatus,
if err != nil {
glog.V(1).Infof("%s probe for %q errored: %v", probeType, ctrName, err)
if hasRef {
pb.recorder.Eventf(ref, "Unhealthy", "%s probe errored: %v", probeType, err)
pb.recorder.Eventf(ref, kubecontainer.ContainerUnhealthy, "%s probe errored: %v", probeType, err)
}
} else { // result != probe.Success
glog.V(1).Infof("%s probe for %q failed (%v): %s", probeType, ctrName, result, output)
if hasRef {
pb.recorder.Eventf(ref, "Unhealthy", "%s probe failed: %s", probeType, output)
pb.recorder.Eventf(ref, kubecontainer.ContainerUnhealthy, "%s probe failed: %s", probeType, output)
}
}
return results.Failure, err
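
Both non-success probe outcomes — an error returned by the prober and a plain failure result — are reported under the same ContainerUnhealthy ("Unhealthy") reason, differing only in the message. A sketch of those two branches with a stubbed probe:

package main

import (
	"errors"
	"fmt"
)

const ContainerUnhealthy = "Unhealthy"

func eventf(reason, format string, args ...interface{}) {
	fmt.Printf("%s: %s\n", reason, fmt.Sprintf(format, args...))
}

// probe stands in for the real prober; only failing outcomes are modeled here.
func probe(kind string) (output string, err error) {
	if kind == "liveness" {
		return "", errors.New("connection refused")
	}
	return "HTTP 500", nil
}

func reportProbeFailure(kind string) {
	output, err := probe(kind)
	if err != nil {
		eventf(ContainerUnhealthy, "%s probe errored: %v", kind, err)
		return
	}
	eventf(ContainerUnhealthy, "%s probe failed: %s", kind, output)
}

func main() {
	reportProbeFailure("liveness")
	reportProbeFailure("readiness")
}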

View File

@@ -676,13 +676,13 @@ func (r *Runtime) generateEvents(runtimePod *kubecontainer.Pod, reason string, f
uuid := util.ShortenString(id.uuid, 8)
switch reason {
case "Created":
r.recorder.Eventf(ref, "Created", "Created with rkt id %v", uuid)
r.recorder.Eventf(ref, kubecontainer.CreatedContainer, "Created with rkt id %v", uuid)
case "Started":
r.recorder.Eventf(ref, "Started", "Started with rkt id %v", uuid)
r.recorder.Eventf(ref, kubecontainer.StartedContainer, "Started with rkt id %v", uuid)
case "Failed":
r.recorder.Eventf(ref, "Failed", "Failed to start with rkt id %v with error %v", uuid, failure)
r.recorder.Eventf(ref, kubecontainer.FailedToStartContainer, "Failed to start with rkt id %v with error %v", uuid, failure)
case "Killing":
r.recorder.Eventf(ref, "Killing", "Killing with rkt id %v", uuid)
r.recorder.Eventf(ref, kubecontainer.KillingContainer, "Killing with rkt id %v", uuid)
default:
glog.Errorf("rkt: Unexpected event %q", reason)
}
@@ -707,7 +707,7 @@ func (r *Runtime) RunPod(pod *api.Pod, pullSecrets []api.Secret) error {
continue
}
if prepareErr != nil {
r.recorder.Eventf(ref, "Failed", "Failed to create rkt container with error: %v", prepareErr)
r.recorder.Eventf(ref, kubecontainer.FailedToCreateContainer, "Failed to create rkt container with error: %v", prepareErr)
continue
}
containerID := runtimePod.Containers[i].ID
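
generateEvents still switches on the raw reason strings while emitting the new constants; since the constants keep exactly those values, the switch could just as well match on the constants themselves. The sketch below shows that variant — a possible follow-up, not something this commit does:

package main

import "fmt"

const (
	CreatedContainer       = "Created"
	StartedContainer       = "Started"
	FailedToStartContainer = "Failed"
	KillingContainer       = "Killing"
)

func eventf(reason, format string, args ...interface{}) {
	fmt.Printf("%s: %s\n", reason, fmt.Sprintf(format, args...))
}

// generateEvents mirrors the rkt runtime's switch, but matches on the constants
// directly instead of repeating the string literals.
func generateEvents(reason, uuid string, failure error) {
	switch reason {
	case CreatedContainer:
		eventf(CreatedContainer, "Created with rkt id %v", uuid)
	case StartedContainer:
		eventf(StartedContainer, "Started with rkt id %v", uuid)
	case FailedToStartContainer:
		eventf(FailedToStartContainer, "Failed to start with rkt id %v with error %v", uuid, failure)
	case KillingContainer:
		eventf(KillingContainer, "Killing with rkt id %v", uuid)
	default:
		fmt.Printf("rkt: Unexpected event %q\n", reason)
	}
}

func main() {
	generateEvents("Created", "8a1b2c3d", nil)
	generateEvents("Unknown", "8a1b2c3d", nil)
}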