This commit adds type information to events.

This addresses issue #15624.
Avesh Agarwal 2015-11-13 17:30:01 -05:00
parent 751e7ba67c
commit 3d5207fd73
29 changed files with 166 additions and 120 deletions
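In practice, every EventRecorder call site now passes an event type (api.EventTypeNormal or api.EventTypeWarning) immediately before the reason. A minimal sketch of the new call shape follows; the helper function, pod variable, and import paths are illustrative assumptions about this commit's tree, not code taken from the diff:

package example

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/record"
)

// reportScheduling is a hypothetical helper showing the new argument order:
// object, event type, reason, then the message (or format string).
func reportScheduling(recorder record.EventRecorder, pod *api.Pod, node string, err error) {
	if err != nil {
		recorder.Eventf(pod, api.EventTypeWarning, "FailedScheduling", "Error scheduling: %v", err)
		return
	}
	recorder.Eventf(pod, api.EventTypeNormal, "Scheduled", "Successfully assigned %v to %v", pod.Name, node)
}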

View File

@@ -389,5 +389,5 @@ func tryIptablesProxy(iptver iptables.IptablesVersioner) string {
 }
 func (s *ProxyServer) birthCry() {
-s.Recorder.Eventf(s.Config.NodeRef, "Starting", "Starting kube-proxy.")
+s.Recorder.Eventf(s.Config.NodeRef, api.EventTypeNormal, "Starting", "Starting kube-proxy.")
 }

View File

@@ -86,7 +86,7 @@ func (s *controller) scheduleOne() {
 dest, err := s.algorithm.Schedule(pod)
 if err != nil {
 log.V(1).Infof("Failed to schedule: %+v", pod)
-s.recorder.Eventf(pod, FailedScheduling, "Error scheduling: %v", err)
+s.recorder.Eventf(pod, api.EventTypeWarning, FailedScheduling, "Error scheduling: %v", err)
 s.error(pod, err)
 return
 }
@@ -99,9 +99,9 @@ func (s *controller) scheduleOne() {
 }
 if err := s.binder.Bind(b); err != nil {
 log.V(1).Infof("Failed to bind pod: %+v", err)
-s.recorder.Eventf(pod, FailedScheduling, "Binding rejected: %v", err)
+s.recorder.Eventf(pod, api.EventTypeWarning, FailedScheduling, "Binding rejected: %v", err)
 s.error(pod, err)
 return
 }
-s.recorder.Eventf(pod, Scheduled, "Successfully assigned %v to %v", pod.Name, dest)
+s.recorder.Eventf(pod, api.EventTypeNormal, Scheduled, "Successfully assigned %v to %v", pod.Name, dest)
 }

View File

@@ -511,6 +511,7 @@ func deepCopy_api_Event(in Event, out *Event, c *conversion.Cloner) error {
 return err
 }
 out.Count = in.Count
+out.Type = in.Type
 return nil
 }

View File

@@ -1781,6 +1781,14 @@ type EventSource struct {
 Host string `json:"host,omitempty"`
 }
+// Valid values for event types (new types could be added in future)
+const (
+// Information only and will not cause any problems
+EventTypeNormal string = "Normal"
+// These events are to warn that something might go wrong
+EventTypeWarning string = "Warning"
+)
 // Event is a report of an event somewhere in the cluster.
 // TODO: Decide whether to store these separately or with the object they apply to.
 type Event struct {
@@ -1811,6 +1819,9 @@ type Event struct {
 // The number of times this event has occurred.
 Count int `json:"count,omitempty"`
+// Type of this event (Normal, Warning), new types could be added in the future.
+Type string `json:"type,omitempty"`
 }
 // EventList is a list of events.

View File

@@ -698,6 +698,7 @@ func autoconvert_api_Event_To_v1_Event(in *api.Event, out *Event, s conversion.S
 return err
 }
 out.Count = in.Count
+out.Type = in.Type
 return nil
 }
@@ -3722,6 +3723,7 @@ func autoconvert_v1_Event_To_api_Event(in *Event, out *api.Event, s conversion.S
 return err
 }
 out.Count = in.Count
+out.Type = in.Type
 return nil
 }

View File

@@ -545,6 +545,7 @@ func deepCopy_v1_Event(in Event, out *Event, c *conversion.Cloner) error {
 return err
 }
 out.Count = in.Count
+out.Type = in.Type
 return nil
 }

View File

@@ -2220,6 +2220,14 @@ type EventSource struct {
 Host string `json:"host,omitempty"`
 }
+// Valid values for event types (new types could be added in future)
+const (
+// Information only and will not cause any problems
+EventTypeNormal string = "Normal"
+// These events are to warn that something might go wrong
+EventTypeWarning string = "Warning"
+)
 // Event is a report of an event somewhere in the cluster.
 // TODO: Decide whether to store these separately or with the object they apply to.
 type Event struct {
@@ -2251,6 +2259,9 @@ type Event struct {
 // The number of times this event has occurred.
 Count int `json:"count,omitempty"`
+// Type of this event (Normal, Warning), new types could be added in the future
+Type string `json:"type,omitempty"`
 }
 // EventList is a list of events.

View File

@@ -53,6 +53,7 @@ type EventRecorder interface {
 // Event constructs an event from the given information and puts it in the queue for sending.
 // 'object' is the object this event is about. Event will make a reference-- or you may also
 // pass a reference to the object directly.
+// 'type' of this event, and can be one of Normal, Warning. New types could be added in future
 // 'reason' is the reason this event is generated. 'reason' should be short and unique; it
 // should be in UpperCamelCase format (starting with a capital letter). "reason" will be used
 // to automate handling of events, so imagine people writing switch statements to handle them.
@@ -60,13 +61,13 @@ type EventRecorder interface {
 // 'message' is intended to be human readable.
 //
 // The resulting event will be created in the same namespace as the reference object.
-Event(object runtime.Object, reason, message string)
+Event(object runtime.Object, eventtype, reason, message string)
 // Eventf is just like Event, but with Sprintf for the message field.
-Eventf(object runtime.Object, reason, messageFmt string, args ...interface{})
+Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{})
 // PastEventf is just like Eventf, but with an option to specify the event's 'timestamp' field.
-PastEventf(object runtime.Object, timestamp unversioned.Time, reason, messageFmt string, args ...interface{})
+PastEventf(object runtime.Object, timestamp unversioned.Time, eventtype, reason, messageFmt string, args ...interface{})
 }
 // EventBroadcaster knows how to receive events and send them to any EventSink, watcher, or log.
@@ -202,7 +203,7 @@ func recordEvent(sink EventSink, event *api.Event, patch []byte, updateExistingE
 func (eventBroadcaster *eventBroadcasterImpl) StartLogging(logf func(format string, args ...interface{})) watch.Interface {
 return eventBroadcaster.StartEventWatcher(
 func(e *api.Event) {
-logf("Event(%#v): reason: '%v' %v", e.InvolvedObject, e.Reason, e.Message)
+logf("Event(%#v): type: '%v' reason: '%v' %v", e.InvolvedObject, e.Type, e.Reason, e.Message)
 })
 }
@@ -240,14 +241,19 @@ type recorderImpl struct {
 clock util.Clock
 }
-func (recorder *recorderImpl) generateEvent(object runtime.Object, timestamp unversioned.Time, reason, message string) {
+func (recorder *recorderImpl) generateEvent(object runtime.Object, timestamp unversioned.Time, eventtype, reason, message string) {
 ref, err := api.GetReference(object)
 if err != nil {
-glog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v'", object, err, reason, message)
+glog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message)
 return
 }
-event := recorder.makeEvent(ref, reason, message)
+if !validateEventType(eventtype) {
+glog.Errorf("Unsupported event type: '%v'", eventtype)
+return
+}
+event := recorder.makeEvent(ref, eventtype, reason, message)
 event.Source = recorder.source
 go func() {
@@ -256,19 +262,27 @@ func (recorder *recorderImpl) generateEvent(object runtime.Object, timestamp unv
 }()
 }
-func (recorder *recorderImpl) Event(object runtime.Object, reason, message string) {
-recorder.generateEvent(object, unversioned.Now(), reason, message)
+func validateEventType(eventtype string) bool {
+switch eventtype {
+case api.EventTypeNormal, api.EventTypeWarning:
+return true
+}
+return false
 }
-func (recorder *recorderImpl) Eventf(object runtime.Object, reason, messageFmt string, args ...interface{}) {
-recorder.Event(object, reason, fmt.Sprintf(messageFmt, args...))
+func (recorder *recorderImpl) Event(object runtime.Object, eventtype, reason, message string) {
+recorder.generateEvent(object, unversioned.Now(), eventtype, reason, message)
 }
-func (recorder *recorderImpl) PastEventf(object runtime.Object, timestamp unversioned.Time, reason, messageFmt string, args ...interface{}) {
-recorder.generateEvent(object, timestamp, reason, fmt.Sprintf(messageFmt, args...))
+func (recorder *recorderImpl) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
+recorder.Event(object, eventtype, reason, fmt.Sprintf(messageFmt, args...))
 }
-func (recorder *recorderImpl) makeEvent(ref *api.ObjectReference, reason, message string) *api.Event {
+func (recorder *recorderImpl) PastEventf(object runtime.Object, timestamp unversioned.Time, eventtype, reason, messageFmt string, args ...interface{}) {
+recorder.generateEvent(object, timestamp, eventtype, reason, fmt.Sprintf(messageFmt, args...))
+}
+func (recorder *recorderImpl) makeEvent(ref *api.ObjectReference, eventtype, reason, message string) *api.Event {
 t := unversioned.Time{recorder.clock.Now()}
 namespace := ref.Namespace
 if namespace == "" {
@@ -285,5 +299,6 @@ func (recorder *recorderImpl) makeEvent(ref *api.ObjectReference, reason, messag
 FirstTimestamp: t,
 LastTimestamp: t,
 Count: 1,
+Type: eventtype,
 }
 }
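For context, here is a hedged sketch of how a component might wire up a recorder and emit typed events against the updated interface; the broadcaster constructor, component name, and node reference below are assumptions about the surrounding client library at the time, not part of this diff:

package example

import (
	"github.com/golang/glog"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/record"
)

// emitTypedEvent is an illustrative helper, not code from this commit.
func emitTypedEvent(nodeRef *api.ObjectReference) {
	broadcaster := record.NewBroadcaster()
	// Logged events now include the type, e.g. "type: 'Warning' reason: 'NodeRebooted' ...".
	broadcaster.StartLogging(glog.Infof)
	recorder := broadcaster.NewRecorder(api.EventSource{Component: "example-component"})

	recorder.Eventf(nodeRef, api.EventTypeWarning, "NodeRebooted", "Node %s has been rebooted", nodeRef.Name)

	// Anything other than Normal/Warning fails validateEventType and is
	// logged as "Unsupported event type" instead of being recorded.
	recorder.Event(nodeRef, "Chatty", "Ignored", "this event is dropped")
}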

View File

@@ -51,6 +51,7 @@ func getEventKey(event *api.Event) string {
 event.InvolvedObject.Name,
 string(event.InvolvedObject.UID),
 event.InvolvedObject.APIVersion,
+event.Type,
 event.Reason,
 event.Message,
 },
@@ -71,7 +72,7 @@ func DefaultEventFilterFunc(event *api.Event) bool {
 // localKey - key that makes this event in the local group
 type EventAggregatorKeyFunc func(event *api.Event) (aggregateKey string, localKey string)
-// EventAggregatorByReasonFunc aggregates events by exact match on event.Source, event.InvolvedObject, and event.Reason
+// EventAggregatorByReasonFunc aggregates events by exact match on event.Source, event.InvolvedObject, event.Type and event.Reason
 func EventAggregatorByReasonFunc(event *api.Event) (string, string) {
 return strings.Join([]string{
 event.Source.Component,
@@ -81,6 +82,7 @@ func EventAggregatorByReasonFunc(event *api.Event) (string, string) {
 event.InvolvedObject.Name,
 string(event.InvolvedObject.UID),
 event.InvolvedObject.APIVersion,
+event.Type,
 event.Reason,
 },
 ""), event.Message
@@ -179,6 +181,7 @@ func (e *EventAggregator) EventAggregate(newEvent *api.Event) (*api.Event, error
 InvolvedObject: newEvent.InvolvedObject,
 LastTimestamp: now,
 Message: e.messageFunc(newEvent),
+Type: newEvent.Type,
 Reason: newEvent.Reason,
 Source: newEvent.Source,
 }

View File

@@ -28,13 +28,13 @@ type FakeRecorder struct {
 Events []string
 }
-func (f *FakeRecorder) Event(object runtime.Object, reason, message string) {
-f.Events = append(f.Events, fmt.Sprintf("%s %s", reason, message))
+func (f *FakeRecorder) Event(object runtime.Object, eventtype, reason, message string) {
+f.Events = append(f.Events, fmt.Sprintf("%s %s %s", eventtype, reason, message))
 }
-func (f *FakeRecorder) Eventf(object runtime.Object, reason, messageFmt string, args ...interface{}) {
-f.Events = append(f.Events, fmt.Sprintf(reason+" "+messageFmt, args...))
+func (f *FakeRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
+f.Events = append(f.Events, fmt.Sprintf(eventtype+" "+reason+" "+messageFmt, args...))
 }
-func (f *FakeRecorder) PastEventf(object runtime.Object, timestamp unversioned.Time, reason, messageFmt string, args ...interface{}) {
+func (f *FakeRecorder) PastEventf(object runtime.Object, timestamp unversioned.Time, eventtype, reason, messageFmt string, args ...interface{}) {
 }
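Because FakeRecorder now prefixes each recorded string with the event type, a test asserting on its output would look roughly like the following; the test itself is illustrative and not part of this commit:

package example

import (
	"testing"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/record"
)

// Hypothetical test: FakeRecorder ignores the object, so nil is fine here.
func TestFakeRecorderIncludesType(t *testing.T) {
	f := &record.FakeRecorder{}
	f.Eventf(nil, api.EventTypeWarning, "FailedCreate", "Error creating: %v", "boom")

	want := "Warning FailedCreate Error creating: boom"
	if len(f.Events) != 1 || f.Events[0] != want {
		t.Errorf("recorded events = %v, want [%q]", f.Events, want)
	}
}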

View File

@@ -308,11 +308,11 @@ func (r RealPodControl) createPods(nodeName, namespace string, template *api.Pod
 return fmt.Errorf("unable to create pods, no labels")
 }
 if newPod, err := r.KubeClient.Pods(namespace).Create(pod); err != nil {
-r.Recorder.Eventf(object, "FailedCreate", "Error creating: %v", err)
+r.Recorder.Eventf(object, api.EventTypeWarning, "FailedCreate", "Error creating: %v", err)
 return fmt.Errorf("unable to create pods: %v", err)
 } else {
 glog.V(4).Infof("Controller %v created pod %v", meta.Name, newPod.Name)
-r.Recorder.Eventf(object, "SuccessfulCreate", "Created pod: %v", newPod.Name)
+r.Recorder.Eventf(object, api.EventTypeNormal, "SuccessfulCreate", "Created pod: %v", newPod.Name)
 }
 return nil
 }

View File

@@ -261,7 +261,7 @@ func (d *DeploymentController) scaleRCAndRecordEvent(rc *api.ReplicationControll
 }
 newRC, err := d.scaleRC(rc, newScale)
 if err == nil {
-d.eventRecorder.Eventf(&deployment, "ScalingRC", "Scaled %s rc %s to %d", scalingOperation, rc.Name, newScale)
+d.eventRecorder.Eventf(&deployment, api.EventTypeNormal, "ScalingRC", "Scaled %s rc %s to %d", scalingOperation, rc.Name, newScale)
 }
 return newRC, err
 }

View File

@@ -247,7 +247,7 @@ func (nc *NodeController) Run(period time.Duration) {
 if completed {
 glog.Infof("All pods terminated on %s", value.Value)
-nc.recordNodeEvent(value.Value, "TerminatedAllPods", fmt.Sprintf("Terminated all Pods on Node %s.", value.Value))
+nc.recordNodeEvent(value.Value, api.EventTypeNormal, "TerminatedAllPods", fmt.Sprintf("Terminated all Pods on Node %s.", value.Value))
 return true, 0
 }
@@ -354,7 +354,7 @@ func (nc *NodeController) monitorNodeStatus() error {
 for _, node := range nodes.Items {
 if !nc.knownNodeSet.Has(node.Name) {
 glog.V(1).Infof("NodeController observed a new Node: %#v", node)
-nc.recordNodeEvent(node.Name, "RegisteredNode", fmt.Sprintf("Registered Node %v in NodeController", node.Name))
+nc.recordNodeEvent(node.Name, api.EventTypeNormal, "RegisteredNode", fmt.Sprintf("Registered Node %v in NodeController", node.Name))
 nc.cancelPodEviction(node.Name)
 nc.knownNodeSet.Insert(node.Name)
 }
@@ -369,7 +369,7 @@ func (nc *NodeController) monitorNodeStatus() error {
 deleted := nc.knownNodeSet.Difference(observedSet)
 for nodeName := range deleted {
 glog.V(1).Infof("NodeController observed a Node deletion: %v", nodeName)
-nc.recordNodeEvent(nodeName, "RemovingNode", fmt.Sprintf("Removing Node %v from NodeController", nodeName))
+nc.recordNodeEvent(nodeName, api.EventTypeNormal, "RemovingNode", fmt.Sprintf("Removing Node %v from NodeController", nodeName))
 nc.evictPods(nodeName)
 nc.knownNodeSet.Delete(nodeName)
 }
@@ -440,7 +440,7 @@ func (nc *NodeController) monitorNodeStatus() error {
 }
 if _, err := instances.ExternalID(node.Name); err != nil && err == cloudprovider.InstanceNotFound {
 glog.Infof("Deleting node (no longer present in cloud provider): %s", node.Name)
-nc.recordNodeEvent(node.Name, "DeletingNode", fmt.Sprintf("Deleting Node %v because it's not present according to cloud provider", node.Name))
+nc.recordNodeEvent(node.Name, api.EventTypeNormal, "DeletingNode", fmt.Sprintf("Deleting Node %v because it's not present according to cloud provider", node.Name))
 remaining, err := nc.hasPods(node.Name)
 if err != nil {
@@ -494,7 +494,7 @@ func (nc *NodeController) reconcileNodeCIDRs(nodes *api.NodeList) {
 }
 }
-func (nc *NodeController) recordNodeEvent(nodeName string, reason string, event string) {
+func (nc *NodeController) recordNodeEvent(nodeName, eventtype, reason, event string) {
 ref := &api.ObjectReference{
 Kind: "Node",
 Name: nodeName,
@@ -502,7 +502,7 @@ func (nc *NodeController) recordNodeEvent(nodeName string, reason string, event
 Namespace: "",
 }
 glog.V(2).Infof("Recording %s event message for node %s", event, nodeName)
-nc.recorder.Eventf(ref, reason, "Node %s event: %s", nodeName, event)
+nc.recorder.Eventf(ref, eventtype, reason, "Node %s event: %s", nodeName, event)
 }
 func (nc *NodeController) recordNodeStatusChange(node *api.Node, new_status string) {
@@ -515,7 +515,7 @@ func (nc *NodeController) recordNodeStatusChange(node *api.Node, new_status stri
 glog.V(2).Infof("Recording status change %s event message for node %s", new_status, node.Name)
 // TODO: This requires a transaction, either both node status is updated
 // and event is recorded or neither should happen, see issue #6055.
-nc.recorder.Eventf(ref, new_status, "Node %s status is now: %s", node.Name, new_status)
+nc.recorder.Eventf(ref, api.EventTypeNormal, new_status, "Node %s status is now: %s", node.Name, new_status)
 }
 // For a given node checks its conditions and tries to update it. Returns grace period to which given node
@@ -723,7 +723,7 @@ func (nc *NodeController) deletePods(nodeName string) (bool, error) {
 }
 if len(pods.Items) > 0 {
-nc.recordNodeEvent(nodeName, "DeletingAllPods", fmt.Sprintf("Deleting all Pods from Node %v.", nodeName))
+nc.recordNodeEvent(nodeName, api.EventTypeNormal, "DeletingAllPods", fmt.Sprintf("Deleting all Pods from Node %v.", nodeName))
 }
 for _, pod := range pods.Items {
@@ -737,7 +737,7 @@ func (nc *NodeController) deletePods(nodeName string) (bool, error) {
 }
 glog.V(2).Infof("Starting deletion of pod %v", pod.Name)
-nc.recorder.Eventf(&pod, "NodeControllerEviction", "Marking for deletion Pod %s from Node %s", pod.Name, nodeName)
+nc.recorder.Eventf(&pod, api.EventTypeNormal, "NodeControllerEviction", "Marking for deletion Pod %s from Node %s", pod.Name, nodeName)
 if err := nc.kubeClient.Pods(pod.Namespace).Delete(pod.Name, nil); err != nil {
 return false, err
 }
@@ -784,7 +784,7 @@ func (nc *NodeController) terminatePods(nodeName string, since time.Time) (bool,
 if remaining < 0 {
 remaining = 0
 glog.V(2).Infof("Removing pod %v after %s grace period", pod.Name, grace)
-nc.recordNodeEvent(nodeName, "TerminatingEvictedPod", fmt.Sprintf("Pod %s has exceeded the grace period for deletion after being evicted from Node %q and is being force killed", pod.Name, nodeName))
+nc.recordNodeEvent(nodeName, api.EventTypeNormal, "TerminatingEvictedPod", fmt.Sprintf("Pod %s has exceeded the grace period for deletion after being evicted from Node %q and is being force killed", pod.Name, nodeName))
 if err := nc.kubeClient.Pods(pod.Namespace).Delete(pod.Name, api.NewDeleteOptions(0)); err != nil {
 glog.Errorf("Error completing deletion of pod %s: %v", pod.Name, err)
 complete = false

View File

@@ -80,7 +80,7 @@ func (a *HorizontalController) computeReplicasForCPUUtilization(hpa extensions.H
 // TODO: what to do on partial errors (like metrics obtained for 75% of pods).
 if err != nil {
-a.eventRecorder.Event(&hpa, "FailedGetMetrics", err.Error())
+a.eventRecorder.Event(&hpa, api.EventTypeWarning, "FailedGetMetrics", err.Error())
 return 0, nil, fmt.Errorf("failed to get cpu utilization: %v", err)
 }
@@ -97,14 +97,14 @@ func (a *HorizontalController) reconcileAutoscaler(hpa extensions.HorizontalPodA
 scale, err := a.client.Extensions().Scales(hpa.Namespace).Get(hpa.Spec.ScaleRef.Kind, hpa.Spec.ScaleRef.Name)
 if err != nil {
-a.eventRecorder.Event(&hpa, "FailedGetScale", err.Error())
+a.eventRecorder.Event(&hpa, api.EventTypeWarning, "FailedGetScale", err.Error())
 return fmt.Errorf("failed to query scale subresource for %s: %v", reference, err)
 }
 currentReplicas := scale.Status.Replicas
 desiredReplicas, currentUtilization, err := a.computeReplicasForCPUUtilization(hpa, scale)
 if err != nil {
-a.eventRecorder.Event(&hpa, "FailedComputeReplicas", err.Error())
+a.eventRecorder.Event(&hpa, api.EventTypeWarning, "FailedComputeReplicas", err.Error())
 return fmt.Errorf("failed to compute desired number of replicas based on CPU utilization for %s: %v", reference, err)
 }
@@ -145,10 +145,10 @@ func (a *HorizontalController) reconcileAutoscaler(hpa extensions.HorizontalPodA
 scale.Spec.Replicas = desiredReplicas
 _, err = a.client.Extensions().Scales(hpa.Namespace).Update(hpa.Spec.ScaleRef.Kind, scale)
 if err != nil {
-a.eventRecorder.Eventf(&hpa, "FailedRescale", "New size: %d; error: %v", desiredReplicas, err.Error())
+a.eventRecorder.Eventf(&hpa, api.EventTypeWarning, "FailedRescale", "New size: %d; error: %v", desiredReplicas, err.Error())
 return fmt.Errorf("failed to rescale %s: %v", reference, err)
 }
-a.eventRecorder.Eventf(&hpa, "SuccessfulRescale", "New size: %d", desiredReplicas)
+a.eventRecorder.Eventf(&hpa, api.EventTypeNormal, "SuccessfulRescale", "New size: %d", desiredReplicas)
 glog.Infof("Successfull rescale of %s, old size: %d, new size: %d",
 hpa.Name, currentReplicas, desiredReplicas)
 } else {
@@ -168,7 +168,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpa extensions.HorizontalPodA
 _, err = a.client.Extensions().HorizontalPodAutoscalers(hpa.Namespace).UpdateStatus(&hpa)
 if err != nil {
-a.eventRecorder.Event(&hpa, "FailedUpdateStatus", err.Error())
+a.eventRecorder.Event(&hpa, api.EventTypeWarning, "FailedUpdateStatus", err.Error())
 return fmt.Errorf("failed to update status for %s: %v", hpa.Name, err)
 }
 return nil

View File

@@ -245,7 +245,7 @@ func (s *ServiceController) processDelta(delta *cache.Delta) (error, bool) {
 message += " (will not retry): "
 }
 message += err.Error()
-s.eventRecorder.Event(service, "CreatingLoadBalancerFailed", message)
+s.eventRecorder.Event(service, api.EventTypeWarning, "CreatingLoadBalancerFailed", message)
 return err, retry
 }
 // Always update the cache upon success.
@@ -255,14 +255,14 @@ func (s *ServiceController) processDelta(delta *cache.Delta) (error, bool) {
 cachedService.appliedState = service
 s.cache.set(namespacedName.String(), cachedService)
 case cache.Deleted:
-s.eventRecorder.Event(service, "DeletingLoadBalancer", "Deleting load balancer")
+s.eventRecorder.Event(service, api.EventTypeNormal, "DeletingLoadBalancer", "Deleting load balancer")
 err := s.balancer.EnsureTCPLoadBalancerDeleted(s.loadBalancerName(service), s.zone.Region)
 if err != nil {
 message := "Error deleting load balancer (will retry): " + err.Error()
-s.eventRecorder.Event(service, "DeletingLoadBalancerFailed", message)
+s.eventRecorder.Event(service, api.EventTypeWarning, "DeletingLoadBalancerFailed", message)
 return err, retryable
 }
-s.eventRecorder.Event(service, "DeletedLoadBalancer", "Deleted load balancer")
+s.eventRecorder.Event(service, api.EventTypeNormal, "DeletedLoadBalancer", "Deleted load balancer")
 s.cache.delete(namespacedName.String())
 default:
 glog.Errorf("Unexpected delta type: %v", delta.Type)
@@ -305,11 +305,11 @@ func (s *ServiceController) createLoadBalancerIfNeeded(namespacedName types.Name
 if needDelete {
 glog.Infof("Deleting existing load balancer for service %s that no longer needs a load balancer.", namespacedName)
-s.eventRecorder.Event(service, "DeletingLoadBalancer", "Deleting load balancer")
+s.eventRecorder.Event(service, api.EventTypeNormal, "DeletingLoadBalancer", "Deleting load balancer")
 if err := s.balancer.EnsureTCPLoadBalancerDeleted(s.loadBalancerName(service), s.zone.Region); err != nil {
 return err, retryable
 }
-s.eventRecorder.Event(service, "DeletedLoadBalancer", "Deleted load balancer")
+s.eventRecorder.Event(service, api.EventTypeNormal, "DeletedLoadBalancer", "Deleted load balancer")
 }
 service.Status.LoadBalancer = api.LoadBalancerStatus{}
@@ -319,12 +319,12 @@ func (s *ServiceController) createLoadBalancerIfNeeded(namespacedName types.Name
 // TODO: We could do a dry-run here if wanted to avoid the spurious cloud-calls & events when we restart
 // The load balancer doesn't exist yet, so create it.
-s.eventRecorder.Event(service, "CreatingLoadBalancer", "Creating load balancer")
+s.eventRecorder.Event(service, api.EventTypeNormal, "CreatingLoadBalancer", "Creating load balancer")
 err := s.createLoadBalancer(service)
 if err != nil {
 return fmt.Errorf("Failed to create load balancer for service %s: %v", namespacedName, err), retryable
 }
-s.eventRecorder.Event(service, "CreatedLoadBalancer", "Created load balancer")
+s.eventRecorder.Event(service, api.EventTypeNormal, "CreatedLoadBalancer", "Created load balancer")
 }
 // Write the state if changed
@@ -686,7 +686,7 @@ func (s *ServiceController) lockedUpdateLoadBalancerHosts(service *api.Service,
 name := cloudprovider.GetLoadBalancerName(service)
 err := s.balancer.UpdateTCPLoadBalancer(name, s.zone.Region, hosts)
 if err == nil {
-s.eventRecorder.Event(service, "UpdatedLoadBalancer", "Updated load balancer with new hosts")
+s.eventRecorder.Event(service, api.EventTypeNormal, "UpdatedLoadBalancer", "Updated load balancer with new hosts")
 return nil
 }
@@ -697,7 +697,7 @@ func (s *ServiceController) lockedUpdateLoadBalancerHosts(service *api.Service,
 return nil
 }
-s.eventRecorder.Eventf(service, "LoadBalancerUpdateFailed", "Error updating load balancer with new hosts %v: %v", hosts, err)
+s.eventRecorder.Eventf(service, api.EventTypeWarning, "LoadBalancerUpdateFailed", "Error updating load balancer with new hosts %v: %v", hosts, err)
 return err
 }

View File

@@ -1512,15 +1512,16 @@ func DescribeEvents(el *api.EventList, w io.Writer) {
 return
 }
 sort.Sort(SortableEvents(el.Items))
-fmt.Fprint(w, "Events:\n FirstSeen\tLastSeen\tCount\tFrom\tSubobjectPath\tReason\tMessage\n")
-fmt.Fprint(w, " ─────────\t────────\t─────\t────\t─────────────\t──────\t───────\n")
+fmt.Fprint(w, "Events:\n FirstSeen\tLastSeen\tCount\tFrom\tSubobjectPath\tType\tReason\tMessage\n")
+fmt.Fprint(w, " ─────────\t────────\t─────\t────\t─────────────\t────────\t──────\t───────\n")
 for _, e := range el.Items {
-fmt.Fprintf(w, " %s\t%s\t%d\t%v\t%v\t%v\t%v\n",
+fmt.Fprintf(w, " %s\t%s\t%d\t%v\t%v\t%v\t%v\t%v\n",
 translateTimestamp(e.FirstTimestamp),
 translateTimestamp(e.LastTimestamp),
 e.Count,
 e.Source,
 e.InvolvedObject.FieldPath,
+e.Type,
 e.Reason,
 e.Message)
 }

View File

@@ -394,7 +394,7 @@ var ingressColumns = []string{"NAME", "RULE", "BACKEND", "ADDRESS"}
 var endpointColumns = []string{"NAME", "ENDPOINTS", "AGE"}
 var nodeColumns = []string{"NAME", "LABELS", "STATUS", "AGE"}
 var daemonSetColumns = []string{"NAME", "CONTAINER(S)", "IMAGE(S)", "SELECTOR", "NODE-SELECTOR"}
-var eventColumns = []string{"FIRSTSEEN", "LASTSEEN", "COUNT", "NAME", "KIND", "SUBOBJECT", "REASON", "SOURCE", "MESSAGE"}
+var eventColumns = []string{"FIRSTSEEN", "LASTSEEN", "COUNT", "NAME", "KIND", "SUBOBJECT", "TYPE", "REASON", "SOURCE", "MESSAGE"}
 var limitRangeColumns = []string{"NAME", "AGE"}
 var resourceQuotaColumns = []string{"NAME", "AGE"}
 var namespaceColumns = []string{"NAME", "LABELS", "STATUS", "AGE"}
@@ -1203,13 +1203,14 @@ func printEvent(event *api.Event, w io.Writer, withNamespace bool, wide bool, sh
 }
 }
 if _, err := fmt.Fprintf(
-w, "%s\t%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s",
+w, "%s\t%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s",
 translateTimestamp(event.FirstTimestamp),
 translateTimestamp(event.LastTimestamp),
 event.Count,
 event.InvolvedObject.Name,
 event.InvolvedObject.Kind,
 event.InvolvedObject.FieldPath,
+event.Type,
 event.Reason,
 event.Source,
 event.Message,

View File

@@ -327,7 +327,7 @@ func filterInvalidPods(pods []*api.Pod, source string, recorder record.EventReco
 name := bestPodIdentString(pod)
 err := utilerrors.NewAggregate(errlist)
 glog.Warningf("Pod[%d] (%s) from %s failed validation, ignoring: %v", i+1, name, source, err)
-recorder.Eventf(pod, kubecontainer.FailedValidation, "Error validating pod %s from %s, ignoring: %v", name, source, err)
+recorder.Eventf(pod, api.EventTypeWarning, kubecontainer.FailedValidation, "Error validating pod %s from %s, ignoring: %v", name, source, err)
 continue
 }
 filtered = append(filtered, pod)

View File

@@ -132,21 +132,21 @@ func (irecorder *innerEventRecorder) shouldRecordEvent(object runtime.Object) (*
 return nil, false
 }
-func (irecorder *innerEventRecorder) Event(object runtime.Object, reason, message string) {
+func (irecorder *innerEventRecorder) Event(object runtime.Object, eventtype, reason, message string) {
 if ref, ok := irecorder.shouldRecordEvent(object); ok {
-irecorder.recorder.Event(ref, reason, message)
+irecorder.recorder.Event(ref, eventtype, reason, message)
 }
 }
-func (irecorder *innerEventRecorder) Eventf(object runtime.Object, reason, messageFmt string, args ...interface{}) {
+func (irecorder *innerEventRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
 if ref, ok := irecorder.shouldRecordEvent(object); ok {
-irecorder.recorder.Eventf(ref, reason, messageFmt, args...)
+irecorder.recorder.Eventf(ref, eventtype, reason, messageFmt, args...)
 }
 }
-func (irecorder *innerEventRecorder) PastEventf(object runtime.Object, timestamp unversioned.Time, reason, messageFmt string, args ...interface{}) {
+func (irecorder *innerEventRecorder) PastEventf(object runtime.Object, timestamp unversioned.Time, eventtype, reason, messageFmt string, args ...interface{}) {
 if ref, ok := irecorder.shouldRecordEvent(object); ok {
-irecorder.recorder.PastEventf(ref, timestamp, reason, messageFmt, args...)
+irecorder.recorder.PastEventf(ref, timestamp, eventtype, reason, messageFmt, args...)
 }
 }

View File

@@ -63,9 +63,9 @@ func shouldPullImage(container *api.Container, imagePresent bool) bool {
 }
 // records an event using ref, event msg. log to glog using prefix, msg, logFn
-func (puller *imagePuller) logIt(ref *api.ObjectReference, event, prefix, msg string, logFn func(args ...interface{})) {
+func (puller *imagePuller) logIt(ref *api.ObjectReference, eventtype, event, prefix, msg string, logFn func(args ...interface{})) {
 if ref != nil {
-puller.recorder.Event(ref, event, msg)
+puller.recorder.Event(ref, eventtype, event, msg)
 } else {
 logFn(fmt.Sprint(prefix, " ", msg))
 }
@@ -83,18 +83,18 @@ func (puller *imagePuller) PullImage(pod *api.Pod, container *api.Container, pul
 present, err := puller.runtime.IsImagePresent(spec)
 if err != nil {
 msg := fmt.Sprintf("Failed to inspect image %q: %v", container.Image, err)
-puller.logIt(ref, FailedToInspectImage, logPrefix, msg, glog.Warning)
+puller.logIt(ref, api.EventTypeWarning, FailedToInspectImage, logPrefix, msg, glog.Warning)
 return ErrImageInspect, msg
 }
 if !shouldPullImage(container, present) {
 if present {
 msg := fmt.Sprintf("Container image %q already present on machine", container.Image)
-puller.logIt(ref, "Pulled", logPrefix, msg, glog.Info)
+puller.logIt(ref, api.EventTypeNormal, "Pulled", logPrefix, msg, glog.Info)
 return nil, ""
 } else {
 msg := fmt.Sprintf("Container image %q is not present with pull policy of Never", container.Image)
-puller.logIt(ref, ErrImageNeverPullPolicy, logPrefix, msg, glog.Warning)
+puller.logIt(ref, api.EventTypeWarning, ErrImageNeverPullPolicy, logPrefix, msg, glog.Warning)
 return ErrImageNeverPull, msg
 }
 }
@@ -102,12 +102,12 @@ func (puller *imagePuller) PullImage(pod *api.Pod, container *api.Container, pul
 backOffKey := fmt.Sprintf("%s_%s", pod.Name, container.Image)
 if puller.backOff.IsInBackOffSinceUpdate(backOffKey, puller.backOff.Clock.Now()) {
 msg := fmt.Sprintf("Back-off pulling image %q", container.Image)
-puller.logIt(ref, BackOffPullImage, logPrefix, msg, glog.Info)
+puller.logIt(ref, api.EventTypeNormal, BackOffPullImage, logPrefix, msg, glog.Info)
 return ErrImagePullBackOff, msg
 }
-puller.logIt(ref, "Pulling", logPrefix, fmt.Sprintf("pulling image %q", container.Image), glog.Info)
+puller.logIt(ref, api.EventTypeNormal, "Pulling", logPrefix, fmt.Sprintf("pulling image %q", container.Image), glog.Info)
 if err := puller.runtime.PullImage(spec, pullSecrets); err != nil {
-puller.logIt(ref, "Failed", logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, err), glog.Warning)
+puller.logIt(ref, api.EventTypeWarning, "Failed", logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, err), glog.Warning)
 puller.backOff.Next(backOffKey, puller.backOff.Clock.Now())
 if err == RegistryUnavailable {
 msg := fmt.Sprintf("image pull failed for %s because the registry is temporarily unavailable.", container.Image)
@@ -116,7 +116,7 @@ func (puller *imagePuller) PullImage(pod *api.Pod, container *api.Container, pul
 return ErrImagePull, err.Error()
 }
 }
-puller.logIt(ref, "Pulled", logPrefix, fmt.Sprintf("Successfully pulled image %q", container.Image), glog.Info)
+puller.logIt(ref, api.EventTypeNormal, "Pulled", logPrefix, fmt.Sprintf("Successfully pulled image %q", container.Image), glog.Info)
 puller.backOff.GC()
 return nil, ""
 }

View File

@@ -64,9 +64,9 @@ func NewSerializedImagePuller(recorder record.EventRecorder, runtime Runtime, im
 }
 // records an event using ref, event msg. log to glog using prefix, msg, logFn
-func (puller *serializedImagePuller) logIt(ref *api.ObjectReference, event, prefix, msg string, logFn func(args ...interface{})) {
+func (puller *serializedImagePuller) logIt(ref *api.ObjectReference, eventtype, event, prefix, msg string, logFn func(args ...interface{})) {
 if ref != nil {
-puller.recorder.Event(ref, event, msg)
+puller.recorder.Event(ref, eventtype, event, msg)
 } else {
 logFn(fmt.Sprint(prefix, " ", msg))
 }
@@ -84,18 +84,18 @@ func (puller *serializedImagePuller) PullImage(pod *api.Pod, container *api.Cont
 present, err := puller.runtime.IsImagePresent(spec)
 if err != nil {
 msg := fmt.Sprintf("Failed to inspect image %q: %v", container.Image, err)
-puller.logIt(ref, FailedToInspectImage, logPrefix, msg, glog.Warning)
+puller.logIt(ref, api.EventTypeWarning, FailedToInspectImage, logPrefix, msg, glog.Warning)
 return ErrImageInspect, msg
 }
 if !shouldPullImage(container, present) {
 if present {
 msg := fmt.Sprintf("Container image %q already present on machine", container.Image)
-puller.logIt(ref, PulledImage, logPrefix, msg, glog.Info)
+puller.logIt(ref, api.EventTypeNormal, PulledImage, logPrefix, msg, glog.Info)
 return nil, ""
 } else {
 msg := fmt.Sprintf("Container image %q is not present with pull policy of Never", container.Image)
-puller.logIt(ref, ErrImageNeverPullPolicy, logPrefix, msg, glog.Warning)
+puller.logIt(ref, api.EventTypeWarning, ErrImageNeverPullPolicy, logPrefix, msg, glog.Warning)
 return ErrImageNeverPull, msg
 }
 }
@@ -103,7 +103,7 @@ func (puller *serializedImagePuller) PullImage(pod *api.Pod, container *api.Cont
 backOffKey := fmt.Sprintf("%s_%s", pod.Name, container.Image)
 if puller.backOff.IsInBackOffSinceUpdate(backOffKey, puller.backOff.Clock.Now()) {
 msg := fmt.Sprintf("Back-off pulling image %q", container.Image)
-puller.logIt(ref, BackOffPullImage, logPrefix, msg, glog.Info)
+puller.logIt(ref, api.EventTypeNormal, BackOffPullImage, logPrefix, msg, glog.Info)
 return ErrImagePullBackOff, msg
 }
@@ -118,7 +118,7 @@ func (puller *serializedImagePuller) PullImage(pod *api.Pod, container *api.Cont
 returnChan: returnChan,
 }
 if err = <-returnChan; err != nil {
-puller.logIt(ref, FailedToPullImage, logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, err), glog.Warning)
+puller.logIt(ref, api.EventTypeWarning, FailedToPullImage, logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, err), glog.Warning)
 puller.backOff.Next(backOffKey, puller.backOff.Clock.Now())
 if err == RegistryUnavailable {
 msg := fmt.Sprintf("image pull failed for %s because the registry is temporarily unavailable.", container.Image)
@@ -127,14 +127,14 @@ func (puller *serializedImagePuller) PullImage(pod *api.Pod, container *api.Cont
 return ErrImagePull, err.Error()
 }
 }
-puller.logIt(ref, PulledImage, logPrefix, fmt.Sprintf("Successfully pulled image %q", container.Image), glog.Info)
+puller.logIt(ref, api.EventTypeNormal, PulledImage, logPrefix, fmt.Sprintf("Successfully pulled image %q", container.Image), glog.Info)
 puller.backOff.GC()
 return nil, ""
 }
 func (puller *serializedImagePuller) pullImages() {
 for pullRequest := range puller.pullRequests {
-puller.logIt(pullRequest.ref, PullingImage, pullRequest.logPrefix, fmt.Sprintf("pulling image %q", pullRequest.container.Image), glog.Info)
+puller.logIt(pullRequest.ref, api.EventTypeNormal, PullingImage, pullRequest.logPrefix, fmt.Sprintf("pulling image %q", pullRequest.container.Image), glog.Info)
 pullRequest.returnChan <- puller.runtime.PullImage(pullRequest.spec, pullRequest.pullSecrets)
 }
 }

View File

@@ -757,11 +757,11 @@ func (dm *DockerManager) runContainer(
 securityContextProvider.ModifyContainerConfig(pod, container, dockerOpts.Config)
 dockerContainer, err := dm.client.CreateContainer(dockerOpts)
 if err != nil {
-dm.recorder.Eventf(ref, kubecontainer.FailedToCreateContainer, "Failed to create docker container with error: %v", err)
+dm.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.FailedToCreateContainer, "Failed to create docker container with error: %v", err)
 return kubecontainer.ContainerID{}, err
 }
-dm.recorder.Eventf(ref, kubecontainer.CreatedContainer, "Created container with docker id %v", util.ShortenString(dockerContainer.ID, 12))
+dm.recorder.Eventf(ref, api.EventTypeNormal, kubecontainer.CreatedContainer, "Created container with docker id %v", util.ShortenString(dockerContainer.ID, 12))
 podHasSELinuxLabel := pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.SELinuxOptions != nil
 binds := makeMountBindings(opts.Mounts, podHasSELinuxLabel)
@@ -817,11 +817,11 @@ func (dm *DockerManager) runContainer(
 securityContextProvider.ModifyHostConfig(pod, container, hc)
 if err = dm.client.StartContainer(dockerContainer.ID, hc); err != nil {
-dm.recorder.Eventf(ref, kubecontainer.FailedToStartContainer,
+dm.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.FailedToStartContainer,
 "Failed to start container with docker id %v with error: %v", util.ShortenString(dockerContainer.ID, 12), err)
 return kubecontainer.ContainerID{}, err
 }
-dm.recorder.Eventf(ref, kubecontainer.StartedContainer, "Started container with docker id %v", util.ShortenString(dockerContainer.ID, 12))
+dm.recorder.Eventf(ref, api.EventTypeNormal, kubecontainer.StartedContainer, "Started container with docker id %v", util.ShortenString(dockerContainer.ID, 12))
 return kubetypes.DockerID(dockerContainer.ID).ContainerID(), nil
 }
@@ -1437,7 +1437,7 @@ func (dm *DockerManager) killContainer(containerID kubecontainer.ContainerID, co
 if reason != "" {
 message = fmt.Sprint(message, ": ", reason)
 }
-dm.recorder.Event(ref, kubecontainer.KillingContainer, message)
+dm.recorder.Event(ref, api.EventTypeNormal, kubecontainer.KillingContainer, message)
 dm.containerRefManager.ClearRef(containerID)
 }
 return err
@@ -1817,7 +1817,7 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, pod
 if err != nil {
 glog.Errorf("Couldn't make a ref to pod %q: '%v'", podFullName, err)
 }
-dm.recorder.Eventf(ref, "InfraChanged", "Pod infrastructure changed, it will be killed and re-created.")
+dm.recorder.Eventf(ref, api.EventTypeNormal, "InfraChanged", "Pod infrastructure changed, it will be killed and re-created.")
 }
 if containerChanges.StartInfraContainer || (len(containerChanges.ContainersToKeep) == 0 && len(containerChanges.ContainersToStart) == 0) {
 if len(containerChanges.ContainersToKeep) == 0 && len(containerChanges.ContainersToStart) == 0 {
@@ -2049,7 +2049,7 @@ func (dm *DockerManager) doBackOff(pod *api.Pod, container *api.Container, podSt
 stableName, _ := BuildDockerName(dockerName, container)
 if backOff.IsInBackOffSince(stableName, ts.Time) {
 if ref, err := kubecontainer.GenerateContainerRef(pod, container); err == nil {
-dm.recorder.Eventf(ref, kubecontainer.BackOffStartContainer, "Back-off restarting failed docker container")
+dm.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.BackOffStartContainer, "Back-off restarting failed docker container")
 }
 err := fmt.Errorf("Back-off %s restarting failed container=%s pod=%s", backOff.Get(stableName), container.Name, kubecontainer.GetPodFullName(pod))
 dm.updateReasonCache(pod, container, kubecontainer.ErrCrashLoopBackOff.Error(), err)

View File

@@ -195,7 +195,7 @@ func (im *realImageManager) GarbageCollect() error {
 // Check valid capacity.
 if capacity == 0 {
 err := fmt.Errorf("invalid capacity %d on device %q at mount point %q", capacity, fsInfo.Device, fsInfo.Mountpoint)
-im.recorder.Eventf(im.nodeRef, container.InvalidDiskCapacity, err.Error())
+im.recorder.Eventf(im.nodeRef, api.EventTypeWarning, container.InvalidDiskCapacity, err.Error())
 return err
 }
@@ -211,7 +211,7 @@ func (im *realImageManager) GarbageCollect() error {
 if freed < amountToFree {
 err := fmt.Errorf("failed to garbage collect required amount of images. Wanted to free %d, but freed %d", amountToFree, freed)
-im.recorder.Eventf(im.nodeRef, container.FreeDiskSpaceFailed, err.Error())
+im.recorder.Eventf(im.nodeRef, api.EventTypeWarning, container.FreeDiskSpaceFailed, err.Error())
 return err
 }
 }

View File

@@ -870,7 +870,7 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
 glog.Warning("No api server defined - no node status update will be sent.")
 }
 if err := kl.initializeModules(); err != nil {
-kl.recorder.Eventf(kl.nodeRef, kubecontainer.KubeletSetupFailed, err.Error())
+kl.recorder.Eventf(kl.nodeRef, api.EventTypeWarning, kubecontainer.KubeletSetupFailed, err.Error())
 glog.Error(err)
 kl.runtimeState.setInitError(err)
 }
@@ -1536,7 +1536,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont
 // Mount volumes.
 podVolumes, err := kl.mountExternalVolumes(pod)
 if err != nil {
-kl.recorder.Eventf(ref, kubecontainer.FailedMountVolume, "Unable to mount volumes for pod %q: %v", podFullName, err)
+kl.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.FailedMountVolume, "Unable to mount volumes for pod %q: %v", podFullName, err)
 glog.Errorf("Unable to mount volumes for pod %q: %v; skipping pod", podFullName, err)
 return err
 }
@@ -1601,7 +1601,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont
 }
 if egress != nil || ingress != nil {
 if podUsesHostNetwork(pod) {
-kl.recorder.Event(pod, kubecontainer.HostNetworkNotSupported, "Bandwidth shaping is not currently supported on the host network")
+kl.recorder.Event(pod, api.EventTypeWarning, kubecontainer.HostNetworkNotSupported, "Bandwidth shaping is not currently supported on the host network")
 } else if kl.shaper != nil {
 status, found := kl.statusManager.GetPodStatus(pod.UID)
 if !found {
@@ -1616,7 +1616,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont
 err = kl.shaper.ReconcileCIDR(fmt.Sprintf("%s/32", status.PodIP), egress, ingress)
 }
 } else {
-kl.recorder.Event(pod, kubecontainer.UndefinedShaper, "Pod requests bandwidth shaping, but the shaper is undefined")
+kl.recorder.Event(pod, api.EventTypeWarning, kubecontainer.UndefinedShaper, "Pod requests bandwidth shaping, but the shaper is undefined")
 }
 }
@@ -2106,7 +2106,7 @@ func (kl *Kubelet) matchesNodeSelector(pod *api.Pod) bool {
 }
 func (kl *Kubelet) rejectPod(pod *api.Pod, reason, message string) {
-kl.recorder.Eventf(pod, reason, message)
+kl.recorder.Eventf(pod, api.EventTypeWarning, reason, message)
 kl.statusManager.SetPodStatus(pod, api.PodStatus{
 Phase: api.PodFailed,
 Reason: reason,
@@ -2507,11 +2507,11 @@ func (kl *Kubelet) updateNodeStatus() error {
 return fmt.Errorf("update node status exceeds retry count")
 }
-func (kl *Kubelet) recordNodeStatusEvent(event string) {
+func (kl *Kubelet) recordNodeStatusEvent(eventtype, event string) {
 glog.V(2).Infof("Recording %s event message for node %s", event, kl.nodeName)
 // TODO: This requires a transaction, either both node status is updated
 // and event is recorded or neither should happen, see issue #6055.
-kl.recorder.Eventf(kl.nodeRef, event, "Node %s status is now: %s", kl.nodeName, event)
+kl.recorder.Eventf(kl.nodeRef, eventtype, event, "Node %s status is now: %s", kl.nodeName, event)
 }
 // Maintains Node.Spec.Unschedulable value from previous run of tryUpdateNodeStatus()
@@ -2622,7 +2622,7 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
 node.Status.NodeInfo.BootID != info.BootID {
 // TODO: This requires a transaction, either both node status is updated
 // and event is recorded or neither should happen, see issue #6055.
-kl.recorder.Eventf(kl.nodeRef, kubecontainer.NodeRebooted,
+kl.recorder.Eventf(kl.nodeRef, api.EventTypeWarning, kubecontainer.NodeRebooted,
 "Node %s has been rebooted, boot id: %s", kl.nodeName, info.BootID)
 }
 node.Status.NodeInfo.BootID = info.BootID
@@ -2684,9 +2684,9 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
 }
 if !updated || oldNodeReadyConditionStatus != newNodeReadyCondition.Status {
 if newNodeReadyCondition.Status == api.ConditionTrue {
-kl.recordNodeStatusEvent(kubecontainer.NodeReady)
+kl.recordNodeStatusEvent(api.EventTypeNormal, kubecontainer.NodeReady)
 } else {
-kl.recordNodeStatusEvent(kubecontainer.NodeNotReady)
+kl.recordNodeStatusEvent(api.EventTypeNormal, kubecontainer.NodeNotReady)
 }
 }
@@ -2728,7 +2728,7 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
 nodeOODCondition.Reason = "KubeletOutOfDisk"
 nodeOODCondition.Message = "out of disk space"
 nodeOODCondition.LastTransitionTime = currentTime
-kl.recordNodeStatusEvent("NodeOutOfDisk")
+kl.recordNodeStatusEvent(api.EventTypeNormal, "NodeOutOfDisk")
 }
 } else {
 if nodeOODCondition.Status != api.ConditionFalse {
@@ -2736,7 +2736,7 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
 nodeOODCondition.Reason = "KubeletHasSufficientDisk"
 nodeOODCondition.Message = "kubelet has sufficient disk space available"
 nodeOODCondition.LastTransitionTime = currentTime
-kl.recordNodeStatusEvent(api.EventTypeNormal, "NodeHasSufficientDisk")
 }
 }
@@ -2746,9 +2746,9 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
 if oldNodeUnschedulable != node.Spec.Unschedulable {
 if node.Spec.Unschedulable {
-kl.recordNodeStatusEvent(kubecontainer.NodeNotSchedulable)
+kl.recordNodeStatusEvent(api.EventTypeNormal, kubecontainer.NodeNotSchedulable)
 } else {
-kl.recordNodeStatusEvent(kubecontainer.NodeSchedulable)
+kl.recordNodeStatusEvent(api.EventTypeNormal, kubecontainer.NodeSchedulable)
 }
 oldNodeUnschedulable = node.Spec.Unschedulable
} }
@ -2933,7 +2933,7 @@ func (kl *Kubelet) generatePodStatus(pod *api.Pod) (api.PodStatus, error) {
// TODO: Consider include the container information. // TODO: Consider include the container information.
if kl.pastActiveDeadline(pod) { if kl.pastActiveDeadline(pod) {
reason := "DeadlineExceeded" reason := "DeadlineExceeded"
kl.recorder.Eventf(pod, reason, "Pod was active on the node longer than specified deadline") kl.recorder.Eventf(pod, api.EventTypeWarning, reason, "Pod was active on the node longer than specified deadline")
return api.PodStatus{ return api.PodStatus{
Phase: api.PodFailed, Phase: api.PodFailed,
Reason: reason, Reason: reason,
@ -3058,7 +3058,7 @@ func (kl *Kubelet) PortForward(podFullName string, podUID types.UID, port uint16
// BirthCry sends an event that the kubelet has started up. // BirthCry sends an event that the kubelet has started up.
func (kl *Kubelet) BirthCry() { func (kl *Kubelet) BirthCry() {
// Make an event that kubelet restarted. // Make an event that kubelet restarted.
kl.recorder.Eventf(kl.nodeRef, kubecontainer.StartingKubelet, "Starting kubelet.") kl.recorder.Eventf(kl.nodeRef, api.EventTypeNormal, kubecontainer.StartingKubelet, "Starting kubelet.")
} }
func (kl *Kubelet) StreamingConnectionIdleTimeout() time.Duration { func (kl *Kubelet) StreamingConnectionIdleTimeout() time.Duration {
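All of the kubelet call sites above assume that the event recorder now accepts an event type as its first string argument, ahead of the existing reason. The recorder itself is not part of this section, so the following is only a sketch of the interface shape those calls imply; the package name, import paths, and exact signatures are assumptions, not taken from this diff:

package record // assumed package name; sketch only, not part of this diff

import (
	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/runtime"
)

// EventRecorder is the shape implied by the call sites in this commit:
// every method takes eventtype (api.EventTypeNormal or api.EventTypeWarning)
// before the existing reason argument.
type EventRecorder interface {
	// Event records an event with a fixed message.
	Event(object runtime.Object, eventtype, reason, message string)
	// Eventf records an event with a Sprintf-style message.
	Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{})
	// PastEventf records an event with an explicit timestamp, as the OOM watcher below does.
	PastEventf(object runtime.Object, timestamp unversioned.Time, eventtype, reason, messageFmt string, args ...interface{})
}

Putting the type first and leaving reason and message untouched is what keeps each changed line above to a single added argument.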

View File

@ -64,7 +64,7 @@ func (ow *realOOMWatcher) Start(ref *api.ObjectReference) error {
for event := range eventChannel.GetChannel() { for event := range eventChannel.GetChannel() {
glog.V(2).Infof("Got sys oom event from cadvisor: %v", event) glog.V(2).Infof("Got sys oom event from cadvisor: %v", event)
ow.recorder.PastEventf(ref, unversioned.Time{Time: event.Timestamp}, systemOOMEvent, "System OOM encountered") ow.recorder.PastEventf(ref, unversioned.Time{Time: event.Timestamp}, api.EventTypeWarning, systemOOMEvent, "System OOM encountered")
} }
glog.Errorf("Unexpectedly stopped receiving OOM notifications from cAdvisor") glog.Errorf("Unexpectedly stopped receiving OOM notifications from cAdvisor")
}() }()
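The OOM watcher above backdates its event with PastEventf and, like every other call site, passes the type as a bare string, so a misspelled type would be recorded without complaint. A hypothetical guard, not part of this commit, could restrict callers to the two constants used throughout the diff:

package kubelet // hypothetical placement; sketch only

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
)

// validateEventType is a hypothetical guard, not part of this commit.
// It rejects anything other than the two constants used in the diff above.
func validateEventType(eventtype string) error {
	switch eventtype {
	case api.EventTypeNormal, api.EventTypeWarning:
		return nil
	default:
		return fmt.Errorf("unsupported event type: %q", eventtype)
	}
}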

View File

@ -123,7 +123,7 @@ func (p *podWorkers) managePodLoop(podUpdates <-chan workUpdate) {
minRuntimeCacheTime = time.Now() minRuntimeCacheTime = time.Now()
if err != nil { if err != nil {
glog.Errorf("Error syncing pod %s, skipping: %v", newWork.pod.UID, err) glog.Errorf("Error syncing pod %s, skipping: %v", newWork.pod.UID, err)
p.recorder.Eventf(newWork.pod, kubecontainer.FailedSync, "Error syncing pod, skipping: %v", err) p.recorder.Eventf(newWork.pod, api.EventTypeWarning, kubecontainer.FailedSync, "Error syncing pod, skipping: %v", err)
return err return err
} }
newWork.updateCompleteFn() newWork.updateCompleteFn()

View File

@ -96,12 +96,12 @@ func (pb *prober) probe(probeType probeType, pod *api.Pod, status api.PodStatus,
if err != nil { if err != nil {
glog.V(1).Infof("%s probe for %q errored: %v", probeType, ctrName, err) glog.V(1).Infof("%s probe for %q errored: %v", probeType, ctrName, err)
if hasRef { if hasRef {
pb.recorder.Eventf(ref, kubecontainer.ContainerUnhealthy, "%s probe errored: %v", probeType, err) pb.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.ContainerUnhealthy, "%s probe errored: %v", probeType, err)
} }
} else { // result != probe.Success } else { // result != probe.Success
glog.V(1).Infof("%s probe for %q failed (%v): %s", probeType, ctrName, result, output) glog.V(1).Infof("%s probe for %q failed (%v): %s", probeType, ctrName, result, output)
if hasRef { if hasRef {
pb.recorder.Eventf(ref, kubecontainer.ContainerUnhealthy, "%s probe failed: %s", probeType, output) pb.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.ContainerUnhealthy, "%s probe failed: %s", probeType, output)
} }
} }
return results.Failure, err return results.Failure, err

View File

@ -676,13 +676,13 @@ func (r *Runtime) generateEvents(runtimePod *kubecontainer.Pod, reason string, f
uuid := util.ShortenString(id.uuid, 8) uuid := util.ShortenString(id.uuid, 8)
switch reason { switch reason {
case "Created": case "Created":
r.recorder.Eventf(ref, kubecontainer.CreatedContainer, "Created with rkt id %v", uuid) r.recorder.Eventf(ref, api.EventTypeNormal, kubecontainer.CreatedContainer, "Created with rkt id %v", uuid)
case "Started": case "Started":
r.recorder.Eventf(ref, kubecontainer.StartedContainer, "Started with rkt id %v", uuid) r.recorder.Eventf(ref, api.EventTypeNormal, kubecontainer.StartedContainer, "Started with rkt id %v", uuid)
case "Failed": case "Failed":
r.recorder.Eventf(ref, kubecontainer.FailedToStartContainer, "Failed to start with rkt id %v with error %v", uuid, failure) r.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.FailedToStartContainer, "Failed to start with rkt id %v with error %v", uuid, failure)
case "Killing": case "Killing":
r.recorder.Eventf(ref, kubecontainer.KillingContainer, "Killing with rkt id %v", uuid) r.recorder.Eventf(ref, api.EventTypeNormal, kubecontainer.KillingContainer, "Killing with rkt id %v", uuid)
default: default:
glog.Errorf("rkt: Unexpected event %q", reason) glog.Errorf("rkt: Unexpected event %q", reason)
} }
@ -707,7 +707,7 @@ func (r *Runtime) RunPod(pod *api.Pod, pullSecrets []api.Secret) error {
continue continue
} }
if prepareErr != nil { if prepareErr != nil {
r.recorder.Eventf(ref, kubecontainer.FailedToCreateContainer, "Failed to create rkt container with error: %v", prepareErr) r.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.FailedToCreateContainer, "Failed to create rkt container with error: %v", prepareErr)
continue continue
} }
containerID := runtimePod.Containers[i].ID containerID := runtimePod.Containers[i].ID

View File

@ -125,7 +125,7 @@ func (s *Scheduler) scheduleOne() {
metrics.SchedulingAlgorithmLatency.Observe(metrics.SinceInMicroseconds(start)) metrics.SchedulingAlgorithmLatency.Observe(metrics.SinceInMicroseconds(start))
if err != nil { if err != nil {
glog.V(1).Infof("Failed to schedule: %+v", pod) glog.V(1).Infof("Failed to schedule: %+v", pod)
s.config.Recorder.Eventf(pod, "FailedScheduling", "%v", err) s.config.Recorder.Eventf(pod, api.EventTypeWarning, "FailedScheduling", "%v", err)
s.config.Error(pod, err) s.config.Error(pod, err)
return return
} }
@ -145,11 +145,11 @@ func (s *Scheduler) scheduleOne() {
metrics.BindingLatency.Observe(metrics.SinceInMicroseconds(bindingStart)) metrics.BindingLatency.Observe(metrics.SinceInMicroseconds(bindingStart))
if err != nil { if err != nil {
glog.V(1).Infof("Failed to bind pod: %+v", err) glog.V(1).Infof("Failed to bind pod: %+v", err)
s.config.Recorder.Eventf(pod, "FailedScheduling", "Binding rejected: %v", err) s.config.Recorder.Eventf(pod, api.EventTypeNormal, "FailedScheduling", "Binding rejected: %v", err)
s.config.Error(pod, err) s.config.Error(pod, err)
return return
} }
s.config.Recorder.Eventf(pod, "Scheduled", "Successfully assigned %v to %v", pod.Name, dest) s.config.Recorder.Eventf(pod, api.EventTypeNormal, "Scheduled", "Successfully assigned %v to %v", pod.Name, dest)
// tell the model to assume that this binding took effect. // tell the model to assume that this binding took effect.
assumed := *pod assumed := *pod
assumed.Spec.NodeName = dest assumed.Spec.NodeName = dest
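On the consumer side, the new Type field makes it easy to separate failures from routine notifications once events are read back. A small illustrative sketch, assuming events are fetched as an api.EventList (the helper name and placement are invented for illustration):

package main // illustrative only

import "k8s.io/kubernetes/pkg/api"

// warningsOnly keeps just the Warning events from a list, relying on the
// Type field this commit adds; the helper itself is invented for illustration.
func warningsOnly(list *api.EventList) []api.Event {
	var out []api.Event
	for _, e := range list.Items {
		if e.Type == api.EventTypeWarning {
			out = append(out, e)
		}
	}
	return out
}

Events written by components that predate this change simply come back with an empty Type and fall through such a filter.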