diff --git a/cmd/kube-proxy/app/server.go b/cmd/kube-proxy/app/server.go index 53f56a60b60..9c755af7c09 100644 --- a/cmd/kube-proxy/app/server.go +++ b/cmd/kube-proxy/app/server.go @@ -389,5 +389,5 @@ func tryIptablesProxy(iptver iptables.IptablesVersioner) string { } func (s *ProxyServer) birthCry() { - s.Recorder.Eventf(s.Config.NodeRef, "Starting", "Starting kube-proxy.") + s.Recorder.Eventf(s.Config.NodeRef, api.EventTypeNormal, "Starting", "Starting kube-proxy.") } diff --git a/contrib/mesos/pkg/scheduler/components/controller/controller.go b/contrib/mesos/pkg/scheduler/components/controller/controller.go index 61d45307403..c2e1ee04c21 100644 --- a/contrib/mesos/pkg/scheduler/components/controller/controller.go +++ b/contrib/mesos/pkg/scheduler/components/controller/controller.go @@ -86,7 +86,7 @@ func (s *controller) scheduleOne() { dest, err := s.algorithm.Schedule(pod) if err != nil { log.V(1).Infof("Failed to schedule: %+v", pod) - s.recorder.Eventf(pod, FailedScheduling, "Error scheduling: %v", err) + s.recorder.Eventf(pod, api.EventTypeWarning, FailedScheduling, "Error scheduling: %v", err) s.error(pod, err) return } @@ -99,9 +99,9 @@ func (s *controller) scheduleOne() { } if err := s.binder.Bind(b); err != nil { log.V(1).Infof("Failed to bind pod: %+v", err) - s.recorder.Eventf(pod, FailedScheduling, "Binding rejected: %v", err) + s.recorder.Eventf(pod, api.EventTypeWarning, FailedScheduling, "Binding rejected: %v", err) s.error(pod, err) return } - s.recorder.Eventf(pod, Scheduled, "Successfully assigned %v to %v", pod.Name, dest) + s.recorder.Eventf(pod, api.EventTypeNormal, Scheduled, "Successfully assigned %v to %v", pod.Name, dest) } diff --git a/pkg/api/deep_copy_generated.go b/pkg/api/deep_copy_generated.go index 141e0a0e7ab..8d3283f6b0b 100644 --- a/pkg/api/deep_copy_generated.go +++ b/pkg/api/deep_copy_generated.go @@ -511,6 +511,7 @@ func deepCopy_api_Event(in Event, out *Event, c *conversion.Cloner) error { return err } out.Count = in.Count + out.Type = in.Type return nil } diff --git a/pkg/api/types.go b/pkg/api/types.go index d5fef2e87cf..1a07db1b121 100644 --- a/pkg/api/types.go +++ b/pkg/api/types.go @@ -1781,6 +1781,14 @@ type EventSource struct { Host string `json:"host,omitempty"` } +// Valid values for event types (new types could be added in future) +const ( + // Information only and will not cause any problems + EventTypeNormal string = "Normal" + // These events are to warn that something might go wrong + EventTypeWarning string = "Warning" +) + // Event is a report of an event somewhere in the cluster. // TODO: Decide whether to store these separately or with the object they apply to. type Event struct { @@ -1811,6 +1819,9 @@ type Event struct { // The number of times this event has occurred. Count int `json:"count,omitempty"` + + // Type of this event (Normal, Warning), new types could be added in the future. + Type string `json:"type,omitempty"` } // EventList is a list of events. 
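For reference, a minimal sketch of how the new constants and the Type field are meant to be used by callers that build api.Event objects directly; the names and message below are illustrative and not taken from this patch (it assumes the repository's pkg/api package is imported as api):

    // Hypothetical fragment (not part of this diff): an Event populated with the new Type field.
    evt := &api.Event{
        ObjectMeta:     api.ObjectMeta{Name: "my-pod.14e0dd6b", Namespace: "default"},
        InvolvedObject: api.ObjectReference{Kind: "Pod", Name: "my-pod", Namespace: "default"},
        Reason:         "Scheduled",
        Message:        "Successfully assigned my-pod to node-1",
        Type:           api.EventTypeNormal, // api.EventTypeWarning is the only other accepted value
        Count:          1,
    }
    _ = evt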
diff --git a/pkg/api/v1/conversion_generated.go b/pkg/api/v1/conversion_generated.go index dfaa0bd69a7..3d5aa596026 100644 --- a/pkg/api/v1/conversion_generated.go +++ b/pkg/api/v1/conversion_generated.go @@ -698,6 +698,7 @@ func autoconvert_api_Event_To_v1_Event(in *api.Event, out *Event, s conversion.S return err } out.Count = in.Count + out.Type = in.Type return nil } @@ -3722,6 +3723,7 @@ func autoconvert_v1_Event_To_api_Event(in *Event, out *api.Event, s conversion.S return err } out.Count = in.Count + out.Type = in.Type return nil } diff --git a/pkg/api/v1/deep_copy_generated.go b/pkg/api/v1/deep_copy_generated.go index cdb5b2743cd..54cc1f46309 100644 --- a/pkg/api/v1/deep_copy_generated.go +++ b/pkg/api/v1/deep_copy_generated.go @@ -545,6 +545,7 @@ func deepCopy_v1_Event(in Event, out *Event, c *conversion.Cloner) error { return err } out.Count = in.Count + out.Type = in.Type return nil } diff --git a/pkg/api/v1/types.go b/pkg/api/v1/types.go index 66986b1e91e..a80cbabd788 100644 --- a/pkg/api/v1/types.go +++ b/pkg/api/v1/types.go @@ -2220,6 +2220,14 @@ type EventSource struct { Host string `json:"host,omitempty"` } +// Valid values for event types (new types could be added in future) +const ( + // Information only and will not cause any problems + EventTypeNormal string = "Normal" + // These events are to warn that something might go wrong + EventTypeWarning string = "Warning" +) + // Event is a report of an event somewhere in the cluster. // TODO: Decide whether to store these separately or with the object they apply to. type Event struct { @@ -2251,6 +2259,9 @@ type Event struct { // The number of times this event has occurred. Count int `json:"count,omitempty"` + + // Type of this event (Normal, Warning), new types could be added in the future + Type string `json:"type,omitempty"` } // EventList is a list of events. diff --git a/pkg/client/record/event.go b/pkg/client/record/event.go index be6e2a4949c..c8b98c86277 100644 --- a/pkg/client/record/event.go +++ b/pkg/client/record/event.go @@ -53,6 +53,7 @@ type EventRecorder interface { // Event constructs an event from the given information and puts it in the queue for sending. // 'object' is the object this event is about. Event will make a reference-- or you may also // pass a reference to the object directly. + // 'type' of this event, and can be one of Normal, Warning. New types could be added in future // 'reason' is the reason this event is generated. 'reason' should be short and unique; it // should be in UpperCamelCase format (starting with a capital letter). "reason" will be used // to automate handling of events, so imagine people writing switch statements to handle them. @@ -60,13 +61,13 @@ type EventRecorder interface { // 'message' is intended to be human readable. // // The resulting event will be created in the same namespace as the reference object. - Event(object runtime.Object, reason, message string) + Event(object runtime.Object, eventtype, reason, message string) // Eventf is just like Event, but with Sprintf for the message field. - Eventf(object runtime.Object, reason, messageFmt string, args ...interface{}) + Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) // PastEventf is just like Eventf, but with an option to specify the event's 'timestamp' field. 
- PastEventf(object runtime.Object, timestamp unversioned.Time, reason, messageFmt string, args ...interface{}) + PastEventf(object runtime.Object, timestamp unversioned.Time, eventtype, reason, messageFmt string, args ...interface{}) } // EventBroadcaster knows how to receive events and send them to any EventSink, watcher, or log. @@ -202,7 +203,7 @@ func recordEvent(sink EventSink, event *api.Event, patch []byte, updateExistingE func (eventBroadcaster *eventBroadcasterImpl) StartLogging(logf func(format string, args ...interface{})) watch.Interface { return eventBroadcaster.StartEventWatcher( func(e *api.Event) { - logf("Event(%#v): reason: '%v' %v", e.InvolvedObject, e.Reason, e.Message) + logf("Event(%#v): type: '%v' reason: '%v' %v", e.InvolvedObject, e.Type, e.Reason, e.Message) }) } @@ -240,14 +241,19 @@ type recorderImpl struct { clock util.Clock } -func (recorder *recorderImpl) generateEvent(object runtime.Object, timestamp unversioned.Time, reason, message string) { +func (recorder *recorderImpl) generateEvent(object runtime.Object, timestamp unversioned.Time, eventtype, reason, message string) { ref, err := api.GetReference(object) if err != nil { - glog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v'", object, err, reason, message) + glog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message) return } - event := recorder.makeEvent(ref, reason, message) + if !validateEventType(eventtype) { + glog.Errorf("Unsupported event type: '%v'", eventtype) + return + } + + event := recorder.makeEvent(ref, eventtype, reason, message) event.Source = recorder.source go func() { @@ -256,19 +262,27 @@ func (recorder *recorderImpl) generateEvent(object runtime.Object, timestamp unv }() } -func (recorder *recorderImpl) Event(object runtime.Object, reason, message string) { - recorder.generateEvent(object, unversioned.Now(), reason, message) +func validateEventType(eventtype string) bool { + switch eventtype { + case api.EventTypeNormal, api.EventTypeWarning: + return true + } + return false } -func (recorder *recorderImpl) Eventf(object runtime.Object, reason, messageFmt string, args ...interface{}) { - recorder.Event(object, reason, fmt.Sprintf(messageFmt, args...)) +func (recorder *recorderImpl) Event(object runtime.Object, eventtype, reason, message string) { + recorder.generateEvent(object, unversioned.Now(), eventtype, reason, message) } -func (recorder *recorderImpl) PastEventf(object runtime.Object, timestamp unversioned.Time, reason, messageFmt string, args ...interface{}) { - recorder.generateEvent(object, timestamp, reason, fmt.Sprintf(messageFmt, args...)) +func (recorder *recorderImpl) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { + recorder.Event(object, eventtype, reason, fmt.Sprintf(messageFmt, args...)) } -func (recorder *recorderImpl) makeEvent(ref *api.ObjectReference, reason, message string) *api.Event { +func (recorder *recorderImpl) PastEventf(object runtime.Object, timestamp unversioned.Time, eventtype, reason, messageFmt string, args ...interface{}) { + recorder.generateEvent(object, timestamp, eventtype, reason, fmt.Sprintf(messageFmt, args...)) +} + +func (recorder *recorderImpl) makeEvent(ref *api.ObjectReference, eventtype, reason, message string) *api.Event { t := unversioned.Time{recorder.clock.Now()} namespace := ref.Namespace if namespace == "" { @@ -285,5 +299,6 @@ func (recorder 
*recorderImpl) makeEvent(ref *api.ObjectReference, reason, messag FirstTimestamp: t, LastTimestamp: t, Count: 1, + Type: eventtype, } } diff --git a/pkg/client/record/events_cache.go b/pkg/client/record/events_cache.go index 543af9a3e29..5d93ba6a68a 100644 --- a/pkg/client/record/events_cache.go +++ b/pkg/client/record/events_cache.go @@ -51,6 +51,7 @@ func getEventKey(event *api.Event) string { event.InvolvedObject.Name, string(event.InvolvedObject.UID), event.InvolvedObject.APIVersion, + event.Type, event.Reason, event.Message, }, @@ -71,7 +72,7 @@ func DefaultEventFilterFunc(event *api.Event) bool { // localKey - key that makes this event in the local group type EventAggregatorKeyFunc func(event *api.Event) (aggregateKey string, localKey string) -// EventAggregatorByReasonFunc aggregates events by exact match on event.Source, event.InvolvedObject, and event.Reason +// EventAggregatorByReasonFunc aggregates events by exact match on event.Source, event.InvolvedObject, event.Type and event.Reason func EventAggregatorByReasonFunc(event *api.Event) (string, string) { return strings.Join([]string{ event.Source.Component, @@ -81,6 +82,7 @@ func EventAggregatorByReasonFunc(event *api.Event) (string, string) { event.InvolvedObject.Name, string(event.InvolvedObject.UID), event.InvolvedObject.APIVersion, + event.Type, event.Reason, }, ""), event.Message @@ -179,6 +181,7 @@ func (e *EventAggregator) EventAggregate(newEvent *api.Event) (*api.Event, error InvolvedObject: newEvent.InvolvedObject, LastTimestamp: now, Message: e.messageFunc(newEvent), + Type: newEvent.Type, Reason: newEvent.Reason, Source: newEvent.Source, } diff --git a/pkg/client/record/fake.go b/pkg/client/record/fake.go index 9efed89ac89..7afe1bab205 100644 --- a/pkg/client/record/fake.go +++ b/pkg/client/record/fake.go @@ -28,13 +28,13 @@ type FakeRecorder struct { Events []string } -func (f *FakeRecorder) Event(object runtime.Object, reason, message string) { - f.Events = append(f.Events, fmt.Sprintf("%s %s", reason, message)) +func (f *FakeRecorder) Event(object runtime.Object, eventtype, reason, message string) { + f.Events = append(f.Events, fmt.Sprintf("%s %s %s", eventtype, reason, message)) } -func (f *FakeRecorder) Eventf(object runtime.Object, reason, messageFmt string, args ...interface{}) { - f.Events = append(f.Events, fmt.Sprintf(reason+" "+messageFmt, args...)) +func (f *FakeRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { + f.Events = append(f.Events, fmt.Sprintf(eventtype+" "+reason+" "+messageFmt, args...)) } -func (f *FakeRecorder) PastEventf(object runtime.Object, timestamp unversioned.Time, reason, messageFmt string, args ...interface{}) { +func (f *FakeRecorder) PastEventf(object runtime.Object, timestamp unversioned.Time, eventtype, reason, messageFmt string, args ...interface{}) { } diff --git a/pkg/controller/controller_utils.go b/pkg/controller/controller_utils.go index 2601b5b9c8b..3427cd11e45 100644 --- a/pkg/controller/controller_utils.go +++ b/pkg/controller/controller_utils.go @@ -308,11 +308,11 @@ func (r RealPodControl) createPods(nodeName, namespace string, template *api.Pod return fmt.Errorf("unable to create pods, no labels") } if newPod, err := r.KubeClient.Pods(namespace).Create(pod); err != nil { - r.Recorder.Eventf(object, "FailedCreate", "Error creating: %v", err) + r.Recorder.Eventf(object, api.EventTypeWarning, "FailedCreate", "Error creating: %v", err) return fmt.Errorf("unable to create pods: %v", err) } else { 
glog.V(4).Infof("Controller %v created pod %v", meta.Name, newPod.Name) - r.Recorder.Eventf(object, "SuccessfulCreate", "Created pod: %v", newPod.Name) + r.Recorder.Eventf(object, api.EventTypeNormal, "SuccessfulCreate", "Created pod: %v", newPod.Name) } return nil } diff --git a/pkg/controller/deployment/deployment_controller.go b/pkg/controller/deployment/deployment_controller.go index 7e743eb6ecf..1411a98e7c6 100644 --- a/pkg/controller/deployment/deployment_controller.go +++ b/pkg/controller/deployment/deployment_controller.go @@ -261,7 +261,7 @@ func (d *DeploymentController) scaleRCAndRecordEvent(rc *api.ReplicationControll } newRC, err := d.scaleRC(rc, newScale) if err == nil { - d.eventRecorder.Eventf(&deployment, "ScalingRC", "Scaled %s rc %s to %d", scalingOperation, rc.Name, newScale) + d.eventRecorder.Eventf(&deployment, api.EventTypeNormal, "ScalingRC", "Scaled %s rc %s to %d", scalingOperation, rc.Name, newScale) } return newRC, err } diff --git a/pkg/controller/node/nodecontroller.go b/pkg/controller/node/nodecontroller.go index 4c4b64c5f63..ce390243630 100644 --- a/pkg/controller/node/nodecontroller.go +++ b/pkg/controller/node/nodecontroller.go @@ -247,7 +247,7 @@ func (nc *NodeController) Run(period time.Duration) { if completed { glog.Infof("All pods terminated on %s", value.Value) - nc.recordNodeEvent(value.Value, "TerminatedAllPods", fmt.Sprintf("Terminated all Pods on Node %s.", value.Value)) + nc.recordNodeEvent(value.Value, api.EventTypeNormal, "TerminatedAllPods", fmt.Sprintf("Terminated all Pods on Node %s.", value.Value)) return true, 0 } @@ -354,7 +354,7 @@ func (nc *NodeController) monitorNodeStatus() error { for _, node := range nodes.Items { if !nc.knownNodeSet.Has(node.Name) { glog.V(1).Infof("NodeController observed a new Node: %#v", node) - nc.recordNodeEvent(node.Name, "RegisteredNode", fmt.Sprintf("Registered Node %v in NodeController", node.Name)) + nc.recordNodeEvent(node.Name, api.EventTypeNormal, "RegisteredNode", fmt.Sprintf("Registered Node %v in NodeController", node.Name)) nc.cancelPodEviction(node.Name) nc.knownNodeSet.Insert(node.Name) } @@ -369,7 +369,7 @@ func (nc *NodeController) monitorNodeStatus() error { deleted := nc.knownNodeSet.Difference(observedSet) for nodeName := range deleted { glog.V(1).Infof("NodeController observed a Node deletion: %v", nodeName) - nc.recordNodeEvent(nodeName, "RemovingNode", fmt.Sprintf("Removing Node %v from NodeController", nodeName)) + nc.recordNodeEvent(nodeName, api.EventTypeNormal, "RemovingNode", fmt.Sprintf("Removing Node %v from NodeController", nodeName)) nc.evictPods(nodeName) nc.knownNodeSet.Delete(nodeName) } @@ -440,7 +440,7 @@ func (nc *NodeController) monitorNodeStatus() error { } if _, err := instances.ExternalID(node.Name); err != nil && err == cloudprovider.InstanceNotFound { glog.Infof("Deleting node (no longer present in cloud provider): %s", node.Name) - nc.recordNodeEvent(node.Name, "DeletingNode", fmt.Sprintf("Deleting Node %v because it's not present according to cloud provider", node.Name)) + nc.recordNodeEvent(node.Name, api.EventTypeNormal, "DeletingNode", fmt.Sprintf("Deleting Node %v because it's not present according to cloud provider", node.Name)) remaining, err := nc.hasPods(node.Name) if err != nil { @@ -494,7 +494,7 @@ func (nc *NodeController) reconcileNodeCIDRs(nodes *api.NodeList) { } } -func (nc *NodeController) recordNodeEvent(nodeName string, reason string, event string) { +func (nc *NodeController) recordNodeEvent(nodeName, eventtype, reason, event string) { ref := 
&api.ObjectReference{ Kind: "Node", Name: nodeName, @@ -502,7 +502,7 @@ func (nc *NodeController) recordNodeEvent(nodeName string, reason string, event Namespace: "", } glog.V(2).Infof("Recording %s event message for node %s", event, nodeName) - nc.recorder.Eventf(ref, reason, "Node %s event: %s", nodeName, event) + nc.recorder.Eventf(ref, eventtype, reason, "Node %s event: %s", nodeName, event) } func (nc *NodeController) recordNodeStatusChange(node *api.Node, new_status string) { @@ -515,7 +515,7 @@ func (nc *NodeController) recordNodeStatusChange(node *api.Node, new_status stri glog.V(2).Infof("Recording status change %s event message for node %s", new_status, node.Name) // TODO: This requires a transaction, either both node status is updated // and event is recorded or neither should happen, see issue #6055. - nc.recorder.Eventf(ref, new_status, "Node %s status is now: %s", node.Name, new_status) + nc.recorder.Eventf(ref, api.EventTypeNormal, new_status, "Node %s status is now: %s", node.Name, new_status) } // For a given node checks its conditions and tries to update it. Returns grace period to which given node @@ -723,7 +723,7 @@ func (nc *NodeController) deletePods(nodeName string) (bool, error) { } if len(pods.Items) > 0 { - nc.recordNodeEvent(nodeName, "DeletingAllPods", fmt.Sprintf("Deleting all Pods from Node %v.", nodeName)) + nc.recordNodeEvent(nodeName, api.EventTypeNormal, "DeletingAllPods", fmt.Sprintf("Deleting all Pods from Node %v.", nodeName)) } for _, pod := range pods.Items { @@ -737,7 +737,7 @@ func (nc *NodeController) deletePods(nodeName string) (bool, error) { } glog.V(2).Infof("Starting deletion of pod %v", pod.Name) - nc.recorder.Eventf(&pod, "NodeControllerEviction", "Marking for deletion Pod %s from Node %s", pod.Name, nodeName) + nc.recorder.Eventf(&pod, api.EventTypeNormal, "NodeControllerEviction", "Marking for deletion Pod %s from Node %s", pod.Name, nodeName) if err := nc.kubeClient.Pods(pod.Namespace).Delete(pod.Name, nil); err != nil { return false, err } @@ -784,7 +784,7 @@ func (nc *NodeController) terminatePods(nodeName string, since time.Time) (bool, if remaining < 0 { remaining = 0 glog.V(2).Infof("Removing pod %v after %s grace period", pod.Name, grace) - nc.recordNodeEvent(nodeName, "TerminatingEvictedPod", fmt.Sprintf("Pod %s has exceeded the grace period for deletion after being evicted from Node %q and is being force killed", pod.Name, nodeName)) + nc.recordNodeEvent(nodeName, api.EventTypeNormal, "TerminatingEvictedPod", fmt.Sprintf("Pod %s has exceeded the grace period for deletion after being evicted from Node %q and is being force killed", pod.Name, nodeName)) if err := nc.kubeClient.Pods(pod.Namespace).Delete(pod.Name, api.NewDeleteOptions(0)); err != nil { glog.Errorf("Error completing deletion of pod %s: %v", pod.Name, err) complete = false diff --git a/pkg/controller/podautoscaler/horizontal.go b/pkg/controller/podautoscaler/horizontal.go index 5c5bc78cc52..ecffd8b7ca7 100644 --- a/pkg/controller/podautoscaler/horizontal.go +++ b/pkg/controller/podautoscaler/horizontal.go @@ -80,7 +80,7 @@ func (a *HorizontalController) computeReplicasForCPUUtilization(hpa extensions.H // TODO: what to do on partial errors (like metrics obtained for 75% of pods). 
if err != nil { - a.eventRecorder.Event(&hpa, "FailedGetMetrics", err.Error()) + a.eventRecorder.Event(&hpa, api.EventTypeWarning, "FailedGetMetrics", err.Error()) return 0, nil, fmt.Errorf("failed to get cpu utilization: %v", err) } @@ -97,14 +97,14 @@ func (a *HorizontalController) reconcileAutoscaler(hpa extensions.HorizontalPodA scale, err := a.client.Extensions().Scales(hpa.Namespace).Get(hpa.Spec.ScaleRef.Kind, hpa.Spec.ScaleRef.Name) if err != nil { - a.eventRecorder.Event(&hpa, "FailedGetScale", err.Error()) + a.eventRecorder.Event(&hpa, api.EventTypeWarning, "FailedGetScale", err.Error()) return fmt.Errorf("failed to query scale subresource for %s: %v", reference, err) } currentReplicas := scale.Status.Replicas desiredReplicas, currentUtilization, err := a.computeReplicasForCPUUtilization(hpa, scale) if err != nil { - a.eventRecorder.Event(&hpa, "FailedComputeReplicas", err.Error()) + a.eventRecorder.Event(&hpa, api.EventTypeWarning, "FailedComputeReplicas", err.Error()) return fmt.Errorf("failed to compute desired number of replicas based on CPU utilization for %s: %v", reference, err) } @@ -145,10 +145,10 @@ func (a *HorizontalController) reconcileAutoscaler(hpa extensions.HorizontalPodA scale.Spec.Replicas = desiredReplicas _, err = a.client.Extensions().Scales(hpa.Namespace).Update(hpa.Spec.ScaleRef.Kind, scale) if err != nil { - a.eventRecorder.Eventf(&hpa, "FailedRescale", "New size: %d; error: %v", desiredReplicas, err.Error()) + a.eventRecorder.Eventf(&hpa, api.EventTypeWarning, "FailedRescale", "New size: %d; error: %v", desiredReplicas, err.Error()) return fmt.Errorf("failed to rescale %s: %v", reference, err) } - a.eventRecorder.Eventf(&hpa, "SuccessfulRescale", "New size: %d", desiredReplicas) + a.eventRecorder.Eventf(&hpa, api.EventTypeNormal, "SuccessfulRescale", "New size: %d", desiredReplicas) glog.Infof("Successfull rescale of %s, old size: %d, new size: %d", hpa.Name, currentReplicas, desiredReplicas) } else { @@ -168,7 +168,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpa extensions.HorizontalPodA _, err = a.client.Extensions().HorizontalPodAutoscalers(hpa.Namespace).UpdateStatus(&hpa) if err != nil { - a.eventRecorder.Event(&hpa, "FailedUpdateStatus", err.Error()) + a.eventRecorder.Event(&hpa, api.EventTypeWarning, "FailedUpdateStatus", err.Error()) return fmt.Errorf("failed to update status for %s: %v", hpa.Name, err) } return nil diff --git a/pkg/controller/service/servicecontroller.go b/pkg/controller/service/servicecontroller.go index 8f238657487..be331bd3f44 100644 --- a/pkg/controller/service/servicecontroller.go +++ b/pkg/controller/service/servicecontroller.go @@ -245,7 +245,7 @@ func (s *ServiceController) processDelta(delta *cache.Delta) (error, bool) { message += " (will not retry): " } message += err.Error() - s.eventRecorder.Event(service, "CreatingLoadBalancerFailed", message) + s.eventRecorder.Event(service, api.EventTypeWarning, "CreatingLoadBalancerFailed", message) return err, retry } // Always update the cache upon success. 
@@ -255,14 +255,14 @@ func (s *ServiceController) processDelta(delta *cache.Delta) (error, bool) { cachedService.appliedState = service s.cache.set(namespacedName.String(), cachedService) case cache.Deleted: - s.eventRecorder.Event(service, "DeletingLoadBalancer", "Deleting load balancer") + s.eventRecorder.Event(service, api.EventTypeNormal, "DeletingLoadBalancer", "Deleting load balancer") err := s.balancer.EnsureTCPLoadBalancerDeleted(s.loadBalancerName(service), s.zone.Region) if err != nil { message := "Error deleting load balancer (will retry): " + err.Error() - s.eventRecorder.Event(service, "DeletingLoadBalancerFailed", message) + s.eventRecorder.Event(service, api.EventTypeWarning, "DeletingLoadBalancerFailed", message) return err, retryable } - s.eventRecorder.Event(service, "DeletedLoadBalancer", "Deleted load balancer") + s.eventRecorder.Event(service, api.EventTypeNormal, "DeletedLoadBalancer", "Deleted load balancer") s.cache.delete(namespacedName.String()) default: glog.Errorf("Unexpected delta type: %v", delta.Type) @@ -305,11 +305,11 @@ func (s *ServiceController) createLoadBalancerIfNeeded(namespacedName types.Name if needDelete { glog.Infof("Deleting existing load balancer for service %s that no longer needs a load balancer.", namespacedName) - s.eventRecorder.Event(service, "DeletingLoadBalancer", "Deleting load balancer") + s.eventRecorder.Event(service, api.EventTypeNormal, "DeletingLoadBalancer", "Deleting load balancer") if err := s.balancer.EnsureTCPLoadBalancerDeleted(s.loadBalancerName(service), s.zone.Region); err != nil { return err, retryable } - s.eventRecorder.Event(service, "DeletedLoadBalancer", "Deleted load balancer") + s.eventRecorder.Event(service, api.EventTypeNormal, "DeletedLoadBalancer", "Deleted load balancer") } service.Status.LoadBalancer = api.LoadBalancerStatus{} @@ -319,12 +319,12 @@ func (s *ServiceController) createLoadBalancerIfNeeded(namespacedName types.Name // TODO: We could do a dry-run here if wanted to avoid the spurious cloud-calls & events when we restart // The load balancer doesn't exist yet, so create it. 
- s.eventRecorder.Event(service, "CreatingLoadBalancer", "Creating load balancer") + s.eventRecorder.Event(service, api.EventTypeNormal, "CreatingLoadBalancer", "Creating load balancer") err := s.createLoadBalancer(service) if err != nil { return fmt.Errorf("Failed to create load balancer for service %s: %v", namespacedName, err), retryable } - s.eventRecorder.Event(service, "CreatedLoadBalancer", "Created load balancer") + s.eventRecorder.Event(service, api.EventTypeNormal, "CreatedLoadBalancer", "Created load balancer") } // Write the state if changed @@ -686,7 +686,7 @@ func (s *ServiceController) lockedUpdateLoadBalancerHosts(service *api.Service, name := cloudprovider.GetLoadBalancerName(service) err := s.balancer.UpdateTCPLoadBalancer(name, s.zone.Region, hosts) if err == nil { - s.eventRecorder.Event(service, "UpdatedLoadBalancer", "Updated load balancer with new hosts") + s.eventRecorder.Event(service, api.EventTypeNormal, "UpdatedLoadBalancer", "Updated load balancer with new hosts") return nil } @@ -697,7 +697,7 @@ func (s *ServiceController) lockedUpdateLoadBalancerHosts(service *api.Service, return nil } - s.eventRecorder.Eventf(service, "LoadBalancerUpdateFailed", "Error updating load balancer with new hosts %v: %v", hosts, err) + s.eventRecorder.Eventf(service, api.EventTypeWarning, "LoadBalancerUpdateFailed", "Error updating load balancer with new hosts %v: %v", hosts, err) return err } diff --git a/pkg/kubectl/describe.go b/pkg/kubectl/describe.go index c5679cb66d3..f645e0c24bb 100644 --- a/pkg/kubectl/describe.go +++ b/pkg/kubectl/describe.go @@ -1512,15 +1512,16 @@ func DescribeEvents(el *api.EventList, w io.Writer) { return } sort.Sort(SortableEvents(el.Items)) - fmt.Fprint(w, "Events:\n FirstSeen\tLastSeen\tCount\tFrom\tSubobjectPath\tReason\tMessage\n") - fmt.Fprint(w, " ─────────\t────────\t─────\t────\t─────────────\t──────\t───────\n") + fmt.Fprint(w, "Events:\n FirstSeen\tLastSeen\tCount\tFrom\tSubobjectPath\tType\tReason\tMessage\n") + fmt.Fprint(w, " ─────────\t────────\t─────\t────\t─────────────\t────────\t──────\t───────\n") for _, e := range el.Items { - fmt.Fprintf(w, " %s\t%s\t%d\t%v\t%v\t%v\t%v\n", + fmt.Fprintf(w, " %s\t%s\t%d\t%v\t%v\t%v\t%v\t%v\n", translateTimestamp(e.FirstTimestamp), translateTimestamp(e.LastTimestamp), e.Count, e.Source, e.InvolvedObject.FieldPath, + e.Type, e.Reason, e.Message) } diff --git a/pkg/kubectl/resource_printer.go b/pkg/kubectl/resource_printer.go index 77d1085bfe9..42ac2cdf2a3 100644 --- a/pkg/kubectl/resource_printer.go +++ b/pkg/kubectl/resource_printer.go @@ -394,7 +394,7 @@ var ingressColumns = []string{"NAME", "RULE", "BACKEND", "ADDRESS"} var endpointColumns = []string{"NAME", "ENDPOINTS", "AGE"} var nodeColumns = []string{"NAME", "LABELS", "STATUS", "AGE"} var daemonSetColumns = []string{"NAME", "CONTAINER(S)", "IMAGE(S)", "SELECTOR", "NODE-SELECTOR"} -var eventColumns = []string{"FIRSTSEEN", "LASTSEEN", "COUNT", "NAME", "KIND", "SUBOBJECT", "REASON", "SOURCE", "MESSAGE"} +var eventColumns = []string{"FIRSTSEEN", "LASTSEEN", "COUNT", "NAME", "KIND", "SUBOBJECT", "TYPE", "REASON", "SOURCE", "MESSAGE"} var limitRangeColumns = []string{"NAME", "AGE"} var resourceQuotaColumns = []string{"NAME", "AGE"} var namespaceColumns = []string{"NAME", "LABELS", "STATUS", "AGE"} @@ -1203,13 +1203,14 @@ func printEvent(event *api.Event, w io.Writer, withNamespace bool, wide bool, sh } } if _, err := fmt.Fprintf( - w, "%s\t%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s", + w, "%s\t%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s", 
translateTimestamp(event.FirstTimestamp), translateTimestamp(event.LastTimestamp), event.Count, event.InvolvedObject.Name, event.InvolvedObject.Kind, event.InvolvedObject.FieldPath, + event.Type, event.Reason, event.Source, event.Message, diff --git a/pkg/kubelet/config/config.go b/pkg/kubelet/config/config.go index ff0bd1a0459..10d921e008c 100644 --- a/pkg/kubelet/config/config.go +++ b/pkg/kubelet/config/config.go @@ -327,7 +327,7 @@ func filterInvalidPods(pods []*api.Pod, source string, recorder record.EventReco name := bestPodIdentString(pod) err := utilerrors.NewAggregate(errlist) glog.Warningf("Pod[%d] (%s) from %s failed validation, ignoring: %v", i+1, name, source, err) - recorder.Eventf(pod, kubecontainer.FailedValidation, "Error validating pod %s from %s, ignoring: %v", name, source, err) + recorder.Eventf(pod, api.EventTypeWarning, kubecontainer.FailedValidation, "Error validating pod %s from %s, ignoring: %v", name, source, err) continue } filtered = append(filtered, pod) diff --git a/pkg/kubelet/container/helpers.go b/pkg/kubelet/container/helpers.go index 50619ad7fe6..577be5ec477 100644 --- a/pkg/kubelet/container/helpers.go +++ b/pkg/kubelet/container/helpers.go @@ -132,21 +132,21 @@ func (irecorder *innerEventRecorder) shouldRecordEvent(object runtime.Object) (* return nil, false } -func (irecorder *innerEventRecorder) Event(object runtime.Object, reason, message string) { +func (irecorder *innerEventRecorder) Event(object runtime.Object, eventtype, reason, message string) { if ref, ok := irecorder.shouldRecordEvent(object); ok { - irecorder.recorder.Event(ref, reason, message) + irecorder.recorder.Event(ref, eventtype, reason, message) } } -func (irecorder *innerEventRecorder) Eventf(object runtime.Object, reason, messageFmt string, args ...interface{}) { +func (irecorder *innerEventRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { if ref, ok := irecorder.shouldRecordEvent(object); ok { - irecorder.recorder.Eventf(ref, reason, messageFmt, args...) + irecorder.recorder.Eventf(ref, eventtype, reason, messageFmt, args...) } } -func (irecorder *innerEventRecorder) PastEventf(object runtime.Object, timestamp unversioned.Time, reason, messageFmt string, args ...interface{}) { +func (irecorder *innerEventRecorder) PastEventf(object runtime.Object, timestamp unversioned.Time, eventtype, reason, messageFmt string, args ...interface{}) { if ref, ok := irecorder.shouldRecordEvent(object); ok { - irecorder.recorder.PastEventf(ref, timestamp, reason, messageFmt, args...) + irecorder.recorder.PastEventf(ref, timestamp, eventtype, reason, messageFmt, args...) } } diff --git a/pkg/kubelet/container/image_puller.go b/pkg/kubelet/container/image_puller.go index 0bed58b6f9f..07a313f150d 100644 --- a/pkg/kubelet/container/image_puller.go +++ b/pkg/kubelet/container/image_puller.go @@ -63,9 +63,9 @@ func shouldPullImage(container *api.Container, imagePresent bool) bool { } // records an event using ref, event msg. 
log to glog using prefix, msg, logFn -func (puller *imagePuller) logIt(ref *api.ObjectReference, event, prefix, msg string, logFn func(args ...interface{})) { +func (puller *imagePuller) logIt(ref *api.ObjectReference, eventtype, event, prefix, msg string, logFn func(args ...interface{})) { if ref != nil { - puller.recorder.Event(ref, event, msg) + puller.recorder.Event(ref, eventtype, event, msg) } else { logFn(fmt.Sprint(prefix, " ", msg)) } @@ -83,18 +83,18 @@ func (puller *imagePuller) PullImage(pod *api.Pod, container *api.Container, pul present, err := puller.runtime.IsImagePresent(spec) if err != nil { msg := fmt.Sprintf("Failed to inspect image %q: %v", container.Image, err) - puller.logIt(ref, FailedToInspectImage, logPrefix, msg, glog.Warning) + puller.logIt(ref, api.EventTypeWarning, FailedToInspectImage, logPrefix, msg, glog.Warning) return ErrImageInspect, msg } if !shouldPullImage(container, present) { if present { msg := fmt.Sprintf("Container image %q already present on machine", container.Image) - puller.logIt(ref, "Pulled", logPrefix, msg, glog.Info) + puller.logIt(ref, api.EventTypeNormal, "Pulled", logPrefix, msg, glog.Info) return nil, "" } else { msg := fmt.Sprintf("Container image %q is not present with pull policy of Never", container.Image) - puller.logIt(ref, ErrImageNeverPullPolicy, logPrefix, msg, glog.Warning) + puller.logIt(ref, api.EventTypeWarning, ErrImageNeverPullPolicy, logPrefix, msg, glog.Warning) return ErrImageNeverPull, msg } } @@ -102,12 +102,12 @@ func (puller *imagePuller) PullImage(pod *api.Pod, container *api.Container, pul backOffKey := fmt.Sprintf("%s_%s", pod.Name, container.Image) if puller.backOff.IsInBackOffSinceUpdate(backOffKey, puller.backOff.Clock.Now()) { msg := fmt.Sprintf("Back-off pulling image %q", container.Image) - puller.logIt(ref, BackOffPullImage, logPrefix, msg, glog.Info) + puller.logIt(ref, api.EventTypeNormal, BackOffPullImage, logPrefix, msg, glog.Info) return ErrImagePullBackOff, msg } - puller.logIt(ref, "Pulling", logPrefix, fmt.Sprintf("pulling image %q", container.Image), glog.Info) + puller.logIt(ref, api.EventTypeNormal, "Pulling", logPrefix, fmt.Sprintf("pulling image %q", container.Image), glog.Info) if err := puller.runtime.PullImage(spec, pullSecrets); err != nil { - puller.logIt(ref, "Failed", logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, err), glog.Warning) + puller.logIt(ref, api.EventTypeWarning, "Failed", logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, err), glog.Warning) puller.backOff.Next(backOffKey, puller.backOff.Clock.Now()) if err == RegistryUnavailable { msg := fmt.Sprintf("image pull failed for %s because the registry is temporarily unavailable.", container.Image) @@ -116,7 +116,7 @@ func (puller *imagePuller) PullImage(pod *api.Pod, container *api.Container, pul return ErrImagePull, err.Error() } } - puller.logIt(ref, "Pulled", logPrefix, fmt.Sprintf("Successfully pulled image %q", container.Image), glog.Info) + puller.logIt(ref, api.EventTypeNormal, "Pulled", logPrefix, fmt.Sprintf("Successfully pulled image %q", container.Image), glog.Info) puller.backOff.GC() return nil, "" } diff --git a/pkg/kubelet/container/serialized_image_puller.go b/pkg/kubelet/container/serialized_image_puller.go index 1458cf647ac..e8d3deffb64 100644 --- a/pkg/kubelet/container/serialized_image_puller.go +++ b/pkg/kubelet/container/serialized_image_puller.go @@ -64,9 +64,9 @@ func NewSerializedImagePuller(recorder record.EventRecorder, runtime Runtime, im } // records an 
event using ref, event msg. log to glog using prefix, msg, logFn -func (puller *serializedImagePuller) logIt(ref *api.ObjectReference, event, prefix, msg string, logFn func(args ...interface{})) { +func (puller *serializedImagePuller) logIt(ref *api.ObjectReference, eventtype, event, prefix, msg string, logFn func(args ...interface{})) { if ref != nil { - puller.recorder.Event(ref, event, msg) + puller.recorder.Event(ref, eventtype, event, msg) } else { logFn(fmt.Sprint(prefix, " ", msg)) } @@ -84,18 +84,18 @@ func (puller *serializedImagePuller) PullImage(pod *api.Pod, container *api.Cont present, err := puller.runtime.IsImagePresent(spec) if err != nil { msg := fmt.Sprintf("Failed to inspect image %q: %v", container.Image, err) - puller.logIt(ref, FailedToInspectImage, logPrefix, msg, glog.Warning) + puller.logIt(ref, api.EventTypeWarning, FailedToInspectImage, logPrefix, msg, glog.Warning) return ErrImageInspect, msg } if !shouldPullImage(container, present) { if present { msg := fmt.Sprintf("Container image %q already present on machine", container.Image) - puller.logIt(ref, PulledImage, logPrefix, msg, glog.Info) + puller.logIt(ref, api.EventTypeNormal, PulledImage, logPrefix, msg, glog.Info) return nil, "" } else { msg := fmt.Sprintf("Container image %q is not present with pull policy of Never", container.Image) - puller.logIt(ref, ErrImageNeverPullPolicy, logPrefix, msg, glog.Warning) + puller.logIt(ref, api.EventTypeWarning, ErrImageNeverPullPolicy, logPrefix, msg, glog.Warning) return ErrImageNeverPull, msg } } @@ -103,7 +103,7 @@ func (puller *serializedImagePuller) PullImage(pod *api.Pod, container *api.Cont backOffKey := fmt.Sprintf("%s_%s", pod.Name, container.Image) if puller.backOff.IsInBackOffSinceUpdate(backOffKey, puller.backOff.Clock.Now()) { msg := fmt.Sprintf("Back-off pulling image %q", container.Image) - puller.logIt(ref, BackOffPullImage, logPrefix, msg, glog.Info) + puller.logIt(ref, api.EventTypeNormal, BackOffPullImage, logPrefix, msg, glog.Info) return ErrImagePullBackOff, msg } @@ -118,7 +118,7 @@ func (puller *serializedImagePuller) PullImage(pod *api.Pod, container *api.Cont returnChan: returnChan, } if err = <-returnChan; err != nil { - puller.logIt(ref, FailedToPullImage, logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, err), glog.Warning) + puller.logIt(ref, api.EventTypeWarning, FailedToPullImage, logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, err), glog.Warning) puller.backOff.Next(backOffKey, puller.backOff.Clock.Now()) if err == RegistryUnavailable { msg := fmt.Sprintf("image pull failed for %s because the registry is temporarily unavailable.", container.Image) @@ -127,14 +127,14 @@ func (puller *serializedImagePuller) PullImage(pod *api.Pod, container *api.Cont return ErrImagePull, err.Error() } } - puller.logIt(ref, PulledImage, logPrefix, fmt.Sprintf("Successfully pulled image %q", container.Image), glog.Info) + puller.logIt(ref, api.EventTypeNormal, PulledImage, logPrefix, fmt.Sprintf("Successfully pulled image %q", container.Image), glog.Info) puller.backOff.GC() return nil, "" } func (puller *serializedImagePuller) pullImages() { for pullRequest := range puller.pullRequests { - puller.logIt(pullRequest.ref, PullingImage, pullRequest.logPrefix, fmt.Sprintf("pulling image %q", pullRequest.container.Image), glog.Info) + puller.logIt(pullRequest.ref, api.EventTypeNormal, PullingImage, pullRequest.logPrefix, fmt.Sprintf("pulling image %q", pullRequest.container.Image), glog.Info) pullRequest.returnChan <- 
puller.runtime.PullImage(pullRequest.spec, pullRequest.pullSecrets) } } diff --git a/pkg/kubelet/dockertools/manager.go b/pkg/kubelet/dockertools/manager.go index e83205cdaba..76a416fd3c3 100644 --- a/pkg/kubelet/dockertools/manager.go +++ b/pkg/kubelet/dockertools/manager.go @@ -757,11 +757,11 @@ func (dm *DockerManager) runContainer( securityContextProvider.ModifyContainerConfig(pod, container, dockerOpts.Config) dockerContainer, err := dm.client.CreateContainer(dockerOpts) if err != nil { - dm.recorder.Eventf(ref, kubecontainer.FailedToCreateContainer, "Failed to create docker container with error: %v", err) + dm.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.FailedToCreateContainer, "Failed to create docker container with error: %v", err) return kubecontainer.ContainerID{}, err } - dm.recorder.Eventf(ref, kubecontainer.CreatedContainer, "Created container with docker id %v", util.ShortenString(dockerContainer.ID, 12)) + dm.recorder.Eventf(ref, api.EventTypeNormal, kubecontainer.CreatedContainer, "Created container with docker id %v", util.ShortenString(dockerContainer.ID, 12)) podHasSELinuxLabel := pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.SELinuxOptions != nil binds := makeMountBindings(opts.Mounts, podHasSELinuxLabel) @@ -817,11 +817,11 @@ func (dm *DockerManager) runContainer( securityContextProvider.ModifyHostConfig(pod, container, hc) if err = dm.client.StartContainer(dockerContainer.ID, hc); err != nil { - dm.recorder.Eventf(ref, kubecontainer.FailedToStartContainer, + dm.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.FailedToStartContainer, "Failed to start container with docker id %v with error: %v", util.ShortenString(dockerContainer.ID, 12), err) return kubecontainer.ContainerID{}, err } - dm.recorder.Eventf(ref, kubecontainer.StartedContainer, "Started container with docker id %v", util.ShortenString(dockerContainer.ID, 12)) + dm.recorder.Eventf(ref, api.EventTypeNormal, kubecontainer.StartedContainer, "Started container with docker id %v", util.ShortenString(dockerContainer.ID, 12)) return kubetypes.DockerID(dockerContainer.ID).ContainerID(), nil } @@ -1437,7 +1437,7 @@ func (dm *DockerManager) killContainer(containerID kubecontainer.ContainerID, co if reason != "" { message = fmt.Sprint(message, ": ", reason) } - dm.recorder.Event(ref, kubecontainer.KillingContainer, message) + dm.recorder.Event(ref, api.EventTypeNormal, kubecontainer.KillingContainer, message) dm.containerRefManager.ClearRef(containerID) } return err @@ -1817,7 +1817,7 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, pod if err != nil { glog.Errorf("Couldn't make a ref to pod %q: '%v'", podFullName, err) } - dm.recorder.Eventf(ref, "InfraChanged", "Pod infrastructure changed, it will be killed and re-created.") + dm.recorder.Eventf(ref, api.EventTypeNormal, "InfraChanged", "Pod infrastructure changed, it will be killed and re-created.") } if containerChanges.StartInfraContainer || (len(containerChanges.ContainersToKeep) == 0 && len(containerChanges.ContainersToStart) == 0) { if len(containerChanges.ContainersToKeep) == 0 && len(containerChanges.ContainersToStart) == 0 { @@ -2049,7 +2049,7 @@ func (dm *DockerManager) doBackOff(pod *api.Pod, container *api.Container, podSt stableName, _ := BuildDockerName(dockerName, container) if backOff.IsInBackOffSince(stableName, ts.Time) { if ref, err := kubecontainer.GenerateContainerRef(pod, container); err == nil { - dm.recorder.Eventf(ref, kubecontainer.BackOffStartContainer, "Back-off restarting 
failed docker container") + dm.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.BackOffStartContainer, "Back-off restarting failed docker container") } err := fmt.Errorf("Back-off %s restarting failed container=%s pod=%s", backOff.Get(stableName), container.Name, kubecontainer.GetPodFullName(pod)) dm.updateReasonCache(pod, container, kubecontainer.ErrCrashLoopBackOff.Error(), err) diff --git a/pkg/kubelet/image_manager.go b/pkg/kubelet/image_manager.go index 6ffd0a839c1..dd31551f453 100644 --- a/pkg/kubelet/image_manager.go +++ b/pkg/kubelet/image_manager.go @@ -195,7 +195,7 @@ func (im *realImageManager) GarbageCollect() error { // Check valid capacity. if capacity == 0 { err := fmt.Errorf("invalid capacity %d on device %q at mount point %q", capacity, fsInfo.Device, fsInfo.Mountpoint) - im.recorder.Eventf(im.nodeRef, container.InvalidDiskCapacity, err.Error()) + im.recorder.Eventf(im.nodeRef, api.EventTypeWarning, container.InvalidDiskCapacity, err.Error()) return err } @@ -211,7 +211,7 @@ func (im *realImageManager) GarbageCollect() error { if freed < amountToFree { err := fmt.Errorf("failed to garbage collect required amount of images. Wanted to free %d, but freed %d", amountToFree, freed) - im.recorder.Eventf(im.nodeRef, container.FreeDiskSpaceFailed, err.Error()) + im.recorder.Eventf(im.nodeRef, api.EventTypeWarning, container.FreeDiskSpaceFailed, err.Error()) return err } } diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index 2da474b2c97..a92dacf902f 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -870,7 +870,7 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) { glog.Warning("No api server defined - no node status update will be sent.") } if err := kl.initializeModules(); err != nil { - kl.recorder.Eventf(kl.nodeRef, kubecontainer.KubeletSetupFailed, err.Error()) + kl.recorder.Eventf(kl.nodeRef, api.EventTypeWarning, kubecontainer.KubeletSetupFailed, err.Error()) glog.Error(err) kl.runtimeState.setInitError(err) } @@ -1536,7 +1536,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont // Mount volumes. 
podVolumes, err := kl.mountExternalVolumes(pod) if err != nil { - kl.recorder.Eventf(ref, kubecontainer.FailedMountVolume, "Unable to mount volumes for pod %q: %v", podFullName, err) + kl.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.FailedMountVolume, "Unable to mount volumes for pod %q: %v", podFullName, err) glog.Errorf("Unable to mount volumes for pod %q: %v; skipping pod", podFullName, err) return err } @@ -1601,7 +1601,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont } if egress != nil || ingress != nil { if podUsesHostNetwork(pod) { - kl.recorder.Event(pod, kubecontainer.HostNetworkNotSupported, "Bandwidth shaping is not currently supported on the host network") + kl.recorder.Event(pod, api.EventTypeWarning, kubecontainer.HostNetworkNotSupported, "Bandwidth shaping is not currently supported on the host network") } else if kl.shaper != nil { status, found := kl.statusManager.GetPodStatus(pod.UID) if !found { @@ -1616,7 +1616,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont err = kl.shaper.ReconcileCIDR(fmt.Sprintf("%s/32", status.PodIP), egress, ingress) } } else { - kl.recorder.Event(pod, kubecontainer.UndefinedShaper, "Pod requests bandwidth shaping, but the shaper is undefined") + kl.recorder.Event(pod, api.EventTypeWarning, kubecontainer.UndefinedShaper, "Pod requests bandwidth shaping, but the shaper is undefined") } } @@ -2106,7 +2106,7 @@ func (kl *Kubelet) matchesNodeSelector(pod *api.Pod) bool { } func (kl *Kubelet) rejectPod(pod *api.Pod, reason, message string) { - kl.recorder.Eventf(pod, reason, message) + kl.recorder.Eventf(pod, api.EventTypeWarning, reason, message) kl.statusManager.SetPodStatus(pod, api.PodStatus{ Phase: api.PodFailed, Reason: reason, @@ -2507,11 +2507,11 @@ func (kl *Kubelet) updateNodeStatus() error { return fmt.Errorf("update node status exceeds retry count") } -func (kl *Kubelet) recordNodeStatusEvent(event string) { +func (kl *Kubelet) recordNodeStatusEvent(eventtype, event string) { glog.V(2).Infof("Recording %s event message for node %s", event, kl.nodeName) // TODO: This requires a transaction, either both node status is updated // and event is recorded or neither should happen, see issue #6055. - kl.recorder.Eventf(kl.nodeRef, event, "Node %s status is now: %s", kl.nodeName, event) + kl.recorder.Eventf(kl.nodeRef, eventtype, event, "Node %s status is now: %s", kl.nodeName, event) } // Maintains Node.Spec.Unschedulable value from previous run of tryUpdateNodeStatus() @@ -2622,7 +2622,7 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error { node.Status.NodeInfo.BootID != info.BootID { // TODO: This requires a transaction, either both node status is updated // and event is recorded or neither should happen, see issue #6055. 
- kl.recorder.Eventf(kl.nodeRef, kubecontainer.NodeRebooted, + kl.recorder.Eventf(kl.nodeRef, api.EventTypeWarning, kubecontainer.NodeRebooted, "Node %s has been rebooted, boot id: %s", kl.nodeName, info.BootID) } node.Status.NodeInfo.BootID = info.BootID @@ -2684,9 +2684,9 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error { } if !updated || oldNodeReadyConditionStatus != newNodeReadyCondition.Status { if newNodeReadyCondition.Status == api.ConditionTrue { - kl.recordNodeStatusEvent(kubecontainer.NodeReady) + kl.recordNodeStatusEvent(api.EventTypeNormal, kubecontainer.NodeReady) } else { - kl.recordNodeStatusEvent(kubecontainer.NodeNotReady) + kl.recordNodeStatusEvent(api.EventTypeNormal, kubecontainer.NodeNotReady) } } @@ -2728,7 +2728,7 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error { nodeOODCondition.Reason = "KubeletOutOfDisk" nodeOODCondition.Message = "out of disk space" nodeOODCondition.LastTransitionTime = currentTime - kl.recordNodeStatusEvent("NodeOutOfDisk") + kl.recordNodeStatusEvent(api.EventTypeNormal, "NodeOutOfDisk") } } else { if nodeOODCondition.Status != api.ConditionFalse { @@ -2736,7 +2736,7 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error { nodeOODCondition.Reason = "KubeletHasSufficientDisk" nodeOODCondition.Message = "kubelet has sufficient disk space available" nodeOODCondition.LastTransitionTime = currentTime - kl.recordNodeStatusEvent("NodeHasSufficientDisk") + kl.recordNodeStatusEvent(api.EventTypeNormal, "NodeHasSufficientDisk") } } @@ -2746,9 +2746,9 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error { if oldNodeUnschedulable != node.Spec.Unschedulable { if node.Spec.Unschedulable { - kl.recordNodeStatusEvent(kubecontainer.NodeNotSchedulable) + kl.recordNodeStatusEvent(api.EventTypeNormal, kubecontainer.NodeNotSchedulable) } else { - kl.recordNodeStatusEvent(kubecontainer.NodeSchedulable) + kl.recordNodeStatusEvent(api.EventTypeNormal, kubecontainer.NodeSchedulable) } oldNodeUnschedulable = node.Spec.Unschedulable } @@ -2933,7 +2933,7 @@ func (kl *Kubelet) generatePodStatus(pod *api.Pod) (api.PodStatus, error) { // TODO: Consider include the container information. if kl.pastActiveDeadline(pod) { reason := "DeadlineExceeded" - kl.recorder.Eventf(pod, reason, "Pod was active on the node longer than specified deadline") + kl.recorder.Eventf(pod, api.EventTypeNormal, reason, "Pod was active on the node longer than specified deadline") return api.PodStatus{ Phase: api.PodFailed, Reason: reason, @@ -3058,7 +3058,7 @@ func (kl *Kubelet) PortForward(podFullName string, podUID types.UID, port uint16 // BirthCry sends an event that the kubelet has started up. func (kl *Kubelet) BirthCry() { // Make an event that kubelet restarted. 
- kl.recorder.Eventf(kl.nodeRef, kubecontainer.StartingKubelet, "Starting kubelet.") + kl.recorder.Eventf(kl.nodeRef, api.EventTypeNormal, kubecontainer.StartingKubelet, "Starting kubelet.") } func (kl *Kubelet) StreamingConnectionIdleTimeout() time.Duration { diff --git a/pkg/kubelet/oom_watcher.go b/pkg/kubelet/oom_watcher.go index a568ed49bdc..2f9feaddf5f 100644 --- a/pkg/kubelet/oom_watcher.go +++ b/pkg/kubelet/oom_watcher.go @@ -64,7 +64,7 @@ func (ow *realOOMWatcher) Start(ref *api.ObjectReference) error { for event := range eventChannel.GetChannel() { glog.V(2).Infof("Got sys oom event from cadvisor: %v", event) - ow.recorder.PastEventf(ref, unversioned.Time{Time: event.Timestamp}, systemOOMEvent, "System OOM encountered") + ow.recorder.PastEventf(ref, unversioned.Time{Time: event.Timestamp}, api.EventTypeWarning, systemOOMEvent, "System OOM encountered") } glog.Errorf("Unexpectedly stopped receiving OOM notifications from cAdvisor") }() diff --git a/pkg/kubelet/pod_workers.go b/pkg/kubelet/pod_workers.go index a3301c7d723..d05b647d650 100644 --- a/pkg/kubelet/pod_workers.go +++ b/pkg/kubelet/pod_workers.go @@ -123,7 +123,7 @@ func (p *podWorkers) managePodLoop(podUpdates <-chan workUpdate) { minRuntimeCacheTime = time.Now() if err != nil { glog.Errorf("Error syncing pod %s, skipping: %v", newWork.pod.UID, err) - p.recorder.Eventf(newWork.pod, kubecontainer.FailedSync, "Error syncing pod, skipping: %v", err) + p.recorder.Eventf(newWork.pod, api.EventTypeWarning, kubecontainer.FailedSync, "Error syncing pod, skipping: %v", err) return err } newWork.updateCompleteFn() diff --git a/pkg/kubelet/prober/prober.go b/pkg/kubelet/prober/prober.go index 66d7ba64a49..7a7d85676ce 100644 --- a/pkg/kubelet/prober/prober.go +++ b/pkg/kubelet/prober/prober.go @@ -96,12 +96,12 @@ func (pb *prober) probe(probeType probeType, pod *api.Pod, status api.PodStatus, if err != nil { glog.V(1).Infof("%s probe for %q errored: %v", probeType, ctrName, err) if hasRef { - pb.recorder.Eventf(ref, kubecontainer.ContainerUnhealthy, "%s probe errored: %v", probeType, err) + pb.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.ContainerUnhealthy, "%s probe errored: %v", probeType, err) } } else { // result != probe.Success glog.V(1).Infof("%s probe for %q failed (%v): %s", probeType, ctrName, result, output) if hasRef { - pb.recorder.Eventf(ref, kubecontainer.ContainerUnhealthy, "%s probe failed: %s", probeType, output) + pb.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.ContainerUnhealthy, "%s probe failed: %s", probeType, output) } } return results.Failure, err diff --git a/pkg/kubelet/rkt/rkt.go b/pkg/kubelet/rkt/rkt.go index 7cf400cffd5..84acf4ffc80 100644 --- a/pkg/kubelet/rkt/rkt.go +++ b/pkg/kubelet/rkt/rkt.go @@ -676,13 +676,13 @@ func (r *Runtime) generateEvents(runtimePod *kubecontainer.Pod, reason string, f uuid := util.ShortenString(id.uuid, 8) switch reason { case "Created": - r.recorder.Eventf(ref, kubecontainer.CreatedContainer, "Created with rkt id %v", uuid) + r.recorder.Eventf(ref, api.EventTypeNormal, kubecontainer.CreatedContainer, "Created with rkt id %v", uuid) case "Started": - r.recorder.Eventf(ref, kubecontainer.StartedContainer, "Started with rkt id %v", uuid) + r.recorder.Eventf(ref, api.EventTypeNormal, kubecontainer.StartedContainer, "Started with rkt id %v", uuid) case "Failed": - r.recorder.Eventf(ref, kubecontainer.FailedToStartContainer, "Failed to start with rkt id %v with error %v", uuid, failure) + r.recorder.Eventf(ref, api.EventTypeWarning, 
kubecontainer.FailedToStartContainer, "Failed to start with rkt id %v with error %v", uuid, failure) case "Killing": - r.recorder.Eventf(ref, kubecontainer.KillingContainer, "Killing with rkt id %v", uuid) + r.recorder.Eventf(ref, api.EventTypeNormal, kubecontainer.KillingContainer, "Killing with rkt id %v", uuid) default: glog.Errorf("rkt: Unexpected event %q", reason) } @@ -707,7 +707,7 @@ func (r *Runtime) RunPod(pod *api.Pod, pullSecrets []api.Secret) error { continue } if prepareErr != nil { - r.recorder.Eventf(ref, kubecontainer.FailedToCreateContainer, "Failed to create rkt container with error: %v", prepareErr) + r.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.FailedToCreateContainer, "Failed to create rkt container with error: %v", prepareErr) continue } containerID := runtimePod.Containers[i].ID diff --git a/plugin/pkg/scheduler/scheduler.go b/plugin/pkg/scheduler/scheduler.go index 189909e1238..2362f1eb0d6 100644 --- a/plugin/pkg/scheduler/scheduler.go +++ b/plugin/pkg/scheduler/scheduler.go @@ -125,7 +125,7 @@ func (s *Scheduler) scheduleOne() { metrics.SchedulingAlgorithmLatency.Observe(metrics.SinceInMicroseconds(start)) if err != nil { glog.V(1).Infof("Failed to schedule: %+v", pod) - s.config.Recorder.Eventf(pod, "FailedScheduling", "%v", err) + s.config.Recorder.Eventf(pod, api.EventTypeWarning, "FailedScheduling", "%v", err) s.config.Error(pod, err) return } @@ -145,11 +145,11 @@ func (s *Scheduler) scheduleOne() { metrics.BindingLatency.Observe(metrics.SinceInMicroseconds(bindingStart)) if err != nil { glog.V(1).Infof("Failed to bind pod: %+v", err) - s.config.Recorder.Eventf(pod, "FailedScheduling", "Binding rejected: %v", err) + s.config.Recorder.Eventf(pod, api.EventTypeWarning, "FailedScheduling", "Binding rejected: %v", err) s.config.Error(pod, err) return } - s.config.Recorder.Eventf(pod, "Scheduled", "Successfully assigned %v to %v", pod.Name, dest) + s.config.Recorder.Eventf(pod, api.EventTypeNormal, "Scheduled", "Successfully assigned %v to %v", pod.Name, dest) // tell the model to assume that this binding took effect. assumed := *pod assumed.Spec.NodeName = dest
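A minimal usage sketch of the migrated call sites, exercising the updated eventtype-first recorder signature via the FakeRecorder changed above. The import paths assume the repository's k8s.io/kubernetes prefix, and the pod and node names are made up:

    package main

    import (
        "fmt"

        "k8s.io/kubernetes/pkg/api"
        "k8s.io/kubernetes/pkg/client/record"
    )

    func main() {
        // FakeRecorder now captures events as "<type> <reason> <message>" strings.
        recorder := &record.FakeRecorder{}
        pod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "my-pod", Namespace: "default"}}

        // New signature: eventtype precedes reason. Any value other than
        // api.EventTypeNormal or api.EventTypeWarning is rejected by the real
        // recorder's validateEventType and the event is dropped.
        recorder.Eventf(pod, api.EventTypeNormal, "Scheduled", "Successfully assigned %v to %v", pod.Name, "node-1")
        recorder.Eventf(pod, api.EventTypeWarning, "FailedScheduling", "Error scheduling: %v", "no nodes available")

        fmt.Println(recorder.Events)
        // Approximate output:
        // [Normal Scheduled Successfully assigned my-pod to node-1 Warning FailedScheduling Error scheduling: no nodes available]
    }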