Merge pull request #16272 from jiangyaoguo/remove-infra-container-event

Auto commit by PR queue bot
k8s-merge-robot committed 2015-11-09 06:05:35 -08:00
commit 5fe3733f95
6 changed files with 70 additions and 22 deletions

View File pkg/kubelet/container/helpers.go

@@ -18,8 +18,12 @@ package container
 import (
 	"hash/adler32"
+	"strings"
 
 	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/client/record"
+	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/util"
 	"k8s.io/kubernetes/third_party/golang/expansion"
@@ -104,3 +108,45 @@ func ExpandContainerCommandAndArgs(container *api.Container, envs []EnvVar) (com
 	return command, args
 }
+
+// FilterEventRecorder wraps an event recorder so that no events are recorded
+// for implicitly required containers (such as the pod infra container).
+func FilterEventRecorder(recorder record.EventRecorder) record.EventRecorder {
+	return &innerEventRecorder{
+		recorder: recorder,
+	}
+}
+
+type innerEventRecorder struct {
+	recorder record.EventRecorder
+}
+
+func (irecorder *innerEventRecorder) shouldRecordEvent(object runtime.Object) (*api.ObjectReference, bool) {
+	if object == nil {
+		return nil, false
+	}
+
+	if ref, ok := object.(*api.ObjectReference); ok {
+		if !strings.HasPrefix(ref.FieldPath, ImplicitContainerPrefix) {
+			return ref, true
+		}
+	}
+	return nil, false
+}
+
+func (irecorder *innerEventRecorder) Event(object runtime.Object, reason, message string) {
+	if ref, ok := irecorder.shouldRecordEvent(object); ok {
+		irecorder.recorder.Event(ref, reason, message)
+	}
+}
+
+func (irecorder *innerEventRecorder) Eventf(object runtime.Object, reason, messageFmt string, args ...interface{}) {
+	if ref, ok := irecorder.shouldRecordEvent(object); ok {
+		irecorder.recorder.Eventf(ref, reason, messageFmt, args...)
+	}
+}
+
+func (irecorder *innerEventRecorder) PastEventf(object runtime.Object, timestamp unversioned.Time, reason, messageFmt string, args ...interface{}) {
+	if ref, ok := irecorder.shouldRecordEvent(object); ok {
+		irecorder.recorder.PastEventf(ref, timestamp, reason, messageFmt, args...)
+	}
+}
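
To make the behavior of this wrapper concrete, here is a minimal, self-contained sketch of the same filtering idea. The kubernetes types (runtime.Object, api.ObjectReference, record.EventRecorder) are replaced by local stand-ins, and printRecorder is a made-up backend, so treat this as an illustration rather than the real API:

package main

import (
	"fmt"
	"strings"
)

const ImplicitContainerPrefix = "implicitly required container "

// Stand-ins for runtime.Object, api.ObjectReference and record.EventRecorder.
type Object interface{}

type ObjectReference struct {
	Name      string
	FieldPath string
}

type EventRecorder interface {
	Event(object Object, reason, message string)
}

// printRecorder is a toy backend that just prints what it is asked to record.
type printRecorder struct{}

func (printRecorder) Event(object Object, reason, message string) {
	ref := object.(*ObjectReference)
	fmt.Printf("recorded %s for %s: %s\n", reason, ref.Name, message)
}

// filteringRecorder mirrors innerEventRecorder: drop nil objects and
// references whose FieldPath marks an implicitly required container.
type filteringRecorder struct {
	recorder EventRecorder
}

func (r *filteringRecorder) Event(object Object, reason, message string) {
	if object == nil {
		return
	}
	if ref, ok := object.(*ObjectReference); ok && !strings.HasPrefix(ref.FieldPath, ImplicitContainerPrefix) {
		r.recorder.Event(ref, reason, message)
	}
}

func main() {
	rec := &filteringRecorder{recorder: printRecorder{}}

	// Recorded: an ordinary container reference.
	rec.Event(&ObjectReference{Name: "nginx", FieldPath: "spec.containers{nginx}"}, "Started", "ok")

	// Silently dropped: the infra container, and a nil object.
	rec.Event(&ObjectReference{Name: "POD", FieldPath: ImplicitContainerPrefix + "POD"}, "Started", "ok")
	rec.Event(nil, "Started", "ok")
}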

View File pkg/kubelet/container/image_puller.go

@@ -65,7 +65,7 @@ func shouldPullImage(container *api.Container, imagePresent bool) bool {
 // records an event using ref, event msg. log to glog using prefix, msg, logFn
 func (puller *imagePuller) logIt(ref *api.ObjectReference, event, prefix, msg string, logFn func(args ...interface{})) {
 	if ref != nil {
-		puller.recorder.Eventf(ref, event, msg)
+		puller.recorder.Event(ref, event, msg)
 	} else {
 		logFn(fmt.Sprint(prefix, " ", msg))
 	}
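
Beyond the filtering work, the Eventf → Event switch in logIt (repeated for serializedImagePuller later in this diff) is a small correctness fix in its own right: msg arrives fully formatted, and Eventf applies Sprintf-style formatting to its message argument, so any literal % sequence in msg would be mangled. A standard-library illustration:

package main

import "fmt"

func main() {
	// A pre-formatted message that happens to contain a format verb,
	// e.g. assembled from a docker error string.
	msg := `Failed to pull image "busybox": unexpected %v in manifest`

	fmt.Println(fmt.Sprintf(msg)) // Failed to pull image "busybox": unexpected %!v(MISSING) in manifest
	fmt.Println(msg)              // Failed to pull image "busybox": unexpected %v in manifest
}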

View File pkg/kubelet/container/ref.go

@@ -22,6 +22,8 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 )
 
+var ImplicitContainerPrefix string = "implicitly required container "
+
 // GenerateContainerRef returns an *api.ObjectReference which references the given container
 // within the given pod. Returns an error if the reference can't be constructed or the
 // container doesn't actually belong to the pod.
@@ -33,7 +35,7 @@ func GenerateContainerRef(pod *api.Pod, container *api.Container) (*api.ObjectRe
 	if err != nil {
 		// TODO: figure out intelligent way to refer to containers that we implicitly
 		// start (like the pod infra container). This is not a good way, ugh.
-		fieldPath = "implicitly required container " + container.Name
+		fieldPath = ImplicitContainerPrefix + container.Name
 	}
 	ref, err := api.GetPartialReference(pod, fieldPath)
 	if err != nil {
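
For context on the constant extracted above: GenerateContainerRef normally derives a field path of the form spec.containers{name} for containers declared in the pod spec, and only falls back to the ImplicitContainerPrefix form for containers the kubelet starts on its own, so the prefix test in shouldRecordEvent matches exactly the implicit case. A small sketch (the spec.containers{nginx} path illustrates the normal form; it is not taken from this diff):

package main

import (
	"fmt"
	"strings"
)

const ImplicitContainerPrefix = "implicitly required container "

func main() {
	for _, fieldPath := range []string{
		"spec.containers{nginx}",        // a container declared in the pod spec
		ImplicitContainerPrefix + "POD", // the kubelet-created infra container
	} {
		fmt.Printf("%-40s filtered=%v\n", fieldPath, strings.HasPrefix(fieldPath, ImplicitContainerPrefix))
	}
}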

View File pkg/kubelet/container/serialized_image_puller.go

@@ -66,7 +66,7 @@ func NewSerializedImagePuller(recorder record.EventRecorder, runtime Runtime, im
 // records an event using ref, event msg. log to glog using prefix, msg, logFn
 func (puller *serializedImagePuller) logIt(ref *api.ObjectReference, event, prefix, msg string, logFn func(args ...interface{})) {
 	if ref != nil {
-		puller.recorder.Eventf(ref, event, msg)
+		puller.recorder.Event(ref, event, msg)
 	} else {
 		logFn(fmt.Sprint(prefix, " ", msg))
 	}

View File pkg/kubelet/dockertools/manager.go

@@ -216,9 +216,9 @@ func NewDockerManager(
 	}
 
 	dm.runner = lifecycle.NewHandlerRunner(httpClient, dm, dm)
 	if serializeImagePulls {
-		dm.imagePuller = kubecontainer.NewSerializedImagePuller(recorder, dm, imageBackOff)
+		dm.imagePuller = kubecontainer.NewSerializedImagePuller(kubecontainer.FilterEventRecorder(recorder), dm, imageBackOff)
 	} else {
-		dm.imagePuller = kubecontainer.NewImagePuller(recorder, dm, imageBackOff)
+		dm.imagePuller = kubecontainer.NewImagePuller(kubecontainer.FilterEventRecorder(recorder), dm, imageBackOff)
 	}
 	dm.containerGC = NewContainerGC(client, containerLogsDir)
@@ -766,15 +766,11 @@ func (dm *DockerManager) runContainer(
 	securityContextProvider.ModifyContainerConfig(pod, container, dockerOpts.Config)
 	dockerContainer, err := dm.client.CreateContainer(dockerOpts)
 	if err != nil {
-		if ref != nil {
-			dm.recorder.Eventf(ref, "Failed", "Failed to create docker container with error: %v", err)
-		}
+		dm.recorder.Eventf(ref, "Failed", "Failed to create docker container with error: %v", err)
 		return kubecontainer.ContainerID{}, err
 	}
-	if ref != nil {
-		dm.recorder.Eventf(ref, "Created", "Created with docker id %v", util.ShortenString(dockerContainer.ID, 12))
-	}
+	dm.recorder.Eventf(ref, "Created", "Created with docker id %v", util.ShortenString(dockerContainer.ID, 12))
 
 	podHasSELinuxLabel := pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.SELinuxOptions != nil
 	binds := makeMountBindings(opts.Mounts, podHasSELinuxLabel)
@@ -830,15 +826,11 @@ func (dm *DockerManager) runContainer(
 	securityContextProvider.ModifyHostConfig(pod, container, hc)
 
 	if err = dm.client.StartContainer(dockerContainer.ID, hc); err != nil {
-		if ref != nil {
-			dm.recorder.Eventf(ref, "Failed",
-				"Failed to start with docker id %v with error: %v", util.ShortenString(dockerContainer.ID, 12), err)
-		}
+		dm.recorder.Eventf(ref, "Failed",
+			"Failed to start with docker id %v with error: %v", util.ShortenString(dockerContainer.ID, 12), err)
 		return kubecontainer.ContainerID{}, err
 	}
-	if ref != nil {
-		dm.recorder.Eventf(ref, "Started", "Started with docker id %v", util.ShortenString(dockerContainer.ID, 12))
-	}
+	dm.recorder.Eventf(ref, "Started", "Started with docker id %v", util.ShortenString(dockerContainer.ID, 12))
 	return kubetypes.DockerID(dockerContainer.ID).ContainerID(), nil
 }
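
A note on the guards removed in the two hunks above: dm.recorder is no longer the raw recorder but the filtering wrapper installed in NewMainKubelet (last file in this diff), and its shouldRecordEvent declines to record when the object is nil, which is what lets each call site drop its own if ref != nil check. One Go subtlety worth flagging: a nil *api.ObjectReference stored in a non-nil runtime.Object interface value does not compare equal to nil, so the object == nil test alone does not cover a typed-nil ref.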
@@ -1680,6 +1672,7 @@ func (dm *DockerManager) createPodInfraContainer(pod *api.Pod) (kubetypes.Docker
 type empty struct{}
 type PodContainerChangesSpec struct {
 	StartInfraContainer bool
+	InfraChanged        bool
 	InfraContainerId    kubetypes.DockerID
 	ContainersToStart   map[int]empty
 	ContainersToKeep    map[kubetypes.DockerID]int
@@ -1785,6 +1778,7 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, runningPod kub
 	return PodContainerChangesSpec{
 		StartInfraContainer: createPodInfraContainer,
+		InfraChanged:        changed,
 		InfraContainerId:    podInfraContainerID,
 		ContainersToStart:   containersToStart,
 		ContainersToKeep:    containersToKeep,
@@ -1819,10 +1813,18 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, pod
 	}
 	glog.V(3).Infof("Got container changes for pod %q: %+v", podFullName, containerChanges)
 
+	if containerChanges.InfraChanged {
+		ref, err := api.GetReference(pod)
+		if err != nil {
+			glog.Errorf("Couldn't make a ref to pod %q: '%v'", podFullName, err)
+		}
+		dm.recorder.Eventf(ref, "InfraChanged", "Pod infrastructure changed, it will be killed and re-created.")
+	}
+
 	if containerChanges.StartInfraContainer || (len(containerChanges.ContainersToKeep) == 0 && len(containerChanges.ContainersToStart) == 0) {
 		if len(containerChanges.ContainersToKeep) == 0 && len(containerChanges.ContainersToStart) == 0 {
 			glog.V(4).Infof("Killing Infra Container for %q because all other containers are dead.", podFullName)
 		} else {
 			glog.V(4).Infof("Killing Infra Container for %q, will start new one", podFullName)
 		}
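
With the infra container's own Created/Started/Failed events now filtered out, this pod-level InfraChanged event is what remains visible to users when the infra container must be killed and recreated. It attaches to the pod itself via api.GetReference(pod), so its reference carries no implicit-container field path and passes through the filtering recorder untouched.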

View File pkg/kubelet/kubelet.go

@@ -331,7 +331,7 @@ func NewMainKubelet(
 	// Only supported one for now, continue.
 	klet.containerRuntime = dockertools.NewDockerManager(
 		dockerClient,
-		recorder,
+		kubecontainer.FilterEventRecorder(recorder),
 		klet.livenessManager,
 		containerRefManager,
 		machineInfo,
@@ -1471,9 +1471,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont
 	// Mount volumes.
 	podVolumes, err := kl.mountExternalVolumes(pod)
 	if err != nil {
-		if ref != nil {
-			kl.recorder.Eventf(ref, "FailedMount", "Unable to mount volumes for pod %q: %v", podFullName, err)
-		}
+		kl.recorder.Eventf(ref, "FailedMount", "Unable to mount volumes for pod %q: %v", podFullName, err)
 		glog.Errorf("Unable to mount volumes for pod %q: %v; skipping pod", podFullName, err)
 		return err
 	}