Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-24 12:15:52 +00:00

Merge pull request #28865 from ronnielai/image-gc-0

Automatic merge from submit-queue.

Moving event.go from kubelet/container to kubelet/events

Commit 3786701280
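The change itself is mechanical: the kubelet's event-reason constants move out of kubelet/container into a new leaf package, kubelet/events, and every call site swaps its package qualifier from kubecontainer (or container) to events, presumably so that code can share these reason strings without importing the full container package. Below is a minimal, self-contained sketch of the call-site pattern; the reason identifier comes from the diff, but its string value and the toy recorder are illustrative stand-ins for the real record.EventRecorder:

package main

import "fmt"

// FailedValidation mirrors one of the relocated reason constants;
// the concrete string value here is an assumption for illustration.
const FailedValidation = "FailedValidation"

// fakeRecorder stands in for record.EventRecorder's Eventf method.
type fakeRecorder struct{}

func (fakeRecorder) Eventf(obj interface{}, eventType, reason, messageFmt string, args ...interface{}) {
    fmt.Printf("[%s/%s] %s\n", eventType, reason, fmt.Sprintf(messageFmt, args...))
}

func main() {
    var recorder fakeRecorder
    // Before this PR: recorder.Eventf(pod, api.EventTypeWarning, kubecontainer.FailedValidation, ...)
    // After this PR:  recorder.Eventf(pod, api.EventTypeWarning, events.FailedValidation, ...)
    recorder.Eventf(nil, "Warning", FailedValidation,
        "Error validating pod %s from %s, ignoring: %v", "foo", "file", "missing container name")
}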
@@ -26,6 +26,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/validation"
 	"k8s.io/kubernetes/pkg/client/record"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
+	"k8s.io/kubernetes/pkg/kubelet/events"
 	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 	"k8s.io/kubernetes/pkg/kubelet/util/format"
 	"k8s.io/kubernetes/pkg/util/config"

@@ -340,7 +341,7 @@ func filterInvalidPods(pods []*api.Pod, source string, recorder record.EventReco
 			name := bestPodIdentString(pod)
 			err := errlist.ToAggregate()
 			glog.Warningf("Pod[%d] (%s) from %s failed validation, ignoring: %v", i+1, name, source, err)
-			recorder.Eventf(pod, api.EventTypeWarning, kubecontainer.FailedValidation, "Error validating pod %s from %s, ignoring: %v", name, source, err)
+			recorder.Eventf(pod, api.EventTypeWarning, events.FailedValidation, "Error validating pod %s from %s, ignoring: %v", name, source, err)
 			continue
 		}
 		filtered = append(filtered, pod)
@@ -22,6 +22,7 @@ import (
 	"github.com/golang/glog"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/client/record"
+	"k8s.io/kubernetes/pkg/kubelet/events"
 	"k8s.io/kubernetes/pkg/util/flowcontrol"
 )

@@ -83,18 +84,18 @@ func (puller *imagePuller) PullImage(pod *api.Pod, container *api.Container, pul
 	present, err := puller.runtime.IsImagePresent(spec)
 	if err != nil {
 		msg := fmt.Sprintf("Failed to inspect image %q: %v", container.Image, err)
-		puller.logIt(ref, api.EventTypeWarning, FailedToInspectImage, logPrefix, msg, glog.Warning)
+		puller.logIt(ref, api.EventTypeWarning, events.FailedToInspectImage, logPrefix, msg, glog.Warning)
 		return ErrImageInspect, msg
 	}

 	if !shouldPullImage(container, present) {
 		if present {
 			msg := fmt.Sprintf("Container image %q already present on machine", container.Image)
-			puller.logIt(ref, api.EventTypeNormal, PulledImage, logPrefix, msg, glog.Info)
+			puller.logIt(ref, api.EventTypeNormal, events.PulledImage, logPrefix, msg, glog.Info)
 			return nil, ""
 		} else {
 			msg := fmt.Sprintf("Container image %q is not present with pull policy of Never", container.Image)
-			puller.logIt(ref, api.EventTypeWarning, ErrImageNeverPullPolicy, logPrefix, msg, glog.Warning)
+			puller.logIt(ref, api.EventTypeWarning, events.ErrImageNeverPullPolicy, logPrefix, msg, glog.Warning)
 			return ErrImageNeverPull, msg
 		}
 	}

@@ -102,12 +103,12 @@ func (puller *imagePuller) PullImage(pod *api.Pod, container *api.Container, pul
 	backOffKey := fmt.Sprintf("%s_%s", pod.UID, container.Image)
 	if puller.backOff.IsInBackOffSinceUpdate(backOffKey, puller.backOff.Clock.Now()) {
 		msg := fmt.Sprintf("Back-off pulling image %q", container.Image)
-		puller.logIt(ref, api.EventTypeNormal, BackOffPullImage, logPrefix, msg, glog.Info)
+		puller.logIt(ref, api.EventTypeNormal, events.BackOffPullImage, logPrefix, msg, glog.Info)
 		return ErrImagePullBackOff, msg
 	}
-	puller.logIt(ref, api.EventTypeNormal, PullingImage, logPrefix, fmt.Sprintf("pulling image %q", container.Image), glog.Info)
+	puller.logIt(ref, api.EventTypeNormal, events.PullingImage, logPrefix, fmt.Sprintf("pulling image %q", container.Image), glog.Info)
 	if err := puller.runtime.PullImage(spec, pullSecrets); err != nil {
-		puller.logIt(ref, api.EventTypeWarning, FailedToPullImage, logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, err), glog.Warning)
+		puller.logIt(ref, api.EventTypeWarning, events.FailedToPullImage, logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, err), glog.Warning)
 		puller.backOff.Next(backOffKey, puller.backOff.Clock.Now())
 		if err == RegistryUnavailable {
 			msg := fmt.Sprintf("image pull failed for %s because the registry is unavailable.", container.Image)

@@ -116,7 +117,7 @@ func (puller *imagePuller) PullImage(pod *api.Pod, container *api.Container, pul
 			return ErrImagePull, err.Error()
 		}
 	}
-	puller.logIt(ref, api.EventTypeNormal, PulledImage, logPrefix, fmt.Sprintf("Successfully pulled image %q", container.Image), glog.Info)
+	puller.logIt(ref, api.EventTypeNormal, events.PulledImage, logPrefix, fmt.Sprintf("Successfully pulled image %q", container.Image), glog.Info)
	puller.backOff.DeleteEntry(backOffKey)
	puller.backOff.GC()
	return nil, ""
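Both pullers gate pulls with the back-off pattern visible above: a flowcontrol.Backoff entry keyed per pod and image, consulted before each pull and advanced on failure. A runnable sketch of that gating logic, assuming the flowcontrol package's current import path (k8s.io/client-go/util/flowcontrol; in this tree it lived at pkg/util/flowcontrol) and an illustrative key:

package main

import (
    "fmt"
    "time"

    "k8s.io/client-go/util/flowcontrol"
)

func main() {
    // Initial and max back-off durations are illustrative choices.
    backOff := flowcontrol.NewBackOff(10*time.Second, 300*time.Second)
    key := "pod-uid_nginx:1.11" // mimics the pod.UID_image key above

    for attempt := 1; attempt <= 3; attempt++ {
        if backOff.IsInBackOffSinceUpdate(key, backOff.Clock.Now()) {
            fmt.Printf("attempt %d: Back-off pulling image\n", attempt)
            continue
        }
        // Pretend the pull failed: advance the exponential back-off window.
        backOff.Next(key, backOff.Clock.Now())
        fmt.Printf("attempt %d: pull attempted\n", attempt)
    }

    // On success the pullers clear the key and prune stale entries.
    backOff.DeleteEntry(key)
    backOff.GC()
}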
@@ -23,6 +23,7 @@ import (
 	"github.com/golang/glog"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/client/record"
+	"k8s.io/kubernetes/pkg/kubelet/events"
 	"k8s.io/kubernetes/pkg/util/flowcontrol"
 	"k8s.io/kubernetes/pkg/util/wait"
 )

@@ -85,18 +86,18 @@ func (puller *serializedImagePuller) PullImage(pod *api.Pod, container *api.Cont
 	present, err := puller.runtime.IsImagePresent(spec)
 	if err != nil {
 		msg := fmt.Sprintf("Failed to inspect image %q: %v", container.Image, err)
-		puller.logIt(ref, api.EventTypeWarning, FailedToInspectImage, logPrefix, msg, glog.Warning)
+		puller.logIt(ref, api.EventTypeWarning, events.FailedToInspectImage, logPrefix, msg, glog.Warning)
 		return ErrImageInspect, msg
 	}

 	if !shouldPullImage(container, present) {
 		if present {
 			msg := fmt.Sprintf("Container image %q already present on machine", container.Image)
-			puller.logIt(ref, api.EventTypeNormal, PulledImage, logPrefix, msg, glog.Info)
+			puller.logIt(ref, api.EventTypeNormal, events.PulledImage, logPrefix, msg, glog.Info)
 			return nil, ""
 		} else {
 			msg := fmt.Sprintf("Container image %q is not present with pull policy of Never", container.Image)
-			puller.logIt(ref, api.EventTypeWarning, ErrImageNeverPullPolicy, logPrefix, msg, glog.Warning)
+			puller.logIt(ref, api.EventTypeWarning, events.ErrImageNeverPullPolicy, logPrefix, msg, glog.Warning)
 			return ErrImageNeverPull, msg
 		}
 	}

@@ -104,7 +105,7 @@ func (puller *serializedImagePuller) PullImage(pod *api.Pod, container *api.Cont
 	backOffKey := fmt.Sprintf("%s_%s", pod.Name, container.Image)
 	if puller.backOff.IsInBackOffSinceUpdate(backOffKey, puller.backOff.Clock.Now()) {
 		msg := fmt.Sprintf("Back-off pulling image %q", container.Image)
-		puller.logIt(ref, api.EventTypeNormal, BackOffPullImage, logPrefix, msg, glog.Info)
+		puller.logIt(ref, api.EventTypeNormal, events.BackOffPullImage, logPrefix, msg, glog.Info)
 		return ErrImagePullBackOff, msg
 	}

@@ -119,7 +120,7 @@ func (puller *serializedImagePuller) PullImage(pod *api.Pod, container *api.Cont
 		returnChan: returnChan,
 	}
 	if err = <-returnChan; err != nil {
-		puller.logIt(ref, api.EventTypeWarning, FailedToPullImage, logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, err), glog.Warning)
+		puller.logIt(ref, api.EventTypeWarning, events.FailedToPullImage, logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, err), glog.Warning)
 		puller.backOff.Next(backOffKey, puller.backOff.Clock.Now())
 		if err == RegistryUnavailable {
 			msg := fmt.Sprintf("image pull failed for %s because the registry is unavailable.", container.Image)

@@ -128,14 +129,14 @@ func (puller *serializedImagePuller) PullImage(pod *api.Pod, container *api.Cont
 			return ErrImagePull, err.Error()
 		}
 	}
-	puller.logIt(ref, api.EventTypeNormal, PulledImage, logPrefix, fmt.Sprintf("Successfully pulled image %q", container.Image), glog.Info)
+	puller.logIt(ref, api.EventTypeNormal, events.PulledImage, logPrefix, fmt.Sprintf("Successfully pulled image %q", container.Image), glog.Info)
 	puller.backOff.GC()
 	return nil, ""
 }

 func (puller *serializedImagePuller) pullImages() {
 	for pullRequest := range puller.pullRequests {
-		puller.logIt(pullRequest.ref, api.EventTypeNormal, PullingImage, pullRequest.logPrefix, fmt.Sprintf("pulling image %q", pullRequest.container.Image), glog.Info)
+		puller.logIt(pullRequest.ref, api.EventTypeNormal, events.PullingImage, pullRequest.logPrefix, fmt.Sprintf("pulling image %q", pullRequest.container.Image), glog.Info)
 		pullRequest.returnChan <- puller.runtime.PullImage(pullRequest.spec, pullRequest.pullSecrets)
 	}
 }
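The serialized puller differs from the plain one mainly in how the pull executes: requests are funneled through a channel drained by a single goroutine (pullImages above), so at most one pull is in flight, and each caller blocks on a private returnChan for its result. A stripped-down sketch of that request/response pattern; the types and names are illustrative, not the kubelet's:

package main

import "fmt"

// pullRequest mirrors the shape used above: each work item carries
// its own channel for the result.
type pullRequest struct {
    image      string
    returnChan chan error
}

// pullImages drains the queue in a single goroutine, so pulls are
// strictly serialized no matter how many callers enqueue requests.
func pullImages(requests <-chan pullRequest) {
    for req := range requests {
        fmt.Printf("pulling image %q\n", req.image)
        req.returnChan <- nil // a real implementation would call the runtime here
    }
}

func main() {
    requests := make(chan pullRequest, 10)
    go pullImages(requests)

    // Caller side: enqueue the request, then block on its private channel,
    // exactly like `if err = <-returnChan; err != nil { ... }` above.
    ret := make(chan error)
    requests <- pullRequest{image: "nginx:1.11", returnChan: ret}
    if err := <-ret; err != nil {
        fmt.Println("pull failed:", err)
    } else {
        fmt.Println("pull succeeded")
    }
}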
@@ -45,6 +45,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	"k8s.io/kubernetes/pkg/client/record"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
+	"k8s.io/kubernetes/pkg/kubelet/events"
 	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
 	"k8s.io/kubernetes/pkg/kubelet/metrics"
 	"k8s.io/kubernetes/pkg/kubelet/network"

@@ -695,20 +696,20 @@ func (dm *DockerManager) runContainer(
 	securityContextProvider.ModifyHostConfig(pod, container, dockerOpts.HostConfig)
 	createResp, err := dm.client.CreateContainer(dockerOpts)
 	if err != nil {
-		dm.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.FailedToCreateContainer, "Failed to create docker container with error: %v", err)
+		dm.recorder.Eventf(ref, api.EventTypeWarning, events.FailedToCreateContainer, "Failed to create docker container with error: %v", err)
 		return kubecontainer.ContainerID{}, err
 	}
 	if len(createResp.Warnings) != 0 {
 		glog.V(2).Infof("Container %q of pod %q created with warnings: %v", container.Name, format.Pod(pod), createResp.Warnings)
 	}
-	dm.recorder.Eventf(ref, api.EventTypeNormal, kubecontainer.CreatedContainer, "Created container with docker id %v", utilstrings.ShortenString(createResp.ID, 12))
+	dm.recorder.Eventf(ref, api.EventTypeNormal, events.CreatedContainer, "Created container with docker id %v", utilstrings.ShortenString(createResp.ID, 12))

 	if err = dm.client.StartContainer(createResp.ID); err != nil {
-		dm.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.FailedToStartContainer,
+		dm.recorder.Eventf(ref, api.EventTypeWarning, events.FailedToStartContainer,
 			"Failed to start container with docker id %v with error: %v", utilstrings.ShortenString(createResp.ID, 12), err)
 		return kubecontainer.ContainerID{}, err
 	}
-	dm.recorder.Eventf(ref, api.EventTypeNormal, kubecontainer.StartedContainer, "Started container with docker id %v", utilstrings.ShortenString(createResp.ID, 12))
+	dm.recorder.Eventf(ref, api.EventTypeNormal, events.StartedContainer, "Started container with docker id %v", utilstrings.ShortenString(createResp.ID, 12))

 	return kubecontainer.DockerID(createResp.ID).ContainerID(), nil
 }

@@ -1377,7 +1378,7 @@ func (dm *DockerManager) killContainer(containerID kubecontainer.ContainerID, co
 		defer utilruntime.HandleCrash()
 		if msg, err := dm.runner.Run(containerID, pod, container, container.Lifecycle.PreStop); err != nil {
 			glog.Errorf("preStop hook for container %q failed: %v", name, err)
-			dm.generateFailedContainerEvent(containerID, pod.Name, kubecontainer.FailedPreStopHook, msg)
+			dm.generateFailedContainerEvent(containerID, pod.Name, events.FailedPreStopHook, msg)
 		}
 	}()
 	select {

@@ -1417,7 +1418,7 @@ func (dm *DockerManager) killContainer(containerID kubecontainer.ContainerID, co
 		if reason != "" {
 			message = fmt.Sprint(message, ": ", reason)
 		}
-		dm.recorder.Event(ref, api.EventTypeNormal, kubecontainer.KillingContainer, message)
+		dm.recorder.Event(ref, api.EventTypeNormal, events.KillingContainer, message)
 		dm.containerRefManager.ClearRef(containerID)
 	}
 	return err

@@ -1551,7 +1552,7 @@ func (dm *DockerManager) runContainerInPod(pod *api.Pod, container *api.Containe
 	msg, handlerErr := dm.runner.Run(id, pod, container, container.Lifecycle.PostStart)
 	if handlerErr != nil {
 		err := fmt.Errorf("PostStart handler: %v", handlerErr)
-		dm.generateFailedContainerEvent(id, pod.Name, kubecontainer.FailedPostStartHook, msg)
+		dm.generateFailedContainerEvent(id, pod.Name, events.FailedPostStartHook, msg)
 		dm.KillContainerInPod(id, container, pod, err.Error(), nil)
 		return kubecontainer.ContainerID{}, err
 	}

@@ -2325,7 +2326,7 @@ func (dm *DockerManager) doBackOff(pod *api.Pod, container *api.Container, podSt
 	stableName, _, _ := BuildDockerName(dockerName, container)
 	if backOff.IsInBackOffSince(stableName, ts) {
 		if ref, err := kubecontainer.GenerateContainerRef(pod, container); err == nil {
-			dm.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.BackOffStartContainer, "Back-off restarting failed docker container")
+			dm.recorder.Eventf(ref, api.EventTypeWarning, events.BackOffStartContainer, "Back-off restarting failed docker container")
 		}
 		err := fmt.Errorf("Back-off %s restarting failed container=%s pod=%s", backOff.Get(stableName), container.Name, format.Pod(pod))
 		glog.Infof("%s", err.Error())
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

-package container
+package events

 const (
 	// Container event reason list
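Only the package clause of the moved file changes; the constant block beneath it moves verbatim. A sketch of the relocated file's shape, limited to reasons that appear in this diff; the string values are assumptions, and the real file defines more reasons (the node, hook, and sync reasons seen elsewhere above):

package events

const (
    // Container event reason list (subset; values illustrative)
    CreatedContainer        = "Created"
    StartedContainer        = "Started"
    FailedToCreateContainer = "Failed"
    FailedToStartContainer  = "Failed"
    KillingContainer        = "Killing"
    BackOffStartContainer   = "BackOff"

    // Image event reason list (subset; values illustrative)
    PullingImage            = "Pulling"
    PulledImage             = "Pulled"
    FailedToPullImage       = "Failed"
    FailedToInspectImage    = "InspectFailed"
    ErrImageNeverPullPolicy = "ErrImageNeverPull"
    BackOffPullImage        = "BackOff"
)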
@@ -29,6 +29,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/cadvisor"
 	"k8s.io/kubernetes/pkg/kubelet/container"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
+	"k8s.io/kubernetes/pkg/kubelet/events"
 	"k8s.io/kubernetes/pkg/util/errors"
 	"k8s.io/kubernetes/pkg/util/sets"
 	"k8s.io/kubernetes/pkg/util/wait"

@@ -228,7 +229,7 @@ func (im *realImageManager) GarbageCollect() error {
 	// Check valid capacity.
 	if capacity == 0 {
 		err := fmt.Errorf("invalid capacity %d on device %q at mount point %q", capacity, fsInfo.Device, fsInfo.Mountpoint)
-		im.recorder.Eventf(im.nodeRef, api.EventTypeWarning, container.InvalidDiskCapacity, err.Error())
+		im.recorder.Eventf(im.nodeRef, api.EventTypeWarning, events.InvalidDiskCapacity, err.Error())
 		return err
 	}

@@ -244,7 +245,7 @@ func (im *realImageManager) GarbageCollect() error {

 		if freed < amountToFree {
 			err := fmt.Errorf("failed to garbage collect required amount of images. Wanted to free %d, but freed %d", amountToFree, freed)
-			im.recorder.Eventf(im.nodeRef, api.EventTypeWarning, container.FreeDiskSpaceFailed, err.Error())
+			im.recorder.Eventf(im.nodeRef, api.EventTypeWarning, events.FreeDiskSpaceFailed, err.Error())
 			return err
 		}
 	}
@@ -55,6 +55,7 @@ import (
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/kubelet/dockertools"
 	"k8s.io/kubernetes/pkg/kubelet/envvars"
+	"k8s.io/kubernetes/pkg/kubelet/events"
 	"k8s.io/kubernetes/pkg/kubelet/eviction"
 	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
 	"k8s.io/kubernetes/pkg/kubelet/metrics"

@@ -953,7 +954,7 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
 		glog.Warning("No api server defined - no node status update will be sent.")
 	}
 	if err := kl.initializeModules(); err != nil {
-		kl.recorder.Eventf(kl.nodeRef, api.EventTypeWarning, kubecontainer.KubeletSetupFailed, err.Error())
+		kl.recorder.Eventf(kl.nodeRef, api.EventTypeWarning, events.KubeletSetupFailed, err.Error())
 		glog.Error(err)
 		kl.runtimeState.setInitError(err)
 	}

@@ -1820,7 +1821,7 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
 		return err
 	}
 	if err := kl.volumeManager.WaitForAttachAndMount(defaultedPod); err != nil {
-		kl.recorder.Eventf(pod, api.EventTypeWarning, kubecontainer.FailedMountVolume, "Unable to mount volumes for pod %q: %v", format.Pod(pod), err)
+		kl.recorder.Eventf(pod, api.EventTypeWarning, events.FailedMountVolume, "Unable to mount volumes for pod %q: %v", format.Pod(pod), err)
 		glog.Errorf("Unable to mount volumes for pod %q: %v; skipping pod", format.Pod(pod), err)
 		return err
 	}

@@ -1851,13 +1852,13 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
 	}
 	if egress != nil || ingress != nil {
 		if podUsesHostNetwork(pod) {
-			kl.recorder.Event(pod, api.EventTypeWarning, kubecontainer.HostNetworkNotSupported, "Bandwidth shaping is not currently supported on the host network")
+			kl.recorder.Event(pod, api.EventTypeWarning, events.HostNetworkNotSupported, "Bandwidth shaping is not currently supported on the host network")
 		} else if kl.shaper != nil {
 			if len(apiPodStatus.PodIP) > 0 {
 				err = kl.shaper.ReconcileCIDR(fmt.Sprintf("%s/32", apiPodStatus.PodIP), egress, ingress)
 			}
 		} else {
-			kl.recorder.Event(pod, api.EventTypeWarning, kubecontainer.UndefinedShaper, "Pod requests bandwidth shaping, but the shaper is undefined")
+			kl.recorder.Event(pod, api.EventTypeWarning, events.UndefinedShaper, "Pod requests bandwidth shaping, but the shaper is undefined")
 		}
 	}

@@ -2777,7 +2778,7 @@ func (kl *Kubelet) setNodeStatusMachineInfo(node *api.Node) {
 		node.Status.NodeInfo.BootID != info.BootID {
 		// TODO: This requires a transaction, either both node status is updated
 		// and event is recorded or neither should happen, see issue #6055.
-		kl.recorder.Eventf(kl.nodeRef, api.EventTypeWarning, kubecontainer.NodeRebooted,
+		kl.recorder.Eventf(kl.nodeRef, api.EventTypeWarning, events.NodeRebooted,
 			"Node %s has been rebooted, boot id: %s", kl.nodeName, info.BootID)
 	}
 	node.Status.NodeInfo.BootID = info.BootID

@@ -2928,9 +2929,9 @@ func (kl *Kubelet) setNodeReadyCondition(node *api.Node) {
 	}
 	if needToRecordEvent {
 		if newNodeReadyCondition.Status == api.ConditionTrue {
-			kl.recordNodeStatusEvent(api.EventTypeNormal, kubecontainer.NodeReady)
+			kl.recordNodeStatusEvent(api.EventTypeNormal, events.NodeReady)
 		} else {
-			kl.recordNodeStatusEvent(api.EventTypeNormal, kubecontainer.NodeNotReady)
+			kl.recordNodeStatusEvent(api.EventTypeNormal, events.NodeNotReady)
 		}
 	}
 }

@@ -3064,9 +3065,9 @@ var oldNodeUnschedulable bool
 func (kl *Kubelet) recordNodeSchedulableEvent(node *api.Node) {
 	if oldNodeUnschedulable != node.Spec.Unschedulable {
 		if node.Spec.Unschedulable {
-			kl.recordNodeStatusEvent(api.EventTypeNormal, kubecontainer.NodeNotSchedulable)
+			kl.recordNodeStatusEvent(api.EventTypeNormal, events.NodeNotSchedulable)
 		} else {
-			kl.recordNodeStatusEvent(api.EventTypeNormal, kubecontainer.NodeSchedulable)
+			kl.recordNodeStatusEvent(api.EventTypeNormal, events.NodeSchedulable)
 		}
 		oldNodeUnschedulable = node.Spec.Unschedulable
 	}

@@ -3564,7 +3565,7 @@ func (kl *Kubelet) PortForward(podFullName string, podUID types.UID, port uint16
 // BirthCry sends an event that the kubelet has started up.
 func (kl *Kubelet) BirthCry() {
 	// Make an event that kubelet restarted.
-	kl.recorder.Eventf(kl.nodeRef, api.EventTypeNormal, kubecontainer.StartingKubelet, "Starting kubelet.")
+	kl.recorder.Eventf(kl.nodeRef, api.EventTypeNormal, events.StartingKubelet, "Starting kubelet.")
 }

 // StreamingConnectionIdleTimeout returns the timeout for streaming connections to the HTTP server.
@@ -25,6 +25,7 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/client/record"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
+	"k8s.io/kubernetes/pkg/kubelet/events"
 	"k8s.io/kubernetes/pkg/kubelet/eviction"
 	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 	"k8s.io/kubernetes/pkg/kubelet/util/queue"

@@ -181,7 +182,7 @@ func (p *podWorkers) managePodLoop(podUpdates <-chan UpdatePodOptions) {
 		}
 		if err != nil {
 			glog.Errorf("Error syncing pod %s, skipping: %v", update.Pod.UID, err)
-			p.recorder.Eventf(update.Pod, api.EventTypeWarning, kubecontainer.FailedSync, "Error syncing pod, skipping: %v", err)
+			p.recorder.Eventf(update.Pod, api.EventTypeWarning, events.FailedSync, "Error syncing pod, skipping: %v", err)
 		}
 		p.wrapUp(update.Pod.UID, err)
 	}
@@ -29,6 +29,7 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/client/record"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
+	"k8s.io/kubernetes/pkg/kubelet/events"
 	"k8s.io/kubernetes/pkg/kubelet/prober/results"
 	"k8s.io/kubernetes/pkg/kubelet/util/format"
 	"k8s.io/kubernetes/pkg/kubelet/util/ioutils"

@@ -100,12 +101,12 @@ func (pb *prober) probe(probeType probeType, pod *api.Pod, status api.PodStatus,
 	if err != nil {
 		glog.V(1).Infof("%s probe for %q errored: %v", probeType, ctrName, err)
 		if hasRef {
-			pb.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.ContainerUnhealthy, "%s probe errored: %v", probeType, err)
+			pb.recorder.Eventf(ref, api.EventTypeWarning, events.ContainerUnhealthy, "%s probe errored: %v", probeType, err)
 		}
 	} else { // result != probe.Success
 		glog.V(1).Infof("%s probe for %q failed (%v): %s", probeType, ctrName, result, output)
 		if hasRef {
-			pb.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.ContainerUnhealthy, "%s probe failed: %s", probeType, output)
+			pb.recorder.Eventf(ref, api.EventTypeWarning, events.ContainerUnhealthy, "%s probe failed: %s", probeType, output)
 		}
 	}
 	return results.Failure, err
@@ -45,6 +45,7 @@ import (
 	"k8s.io/kubernetes/pkg/client/record"
 	"k8s.io/kubernetes/pkg/credentialprovider"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
+	"k8s.io/kubernetes/pkg/kubelet/events"
 	"k8s.io/kubernetes/pkg/kubelet/leaky"
 	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
 	"k8s.io/kubernetes/pkg/kubelet/network"

@@ -1222,13 +1223,13 @@ func (r *Runtime) generateEvents(runtimePod *kubecontainer.Pod, reason string, f
 	uuid := utilstrings.ShortenString(id.uuid, 8)
 	switch reason {
 	case "Created":
-		r.recorder.Eventf(ref, api.EventTypeNormal, kubecontainer.CreatedContainer, "Created with rkt id %v", uuid)
+		r.recorder.Eventf(ref, api.EventTypeNormal, events.CreatedContainer, "Created with rkt id %v", uuid)
 	case "Started":
-		r.recorder.Eventf(ref, api.EventTypeNormal, kubecontainer.StartedContainer, "Started with rkt id %v", uuid)
+		r.recorder.Eventf(ref, api.EventTypeNormal, events.StartedContainer, "Started with rkt id %v", uuid)
 	case "Failed":
-		r.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.FailedToStartContainer, "Failed to start with rkt id %v with error %v", uuid, failure)
+		r.recorder.Eventf(ref, api.EventTypeWarning, events.FailedToStartContainer, "Failed to start with rkt id %v with error %v", uuid, failure)
 	case "Killing":
-		r.recorder.Eventf(ref, api.EventTypeNormal, kubecontainer.KillingContainer, "Killing with rkt id %v", uuid)
+		r.recorder.Eventf(ref, api.EventTypeNormal, events.KillingContainer, "Killing with rkt id %v", uuid)
 	default:
 		glog.Errorf("rkt: Unexpected event %q", reason)
 	}

@@ -1314,7 +1315,7 @@ func (r *Runtime) RunPod(pod *api.Pod, pullSecrets []api.Secret) error {
 			continue
 		}
 		if prepareErr != nil {
-			r.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.FailedToCreateContainer, "Failed to create rkt container with error: %v", prepareErr)
+			r.recorder.Eventf(ref, api.EventTypeWarning, events.FailedToCreateContainer, "Failed to create rkt container with error: %v", prepareErr)
 			continue
 		}
 		containerID := runtimePod.Containers[i].ID

@@ -1369,7 +1370,7 @@ func (r *Runtime) runPreStopHook(containerID kubecontainer.ContainerID, pod *api
 		if !ok {
 			glog.Warningf("No ref for container %q", containerID)
 		} else {
-			r.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.FailedPreStopHook, msg)
+			r.recorder.Eventf(ref, api.EventTypeWarning, events.FailedPreStopHook, msg)
 		}
 	}
 	return err

@@ -1411,7 +1412,7 @@ func (r *Runtime) runPostStartHook(containerID kubecontainer.ContainerID, pod *a
 		if !ok {
 			glog.Warningf("No ref for container %q", containerID)
 		} else {
-			r.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.FailedPostStartHook, msg)
+			r.recorder.Eventf(ref, api.EventTypeWarning, events.FailedPostStartHook, msg)
 		}
 	}
 	return err