Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-05 02:09:56 +00:00)

Merge pull request #15464 from jiangyaoguo/bubble-up-reason-when-killing-pod

Auto commit by PR queue bot

Commit 8761ad3ec1
@@ -327,7 +327,7 @@ func filterInvalidPods(pods []*api.Pod, source string, recorder record.EventReco
             name := bestPodIdentString(pod)
             err := utilerrors.NewAggregate(errlist)
             glog.Warningf("Pod[%d] (%s) from %s failed validation, ignoring: %v", i+1, name, source, err)
-            recorder.Eventf(pod, "FailedValidation", "Error validating pod %s from %s, ignoring: %v", name, source, err)
+            recorder.Eventf(pod, kubecontainer.FailedValidation, "Error validating pod %s from %s, ignoring: %v", name, source, err)
             continue
         }
         filtered = append(filtered, pod)
pkg/kubelet/container/event.go (new file, 65 lines added)
@@ -0,0 +1,65 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package container
+
+const (
+    // Container event reason list
+    CreatedContainer = "Created"
+    StartedContainer = "Started"
+    FailedToCreateContainer = "Failed"
+    FailedToStartContainer = "Failed"
+    KillingContainer = "Killing"
+    BackOffStartContainer = "BackOff"
+
+    // Image event reason list
+    PullingImage = "Pulling"
+    PulledImage = "Pulled"
+    FailedToPullImage = "Failed"
+    FailedToInspectImage = "InspectFailed"
+    ErrImageNeverPullPolicy = "ErrImageNeverPull"
+    BackOffPullImage = "BackOff"
+
+    // kubelet event reason list
+    NodeReady = "NodeReady"
+    NodeNotReady = "NodeNotReady"
+    NodeSchedulable = "NodeSchedulable"
+    NodeNotSchedulable = "NodeNotSchedulable"
+    StartingKubelet = "Starting"
+    KubeletSetupFailed = "KubeletSetupFailed"
+    FailedMountVolume = "FailedMount"
+    HostPortConflict = "HostPortConflict"
+    NodeSelectorMismatching = "NodeSelectorMismatching"
+    InsufficientFreeCPU = "InsufficientFreeCPU"
+    InsufficientFreeMemory = "InsufficientFreeMemory"
+    OutOfDisk = "OutOfDisk"
+    HostNetworkNotSupported = "HostNetworkNotSupported"
+    UndefinedShaper = "NilShaper"
+    NodeRebooted = "Rebooted"
+
+    // Image manager event reason list
+    InvalidDiskCapacity = "InvalidDiskCapacity"
+    FreeDiskSpaceFailed = "FreeDiskSpaceFailed"
+
+    // Probe event reason list
+    ContainerUnhealthy = "Unhealthy"
+
+    // Pod worker event reason list
+    FailedSync = "FailedSync"
+
+    // Config event reason list
+    FailedValidation = "FailedValidation"
+)
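The new file above centralizes the event reason strings that were previously repeated as literals at each recorder call site; the rest of the diff rewrites those call sites to use the shared constants. A rough, self-contained sketch of the pattern (the recorder interface below is a hypothetical stand-in, not the real client-go record.EventRecorder):

package main

import "fmt"

// Shared reason constants, mirroring the kubecontainer constants above.
const (
    KillingContainer = "Killing"
    PulledImage      = "Pulled"
)

// recorder is a hypothetical stand-in for the kubelet's event recorder.
type recorder interface {
    Eventf(ref string, reason, messageFmt string, args ...interface{})
}

type stdoutRecorder struct{}

func (stdoutRecorder) Eventf(ref string, reason, messageFmt string, args ...interface{}) {
    fmt.Printf("%s  reason=%s  %s\n", ref, reason, fmt.Sprintf(messageFmt, args...))
}

func main() {
    var r recorder = stdoutRecorder{}
    // Call sites reference the constant, so auditing or renaming a reason is a
    // one-file change instead of a repo-wide search for string literals.
    r.Eventf("pod/foo", KillingContainer, "Killing container with docker id %v", "0123456789ab")
    r.Eventf("pod/foo", PulledImage, "Successfully pulled image %q", "nginx:1.9.1")
}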
@@ -83,7 +83,7 @@ func (puller *imagePuller) PullImage(pod *api.Pod, container *api.Container, pul
     present, err := puller.runtime.IsImagePresent(spec)
     if err != nil {
         msg := fmt.Sprintf("Failed to inspect image %q: %v", container.Image, err)
-        puller.logIt(ref, "Failed", logPrefix, msg, glog.Warning)
+        puller.logIt(ref, FailedToInspectImage, logPrefix, msg, glog.Warning)
         return ErrImageInspect, msg
     }
 
@@ -94,7 +94,7 @@ func (puller *imagePuller) PullImage(pod *api.Pod, container *api.Container, pul
             return nil, ""
         } else {
             msg := fmt.Sprintf("Container image %q is not present with pull policy of Never", container.Image)
-            puller.logIt(ref, "ErrImageNeverPull", logPrefix, msg, glog.Warning)
+            puller.logIt(ref, ErrImageNeverPullPolicy, logPrefix, msg, glog.Warning)
             return ErrImageNeverPull, msg
         }
     }
@@ -102,7 +102,7 @@ func (puller *imagePuller) PullImage(pod *api.Pod, container *api.Container, pul
     backOffKey := fmt.Sprintf("%s_%s", pod.Name, container.Image)
     if puller.backOff.IsInBackOffSinceUpdate(backOffKey, puller.backOff.Clock.Now()) {
         msg := fmt.Sprintf("Back-off pulling image %q", container.Image)
-        puller.logIt(ref, "Back-off", logPrefix, msg, glog.Info)
+        puller.logIt(ref, BackOffPullImage, logPrefix, msg, glog.Info)
         return ErrImagePullBackOff, msg
     }
     puller.logIt(ref, "Pulling", logPrefix, fmt.Sprintf("pulling image %q", container.Image), glog.Info)
@@ -84,18 +84,18 @@ func (puller *serializedImagePuller) PullImage(pod *api.Pod, container *api.Cont
     present, err := puller.runtime.IsImagePresent(spec)
     if err != nil {
         msg := fmt.Sprintf("Failed to inspect image %q: %v", container.Image, err)
-        puller.logIt(ref, "Failed", logPrefix, msg, glog.Warning)
+        puller.logIt(ref, FailedToInspectImage, logPrefix, msg, glog.Warning)
         return ErrImageInspect, msg
     }
 
     if !shouldPullImage(container, present) {
         if present {
             msg := fmt.Sprintf("Container image %q already present on machine", container.Image)
-            puller.logIt(ref, "Pulled", logPrefix, msg, glog.Info)
+            puller.logIt(ref, PulledImage, logPrefix, msg, glog.Info)
             return nil, ""
         } else {
             msg := fmt.Sprintf("Container image %q is not present with pull policy of Never", container.Image)
-            puller.logIt(ref, "ErrImageNeverPull", logPrefix, msg, glog.Warning)
+            puller.logIt(ref, ErrImageNeverPullPolicy, logPrefix, msg, glog.Warning)
             return ErrImageNeverPull, msg
         }
     }
@@ -103,7 +103,7 @@ func (puller *serializedImagePuller) PullImage(pod *api.Pod, container *api.Cont
     backOffKey := fmt.Sprintf("%s_%s", pod.Name, container.Image)
     if puller.backOff.IsInBackOffSinceUpdate(backOffKey, puller.backOff.Clock.Now()) {
         msg := fmt.Sprintf("Back-off pulling image %q", container.Image)
-        puller.logIt(ref, "Back-off", logPrefix, msg, glog.Info)
+        puller.logIt(ref, BackOffPullImage, logPrefix, msg, glog.Info)
         return ErrImagePullBackOff, msg
     }
 
@@ -118,7 +118,7 @@ func (puller *serializedImagePuller) PullImage(pod *api.Pod, container *api.Cont
         returnChan: returnChan,
     }
     if err = <-returnChan; err != nil {
-        puller.logIt(ref, "Failed", logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, err), glog.Warning)
+        puller.logIt(ref, FailedToPullImage, logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, err), glog.Warning)
         puller.backOff.Next(backOffKey, puller.backOff.Clock.Now())
         if err == RegistryUnavailable {
             msg := fmt.Sprintf("image pull failed for %s because the registry is temporarily unavailable.", container.Image)
@@ -127,14 +127,14 @@ func (puller *serializedImagePuller) PullImage(pod *api.Pod, container *api.Cont
             return ErrImagePull, err.Error()
         }
     }
-    puller.logIt(ref, "Pulled", logPrefix, fmt.Sprintf("Successfully pulled image %q", container.Image), glog.Info)
+    puller.logIt(ref, PulledImage, logPrefix, fmt.Sprintf("Successfully pulled image %q", container.Image), glog.Info)
     puller.backOff.GC()
     return nil, ""
 }
 
 func (puller *serializedImagePuller) pullImages() {
     for pullRequest := range puller.pullRequests {
-        puller.logIt(pullRequest.ref, "Pulling", pullRequest.logPrefix, fmt.Sprintf("pulling image %q", pullRequest.container.Image), glog.Info)
+        puller.logIt(pullRequest.ref, PullingImage, pullRequest.logPrefix, fmt.Sprintf("pulling image %q", pullRequest.container.Image), glog.Info)
         pullRequest.returnChan <- puller.runtime.PullImage(pullRequest.spec, pullRequest.pullSecrets)
     }
 }
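Both pullers above gate retries with a per-(pod, image) backoff key: a pull attempt that lands inside the backoff window only emits a BackOff event, and a failed pull starts or extends the window. A simplified, self-contained sketch of that gating (toy backoff type, not the real kubelet backoff API):

package main

import (
    "fmt"
    "time"
)

// backoff is a toy stand-in for the kubelet's backoff tracker.
type backoff struct {
    last  map[string]time.Time
    delay time.Duration
}

func newBackoff(delay time.Duration) *backoff {
    return &backoff{last: map[string]time.Time{}, delay: delay}
}

// inBackoff reports whether key is still inside its backoff window.
func (b *backoff) inBackoff(key string, now time.Time) bool {
    t, ok := b.last[key]
    return ok && now.Sub(t) < b.delay
}

func (b *backoff) next(key string, now time.Time) { b.last[key] = now }

func pullImage(b *backoff, podName, image string, now time.Time, pullSucceeds bool) string {
    backOffKey := fmt.Sprintf("%s_%s", podName, image)
    if b.inBackoff(backOffKey, now) {
        return fmt.Sprintf("BackOff: Back-off pulling image %q", image)
    }
    if !pullSucceeds {
        // A failed pull starts (or extends) the backoff window for this key.
        b.next(backOffKey, now)
        return fmt.Sprintf("Failed: Failed to pull image %q", image)
    }
    return fmt.Sprintf("Pulled: Successfully pulled image %q", image)
}

func main() {
    b := newBackoff(10 * time.Second)
    now := time.Now()
    fmt.Println(pullImage(b, "foo", "nginx:1.9.1", now, false))                 // Failed: starts the backoff window
    fmt.Println(pullImage(b, "foo", "nginx:1.9.1", now.Add(time.Second), true)) // BackOff: still inside the window
}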
@@ -766,11 +766,11 @@ func (dm *DockerManager) runContainer(
     securityContextProvider.ModifyContainerConfig(pod, container, dockerOpts.Config)
     dockerContainer, err := dm.client.CreateContainer(dockerOpts)
     if err != nil {
-        dm.recorder.Eventf(ref, "Failed", "Failed to create docker container with error: %v", err)
+        dm.recorder.Eventf(ref, kubecontainer.FailedToCreateContainer, "Failed to create docker container with error: %v", err)
         return kubecontainer.ContainerID{}, err
     }
 
-    dm.recorder.Eventf(ref, "Created", "Created with docker id %v", util.ShortenString(dockerContainer.ID, 12))
+    dm.recorder.Eventf(ref, kubecontainer.CreatedContainer, "Created container with docker id %v", util.ShortenString(dockerContainer.ID, 12))
 
     podHasSELinuxLabel := pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.SELinuxOptions != nil
     binds := makeMountBindings(opts.Mounts, podHasSELinuxLabel)
@@ -826,11 +826,12 @@ func (dm *DockerManager) runContainer(
     securityContextProvider.ModifyHostConfig(pod, container, hc)
 
     if err = dm.client.StartContainer(dockerContainer.ID, hc); err != nil {
-        dm.recorder.Eventf(ref, "Failed",
-            "Failed to start with docker id %v with error: %v", util.ShortenString(dockerContainer.ID, 12), err)
+        dm.recorder.Eventf(ref, kubecontainer.FailedToStartContainer,
+            "Failed to start container with docker id %v with error: %v", util.ShortenString(dockerContainer.ID, 12), err)
         return kubecontainer.ContainerID{}, err
     }
-    dm.recorder.Eventf(ref, "Started", "Started with docker id %v", util.ShortenString(dockerContainer.ID, 12))
+    dm.recorder.Eventf(ref, kubecontainer.StartedContainer, "Started container with docker id %v", util.ShortenString(dockerContainer.ID, 12))
 
     return kubetypes.DockerID(dockerContainer.ID).ContainerID(), nil
 }
 
@@ -1314,7 +1315,7 @@ func (dm *DockerManager) KillPod(pod *api.Pod, runningPod kubecontainer.Pod) err
                 return
             }
 
-            err := dm.KillContainerInPod(container.ID, containerSpec, pod)
+            err := dm.KillContainerInPod(container.ID, containerSpec, pod, "Need to kill pod.")
             if err != nil {
                 glog.Errorf("Failed to delete container: %v; Skipping pod %q", err, runningPod.ID)
                 errs <- err
@@ -1327,7 +1328,7 @@ func (dm *DockerManager) KillPod(pod *api.Pod, runningPod kubecontainer.Pod) err
                 glog.Errorf("Failed tearing down the infra container: %v", err)
                 errs <- err
             }
-            if err := dm.KillContainerInPod(networkContainer.ID, networkSpec, pod); err != nil {
+            if err := dm.KillContainerInPod(networkContainer.ID, networkSpec, pod, "Need to kill pod."); err != nil {
                 glog.Errorf("Failed to delete container: %v; Skipping pod %q", err, runningPod.ID)
                 errs <- err
             }
@@ -1345,7 +1346,7 @@ func (dm *DockerManager) KillPod(pod *api.Pod, runningPod kubecontainer.Pod) err
 
 // KillContainerInPod kills a container in the pod. It must be passed either a container ID or a container and pod,
 // and will attempt to lookup the other information if missing.
-func (dm *DockerManager) KillContainerInPod(containerID kubecontainer.ContainerID, container *api.Container, pod *api.Pod) error {
+func (dm *DockerManager) KillContainerInPod(containerID kubecontainer.ContainerID, container *api.Container, pod *api.Pod, message string) error {
     switch {
     case containerID.IsEmpty():
         // Locate the container.
@@ -1377,12 +1378,12 @@ func (dm *DockerManager) KillContainerInPod(containerID kubecontainer.ContainerI
             pod = storedPod
         }
     }
-    return dm.killContainer(containerID, container, pod)
+    return dm.killContainer(containerID, container, pod, message)
 }
 
 // killContainer accepts a containerID and an optional container or pod containing shutdown policies. Invoke
 // KillContainerInPod if information must be retrieved first.
-func (dm *DockerManager) killContainer(containerID kubecontainer.ContainerID, container *api.Container, pod *api.Pod) error {
+func (dm *DockerManager) killContainer(containerID kubecontainer.ContainerID, container *api.Container, pod *api.Pod, reason string) error {
     ID := containerID.ID
     name := ID
     if container != nil {
@@ -1441,8 +1442,11 @@ func (dm *DockerManager) killContainer(containerID kubecontainer.ContainerID, co
     if !ok {
         glog.Warningf("No ref for pod '%q'", name)
     } else {
-        // TODO: pass reason down here, and state, or move this call up the stack.
-        dm.recorder.Eventf(ref, "Killing", "Killing with docker id %v", util.ShortenString(ID, 12))
+        message := fmt.Sprintf("Killing container with docker id %v", util.ShortenString(ID, 12))
+        if reason != "" {
+            message = fmt.Sprint(message, ": ", reason)
+        }
+        dm.recorder.Event(ref, kubecontainer.KillingContainer, message)
         dm.containerRefManager.ClearRef(containerID)
     }
     return err
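The change above is the heart of this pull request: callers now pass a human-readable reason down through KillContainerInPod, and killContainer folds it into the "Killing" event, so the pod's events record why a container was killed. A minimal standalone sketch of that flow (hypothetical helper names, not the actual DockerManager API):

package main

import "fmt"

const killingContainer = "Killing" // mirrors kubecontainer.KillingContainer

// killContainerInPod stands in for the real method: it just forwards the reason.
func killContainerInPod(containerID, reason string) error {
    // ...the real method first looks up any missing container/pod information...
    return killContainer(containerID, reason)
}

// killContainer appends the bubbled-up reason to the emitted event message.
func killContainer(containerID, reason string) error {
    message := fmt.Sprintf("Killing container with docker id %v", shorten(containerID, 12))
    if reason != "" {
        message = fmt.Sprint(message, ": ", reason)
    }
    fmt.Printf("event reason=%s message=%q\n", killingContainer, message) // stand-in for recorder.Event
    return nil
}

func shorten(s string, n int) string {
    if len(s) > n {
        return s[:n]
    }
    return s
}

func main() {
    // A SyncPod-style caller: the reason it computed becomes visible in the pod's events.
    _ = killContainerInPod("0123456789abcdef", "Need to kill pod.")
}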
@@ -1525,8 +1529,9 @@ func (dm *DockerManager) runContainerInPod(pod *api.Pod, container *api.Containe
     if container.Lifecycle != nil && container.Lifecycle.PostStart != nil {
         handlerErr := dm.runner.Run(id, pod, container, container.Lifecycle.PostStart)
         if handlerErr != nil {
-            dm.KillContainerInPod(id, container, pod)
-            return kubecontainer.ContainerID{}, fmt.Errorf("failed to call event handler: %v", handlerErr)
+            err := fmt.Errorf("failed to call event handler: %v", handlerErr)
+            dm.KillContainerInPod(id, container, pod, err.Error())
+            return kubecontainer.ContainerID{}, err
         }
     }
 
@@ -1663,18 +1668,17 @@ func (dm *DockerManager) createPodInfraContainer(pod *api.Pod) (kubetypes.Docker
 // - startInfraContainer is true if new Infra Containers have to be started and old one (if running) killed.
 //   Additionally if it is true then containersToKeep have to be empty
 // - infraContainerId have to be set if and only if startInfraContainer is false. It stores dockerID of running Infra Container
-// - containersToStart keeps indices of Specs of containers that have to be started.
+// - containersToStart keeps indices of Specs of containers that have to be started and reasons why containers will be started.
 // - containersToKeep stores mapping from dockerIDs of running containers to indices of their Specs for containers that
 //   should be kept running. If startInfraContainer is false then it contains an entry for infraContainerId (mapped to -1).
 //   It shouldn't be the case where containersToStart is empty and containersToKeep contains only infraContainerId. In such case
 //   Infra Container should be killed, hence it's removed from this map.
 // - all running containers which are NOT contained in containersToKeep should be killed.
-type empty struct{}
 type PodContainerChangesSpec struct {
     StartInfraContainer bool
     InfraChanged bool
     InfraContainerId kubetypes.DockerID
-    ContainersToStart map[int]empty
+    ContainersToStart map[int]string
     ContainersToKeep map[kubetypes.DockerID]int
 }
 
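ContainersToStart changes from a set (map[int]empty) to a map from container index to the reason that container will be (re)started; SyncPod later reuses that reason as the kill message for the old instance. A simplified sketch of the idea, with types invented purely for illustration:

package main

import "fmt"

type containerSpec struct{ Name string }

// computeChanges records, per container index, why it must be restarted.
func computeChanges(specs []containerSpec, unhealthy map[string]bool) map[int]string {
    containersToStart := make(map[int]string)
    for i, c := range specs {
        if unhealthy[c.Name] {
            containersToStart[i] = fmt.Sprintf("container %q is unhealthy, it will be killed and re-created.", c.Name)
        }
    }
    return containersToStart
}

func main() {
    specs := []containerSpec{{Name: "web"}, {Name: "sidecar"}}
    toStart := computeChanges(specs, map[string]bool{"sidecar": true})

    // When the old instance is killed, the stored reason becomes the killMessage
    // that is threaded into KillContainerInPod in the real code.
    for i, c := range specs {
        if msg, ok := toStart[i]; ok {
            fmt.Printf("killing %s: %s\n", c.Name, msg)
        }
    }
}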
@@ -1688,7 +1692,7 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, runningPod kub
     uid := pod.UID
     glog.V(4).Infof("Syncing Pod %+v, podFullName: %q, uid: %q", pod, podFullName, uid)
 
-    containersToStart := make(map[int]empty)
+    containersToStart := make(map[int]string)
     containersToKeep := make(map[kubetypes.DockerID]int)
 
     var err error
@@ -1724,8 +1728,9 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, runningPod kub
             // If we are here it means that the container is dead and should be restarted, or never existed and should
             // be created. We may be inserting this ID again if the container has changed and it has
             // RestartPolicy::Always, but it's not a big deal.
-            glog.V(3).Infof("Container %+v is dead, but RestartPolicy says that we should restart it.", container)
-            containersToStart[index] = empty{}
+            message := fmt.Sprintf("Container %+v is dead, but RestartPolicy says that we should restart it.", container)
+            glog.V(3).Info(message)
+            containersToStart[index] = message
         }
         continue
     }
@@ -1740,8 +1745,9 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, runningPod kub
         // If RestartPolicy is Always or OnFailure we restart containers that were running before we
         // killed them when restarting Infra Container.
         if pod.Spec.RestartPolicy != api.RestartPolicyNever {
-            glog.V(1).Infof("Infra Container is being recreated. %q will be restarted.", container.Name)
-            containersToStart[index] = empty{}
+            message := fmt.Sprintf("Infra Container is being recreated. %q will be restarted.", container.Name)
+            glog.V(1).Info(message)
+            containersToStart[index] = message
         }
         continue
     }
@@ -1750,8 +1756,9 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, runningPod kub
         // We will look for changes and check healthiness for the container.
         containerChanged := hash != 0 && hash != expectedHash
         if containerChanged {
-            glog.Infof("pod %q container %q hash changed (%d vs %d), it will be killed and re-created.", podFullName, container.Name, hash, expectedHash)
-            containersToStart[index] = empty{}
+            message := fmt.Sprintf("pod %q container %q hash changed (%d vs %d), it will be killed and re-created.", podFullName, container.Name, hash, expectedHash)
+            glog.Info(message)
+            containersToStart[index] = message
             continue
         }
 
@@ -1761,8 +1768,9 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, runningPod kub
             continue
         }
         if pod.Spec.RestartPolicy != api.RestartPolicyNever {
-            glog.Infof("pod %q container %q is unhealthy, it will be killed and re-created.", podFullName, container.Name)
-            containersToStart[index] = empty{}
+            message := fmt.Sprintf("pod %q container %q is unhealthy, it will be killed and re-created.", podFullName, container.Name)
+            glog.Info(message)
+            containersToStart[index] = message
         }
     }
 
@@ -1840,13 +1848,15 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, pod
             glog.V(3).Infof("Killing unwanted container %+v", container)
             // attempt to find the appropriate container policy
             var podContainer *api.Container
+            var killMessage string
             for i, c := range pod.Spec.Containers {
                 if c.Name == container.Name {
                     podContainer = &pod.Spec.Containers[i]
+                    killMessage = containerChanges.ContainersToStart[i]
                     break
                 }
             }
-            if err := dm.KillContainerInPod(container.ID, podContainer, pod); err != nil {
+            if err := dm.KillContainerInPod(container.ID, podContainer, pod, killMessage); err != nil {
                 glog.Errorf("Error killing container: %v", err)
                 return err
             }
@@ -1867,11 +1877,12 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, pod
         // Call the networking plugin
         err = dm.networkPlugin.SetUpPod(pod.Namespace, pod.Name, podInfraContainerID)
         if err != nil {
-            glog.Errorf("Failed to setup networking for pod %q using network plugins: %v; Skipping pod", podFullName, err)
+            message := fmt.Sprintf("Failed to setup networking for pod %q using network plugins: %v; Skipping pod", podFullName, err)
+            glog.Error(message)
             // Delete infra container
             if delErr := dm.KillContainerInPod(kubecontainer.ContainerID{
                 ID: string(podInfraContainerID),
-                Type: "docker"}, nil, pod); delErr != nil {
+                Type: "docker"}, nil, pod, message); delErr != nil {
                 glog.Warningf("Clear infra container failed for pod %q: %v", podFullName, delErr)
             }
             return err
|
|||||||
stableName, _ := BuildDockerName(dockerName, container)
|
stableName, _ := BuildDockerName(dockerName, container)
|
||||||
if backOff.IsInBackOffSince(stableName, ts.Time) {
|
if backOff.IsInBackOffSince(stableName, ts.Time) {
|
||||||
if ref, err := kubecontainer.GenerateContainerRef(pod, container); err == nil {
|
if ref, err := kubecontainer.GenerateContainerRef(pod, container); err == nil {
|
||||||
dm.recorder.Eventf(ref, "Backoff", "Back-off restarting failed docker container")
|
dm.recorder.Eventf(ref, kubecontainer.BackOffStartContainer, "Back-off restarting failed docker container")
|
||||||
}
|
}
|
||||||
err := fmt.Errorf("Back-off %s restarting failed container=%s pod=%s", backOff.Get(stableName), container.Name, kubecontainer.GetPodFullName(pod))
|
err := fmt.Errorf("Back-off %s restarting failed container=%s pod=%s", backOff.Get(stableName), container.Name, kubecontainer.GetPodFullName(pod))
|
||||||
dm.updateReasonCache(pod, container, kubecontainer.ErrCrashLoopBackOff.Error(), err)
|
dm.updateReasonCache(pod, container, kubecontainer.ErrCrashLoopBackOff.Error(), err)
|
||||||
|
pkg/kubelet/dockertools/manager_test.go (8 lines changed; mode changed from normal file to executable file)
@@ -403,7 +403,7 @@ func TestKillContainerInPod(t *testing.T) {
     containerToSpare := &containers[1]
     fakeDocker.ContainerList = containers
 
-    if err := manager.KillContainerInPod(kubecontainer.ContainerID{}, &pod.Spec.Containers[0], pod); err != nil {
+    if err := manager.KillContainerInPod(kubecontainer.ContainerID{}, &pod.Spec.Containers[0], pod, "test kill container in pod."); err != nil {
         t.Errorf("unexpected error: %v", err)
     }
     // Assert the container has been stopped.
@@ -468,7 +468,7 @@ func TestKillContainerInPodWithPreStop(t *testing.T) {
         },
     }
 
-    if err := manager.KillContainerInPod(kubecontainer.ContainerID{}, &pod.Spec.Containers[0], pod); err != nil {
+    if err := manager.KillContainerInPod(kubecontainer.ContainerID{}, &pod.Spec.Containers[0], pod, "test kill container with preStop."); err != nil {
         t.Errorf("unexpected error: %v", err)
     }
     // Assert the container has been stopped.
@@ -505,7 +505,7 @@ func TestKillContainerInPodWithError(t *testing.T) {
     fakeDocker.ContainerList = containers
     fakeDocker.Errors["stop"] = fmt.Errorf("sample error")
 
-    if err := manager.KillContainerInPod(kubecontainer.ContainerID{}, &pod.Spec.Containers[0], pod); err == nil {
+    if err := manager.KillContainerInPod(kubecontainer.ContainerID{}, &pod.Spec.Containers[0], pod, "test kill container with error."); err == nil {
         t.Errorf("expected error, found nil")
     }
 }
@@ -1565,7 +1565,7 @@ func TestGetRestartCount(t *testing.T) {
         t.Fatalf("unexpected error %v", err)
     }
     containerID := kubecontainer.ParseContainerID(status.ContainerStatuses[0].ContainerID)
-    dm.KillContainerInPod(containerID, &pod.Spec.Containers[0], pod)
+    dm.KillContainerInPod(containerID, &pod.Spec.Containers[0], pod, "test container restart count.")
 }
 // Container "bar" starts the first time.
 // TODO: container lists are expected to be sorted reversely by time.
@@ -191,7 +191,7 @@ func (im *realImageManager) GarbageCollect() error {
     // Check valid capacity.
     if capacity == 0 {
         err := fmt.Errorf("invalid capacity %d on device %q at mount point %q", capacity, fsInfo.Device, fsInfo.Mountpoint)
-        im.recorder.Eventf(im.nodeRef, "InvalidDiskCapacity", err.Error())
+        im.recorder.Eventf(im.nodeRef, container.InvalidDiskCapacity, err.Error())
         return err
     }
 
@@ -207,7 +207,7 @@ func (im *realImageManager) GarbageCollect() error {
 
         if freed < amountToFree {
             err := fmt.Errorf("failed to garbage collect required amount of images. Wanted to free %d, but freed %d", amountToFree, freed)
-            im.recorder.Eventf(im.nodeRef, "FreeDiskSpaceFailed", err.Error())
+            im.recorder.Eventf(im.nodeRef, container.FreeDiskSpaceFailed, err.Error())
             return err
         }
     }
@@ -836,22 +836,22 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
     }
 
     if err := kl.imageManager.Start(); err != nil {
-        kl.recorder.Eventf(kl.nodeRef, "KubeletSetupFailed", "Failed to start ImageManager %v", err)
+        kl.recorder.Eventf(kl.nodeRef, kubecontainer.KubeletSetupFailed, "Failed to start ImageManager %v", err)
         glog.Errorf("Failed to start ImageManager, images may not be garbage collected: %v", err)
     }
 
     if err := kl.cadvisor.Start(); err != nil {
-        kl.recorder.Eventf(kl.nodeRef, "KubeletSetupFailed", "Failed to start CAdvisor %v", err)
+        kl.recorder.Eventf(kl.nodeRef, kubecontainer.KubeletSetupFailed, "Failed to start CAdvisor %v", err)
         glog.Errorf("Failed to start CAdvisor, system may not be properly monitored: %v", err)
     }
 
     if err := kl.containerManager.Start(); err != nil {
-        kl.recorder.Eventf(kl.nodeRef, "KubeletSetupFailed", "Failed to start ContainerManager %v", err)
+        kl.recorder.Eventf(kl.nodeRef, kubecontainer.KubeletSetupFailed, "Failed to start ContainerManager %v", err)
         glog.Errorf("Failed to start ContainerManager, system may not be properly isolated: %v", err)
     }
 
     if err := kl.oomWatcher.Start(kl.nodeRef); err != nil {
-        kl.recorder.Eventf(kl.nodeRef, "KubeletSetupFailed", "Failed to start OOM watcher %v", err)
+        kl.recorder.Eventf(kl.nodeRef, kubecontainer.KubeletSetupFailed, "Failed to start OOM watcher %v", err)
         glog.Errorf("Failed to start OOM watching: %v", err)
     }
 
@@ -1469,7 +1469,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont
     // Mount volumes.
     podVolumes, err := kl.mountExternalVolumes(pod)
     if err != nil {
-        kl.recorder.Eventf(ref, "FailedMount", "Unable to mount volumes for pod %q: %v", podFullName, err)
+        kl.recorder.Eventf(ref, kubecontainer.FailedMountVolume, "Unable to mount volumes for pod %q: %v", podFullName, err)
         glog.Errorf("Unable to mount volumes for pod %q: %v; skipping pod", podFullName, err)
         return err
     }
@@ -1531,7 +1531,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont
     }
     if egress != nil || ingress != nil {
         if podUsesHostNetwork(pod) {
-            kl.recorder.Event(pod, "HostNetworkNotSupported", "Bandwidth shaping is not currently supported on the host network")
+            kl.recorder.Event(pod, kubecontainer.HostNetworkNotSupported, "Bandwidth shaping is not currently supported on the host network")
         } else if kl.shaper != nil {
             status, found := kl.statusManager.GetPodStatus(pod.UID)
             if !found {
@@ -1546,7 +1546,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont
                 err = kl.shaper.ReconcileCIDR(fmt.Sprintf("%s/32", status.PodIP), egress, ingress)
             }
         } else {
-            kl.recorder.Event(pod, "NilShaper", "Pod requests bandwidth shaping, but the shaper is undefined")
+            kl.recorder.Event(pod, kubecontainer.UndefinedShaper, "Pod requests bandwidth shaping, but the shaper is undefined")
         }
     }
 
@@ -2535,7 +2535,7 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
         node.Status.NodeInfo.BootID != info.BootID {
         // TODO: This requires a transaction, either both node status is updated
         // and event is recorded or neither should happen, see issue #6055.
-        kl.recorder.Eventf(kl.nodeRef, "Rebooted",
+        kl.recorder.Eventf(kl.nodeRef, kubecontainer.NodeRebooted,
             "Node %s has been rebooted, boot id: %s", kl.nodeName, info.BootID)
     }
     node.Status.NodeInfo.BootID = info.BootID
@@ -2614,9 +2614,9 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
     }
     if !updated || oldNodeReadyConditionStatus != newNodeReadyCondition.Status {
         if newNodeReadyCondition.Status == api.ConditionTrue {
-            kl.recordNodeStatusEvent("NodeReady")
+            kl.recordNodeStatusEvent(kubecontainer.NodeReady)
         } else {
-            kl.recordNodeStatusEvent("NodeNotReady")
+            kl.recordNodeStatusEvent(kubecontainer.NodeNotReady)
         }
     }
 
@@ -2676,9 +2676,9 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
 
     if oldNodeUnschedulable != node.Spec.Unschedulable {
         if node.Spec.Unschedulable {
-            kl.recordNodeStatusEvent("NodeNotSchedulable")
+            kl.recordNodeStatusEvent(kubecontainer.NodeNotSchedulable)
         } else {
-            kl.recordNodeStatusEvent("NodeSchedulable")
+            kl.recordNodeStatusEvent(kubecontainer.NodeSchedulable)
         }
         oldNodeUnschedulable = node.Spec.Unschedulable
     }
@@ -2999,7 +2999,7 @@ func (kl *Kubelet) PortForward(podFullName string, podUID types.UID, port uint16
 // BirthCry sends an event that the kubelet has started up.
 func (kl *Kubelet) BirthCry() {
     // Make an event that kubelet restarted.
-    kl.recorder.Eventf(kl.nodeRef, "Starting", "Starting kubelet.")
+    kl.recorder.Eventf(kl.nodeRef, kubecontainer.StartingKubelet, "Starting kubelet.")
 }
 
 func (kl *Kubelet) StreamingConnectionIdleTimeout() time.Duration {
@@ -123,7 +123,7 @@ func (p *podWorkers) managePodLoop(podUpdates <-chan workUpdate) {
         minRuntimeCacheTime = time.Now()
         if err != nil {
             glog.Errorf("Error syncing pod %s, skipping: %v", newWork.pod.UID, err)
-            p.recorder.Eventf(newWork.pod, "FailedSync", "Error syncing pod, skipping: %v", err)
+            p.recorder.Eventf(newWork.pod, kubecontainer.FailedSync, "Error syncing pod, skipping: %v", err)
             return err
         }
         newWork.updateCompleteFn()
@@ -96,12 +96,12 @@ func (pb *prober) probe(probeType probeType, pod *api.Pod, status api.PodStatus,
     if err != nil {
         glog.V(1).Infof("%s probe for %q errored: %v", probeType, ctrName, err)
         if hasRef {
-            pb.recorder.Eventf(ref, "Unhealthy", "%s probe errored: %v", probeType, err)
+            pb.recorder.Eventf(ref, kubecontainer.ContainerUnhealthy, "%s probe errored: %v", probeType, err)
         }
     } else { // result != probe.Success
         glog.V(1).Infof("%s probe for %q failed (%v): %s", probeType, ctrName, result, output)
         if hasRef {
-            pb.recorder.Eventf(ref, "Unhealthy", "%s probe failed: %s", probeType, output)
+            pb.recorder.Eventf(ref, kubecontainer.ContainerUnhealthy, "%s probe failed: %s", probeType, output)
         }
     }
     return results.Failure, err
@@ -676,13 +676,13 @@ func (r *Runtime) generateEvents(runtimePod *kubecontainer.Pod, reason string, f
     uuid := util.ShortenString(id.uuid, 8)
     switch reason {
     case "Created":
-        r.recorder.Eventf(ref, "Created", "Created with rkt id %v", uuid)
+        r.recorder.Eventf(ref, kubecontainer.CreatedContainer, "Created with rkt id %v", uuid)
     case "Started":
-        r.recorder.Eventf(ref, "Started", "Started with rkt id %v", uuid)
+        r.recorder.Eventf(ref, kubecontainer.StartedContainer, "Started with rkt id %v", uuid)
     case "Failed":
-        r.recorder.Eventf(ref, "Failed", "Failed to start with rkt id %v with error %v", uuid, failure)
+        r.recorder.Eventf(ref, kubecontainer.FailedToStartContainer, "Failed to start with rkt id %v with error %v", uuid, failure)
     case "Killing":
-        r.recorder.Eventf(ref, "Killing", "Killing with rkt id %v", uuid)
+        r.recorder.Eventf(ref, kubecontainer.KillingContainer, "Killing with rkt id %v", uuid)
     default:
         glog.Errorf("rkt: Unexpected event %q", reason)
     }
@@ -707,7 +707,7 @@ func (r *Runtime) RunPod(pod *api.Pod, pullSecrets []api.Secret) error {
             continue
         }
         if prepareErr != nil {
-            r.recorder.Eventf(ref, "Failed", "Failed to create rkt container with error: %v", prepareErr)
+            r.recorder.Eventf(ref, kubecontainer.FailedToCreateContainer, "Failed to create rkt container with error: %v", prepareErr)
             continue
         }
         containerID := runtimePod.Containers[i].ID