kubelet: Introduce PodInfraContainerChanged().

This function computes ahead of time whether we need to restart the pod
infra container.
Yifan Gu 2015-04-13 10:02:19 -07:00
parent 26f8bc1a68
commit d14cb8f1dd
3 changed files with 60 additions and 23 deletions
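In outline, the new check lets the sync path decide up front whether the infra container must be recreated, instead of discovering it mid-sync. A hedged sketch of the caller side (not part of this commit; names follow the kubelet.go hunk below):

    // Decide up front whether the pod infra container must be recreated.
    createPodInfraContainer := true
    if podInfraContainer := runningPod.FindContainerByName(dockertools.PodInfraContainerName); podInfraContainer != nil {
    	changed, err := kl.containerManager.PodInfraContainerChanged(pod, podInfraContainer)
    	if err != nil {
    		return podContainerChangesSpec{}, err
    	}
    	createPodInfraContainer = changed // restart only if the spec drifted
    }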

@@ -222,9 +222,11 @@ func (f *FakeDockerClient) StopContainer(id string, timeout uint) error {
 	f.Stopped = append(f.Stopped, id)
 	var newList []docker.APIContainers
 	for _, container := range f.ContainerList {
-		if container.ID != id {
-			newList = append(newList, container)
+		if container.ID == id {
+			f.ExitedContainerList = append(f.ExitedContainerList, container)
+			continue
 		}
+		newList = append(newList, container)
 	}
 	f.ContainerList = newList
 }
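With this change, the fake client keeps stopped containers around instead of dropping them, mirroring `docker ps -a` semantics. A hypothetical test-style check of the new behavior (not part of this commit):

    // Stopping a container should move it from ContainerList to ExitedContainerList.
    fakeDocker := &FakeDockerClient{
    	ContainerList: []docker.APIContainers{{ID: "infra"}, {ID: "app"}},
    }
    if err := fakeDocker.StopContainer("infra", 10); err != nil {
    	t.Fatal(err)
    }
    // fakeDocker.ContainerList is now [{ID: "app"}]
    // fakeDocker.ExitedContainerList is now [{ID: "infra"}]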

@@ -584,3 +584,37 @@ func (self *DockerManager) Pull(image string) error {
 func (self *DockerManager) IsImagePresent(image string) (bool, error) {
 	return self.Puller.IsImagePresent(image)
 }
+
+// PodInfraContainerChanged returns true if the pod infra container has changed.
+func (self *DockerManager) PodInfraContainerChanged(pod *api.Pod, podInfraContainer *kubecontainer.Container) (bool, error) {
+	networkMode := ""
+	var ports []api.ContainerPort
+	dockerPodInfraContainer, err := self.client.InspectContainer(string(podInfraContainer.ID))
+	if err != nil {
+		return false, err
+	}
+
+	// Check network mode.
+	if dockerPodInfraContainer.HostConfig != nil {
+		networkMode = dockerPodInfraContainer.HostConfig.NetworkMode
+	}
+	if pod.Spec.HostNetwork {
+		if networkMode != "host" {
+			glog.V(4).Infof("host: %v, %v", pod.Spec.HostNetwork, networkMode)
+			return true, nil
+		}
+	} else {
+		// Docker only exports ports from the pod infra container. Let's
+		// collect all of the relevant ports and export them.
+		for _, container := range pod.Spec.Containers {
+			ports = append(ports, container.Ports...)
+		}
+	}
+
+	expectedPodInfraContainer := &api.Container{
+		Name:  PodInfraContainerName,
+		Image: self.PodInfraContainerImage,
+		Ports: ports,
+	}
+	return podInfraContainer.Hash != HashContainer(expectedPodInfraContainer), nil
+}
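The detection hinges on hashing the expected infra container spec: any difference in the configured infra image or in the union of the pod's container ports yields a hash that no longer matches the one recorded on the running infra container. A rough illustration (hypothetical snippet, assumed to live in the dockertools package scope; `infraImage` is a made-up variable):

    // Two expected infra specs that differ only in exported ports hash
    // differently, so PodInfraContainerChanged would report a change.
    before := &api.Container{Name: PodInfraContainerName, Image: infraImage}
    after := &api.Container{
    	Name:  PodInfraContainerName,
    	Image: infraImage,
    	Ports: []api.ContainerPort{{ContainerPort: 8080}},
    }
    changed := HashContainer(before) != HashContainer(after) // true: port set differs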

@@ -1080,15 +1080,26 @@ func (kl *Kubelet) computePodContainerChanges(pod *api.Pod, runningPod kubeconta
 	createPodInfraContainer := false
 	var podInfraContainerID dockertools.DockerID
+	var changed bool
 	podInfraContainer := runningPod.FindContainerByName(dockertools.PodInfraContainerName)
 	if podInfraContainer != nil {
-		glog.V(4).Infof("Found infra pod for %q", podFullName)
+		glog.V(4).Infof("Found pod infra container for %q", podFullName)
+		changed, err = kl.containerManager.PodInfraContainerChanged(pod, podInfraContainer)
+		if err != nil {
+			return podContainerChangesSpec{}, err
+		}
+	}
+
+	createPodInfraContainer = true
+	if podInfraContainer == nil {
+		glog.V(2).Infof("Need to restart pod infra container for %q because it is not found", podFullName)
+	} else if changed {
+		glog.V(2).Infof("Need to restart pod infra container for %q because it is changed", podFullName)
+	} else {
+		glog.V(4).Infof("Pod infra container looks good, keep it %q", podFullName)
+		createPodInfraContainer = false
 		podInfraContainerID = dockertools.DockerID(podInfraContainer.ID)
 		containersToKeep[podInfraContainerID] = -1
-	} else {
-		glog.V(2).Infof("No Infra Container for %q found. All containers will be restarted.", podFullName)
-		createPodInfraContainer = true
 	}

 	// Do not use the cache here since we need the newest status to check
@@ -1129,22 +1140,11 @@ func (kl *Kubelet) computePodContainerChanges(pod *api.Pod, runningPod kubeconta
 				containersToKeep[containerID] = index
 				continue
 			}
-			glog.Infof("pod %q container %q is unhealthy (probe result: %v). Container will be killed and re-created.", podFullName, container.Name, result)
-			containersToStart[index] = empty{}
+			glog.Infof("pod %q container %q is unhealthy (probe result: %v), it will be killed and re-created.", podFullName, container.Name, result)
 		} else {
-			glog.Infof("pod %q container %q hash changed (%d vs %d). Pod will be killed and re-created.", podFullName, container.Name, hash, expectedHash)
-			createPodInfraContainer = true
-			delete(containersToKeep, podInfraContainerID)
-			// If we are to restart Infra Container then we move containersToKeep into containersToStart
-			// if RestartPolicy allows restarting failed containers.
-			if pod.Spec.RestartPolicy != api.RestartPolicyNever {
-				for _, v := range containersToKeep {
-					containersToStart[v] = empty{}
-				}
-			}
-			containersToStart[index] = empty{}
-			containersToKeep = make(map[dockertools.DockerID]int)
+			glog.Infof("pod %q container %q hash changed (%d vs %d), it will be killed and re-created.", podFullName, container.Name, hash, expectedHash)
 		}
+		containersToStart[index] = empty{}
 	} else { // createPodInfraContainer == true and Container exists
 		// If we're creating infra containere everything will be killed anyway
 		// If RestartPolicy is Always or OnFailure we restart containers that were running before we
@@ -1167,7 +1167,8 @@ func (kl *Kubelet) computePodContainerChanges(pod *api.Pod, runningPod kubeconta
 	}

 	// After the loop one of the following should be true:
-	// - createPodInfraContainer is true and containersToKeep is empty
+	// - createPodInfraContainer is true and containersToKeep is empty.
+	// (In fact, when createPodInfraContainer is false, containersToKeep will not be touched).
 	// - createPodInfraContainer is false and containersToKeep contains at least ID of Infra Container
 	// If Infra container is the last running one, we don't want to keep it.
@@ -1221,7 +1222,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont
 	if containerChanges.startInfraContainer || (len(containerChanges.containersToKeep) == 0 && len(containerChanges.containersToStart) == 0) {
 		if len(containerChanges.containersToKeep) == 0 && len(containerChanges.containersToStart) == 0 {
-			glog.V(4).Infof("Killing Infra Container for %q becase all other containers are dead.", podFullName)
+			glog.V(4).Infof("Killing Infra Container for %q because all other containers are dead.", podFullName)
 		} else {
 			glog.V(4).Infof("Killing Infra Container for %q, will start new one", podFullName)
 		}