Check pod RestartPolicy before restarting unhealthy containers

Tim St. Clair 2015-10-06 13:14:26 -07:00
parent 7ba48583fa
commit f67879eac7
2 changed files with 5 additions and 3 deletions
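
Previously the kubelet killed and re-created any container that failed its liveness probe, regardless of the pod's restart policy. This change guards both sync paths (the Docker manager and the rkt runtime) so that an unhealthy container is no longer marked for kill-and-recreate when pod.Spec.RestartPolicy is api.RestartPolicyNever.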


@@ -1724,8 +1724,10 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, runningPod kubecontainer.Pod,
 			containersToKeep[containerID] = index
 			continue
 		}
-		glog.Infof("pod %q container %q is unhealthy (probe result: %v), it will be killed and re-created.", podFullName, container.Name, result)
-		containersToStart[index] = empty{}
+		if pod.Spec.RestartPolicy != api.RestartPolicyNever {
+			glog.Infof("pod %q container %q is unhealthy (probe result: %v), it will be killed and re-created.", podFullName, container.Name, result)
+			containersToStart[index] = empty{}
+		}
 	}
 	// After the loop one of the following should be true:


@@ -1022,7 +1022,7 @@ func (r *runtime) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, podStatus
 		result, err := r.prober.ProbeLiveness(pod, podStatus, container, string(c.ID), c.Created)
 		// TODO(vmarmol): examine this logic.
-		if err == nil && result != probe.Success {
+		if err == nil && result != probe.Success && pod.Spec.RestartPolicy != api.RestartPolicyNever {
 			glog.Infof("Pod %q container %q is unhealthy (probe result: %v), it will be killed and re-created.", podFullName, container.Name, result)
 			restartPod = true
 			break
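
Distilled below is the condition both hunks introduce, as a minimal self-contained Go sketch. The RestartPolicy values mirror the api package's "Always"/"OnFailure"/"Never"; ProbeResult and shouldRestartUnhealthy are hypothetical stand-ins for probe.Result and the kubelet's inline checks, used here only for illustration.

package main

import "fmt"

// RestartPolicy mirrors api.RestartPolicy.
type RestartPolicy string

const (
	RestartPolicyAlways    RestartPolicy = "Always"
	RestartPolicyOnFailure RestartPolicy = "OnFailure"
	RestartPolicyNever     RestartPolicy = "Never"
)

// ProbeResult is a simplified stand-in for probe.Result;
// Success mirrors probe.Success.
type ProbeResult int

const (
	Success ProbeResult = iota
	Failure
)

// shouldRestartUnhealthy reproduces the guard this commit adds:
// an unhealthy container is killed and re-created only when the
// pod's restart policy is not Never.
func shouldRestartUnhealthy(result ProbeResult, policy RestartPolicy) bool {
	return result != Success && policy != RestartPolicyNever
}

func main() {
	fmt.Println(shouldRestartUnhealthy(Failure, RestartPolicyAlways)) // true: kill and re-create
	fmt.Println(shouldRestartUnhealthy(Failure, RestartPolicyNever))  // false: leave the container alone
	fmt.Println(shouldRestartUnhealthy(Success, RestartPolicyAlways)) // false: healthy, nothing to do
}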