Kubelet should not create a new pod sandbox if all containers are done

Derek Carr 2018-09-22 17:09:18 -04:00
parent 8e6172dec2
commit 5f473bc8e1
2 changed files with 27 additions and 0 deletions

@@ -466,6 +466,10 @@ func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *ku
     if createPodSandbox {
         if !shouldRestartOnFailure(pod) && attempt != 0 {
             // Should not restart the pod, just return.
+            // We should not create a sandbox for a pod that is already done.
+            // If all containers are done and should not be restarted, there is no need to create a new sandbox.
+            // This avoids confusing logs for pods whose containers have all exited, where we would otherwise recreate a sandbox only to terminate it again.
+            changes.CreateSandbox = false
             return changes
         }
         if len(pod.Spec.InitContainers) != 0 {
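
For context on the guard above: shouldRestartOnFailure effectively checks that the pod's restart policy is not Never, and changes.CreateSandbox is seeded from createPodSandbox earlier in computePodActions, which is why this early-return path now resets it explicitly. The standalone sketch below only models that decision in isolation; needsNewSandbox and its parameters are made-up names for illustration, not kubelet code.

package main

import "fmt"

// needsNewSandbox is a toy model of the decision changed above: once a pod
// whose containers must never be restarted has already made a sandbox
// attempt, there is nothing left to run, so no new sandbox should be created.
func needsNewSandbox(restartPolicyNever bool, sandboxAttempt uint32, sandboxReady bool) bool {
    if sandboxReady {
        // A ready sandbox already exists and can be reused.
        return false
    }
    if restartPolicyNever && sandboxAttempt != 0 {
        // The pod already ran at least once and nothing may be restarted;
        // a fresh sandbox would only be created and then torn down again.
        return false
    }
    return true
}

func main() {
    fmt.Println(needsNewSandbox(true, 1, false))  // false: the case this commit fixes
    fmt.Println(needsNewSandbox(true, 0, false))  // true: first attempt still needs a sandbox
    fmt.Println(needsNewSandbox(false, 1, false)) // true: restart policy allows restarting containers
}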

@@ -906,6 +906,29 @@ func TestComputePodActions(t *testing.T) {
             // TODO: Add a test case for containers which failed the liveness
             // check. Will need to fake the liveness check result.
         },
+        "Verify we do not create a pod sandbox if no ready sandbox for pod with RestartPolicy=Never and all containers exited": {
+            mutatePodFn: func(pod *v1.Pod) {
+                pod.Spec.RestartPolicy = v1.RestartPolicyNever
+            },
+            mutateStatusFn: func(status *kubecontainer.PodStatus) {
+                // no ready sandbox
+                status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
+                status.SandboxStatuses[0].Metadata.Attempt = uint32(1)
+                // all containers exited
+                for i := range status.ContainerStatuses {
+                    status.ContainerStatuses[i].State = kubecontainer.ContainerStateExited
+                    status.ContainerStatuses[i].ExitCode = 0
+                }
+            },
+            actions: podActions{
+                SandboxID:         baseStatus.SandboxStatuses[0].Id,
+                Attempt:           uint32(2),
+                CreateSandbox:     false,
+                KillPod:           true,
+                ContainersToStart: []int{},
+                ContainersToKill:  map[kubecontainer.ContainerID]containerToKillInfo{},
+            },
+        },
     } {
         pod, status := makeBasePodAndStatus()
         if test.mutatePodFn != nil {
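
The second hunk is cut off inside the table-driven loop. For orientation, the loop body presumably continues by applying the two mutate hooks and comparing the computed actions against the expected podActions; the sketch below, picking up from the last context line above, is a reconstruction under that assumption, and the verifyActions helper name is an assumption rather than something shown in this diff.

        if test.mutatePodFn != nil {
            test.mutatePodFn(pod)
        }
        if test.mutateStatusFn != nil {
            test.mutateStatusFn(status)
        }
        // Compute actions for the mutated pod/status and compare them with
        // the expected podActions declared in the table entry.
        actions := m.computePodActions(pod, status)
        verifyActions(t, &test.actions, &actions, desc)
    }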