Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-09 03:57:41 +00:00)

sidecar containers restart on definition change

commit 51883d5821
parent e9f0ea6f86
@@ -1082,6 +1082,9 @@ func (m *kubeGenericRuntimeManager) computeInitContainerActions(pod *v1.Pod, pod
 			continue
 		}
 
+		message := fmt.Sprintf("Init container %s", container.Name)
+		var reason containerKillReason
+		restartContainer := false
 		switch status.State {
 		case kubecontainer.ContainerStateCreated:
 			// The main sync loop should have created and started the container
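The three variables introduced here set up the pattern the rest of the change follows: each branch of the switch now only records why a restart is needed (message, reason, restartContainer), and a single block added after the switch (the @@ -1200 hunk below) performs the kill/restart bookkeeping. Below is a minimal, self-contained sketch of that accumulate-then-act shape, using simplified, hypothetical types rather than the kubelet's own containerToKillInfo and containerKillReason:

// Illustrative sketch only; not the kubelet's code.
package main

import "fmt"

type killReason string

type killInfo struct {
	name    string
	message string
	reason  killReason
}

// restartDecision mirrors the shape of the refactor: each check only records
// why a restart is needed, and the kill/restart bookkeeping happens once at
// the end. Note that, as in the diff, a definition change sets no reason.
func restartDecision(name string, startupFailed, definitionChanged, livenessFailed bool) (killInfo, bool) {
	message := fmt.Sprintf("Init container %s", name)
	var reason killReason
	restartContainer := false

	switch {
	case startupFailed:
		message = fmt.Sprintf("%s failed startup probe, will be restarted", message)
		reason = "StartupProbe"
		restartContainer = true
	case definitionChanged:
		message = fmt.Sprintf("%s definition changed, will be restarted", message)
		restartContainer = true
	case livenessFailed:
		message = fmt.Sprintf("%s failed liveness probe, will be restarted", message)
		reason = "LivenessProbe"
		restartContainer = true
	}

	if !restartContainer {
		return killInfo{}, false
	}
	// In the real code this is where the container is added to
	// ContainersToKill and InitContainersToStart.
	return killInfo{name: name, message: message, reason: reason}, true
}

func main() {
	info, restart := restartDecision("restartable-init-2", false, true, false)
	fmt.Println(restart, info.message)
}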
@@ -1107,13 +1110,9 @@ func (m *kubeGenericRuntimeManager) computeInitContainerActions(pod *v1.Pod, pod
 					if startup == proberesults.Failure {
 						// If the restartable init container failed the startup probe,
 						// restart it.
-						changes.ContainersToKill[status.ID] = containerToKillInfo{
-							name: container.Name,
-							container: container,
-							message: fmt.Sprintf("Init container %s failed startup probe", container.Name),
-							reason: reasonStartupProbe,
-						}
-						changes.InitContainersToStart = append(changes.InitContainersToStart, i)
+						message = fmt.Sprintf("%s failed startup probe, will be restarted", message)
+						reason = reasonStartupProbe
+						restartContainer = true
 					}
 					break
 				}
@@ -1127,6 +1126,13 @@ func (m *kubeGenericRuntimeManager) computeInitContainerActions(pod *v1.Pod, pod
 					changes.InitContainersToStart = append(changes.InitContainersToStart, i+1)
 				}
 
+				// Restart running sidecar containers which have had their definition changed.
+				if _, _, changed := containerChanged(container, status); changed {
+					message = fmt.Sprintf("%s definition changed, will be restarted", message)
+					restartContainer = true
+					break
+				}
+
 				// A restartable init container does not have to take into account its
 				// liveness probe when it determines to start the next init container.
 				if container.LivenessProbe != nil {
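The check added above delegates to the kubelet's existing containerChanged helper rather than introducing new machinery; the test-fixture hunks further down (which switch the recorded Hash values to kubecontainer.HashContainer of each container's own spec) suggest the comparison is hash-based. The following is a rough, self-contained model of that idea under that assumption — a sketch, not the kubelet's actual implementation:

package main

import (
	"fmt"
	"hash/fnv"
)

// containerSpec is a hypothetical stand-in for the relevant fields of v1.Container.
type containerSpec struct {
	Name  string
	Image string
	Args  []string
}

// hashSpec plays the role of kubecontainer.HashContainer in this sketch:
// any change to the spec produces a different hash.
func hashSpec(c containerSpec) uint64 {
	h := fnv.New64a()
	fmt.Fprintf(h, "%#v", c)
	return h.Sum64()
}

// definitionChanged compares the hash recorded when the container was started
// with a hash of the container's current spec, mirroring the shape of
// containerChanged's (expected, actual, changed) return values.
func definitionChanged(current containerSpec, startedHash uint64) (uint64, uint64, bool) {
	expected := hashSpec(current)
	return expected, startedHash, expected != startedHash
}

func main() {
	spec := containerSpec{Name: "restartable-init-2", Image: "foo-image:v1"}
	started := hashSpec(spec)

	spec.Image = "foo-image:v2" // the pod's init container definition is updated
	if _, _, changed := definitionChanged(spec, started); changed {
		fmt.Println("definition changed, will be restarted")
	}
}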
@@ -1138,15 +1144,13 @@ func (m *kubeGenericRuntimeManager) computeInitContainerActions(pod *v1.Pod, pod
 					if liveness == proberesults.Failure {
 						// If the restartable init container failed the liveness probe,
 						// restart it.
-						changes.ContainersToKill[status.ID] = containerToKillInfo{
-							name: container.Name,
-							container: container,
-							message: fmt.Sprintf("Init container %s failed liveness probe", container.Name),
-							reason: reasonLivenessProbe,
-						}
-						changes.InitContainersToStart = append(changes.InitContainersToStart, i)
+						message = fmt.Sprintf("%s failed liveness probe, will be restarted", message)
+						reason = reasonLivenessProbe
+						restartContainer = true
+						break
 					}
 				}
+
 			} else { // init container
 				// nothing do to but wait for it to finish
 				break
@@ -1180,14 +1184,9 @@ func (m *kubeGenericRuntimeManager) computeInitContainerActions(pod *v1.Pod, pod
 		default: // kubecontainer.ContainerStatusUnknown or other unknown states
 			if types.IsRestartableInitContainer(container) {
 				// If the restartable init container is in unknown state, restart it.
-				changes.ContainersToKill[status.ID] = containerToKillInfo{
-					name: container.Name,
-					container: container,
-					message: fmt.Sprintf("Init container is in %q state, try killing it before restart",
-						status.State),
-					reason: reasonUnknown,
-				}
-				changes.InitContainersToStart = append(changes.InitContainersToStart, i)
+				message = fmt.Sprintf("%s is in %q state, try killing it before restart", message, status.State)
+				reason = reasonUnknown
+				restartContainer = true
 			} else { // init container
 				if !isInitContainerFailed(status) {
 					klog.V(4).InfoS("This should not happen, init container is in unknown state but not failed", "pod", klog.KObj(pod), "containerStatus", status)
@@ -1200,17 +1199,23 @@ func (m *kubeGenericRuntimeManager) computeInitContainerActions(pod *v1.Pod, pod
 				}
 
 				// If the init container is in unknown state, restart it.
-				changes.ContainersToKill[status.ID] = containerToKillInfo{
-					name: container.Name,
-					container: container,
-					message: fmt.Sprintf("Init container is in %q state, try killing it before restart",
-						status.State),
-					reason: reasonUnknown,
-				}
-				changes.InitContainersToStart = append(changes.InitContainersToStart, i)
+				message = fmt.Sprintf("%s is in %q state, try killing it before restart", message, status.State)
+				reason = reasonUnknown
+				restartContainer = true
 			}
 		}
 
+		if restartContainer {
+			changes.ContainersToKill[status.ID] = containerToKillInfo{
+				name: container.Name,
+				container: container,
+				message: message,
+				reason: reason,
+			}
+			changes.InitContainersToStart = append(changes.InitContainersToStart, i)
+			klog.V(4).InfoS("Message for Init Container of pod", "containerName", container.Name, "containerStatusID", status.ID, "pod", klog.KObj(pod), "containerMessage", message)
+		}
+
 		if !isPreviouslyInitialized {
 			// the one before this init container has been initialized
 			isPreviouslyInitialized = true
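With the hunk above, every path through the switch converges on one block: whichever check set restartContainer, the container is added to ContainersToKill with the accumulated message and reason, queued in InitContainersToStart, and the decision is logged once at verbosity 4. This is what lets the new definition-change case reuse the same kill/restart path as the startup probe, liveness probe, and unknown-state handling.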
@@ -1221,6 +1221,19 @@ func getKillMapWithInitContainers(pod *v1.Pod, status *kubecontainer.PodStatus,
 	return m
 }
 
+func modifyKillMapContainerImage(containersToKill map[kubecontainer.ContainerID]containerToKillInfo, status *kubecontainer.PodStatus, cIndexes []int, imageNames []string) map[kubecontainer.ContainerID]containerToKillInfo {
+	for idx, i := range cIndexes {
+		containerKillInfo := containersToKill[status.ContainerStatuses[i].ID]
+		updatedContainer := containerKillInfo.container.DeepCopy()
+		updatedContainer.Image = imageNames[idx]
+		containersToKill[status.ContainerStatuses[i].ID] = containerToKillInfo{
+			container: updatedContainer,
+			name: containerKillInfo.name,
+		}
+	}
+	return containersToKill
+}
+
 func verifyActions(t *testing.T, expected, actual *podActions, desc string) {
 	if actual.ContainersToKill != nil {
 		// Clear the message and reason fields since we don't need to verify them.
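The new test helper rewrites entries of an expected kill map produced by getKillMapWithInitContainers: it deep-copies the container spec stored in each selected containerToKillInfo and overrides its Image, so the expectation matches what computeInitContainerActions builds after a test mutates the pod's init container image. Copying, rather than mutating in place, keeps the shared base pod fixture untouched, and the rewritten entries carry only container and name because verifyActions clears message and reason before comparing.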
@@ -1507,12 +1520,12 @@ func makeBasePodAndStatusWithInitContainers() (*v1.Pod, *kubecontainer.PodStatus
 		{
 			ID: kubecontainer.ContainerID{ID: "initid2"},
 			Name: "init2", State: kubecontainer.ContainerStateExited,
-			Hash: kubecontainer.HashContainer(&pod.Spec.InitContainers[0]),
+			Hash: kubecontainer.HashContainer(&pod.Spec.InitContainers[1]),
 		},
 		{
 			ID: kubecontainer.ContainerID{ID: "initid3"},
 			Name: "init3", State: kubecontainer.ContainerStateExited,
-			Hash: kubecontainer.HashContainer(&pod.Spec.InitContainers[0]),
+			Hash: kubecontainer.HashContainer(&pod.Spec.InitContainers[2]),
 		},
 	}
 	return pod, status
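A small but necessary fixture correction: each recorded status previously took its Hash from InitContainers[0] regardless of which container it described, which went unnoticed while init container hashes were never compared. Now that computeInitContainerActions compares hashes via containerChanged for running restartable init containers, a mismatched fixture hash would read as a definition change, so the statuses here — and in makeBasePodAndStatusWithRestartableInitContainers below — are hashed from the container spec they actually describe.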
@@ -1685,6 +1698,18 @@ func TestComputePodActionsWithRestartableInitContainers(t *testing.T) {
 				m.startupManager.Remove(status.ContainerStatuses[2].ID)
 			},
 		},
+		"kill and recreate the restartable init container if the container definition changes": {
+			mutatePodFn: func(pod *v1.Pod) {
+				pod.Spec.RestartPolicy = v1.RestartPolicyAlways
+				pod.Spec.InitContainers[2].Image = "foo-image"
+			},
+			actions: podActions{
+				SandboxID: baseStatus.SandboxStatuses[0].Id,
+				InitContainersToStart: []int{2},
+				ContainersToKill: modifyKillMapContainerImage(getKillMapWithInitContainers(basePod, baseStatus, []int{2}), baseStatus, []int{2}, []string{"foo-image"}),
+				ContainersToStart: []int{0, 1, 2},
+			},
+		},
 		"restart terminated restartable init container and next init container": {
 			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
 			mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
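The new test case exercises the feature at the podActions level: the pod's third restartable init container gets a new image ("foo-image"), and the expected actions kill exactly that container — with the expected kill map rewritten by modifyKillMapContainerImage to carry the updated image — and list it in InitContainersToStart, while ContainersToStart covers the regular containers as in the surrounding cases.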
@@ -1911,12 +1936,12 @@ func makeBasePodAndStatusWithRestartableInitContainers() (*v1.Pod, *kubecontaine
 		{
 			ID: kubecontainer.ContainerID{ID: "initid2"},
 			Name: "restartable-init-2", State: kubecontainer.ContainerStateRunning,
-			Hash: kubecontainer.HashContainer(&pod.Spec.InitContainers[0]),
+			Hash: kubecontainer.HashContainer(&pod.Spec.InitContainers[1]),
 		},
 		{
 			ID: kubecontainer.ContainerID{ID: "initid3"},
 			Name: "restartable-init-3", State: kubecontainer.ContainerStateRunning,
-			Hash: kubecontainer.HashContainer(&pod.Spec.InitContainers[0]),
+			Hash: kubecontainer.HashContainer(&pod.Spec.InitContainers[2]),
 		},
 	}
 	return pod, status
File diff suppressed because it is too large