kubelet: set failed phase during graceful shutdown

Revert to the previous behavior in 1.21/1.20 of setting the pod phase to
Failed during graceful node shutdown.

Setting pods to the Failed phase ensures that external controllers which
manage pods, such as deployments, will create new pods to replace those
that were shut down. Many customers have taken a dependency on this
behavior, and it was a breaking change in 1.22, so this change reverts to
the previous behavior.

Signed-off-by: David Porter <david@porter.me>
David Porter 2021-12-08 17:53:34 -08:00
parent cdf3ad823a
commit 95264a418d
2 changed files with 5 additions and 1 deletion
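Why the Failed phase matters to workload controllers: the sketch below is illustrative only, not kubelet or controller-manager code, and isActive is a hypothetical helper. It approximates how a controller such as a ReplicaSet counts only non-terminal pods toward its desired replicas, so a pod marked Failed no longer counts and gets replaced.

// Illustrative sketch only; isActive is a hypothetical approximation of a
// controller's activity check (real controllers also consider deletion
// timestamps and other fields).
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func isActive(pod *v1.Pod) bool {
	// Pods in a terminal phase are treated as inactive.
	return pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed
}

func main() {
	shutdownPod := &v1.Pod{Status: v1.PodStatus{Phase: v1.PodFailed}}
	// false: the pod no longer counts toward desired replicas, so the
	// controller creates a replacement.
	fmt.Println(isActive(shutdownPod))
}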


@@ -316,6 +316,10 @@ func (m *managerImpl) processShutdownEvent() error {
 			klog.V(1).InfoS("Shutdown manager killing pod with gracePeriod", "pod", klog.KObj(pod), "gracePeriod", gracePeriodOverride)
 			if err := m.killPodFunc(pod, false, &gracePeriodOverride, func(status *v1.PodStatus) {
+				// set the pod status to failed (unless it was already in a successful terminal phase)
+				if status.Phase != v1.PodSucceeded {
+					status.Phase = v1.PodFailed
+				}
 				status.Message = nodeShutdownMessage
 				status.Reason = nodeShutdownReason
 			}); err != nil {
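For context, here is a self-contained sketch of the status callback the hunk above adds. The two constant values are assumptions copied in for illustration; in the kubelet they are defined elsewhere in the nodeshutdown package.

// A minimal sketch of the killPodFunc status callback: preserve a successful
// terminal phase, fail everything else, and stamp the shutdown reason/message.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

const (
	// Assumed values for illustration.
	nodeShutdownReason  = "Terminated"
	nodeShutdownMessage = "Pod was terminated in response to imminent node shutdown."
)

func markShutdown(status *v1.PodStatus) {
	if status.Phase != v1.PodSucceeded {
		status.Phase = v1.PodFailed
	}
	status.Message = nodeShutdownMessage
	status.Reason = nodeShutdownReason
}

func main() {
	running := v1.PodStatus{Phase: v1.PodRunning}
	completed := v1.PodStatus{Phase: v1.PodSucceeded}
	markShutdown(&running)
	markShutdown(&completed)
	fmt.Println(running.Phase, completed.Phase) // Failed Succeeded
}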


@@ -510,5 +510,5 @@ func isPodShutdown(pod *v1.Pod) bool {
 		}
 	}
-	return pod.Status.Message == podShutdownMessage && pod.Status.Reason == podShutdownReason && hasContainersNotReadyCondition
+	return pod.Status.Message == podShutdownMessage && pod.Status.Reason == podShutdownReason && hasContainersNotReadyCondition && pod.Status.Phase == v1.PodFailed
 }
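And a sketch of the tightened test helper in full, with a usage example. The loop body and constant values are reconstructed assumptions for illustration, since the hunk only shows the changed return statement.

// Sketch of the stricter test helper: beyond the shutdown reason/message and
// a ContainersReady=False condition, the pod's phase must now be Failed.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

const (
	// Assumed values for illustration.
	podShutdownReason  = "Terminated"
	podShutdownMessage = "Pod was terminated in response to imminent node shutdown."
)

func isPodShutdown(pod *v1.Pod) bool {
	if pod == nil {
		return false
	}
	hasContainersNotReadyCondition := false
	for _, cond := range pod.Status.Conditions {
		if cond.Type == v1.ContainersReady && cond.Status == v1.ConditionFalse {
			hasContainersNotReadyCondition = true
		}
	}
	return pod.Status.Message == podShutdownMessage &&
		pod.Status.Reason == podShutdownReason &&
		hasContainersNotReadyCondition &&
		pod.Status.Phase == v1.PodFailed
}

func main() {
	pod := &v1.Pod{Status: v1.PodStatus{
		Phase:   v1.PodFailed,
		Reason:  podShutdownReason,
		Message: podShutdownMessage,
		Conditions: []v1.PodCondition{
			{Type: v1.ContainersReady, Status: v1.ConditionFalse},
		},
	}}
	fmt.Println(isPodShutdown(pod)) // true
}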