Merge pull request #121389 from aleksandra-malinowska/sts-restart-always
Resubmit "Make StatefulSet restart pods with phase Succeeded"
@@ -376,13 +376,27 @@ func (ssc *defaultStatefulSetControl) processReplica(
 	replicas []*v1.Pod,
 	i int) (bool, error) {
 	logger := klog.FromContext(ctx)
-	// delete and recreate failed pods
-	if isFailed(replicas[i]) {
-		ssc.recorder.Eventf(set, v1.EventTypeWarning, "RecreatingFailedPod",
-			"StatefulSet %s/%s is recreating failed Pod %s",
-			set.Namespace,
-			set.Name,
-			replicas[i].Name)
+	// Delete and recreate pods which finished running.
+	//
+	// Note that pods with phase Succeeded will also trigger this event. This is
+	// because final pod phase of evicted or otherwise forcibly stopped pods
+	// (e.g. terminated on node reboot) is determined by the exit code of the
+	// container, not by the reason for pod termination. We should restart the pod
+	// regardless of the exit code.
+	if isFailed(replicas[i]) || isSucceeded(replicas[i]) {
+		if isFailed(replicas[i]) {
+			ssc.recorder.Eventf(set, v1.EventTypeWarning, "RecreatingFailedPod",
+				"StatefulSet %s/%s is recreating failed Pod %s",
+				set.Namespace,
+				set.Name,
+				replicas[i].Name)
+		} else {
+			ssc.recorder.Eventf(set, v1.EventTypeNormal, "RecreatingTerminatedPod",
+				"StatefulSet %s/%s is recreating terminated Pod %s",
+				set.Namespace,
+				set.Name,
+				replicas[i].Name)
+		}
 		if err := ssc.podControl.DeleteStatefulPod(set, replicas[i]); err != nil {
 			return true, err
 		}
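The new branch condition depends on two pod-phase predicates, isFailed and isSucceeded. The sketch below is an assumption about their shape (thin checks on pod.Status.Phase), not a copy of the upstream helpers:

// Assumed pod-phase predicates used by the diff above; illustrative only.
package statefulset

import v1 "k8s.io/api/core/v1"

// isFailed reports whether the pod has reached the terminal phase Failed.
func isFailed(pod *v1.Pod) bool {
	return pod.Status.Phase == v1.PodFailed
}

// isSucceeded reports whether the pod has reached the terminal phase Succeeded,
// e.g. all containers exited with code 0, which can also happen to a pod that
// was forcibly stopped (node reboot) rather than one that "completed" its work.
func isSucceeded(pod *v1.Pod) bool {
	return pod.Status.Phase == v1.PodSucceeded
}

Because both PodFailed and PodSucceeded are terminal phases, a StatefulSet replica in either phase will never run again on its own; deleting it lets the controller's normal reconciliation recreate a pod with the same ordinal identity.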