Merge pull request #128516 from gjkim42/group-container-statuses-by-its-sandboxid

Ensure that the pod has proper phase upon re-initialization
Kubernetes Prow Robot authored 2025-03-20 11:24:31 -07:00, committed by GitHub
commit e9eff7445e
10 changed files with 291 additions and 45 deletions

View File

@@ -396,6 +396,8 @@ func MakePortMappings(container *v1.Container) (ports []PortMapping) {

 // HasAnyRegularContainerStarted returns true if any regular container has
 // started, which indicates all init containers have been initialized.
+// Deprecated: This function is not accurate when its pod sandbox is recreated.
+// Use HasAnyActiveRegularContainerStarted instead.
 func HasAnyRegularContainerStarted(spec *v1.PodSpec, statuses []v1.ContainerStatus) bool {
     if len(statuses) == 0 {
         return false
@@ -417,3 +419,26 @@ func HasAnyRegularContainerStarted(spec *v1.PodSpec, statuses []v1.ContainerStat
     return false
 }
+
+// HasAnyActiveRegularContainerStarted returns true if any regular container of
+// the current pod sandbox has started, which indicates all init containers
+// have been initialized.
+func HasAnyActiveRegularContainerStarted(spec *v1.PodSpec, podStatus *PodStatus) bool {
+    if podStatus == nil {
+        return false
+    }
+
+    containerNames := sets.New[string]()
+    for _, c := range spec.Containers {
+        containerNames.Insert(c.Name)
+    }
+
+    for _, status := range podStatus.ActiveContainerStatuses {
+        if !containerNames.Has(status.Name) {
+            continue
+        }
+
+        return true
+    }
+
+    return false
+}
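A quick usage sketch of the new helper (the shapes mirror the unit tests added below, not any additional API): because only ActiveContainerStatuses is consulted, a pod whose sandbox was just recreated does not count as initialized until a regular container starts in the new sandbox.

    spec := &v1.PodSpec{
        InitContainers: []v1.Container{{Name: "init"}},
        Containers:     []v1.Container{{Name: "regular"}},
    }
    // Freshly recreated sandbox: only the init container has started in it.
    podStatus := &PodStatus{
        ActiveContainerStatuses: []*Status{
            {Name: "init", State: ContainerStateRunning},
        },
    }
    started := HasAnyActiveRegularContainerStarted(spec, podStatus) // false: still re-initializing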

View File

@@ -27,6 +27,7 @@ import (
     v1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
 )

 func TestEnvVarsToMap(t *testing.T) {
@@ -989,3 +990,142 @@ func TestHashContainerWithoutResources(t *testing.T) {
         })
     }
 }
+
+func TestHasAnyActiveRegularContainerStarted(t *testing.T) {
+    testCases := []struct {
+        desc      string
+        spec      *v1.PodSpec
+        podStatus *PodStatus
+        expected  bool
+    }{
+        {
+            desc: "pod has no active container",
+            spec: &v1.PodSpec{
+                InitContainers: []v1.Container{
+                    {
+                        Name: "init",
+                    },
+                },
+                Containers: []v1.Container{
+                    {
+                        Name: "regular",
+                    },
+                },
+            },
+            podStatus: &PodStatus{
+                SandboxStatuses: []*runtimeapi.PodSandboxStatus{
+                    {
+                        Id:    "old",
+                        State: runtimeapi.PodSandboxState_SANDBOX_NOTREADY,
+                    },
+                },
+            },
+            expected: false,
+        },
+        {
+            desc: "pod is initializing",
+            spec: &v1.PodSpec{
+                InitContainers: []v1.Container{
+                    {
+                        Name: "init",
+                    },
+                },
+                Containers: []v1.Container{
+                    {
+                        Name: "regular",
+                    },
+                },
+            },
+            podStatus: &PodStatus{
+                SandboxStatuses: []*runtimeapi.PodSandboxStatus{
+                    {
+                        Id:    "current",
+                        State: runtimeapi.PodSandboxState_SANDBOX_READY,
+                    },
+                },
+                ActiveContainerStatuses: []*Status{
+                    {
+                        Name:  "init",
+                        State: ContainerStateRunning,
+                    },
+                },
+            },
+            expected: false,
+        },
+        {
+            desc: "pod has initialized",
+            spec: &v1.PodSpec{
+                InitContainers: []v1.Container{
+                    {
+                        Name: "init",
+                    },
+                },
+                Containers: []v1.Container{
+                    {
+                        Name: "regular",
+                    },
+                },
+            },
+            podStatus: &PodStatus{
+                SandboxStatuses: []*runtimeapi.PodSandboxStatus{
+                    {
+                        Id:    "current",
+                        State: runtimeapi.PodSandboxState_SANDBOX_READY,
+                    },
+                },
+                ActiveContainerStatuses: []*Status{
+                    {
+                        Name:  "init",
+                        State: ContainerStateExited,
+                    },
+                    {
+                        Name:  "regular",
+                        State: ContainerStateRunning,
+                    },
+                },
+            },
+            expected: true,
+        },
+        {
+            desc: "pod is re-initializing after the sandbox recreation",
+            spec: &v1.PodSpec{
+                InitContainers: []v1.Container{
+                    {
+                        Name: "init",
+                    },
+                },
+                Containers: []v1.Container{
+                    {
+                        Name: "regular",
+                    },
+                },
+            },
+            podStatus: &PodStatus{
+                SandboxStatuses: []*runtimeapi.PodSandboxStatus{
+                    {
+                        Id:    "current",
+                        State: runtimeapi.PodSandboxState_SANDBOX_READY,
+                    },
+                    {
+                        Id:    "old",
+                        State: runtimeapi.PodSandboxState_SANDBOX_NOTREADY,
+                    },
+                },
+                ActiveContainerStatuses: []*Status{
+                    {
+                        Name:  "init",
+                        State: ContainerStateRunning,
+                    },
+                },
+            },
+            expected: false,
+        },
+    }
+
+    for _, tc := range testCases {
+        t.Run(tc.desc, func(t *testing.T) {
+            actual := HasAnyActiveRegularContainerStarted(tc.spec, tc.podStatus)
+            assert.Equal(t, tc.expected, actual)
+        })
+    }
+}

View File

@@ -321,6 +321,8 @@ type PodStatus struct {
     IPs []string
     // Status of containers in the pod.
     ContainerStatuses []*Status
+    // Statuses of containers of the active sandbox in the pod.
+    ActiveContainerStatuses []*Status
     // Status of the pod sandbox.
     // Only for kuberuntime now, other runtime may keep it nil.
     SandboxStatuses []*runtimeapi.PodSandboxStatus
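To make the two fields concrete, here is a hedged sketch of a PodStatus right after a sandbox recreation (the IDs and states are illustrative, mirroring the unit tests above): the stale regular container still appears in ContainerStatuses but not in ActiveContainerStatuses.

    status := &PodStatus{
        SandboxStatuses: []*runtimeapi.PodSandboxStatus{
            {Id: "current", State: runtimeapi.PodSandboxState_SANDBOX_READY},
            {Id: "old", State: runtimeapi.PodSandboxState_SANDBOX_NOTREADY},
        },
        // All known containers, including one left over from the old sandbox.
        ContainerStatuses: []*Status{
            {Name: "init", State: ContainerStateRunning},
            {Name: "regular", State: ContainerStateExited},
        },
        // Only containers created in the "current" (ready) sandbox.
        ActiveContainerStatuses: []*Status{
            {Name: "init", State: ContainerStateRunning},
        },
    }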

View File

@@ -1576,7 +1576,7 @@ func (kl *Kubelet) GetKubeletContainerLogs(ctx context.Context, podFullName, con
 }

 // getPhase returns the phase of a pod given its container info.
-func getPhase(pod *v1.Pod, info []v1.ContainerStatus, podIsTerminal bool) v1.PodPhase {
+func getPhase(pod *v1.Pod, info []v1.ContainerStatus, podIsTerminal, podHasInitialized bool) v1.PodPhase {
     spec := pod.Spec
     pendingRestartableInitContainers := 0
     pendingRegularInitContainers := 0
@@ -1695,7 +1695,7 @@ func getPhase(pod *v1.Pod, info []v1.ContainerStatus, podIsTerminal bool) v1.Pod
             // This is needed to handle the case where the pod has been initialized but
             // the restartable init containers are restarting and the pod should not be
             // placed back into v1.PodPending since the regular containers have run.
-            !kubecontainer.HasAnyRegularContainerStarted(&spec, info)):
+            !podHasInitialized):
         fallthrough
     case waiting > 0:
         klog.V(5).InfoS("Pod waiting > 0, pending")
@@ -1768,7 +1768,7 @@ func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.Po
     s := kl.convertStatusToAPIStatus(pod, podStatus, oldPodStatus)
     // calculate the next phase and preserve reason
     allStatus := append(append([]v1.ContainerStatus{}, s.ContainerStatuses...), s.InitContainerStatuses...)
-    s.Phase = getPhase(pod, allStatus, podIsTerminal)
+    s.Phase = getPhase(pod, allStatus, podIsTerminal, kubecontainer.HasAnyActiveRegularContainerStarted(&pod.Spec, podStatus))
     klog.V(4).InfoS("Got phase for pod", "pod", klog.KObj(pod), "oldPhase", oldPodStatus.Phase, "phase", s.Phase)

     // Perform a three-way merge between the statuses from the status manager,
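Net effect at the call site: the kubelet now derives "has this pod initialized?" from runtime statuses scoped to the active sandbox, rather than from API container statuses that can still carry entries from a deleted sandbox. A minimal sketch, using the names from the hunk above:

    podHasInitialized := kubecontainer.HasAnyActiveRegularContainerStarted(&pod.Spec, podStatus)
    s.Phase = getPhase(pod, allStatus, podIsTerminal, podHasInitialized)
    // With a recreated sandbox where only an init container is running,
    // podHasInitialized is false, so getPhase reports v1.PodPending
    // instead of v1.PodRunning.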

View File

@@ -2306,7 +2306,7 @@ func TestPodPhaseWithRestartAlways(t *testing.T) {
         },
     }
     for _, test := range tests {
-        status := getPhase(test.pod, test.pod.Status.ContainerStatuses, test.podIsTerminal)
+        status := getPhase(test.pod, test.pod.Status.ContainerStatuses, test.podIsTerminal, false)
         assert.Equal(t, test.status, status, "[test %s]", test.test)
     }
 }
@@ -2410,7 +2410,7 @@ func TestPodPhaseWithRestartAlwaysInitContainers(t *testing.T) {
     for _, test := range tests {
         statusInfo := test.pod.Status.InitContainerStatuses
         statusInfo = append(statusInfo, test.pod.Status.ContainerStatuses...)
-        status := getPhase(test.pod, statusInfo, false)
+        status := getPhase(test.pod, statusInfo, false, false)
         assert.Equal(t, test.status, status, "[test %s]", test.test)
     }
 }
@@ -2429,12 +2429,13 @@ func TestPodPhaseWithRestartAlwaysRestartableInitContainers(t *testing.T) {
     }

     tests := []struct {
-        pod           *v1.Pod
-        podIsTerminal bool
-        status        v1.PodPhase
-        test          string
+        pod               *v1.Pod
+        podIsTerminal     bool
+        podHasInitialized bool
+        status            v1.PodPhase
+        test              string
     }{
-        {&v1.Pod{Spec: desiredState, Status: v1.PodStatus{}}, false, v1.PodPending, "empty, waiting"},
+        {&v1.Pod{Spec: desiredState, Status: v1.PodStatus{}}, false, false, v1.PodPending, "empty, waiting"},
         {
             &v1.Pod{
                 Spec: desiredState,
@@ -2445,6 +2446,7 @@ func TestPodPhaseWithRestartAlwaysRestartableInitContainers(t *testing.T) {
                 },
             },
             false,
+            false,
             v1.PodPending,
             "restartable init container running",
         },
@@ -2458,6 +2460,7 @@ func TestPodPhaseWithRestartAlwaysRestartableInitContainers(t *testing.T) {
                 },
             },
             false,
+            false,
             v1.PodPending,
             "restartable init container stopped",
         },
@@ -2471,6 +2474,7 @@ func TestPodPhaseWithRestartAlwaysRestartableInitContainers(t *testing.T) {
                 },
             },
             false,
+            false,
             v1.PodPending,
             "restartable init container waiting, terminated zero",
         },
@@ -2484,6 +2488,7 @@ func TestPodPhaseWithRestartAlwaysRestartableInitContainers(t *testing.T) {
                 },
             },
             false,
+            false,
             v1.PodPending,
             "restartable init container waiting, terminated non-zero",
         },
@@ -2497,6 +2502,7 @@ func TestPodPhaseWithRestartAlwaysRestartableInitContainers(t *testing.T) {
                 },
             },
             false,
+            false,
             v1.PodPending,
             "restartable init container waiting, not terminated",
         },
@@ -2513,6 +2519,7 @@ func TestPodPhaseWithRestartAlwaysRestartableInitContainers(t *testing.T) {
                 },
             },
             false,
+            true,
             v1.PodPending,
             "restartable init container started, 1/2 regular container running",
         },
@@ -2530,6 +2537,7 @@ func TestPodPhaseWithRestartAlwaysRestartableInitContainers(t *testing.T) {
                 },
             },
             false,
+            true,
             v1.PodRunning,
             "restartable init container started, all regular containers running",
         },
@@ -2547,6 +2555,7 @@ func TestPodPhaseWithRestartAlwaysRestartableInitContainers(t *testing.T) {
                 },
             },
             false,
+            true,
             v1.PodRunning,
             "restartable init container running, all regular containers running",
         },
@@ -2564,6 +2573,7 @@ func TestPodPhaseWithRestartAlwaysRestartableInitContainers(t *testing.T) {
                 },
             },
             false,
+            true,
             v1.PodRunning,
             "restartable init container stopped, all regular containers running",
         },
@@ -2581,6 +2591,7 @@ func TestPodPhaseWithRestartAlwaysRestartableInitContainers(t *testing.T) {
                 },
             },
             false,
+            true,
             v1.PodRunning,
             "backoff crashloop restartable init container, all regular containers running",
         },
@@ -2598,6 +2609,7 @@ func TestPodPhaseWithRestartAlwaysRestartableInitContainers(t *testing.T) {
                 },
             },
             true,
+            true,
             v1.PodSucceeded,
             "all regular containers succeeded and restartable init container failed with restart always, but the pod is terminal",
         },
@@ -2615,14 +2627,33 @@ func TestPodPhaseWithRestartAlwaysRestartableInitContainers(t *testing.T) {
                 },
             },
             true,
+            true,
             v1.PodSucceeded,
             "all regular containers succeeded and restartable init container succeeded with restart always, but the pod is terminal",
         },
+        {
+            &v1.Pod{
+                Spec: desiredState,
+                Status: v1.PodStatus{
+                    InitContainerStatuses: []v1.ContainerStatus{
+                        runningState("containerX"),
+                    },
+                    ContainerStatuses: []v1.ContainerStatus{
+                        runningState("containerA"),
+                        runningState("containerB"),
+                    },
+                },
+            },
+            false,
+            false,
+            v1.PodPending,
+            "re-initializing the pod after the sandbox is recreated",
+        },
     }
     for _, test := range tests {
         statusInfo := test.pod.Status.InitContainerStatuses
         statusInfo = append(statusInfo, test.pod.Status.ContainerStatuses...)
-        status := getPhase(test.pod, statusInfo, test.podIsTerminal)
+        status := getPhase(test.pod, statusInfo, test.podIsTerminal, test.podHasInitialized)
         assert.Equal(t, test.status, status, "[test %s]", test.test)
     }
 }
@@ -2641,9 +2672,10 @@ func TestPodPhaseWithRestartAlwaysAndPodHasRun(t *testing.T) {
     }

     tests := []struct {
-        pod    *v1.Pod
-        status v1.PodPhase
-        test   string
+        pod               *v1.Pod
+        podHasInitialized bool
+        status            v1.PodPhase
+        test              string
     }{
         {
             &v1.Pod{
@@ -2658,6 +2690,7 @@ func TestPodPhaseWithRestartAlwaysAndPodHasRun(t *testing.T) {
                     },
                 },
             },
+            false,
             v1.PodPending,
             "regular init containers, restartable init container and regular container are all running",
         },
@@ -2674,6 +2707,7 @@ func TestPodPhaseWithRestartAlwaysAndPodHasRun(t *testing.T) {
                     },
                 },
             },
+            false,
             v1.PodPending,
             "regular containers is stopped, restartable init container and regular int container are both running",
         },
@@ -2690,6 +2724,24 @@ func TestPodPhaseWithRestartAlwaysAndPodHasRun(t *testing.T) {
                     },
                 },
             },
+            false,
+            v1.PodPending,
+            "re-created sandbox: regular init container is succeeded, restartable init container is running, old regular containers is stopped",
+        },
+        {
+            &v1.Pod{
+                Spec: desiredState,
+                Status: v1.PodStatus{
+                    InitContainerStatuses: []v1.ContainerStatus{
+                        succeededState("containerX"),
+                        runningState("containerY"),
+                    },
+                    ContainerStatuses: []v1.ContainerStatus{
+                        stoppedState("containerA"),
+                    },
+                },
+            },
+            true,
             v1.PodRunning,
             "regular init container is succeeded, restartable init container is running, regular containers is stopped",
         },
@@ -2706,6 +2758,7 @@ func TestPodPhaseWithRestartAlwaysAndPodHasRun(t *testing.T) {
                     },
                 },
             },
+            true,
             v1.PodRunning,
             "regular init container is succeeded, restartable init container and regular containers are both running",
         },
@@ -2713,7 +2766,7 @@ func TestPodPhaseWithRestartAlwaysAndPodHasRun(t *testing.T) {
     for _, test := range tests {
         statusInfo := test.pod.Status.InitContainerStatuses
         statusInfo = append(statusInfo, test.pod.Status.ContainerStatuses...)
-        status := getPhase(test.pod, statusInfo, false)
+        status := getPhase(test.pod, statusInfo, false, test.podHasInitialized)
         assert.Equal(t, test.status, status, "[test %s]", test.test)
     }
 }
@@ -2813,7 +2866,7 @@ func TestPodPhaseWithRestartNever(t *testing.T) {
         },
     }
     for _, test := range tests {
-        status := getPhase(test.pod, test.pod.Status.ContainerStatuses, false)
+        status := getPhase(test.pod, test.pod.Status.ContainerStatuses, false, false)
         assert.Equal(t, test.status, status, "[test %s]", test.test)
     }
 }
@@ -2917,7 +2970,7 @@ func TestPodPhaseWithRestartNeverInitContainers(t *testing.T) {
     for _, test := range tests {
         statusInfo := test.pod.Status.InitContainerStatuses
         statusInfo = append(statusInfo, test.pod.Status.ContainerStatuses...)
-        status := getPhase(test.pod, statusInfo, false)
+        status := getPhase(test.pod, statusInfo, false, false)
         assert.Equal(t, test.status, status, "[test %s]", test.test)
     }
 }
@@ -2936,11 +2989,12 @@ func TestPodPhaseWithRestartNeverRestartableInitContainers(t *testing.T) {
     }

     tests := []struct {
-        pod    *v1.Pod
-        status v1.PodPhase
-        test   string
+        pod               *v1.Pod
+        podHasInitialized bool
+        status            v1.PodPhase
+        test              string
     }{
-        {&v1.Pod{Spec: desiredState, Status: v1.PodStatus{}}, v1.PodPending, "empty, waiting"},
+        {&v1.Pod{Spec: desiredState, Status: v1.PodStatus{}}, false, v1.PodPending, "empty, waiting"},
         {
             &v1.Pod{
                 Spec: desiredState,
@@ -2950,6 +3004,7 @@ func TestPodPhaseWithRestartNeverRestartableInitContainers(t *testing.T) {
                     },
                 },
             },
+            false,
             v1.PodPending,
             "restartable init container running",
         },
@@ -2962,6 +3017,7 @@ func TestPodPhaseWithRestartNeverRestartableInitContainers(t *testing.T) {
                     },
                 },
             },
+            false,
             v1.PodPending,
             "restartable init container stopped",
         },
@@ -2974,6 +3030,7 @@ func TestPodPhaseWithRestartNeverRestartableInitContainers(t *testing.T) {
                     },
                 },
             },
+            false,
             v1.PodPending,
             "restartable init container waiting, terminated zero",
         },
@@ -2986,6 +3043,7 @@ func TestPodPhaseWithRestartNeverRestartableInitContainers(t *testing.T) {
                     },
                 },
             },
+            false,
             v1.PodPending,
             "restartable init container waiting, terminated non-zero",
         },
@@ -2998,6 +3056,7 @@ func TestPodPhaseWithRestartNeverRestartableInitContainers(t *testing.T) {
                     },
                 },
             },
+            false,
             v1.PodPending,
             "restartable init container waiting, not terminated",
         },
@@ -3013,6 +3072,7 @@ func TestPodPhaseWithRestartNeverRestartableInitContainers(t *testing.T) {
                     },
                 },
             },
+            true,
             v1.PodPending,
             "restartable init container started, one main container running",
         },
@@ -3029,6 +3089,7 @@ func TestPodPhaseWithRestartNeverRestartableInitContainers(t *testing.T) {
                     },
                 },
             },
+            true,
             v1.PodRunning,
             "restartable init container started, main containers succeeded",
         },
@@ -3045,8 +3106,9 @@ func TestPodPhaseWithRestartNeverRestartableInitContainers(t *testing.T) {
                     },
                 },
             },
+            true,
             v1.PodRunning,
-            "restartable init container running, main containers succeeded",
+            "restartable init container re-running, main containers succeeded",
         },
         {
             &v1.Pod{
@@ -3061,6 +3123,7 @@ func TestPodPhaseWithRestartNeverRestartableInitContainers(t *testing.T) {
                     },
                 },
             },
+            true,
             v1.PodSucceeded,
             "all containers succeeded",
         },
@@ -3077,6 +3140,7 @@ func TestPodPhaseWithRestartNeverRestartableInitContainers(t *testing.T) {
                     },
                 },
             },
+            true,
             v1.PodSucceeded,
             "restartable init container terminated non-zero, main containers succeeded",
         },
@@ -3093,6 +3157,7 @@ func TestPodPhaseWithRestartNeverRestartableInitContainers(t *testing.T) {
                     },
                 },
             },
+            true,
             v1.PodSucceeded,
             "backoff crashloop restartable init container, main containers succeeded",
         },
@@ -3109,6 +3174,7 @@ func TestPodPhaseWithRestartNeverRestartableInitContainers(t *testing.T) {
                     },
                 },
             },
+            true,
             v1.PodSucceeded,
             "backoff crashloop with non-zero restartable init container, main containers succeeded",
         },
@@ -3116,7 +3182,7 @@ func TestPodPhaseWithRestartNeverRestartableInitContainers(t *testing.T) {
     for _, test := range tests {
         statusInfo := test.pod.Status.InitContainerStatuses
         statusInfo = append(statusInfo, test.pod.Status.ContainerStatuses...)
-        status := getPhase(test.pod, statusInfo, false)
+        status := getPhase(test.pod, statusInfo, false, test.podHasInitialized)
         assert.Equal(t, test.status, status, "[test %s]", test.test)
     }
 }
@@ -3229,7 +3295,7 @@ func TestPodPhaseWithRestartOnFailure(t *testing.T) {
         },
     }
     for _, test := range tests {
-        status := getPhase(test.pod, test.pod.Status.ContainerStatuses, false)
+        status := getPhase(test.pod, test.pod.Status.ContainerStatuses, false, false)
         assert.Equal(t, test.status, status, "[test %s]", test.test)
     }
 }

View File

@@ -605,17 +605,18 @@ func (m *kubeGenericRuntimeManager) convertToKubeContainerStatus(status *runtime
 }

 // getPodContainerStatuses gets all containers' statuses for the pod.
-func (m *kubeGenericRuntimeManager) getPodContainerStatuses(ctx context.Context, uid kubetypes.UID, name, namespace string) ([]*kubecontainer.Status, error) {
+func (m *kubeGenericRuntimeManager) getPodContainerStatuses(ctx context.Context, uid kubetypes.UID, name, namespace, activePodSandboxID string) ([]*kubecontainer.Status, []*kubecontainer.Status, error) {
     // Select all containers of the given pod.
     containers, err := m.runtimeService.ListContainers(ctx, &runtimeapi.ContainerFilter{
         LabelSelector: map[string]string{kubelettypes.KubernetesPodUIDLabel: string(uid)},
     })
     if err != nil {
         klog.ErrorS(err, "ListContainers error")
-        return nil, err
+        return nil, nil, err
     }

     statuses := []*kubecontainer.Status{}
+    activeContainerStatuses := []*kubecontainer.Status{}
     // TODO: optimization: set maximum number of containers per container name to examine.
     for _, c := range containers {
         resp, err := m.runtimeService.ContainerStatus(ctx, c.Id, false)
@@ -629,18 +630,22 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(ctx context.Context,
         if err != nil {
             // Merely log this here; GetPodStatus will actually report the error out.
             klog.V(4).InfoS("ContainerStatus return error", "containerID", c.Id, "err", err)
-            return nil, err
+            return nil, nil, err
         }
         status := resp.GetStatus()
         if status == nil {
-            return nil, remote.ErrContainerStatusNil
+            return nil, nil, remote.ErrContainerStatusNil
         }
         cStatus := m.convertToKubeContainerStatus(status)
         statuses = append(statuses, cStatus)
+        if c.PodSandboxId == activePodSandboxID {
+            activeContainerStatuses = append(activeContainerStatuses, cStatus)
+        }
     }

     sort.Sort(containerStatusByCreated(statuses))
-    return statuses, nil
+    sort.Sort(containerStatusByCreated(activeContainerStatuses))
+    return statuses, activeContainerStatuses, nil
 }

 func toKubeContainerStatus(status *runtimeapi.ContainerStatus, runtimeName string) *kubecontainer.Status {
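The essential change above is the PodSandboxId comparison inside the loop. A self-contained sketch of the same grouping idea, with a simplified stand-in type (containerInfo and filterActive are hypothetical names for illustration, not CRI API types):

    type containerInfo struct {
        Name         string
        PodSandboxID string
    }

    // filterActive keeps only the containers that belong to the currently
    // ready sandbox; containers from an older, recreated sandbox are dropped.
    func filterActive(all []containerInfo, activePodSandboxID string) []containerInfo {
        active := make([]containerInfo, 0, len(all))
        for _, c := range all {
            if c.PodSandboxID == activePodSandboxID {
                active = append(active, c)
            }
        }
        return active
    }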

View File

@@ -1691,7 +1691,7 @@ func TestUpdatePodSandboxResources(t *testing.T) {
     assert.Len(t, fakeContainers, 1)

     ctx := context.Background()
-    _, err := m.getPodContainerStatuses(ctx, pod.UID, pod.Name, pod.Namespace)
+    _, _, err := m.getPodContainerStatuses(ctx, pod.UID, pod.Name, pod.Namespace, "")
     require.NoError(t, err)

     resourceConfig := &cm.ResourceConfig{}

View File

@@ -959,7 +959,7 @@ func TestUpdateContainerResources(t *testing.T) {
     assert.Len(t, fakeContainers, 1)

     ctx := context.Background()
-    cStatus, err := m.getPodContainerStatuses(ctx, pod.UID, pod.Name, pod.Namespace)
+    cStatus, _, err := m.getPodContainerStatuses(ctx, pod.UID, pod.Name, pod.Namespace, "")
     assert.NoError(t, err)
     containerID := cStatus[0].ID

View File

@@ -1642,9 +1642,11 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(ctx context.Context, uid kubety

     sandboxStatuses := []*runtimeapi.PodSandboxStatus{}
     containerStatuses := []*kubecontainer.Status{}
+    activeContainerStatuses := []*kubecontainer.Status{}
     timestamp := time.Now()

     podIPs := []string{}
+    var activePodSandboxID string
     for idx, podSandboxID := range podSandboxIDs {
         resp, err := m.runtimeService.PodSandboxStatus(ctx, podSandboxID, false)
         // Between List (getSandboxIDByPodUID) and check (PodSandboxStatus) another thread might remove a container, and that is normal.
@@ -1666,6 +1668,7 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(ctx context.Context, uid kubety
         // Only get pod IP from latest sandbox
         if idx == 0 && resp.Status.State == runtimeapi.PodSandboxState_SANDBOX_READY {
             podIPs = m.determinePodSandboxIPs(namespace, name, resp.Status)
+            activePodSandboxID = podSandboxID
         }

         if idx == 0 && utilfeature.DefaultFeatureGate.Enabled(features.EventedPLEG) {
@@ -1676,7 +1679,7 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(ctx context.Context, uid kubety
             // features gate enabled, which includes Evented PLEG, but uses the
             // runtime without Evented PLEG support.
             klog.V(4).InfoS("Runtime does not set pod status timestamp", "pod", klog.KObj(pod))
-            containerStatuses, err = m.getPodContainerStatuses(ctx, uid, name, namespace)
+            containerStatuses, activeContainerStatuses, err = m.getPodContainerStatuses(ctx, uid, name, namespace, activePodSandboxID)
             if err != nil {
                 if m.logReduction.ShouldMessageBePrinted(err.Error(), podFullName) {
                     klog.ErrorS(err, "getPodContainerStatuses for pod failed", "pod", klog.KObj(pod))
@@ -1697,7 +1700,7 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(ctx context.Context, uid kubety

     if !utilfeature.DefaultFeatureGate.Enabled(features.EventedPLEG) {
         // Get statuses of all containers visible in the pod.
-        containerStatuses, err = m.getPodContainerStatuses(ctx, uid, name, namespace)
+        containerStatuses, activeContainerStatuses, err = m.getPodContainerStatuses(ctx, uid, name, namespace, activePodSandboxID)
         if err != nil {
             if m.logReduction.ShouldMessageBePrinted(err.Error(), podFullName) {
                 klog.ErrorS(err, "getPodContainerStatuses for pod failed", "pod", klog.KObj(pod))
@@ -1708,13 +1711,14 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(ctx context.Context, uid kubety

     m.logReduction.ClearID(podFullName)

     return &kubecontainer.PodStatus{
-        ID:                uid,
-        Name:              name,
-        Namespace:         namespace,
-        IPs:               podIPs,
-        SandboxStatuses:   sandboxStatuses,
-        ContainerStatuses: containerStatuses,
-        TimeStamp:         timestamp,
+        ID:                      uid,
+        Name:                    name,
+        Namespace:               namespace,
+        IPs:                     podIPs,
+        SandboxStatuses:         sandboxStatuses,
+        ContainerStatuses:       containerStatuses,
+        ActiveContainerStatuses: activeContainerStatuses,
+        TimeStamp:               timestamp,
     }, nil
 }
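Since podSandboxIDs is ordered newest-first, the active sandbox is simply the most recent one, and only when it is ready. A hedged restatement of that selection (activeSandboxID is a hypothetical helper written for illustration, not part of the change):

    func activeSandboxID(sandboxStatuses []*runtimeapi.PodSandboxStatus) string {
        // Sandboxes are listed newest-first. An empty result means no sandbox
        // is currently active (for example, right after a node reboot), so no
        // container status will be classified as active.
        if len(sandboxStatuses) > 0 && sandboxStatuses[0].State == runtimeapi.PodSandboxState_SANDBOX_READY {
            return sandboxStatuses[0].Id
        }
        return ""
    }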

View File

@@ -5417,7 +5417,7 @@ var _ = SIGDescribe(feature.SidecarContainers, framework.WithSerial(), "Containe
     f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

     ginkgo.When("A node running restartable init containers reboots", func() {
-        ginkgo.It("should restart the containers in right order after the node reboot", func(ctx context.Context) {
+        ginkgo.It("should restart the containers in right order with the proper phase after the node reboot", func(ctx context.Context) {
             init1 := "init-1"
             restartableInit2 := "restartable-init-2"
             init3 := "init-3"
@@ -5505,18 +5505,22 @@ var _ = SIGDescribe(feature.SidecarContainers, framework.WithSerial(), "Containe
                 return kubeletHealthCheck(kubeletHealthCheckURL)
             }, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeTrueBecause("kubelet was expected to be healthy"))

-            ginkgo.By("Waiting for the pod to be re-initialized and run")
+            ginkgo.By("Waiting for the pod to re-initialize")
             err = e2epod.WaitForPodCondition(ctx, f.ClientSet, pod.Namespace, pod.Name, "re-initialized", f.Timeouts.PodStart, func(pod *v1.Pod) (bool, error) {
-                if pod.Status.ContainerStatuses[0].RestartCount < 1 {
+                if pod.Status.InitContainerStatuses[0].RestartCount < 1 {
                     return false, nil
                 }
-                if pod.Status.Phase != v1.PodRunning {
-                    return false, nil
+                if pod.Status.Phase != v1.PodPending {
+                    return false, fmt.Errorf("pod should remain in the pending phase until it is re-initialized, but it is %q", pod.Status.Phase)
                 }
                 return true, nil
             })
             framework.ExpectNoError(err)

+            ginkgo.By("Waiting for the pod to run after re-initialization")
+            err = e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod)
+            framework.ExpectNoError(err)
+
             ginkgo.By("Parsing results")
             pod, err = client.Get(ctx, pod.Name, metav1.GetOptions{})
             framework.ExpectNoError(err)
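Note the fail-fast shape of the rewritten wait condition: returning a non-nil error from the callback aborts e2epod.WaitForPodCondition immediately, so a pod that jumps straight back to Running (the pre-fix behavior) fails the test at once instead of after a timeout. Condensed, under that assumption:

    condition := func(pod *v1.Pod) (bool, error) {
        if pod.Status.InitContainerStatuses[0].RestartCount < 1 {
            return false, nil // not re-initialized yet; keep polling
        }
        if pod.Status.Phase != v1.PodPending {
            // The wrong phase is a hard failure, not a state to wait out.
            return false, fmt.Errorf("pod should remain pending, but is %q", pod.Status.Phase)
        }
        return true, nil
    }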