Fix printPod panic with spurious container statuses

Jordan Liggitt 2024-05-16 09:16:37 -04:00
parent 7a6931b31c
commit 5c1660c5e9
2 changed files with 31 additions and 0 deletions

View File

@@ -3207,6 +3207,9 @@ func (list SortableResourceNames) Less(i, j int) bool {
 }
 
 func isRestartableInitContainer(initContainer *api.Container) bool {
+	if initContainer == nil {
+		return false
+	}
 	if initContainer.RestartPolicy == nil {
 		return false
 	}
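
For context on the panic: printPod indexes the pod's spec init containers by name and then walks pod.Status.InitContainerStatuses, so a status entry whose name matches no spec container makes that lookup return nil, and the old isRestartableInitContainer dereferenced RestartPolicy on that nil pointer. Below is a minimal, self-contained sketch of the failure mode and the guarded helper; the types and names are illustrative only, not the real printers.go code.

	package main

	import "fmt"

	type ContainerRestartPolicy string

	const ContainerRestartPolicyAlways ContainerRestartPolicy = "Always"

	type Container struct {
		Name          string
		RestartPolicy *ContainerRestartPolicy
	}

	// Guarded helper mirroring the fix: a nil container is simply not restartable.
	func isRestartableInitContainer(c *Container) bool {
		if c == nil {
			return false
		}
		if c.RestartPolicy == nil {
			return false
		}
		return *c.RestartPolicy == ContainerRestartPolicyAlways
	}

	func main() {
		// Spec knows only "init1"; the status list also claims "initinvalid".
		spec := map[string]*Container{"init1": {Name: "init1"}}
		for _, statusName := range []string{"initinvalid", "init1"} {
			c := spec[statusName] // nil for the spurious "initinvalid" status
			// Without the nil check, this call dereferenced c.RestartPolicy
			// and panicked; with it, spurious statuses are ignored.
			fmt.Printf("%s restartable=%v\n", statusName, isRestartableInitContainer(c))
		}
	}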

View File

@@ -25,6 +25,7 @@ import (
 	"time"
 
 	"github.com/google/go-cmp/cmp"
+	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -50,6 +51,7 @@ import (
 	"k8s.io/kubernetes/pkg/apis/storagemigration"
 	"k8s.io/kubernetes/pkg/printers"
 	utilpointer "k8s.io/utils/pointer"
+	"k8s.io/utils/ptr"
 )
 
 var containerRestartPolicyAlways = api.ContainerRestartPolicyAlways
@@ -1718,6 +1720,32 @@ func TestPrintPodWithRestartableInitContainer(t *testing.T) {
 				},
 			},
 		},
+		{
+			// Test pod has container statuses for non-existent initContainers and containers
+			api.Pod{
+				ObjectMeta: metav1.ObjectMeta{Name: "test4"},
+				Spec: api.PodSpec{
+					InitContainers: []api.Container{
+						{Name: "init1", Image: "initimage"},
+						{Name: "sidecar1", Image: "sidecarimage", RestartPolicy: ptr.To(api.ContainerRestartPolicyAlways)},
+					},
+					Containers: []api.Container{{Name: "container1", Image: "containerimage"}},
+				},
+				Status: api.PodStatus{
+					Phase: "Running",
+					InitContainerStatuses: []api.ContainerStatus{
+						{Name: "initinvalid"},
+						{Name: "init1"},
+						{Name: "sidecar1"},
+					},
+					ContainerStatuses: []api.ContainerStatus{
+						{Name: "containerinvalid"},
+						{Name: "container1"},
+					},
+				},
+			},
+			[]metav1.TableRow{{Cells: []interface{}{"test4", "0/2", "Init:0/2", "0", "<unknown>"}}},
+		},
 	}
 
 	for i, test := range tests {
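
The expected row for test4 reads as: the spurious status entries ("initinvalid", "containerinvalid") are ignored, READY stays "0/2" (which is consistent with the restartable init container counting toward the total alongside container1), STATUS stays "Init:0/2" since neither init container has finished, with 0 restarts and an unknown age. Something like the following should run just this test; the package path here is an assumption (only the test name comes from the hunk header), so adjust it as needed:

	go test ./pkg/printers/internalversion/ -run TestPrintPodWithRestartableInitContainer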