kuberuntime: add unit tests for container status population
Also refactor a little bit to make the function more testable.
parent 7c75f5c551
commit de0438a5c8
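In outline, the change replaces the inline status construction in getPodContainerStatuses with a call to a new standalone helper (shown in full in the diff below):

    func toKubeContainerStatus(status *runtimeapi.ContainerStatus, runtimeName string) *kubecontainer.ContainerStatus

Because the helper depends only on its two arguments, it can be exercised with hand-built CRI statuses, which is what the new TestToKubeContainerStatus does.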
pkg/kubelet/kuberuntime/kuberuntime_container.go

@@ -385,34 +385,11 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, n
             glog.Errorf("ContainerStatus for %s error: %v", c.Id, err)
             return nil, err
         }
-
-        annotatedInfo := getContainerInfoFromAnnotations(c.Annotations)
-        labeledInfo := getContainerInfoFromLabels(c.Labels)
-        cStatus := &kubecontainer.ContainerStatus{
-            ID: kubecontainer.ContainerID{
-                Type: m.runtimeName,
-                ID:   c.Id,
-            },
-            Name:         labeledInfo.ContainerName,
-            Image:        status.Image.Image,
-            ImageID:      status.ImageRef,
-            Hash:         annotatedInfo.Hash,
-            RestartCount: annotatedInfo.RestartCount,
-            State:        toKubeContainerState(c.State),
-            CreatedAt:    time.Unix(0, status.CreatedAt),
-        }
-
-        if c.State != runtimeapi.ContainerState_CONTAINER_CREATED {
-            // If container is not in the created state, we have tried and
-            // started the container. Set the StartedAt time.
-            cStatus.StartedAt = time.Unix(0, status.StartedAt)
-        }
-        if c.State == runtimeapi.ContainerState_CONTAINER_EXITED {
-            cStatus.Reason = status.Reason
-            cStatus.Message = status.Message
-            cStatus.ExitCode = int(status.ExitCode)
-            cStatus.FinishedAt = time.Unix(0, status.FinishedAt)
-
+        cStatus := toKubeContainerStatus(status, m.runtimeName)
+        if status.State == runtimeapi.ContainerState_CONTAINER_EXITED {
+            // Populate the termination message if needed.
+            annotatedInfo := getContainerInfoFromAnnotations(status.Annotations)
+            labeledInfo := getContainerInfoFromLabels(status.Labels)
             fallbackToLogs := annotatedInfo.TerminationMessagePolicy == v1.TerminationMessageFallbackToLogsOnError && (cStatus.ExitCode != 0 || cStatus.Reason == "OOMKilled")
             tMessage, checkLogs := getTerminationMessage(status, annotatedInfo.TerminationMessagePath, fallbackToLogs)
             if checkLogs {
@@ -424,7 +401,6 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, n
                 cStatus.Message = tMessage
             }
         }
-
         statuses[i] = cStatus
     }
 
@@ -432,6 +408,37 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, n
     return statuses, nil
 }
 
+func toKubeContainerStatus(status *runtimeapi.ContainerStatus, runtimeName string) *kubecontainer.ContainerStatus {
+    annotatedInfo := getContainerInfoFromAnnotations(status.Annotations)
+    labeledInfo := getContainerInfoFromLabels(status.Labels)
+    cStatus := &kubecontainer.ContainerStatus{
+        ID: kubecontainer.ContainerID{
+            Type: runtimeName,
+            ID:   status.Id,
+        },
+        Name:         labeledInfo.ContainerName,
+        Image:        status.Image.Image,
+        ImageID:      status.ImageRef,
+        Hash:         annotatedInfo.Hash,
+        RestartCount: annotatedInfo.RestartCount,
+        State:        toKubeContainerState(status.State),
+        CreatedAt:    time.Unix(0, status.CreatedAt),
+    }
+
+    if status.State != runtimeapi.ContainerState_CONTAINER_CREATED {
+        // If container is not in the created state, we have tried and
+        // started the container. Set the StartedAt time.
+        cStatus.StartedAt = time.Unix(0, status.StartedAt)
+    }
+    if status.State == runtimeapi.ContainerState_CONTAINER_EXITED {
+        cStatus.Reason = status.Reason
+        cStatus.Message = status.Message
+        cStatus.ExitCode = int(status.ExitCode)
+        cStatus.FinishedAt = time.Unix(0, status.FinishedAt)
+    }
+    return cStatus
+}
+
 // generateContainerEvent generates an event for the container.
 func (m *kubeGenericRuntimeManager) generateContainerEvent(containerID kubecontainer.ContainerID, eventType, reason, message string) {
     ref, ok := m.containerRefManager.GetRef(containerID)
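The extracted helper is a pure function of the CRI status and the runtime name, with no dependency on the runtime manager's state. A rough sketch of a call, with made-up values (not part of the commit); CRI timestamps are nanoseconds since the Unix epoch, which is why the conversions above use time.Unix(0, ns):

    // Sketch only: a hand-built status for an exited container.
    // Image must be non-nil, since the helper reads status.Image.Image.
    status := &runtimeapi.ContainerStatus{
        Id:         "abc123",
        Image:      &runtimeapi.ImageSpec{Image: "busybox"},
        State:      runtimeapi.ContainerState_CONTAINER_EXITED,
        CreatedAt:  1495000000000000000, // nanoseconds since the epoch
        StartedAt:  1495000001000000000,
        FinishedAt: 1495000002000000000,
        ExitCode:   1,
        Reason:     "Error",
    }
    cStatus := toKubeContainerStatus(status, "docker")
    // cStatus.State is kubecontainer.ContainerStateExited;
    // cStatus.FinishedAt is time.Unix(0, 1495000002000000000).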
pkg/kubelet/kuberuntime/kuberuntime_container_test.go

@@ -19,11 +19,14 @@ package kuberuntime
 import (
     "path/filepath"
     "testing"
+    "time"
 
     "github.com/stretchr/testify/assert"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
     "k8s.io/kubernetes/pkg/api/v1"
+    runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1"
+    kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
     containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
 )
 
@@ -65,3 +68,100 @@ func TestRemoveContainer(t *testing.T) {
     assert.NoError(t, err)
     assert.Empty(t, containers)
 }
+
+// TestToKubeContainerStatus tests converting the CRI container status to
+// the internal type (i.e., toKubeContainerStatus()) for containers in
+// different states.
+func TestToKubeContainerStatus(t *testing.T) {
+    cid := &kubecontainer.ContainerID{Type: "testRuntime", ID: "dummyid"}
+    meta := &runtimeapi.ContainerMetadata{Name: "cname", Attempt: 3}
+    imageSpec := &runtimeapi.ImageSpec{Image: "fimage"}
+    var (
+        createdAt  int64 = 327
+        startedAt  int64 = 999
+        finishedAt int64 = 1278
+    )
+
+    for desc, test := range map[string]struct {
+        input    *runtimeapi.ContainerStatus
+        expected *kubecontainer.ContainerStatus
+    }{
+        "created container": {
+            input: &runtimeapi.ContainerStatus{
+                Id:        cid.ID,
+                Metadata:  meta,
+                Image:     imageSpec,
+                State:     runtimeapi.ContainerState_CONTAINER_CREATED,
+                CreatedAt: createdAt,
+            },
+            expected: &kubecontainer.ContainerStatus{
+                ID:        *cid,
+                Image:     imageSpec.Image,
+                State:     kubecontainer.ContainerStateCreated,
+                CreatedAt: time.Unix(0, createdAt),
+            },
+        },
+        "running container": {
+            input: &runtimeapi.ContainerStatus{
+                Id:        cid.ID,
+                Metadata:  meta,
+                Image:     imageSpec,
+                State:     runtimeapi.ContainerState_CONTAINER_RUNNING,
+                CreatedAt: createdAt,
+                StartedAt: startedAt,
+            },
+            expected: &kubecontainer.ContainerStatus{
+                ID:        *cid,
+                Image:     imageSpec.Image,
+                State:     kubecontainer.ContainerStateRunning,
+                CreatedAt: time.Unix(0, createdAt),
+                StartedAt: time.Unix(0, startedAt),
+            },
+        },
+        "exited container": {
+            input: &runtimeapi.ContainerStatus{
+                Id:         cid.ID,
+                Metadata:   meta,
+                Image:      imageSpec,
+                State:      runtimeapi.ContainerState_CONTAINER_EXITED,
+                CreatedAt:  createdAt,
+                StartedAt:  startedAt,
+                FinishedAt: finishedAt,
+                ExitCode:   int32(121),
+                Reason:     "GotKilled",
+                Message:    "The container was killed",
+            },
+            expected: &kubecontainer.ContainerStatus{
+                ID:         *cid,
+                Image:      imageSpec.Image,
+                State:      kubecontainer.ContainerStateExited,
+                CreatedAt:  time.Unix(0, createdAt),
+                StartedAt:  time.Unix(0, startedAt),
+                FinishedAt: time.Unix(0, finishedAt),
+                ExitCode:   121,
+                Reason:     "GotKilled",
+                Message:    "The container was killed",
+            },
+        },
+        "unknown container": {
+            input: &runtimeapi.ContainerStatus{
+                Id:        cid.ID,
+                Metadata:  meta,
+                Image:     imageSpec,
+                State:     runtimeapi.ContainerState_CONTAINER_UNKNOWN,
+                CreatedAt: createdAt,
+                StartedAt: startedAt,
+            },
+            expected: &kubecontainer.ContainerStatus{
+                ID:        *cid,
+                Image:     imageSpec.Image,
+                State:     kubecontainer.ContainerStateUnknown,
+                CreatedAt: time.Unix(0, createdAt),
+                StartedAt: time.Unix(0, startedAt),
+            },
+        },
+    } {
+        actual := toKubeContainerStatus(test.input, cid.Type)
+        assert.Equal(t, test.expected, actual, desc)
+    }
+}
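The test is table-driven: each case pairs a hand-built CRI ContainerStatus with the expected internal status, and a single loop performs the conversion and asserts equality. In a standard checkout, something like go test ./pkg/kubelet/kuberuntime -run TestToKubeContainerStatus should run just this test. Extending coverage is a one-case change; for example (hypothetical, not part of the commit), an OOM-killed container could reuse the same fixtures:

    "oomkilled container": {
        input: &runtimeapi.ContainerStatus{
            Id:         cid.ID,
            Metadata:   meta,
            Image:      imageSpec,
            State:      runtimeapi.ContainerState_CONTAINER_EXITED,
            CreatedAt:  createdAt,
            StartedAt:  startedAt,
            FinishedAt: finishedAt,
            ExitCode:   137, // 128 + SIGKILL
            Reason:     "OOMKilled",
        },
        expected: &kubecontainer.ContainerStatus{
            ID:         *cid,
            Image:      imageSpec.Image,
            State:      kubecontainer.ContainerStateExited,
            CreatedAt:  time.Unix(0, createdAt),
            StartedAt:  time.Unix(0, startedAt),
            FinishedAt: time.Unix(0, finishedAt),
            ExitCode:   137,
            Reason:     "OOMKilled",
        },
    },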