Add support for CRI verbose fields

The remote runtime implementation now supports the `verbose` fields,
which consumers like cri-tools require in order to support multiple
CRI versions.
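
As a rough consumer-side sketch (not part of this change), the flag threads
through the updated interfaces like this; the helper and variable names are
hypothetical, and internalapi refers to k8s.io/cri-api/pkg/apis:

// Hypothetical usage sketch; assumes imports "fmt" and
// internalapi "k8s.io/cri-api/pkg/apis".
func printVerboseContainerStatus(rs internalapi.RuntimeService, containerID string) error {
	resp, err := rs.ContainerStatus(containerID, true) // verbose=true
	if err != nil {
		return err
	}
	// The structured status stays in resp.Status; callers should expect nil.
	if status := resp.GetStatus(); status != nil {
		fmt.Printf("state: %v\n", status.GetState())
	}
	// The verbose payload is the runtime-specific Info map.
	for k, v := range resp.GetInfo() {
		fmt.Printf("%s: %s\n", k, v)
	}
	return nil
}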

Signed-off-by: Sascha Grunert <sgrunert@redhat.com>
Sascha Grunert 2022-02-04 12:34:53 +01:00
parent 56273a6aa3
commit effbcd3a0a
22 changed files with 198 additions and 138 deletions

View File

@ -27,8 +27,8 @@ func fromV1alpha2VersionResponse(from *v1alpha2.VersionResponse) *runtimeapi.Ver
return (*runtimeapi.VersionResponse)(unsafe.Pointer(from))
}
func fromV1alpha2PodSandboxStatus(from *v1alpha2.PodSandboxStatus) *runtimeapi.PodSandboxStatus {
return (*runtimeapi.PodSandboxStatus)(unsafe.Pointer(from))
func fromV1alpha2PodSandboxStatusResponse(from *v1alpha2.PodSandboxStatusResponse) *runtimeapi.PodSandboxStatusResponse {
return (*runtimeapi.PodSandboxStatusResponse)(unsafe.Pointer(from))
}
func fromV1alpha2ListPodSandboxResponse(from *v1alpha2.ListPodSandboxResponse) *runtimeapi.ListPodSandboxResponse {
@ -39,8 +39,8 @@ func fromV1alpha2ListContainersResponse(from *v1alpha2.ListContainersResponse) *
return (*runtimeapi.ListContainersResponse)(unsafe.Pointer(from))
}
func fromV1alpha2ContainerStatus(from *v1alpha2.ContainerStatus) *runtimeapi.ContainerStatus {
return (*runtimeapi.ContainerStatus)(unsafe.Pointer(from))
func fromV1alpha2ContainerStatusResponse(from *v1alpha2.ContainerStatusResponse) *runtimeapi.ContainerStatusResponse {
return (*runtimeapi.ContainerStatusResponse)(unsafe.Pointer(from))
}
func fromV1alpha2ExecResponse(from *v1alpha2.ExecResponse) *runtimeapi.ExecResponse {
@ -61,8 +61,8 @@ func fromV1alpha2PortForwardResponse(from *v1alpha2.PortForwardResponse) *runtim
return (*runtimeapi.PortForwardResponse)(unsafe.Pointer(from))
}
func fromV1alpha2RuntimeStatus(from *v1alpha2.RuntimeStatus) *runtimeapi.RuntimeStatus {
return (*runtimeapi.RuntimeStatus)(unsafe.Pointer(from))
func fromV1alpha2StatusResponse(from *v1alpha2.StatusResponse) *runtimeapi.StatusResponse {
return (*runtimeapi.StatusResponse)(unsafe.Pointer(from))
}
func fromV1alpha2ContainerStats(from *v1alpha2.ContainerStats) *runtimeapi.ContainerStats {
@ -85,8 +85,8 @@ func fromV1alpha2ListPodSandboxStatsResponse(from *v1alpha2.ListPodSandboxStatsR
return (*runtimeapi.ListPodSandboxStatsResponse)(unsafe.Pointer(from))
}
func fromV1alpha2Image(from *v1alpha2.Image) *runtimeapi.Image {
return (*runtimeapi.Image)(unsafe.Pointer(from))
func fromV1alpha2ImageStatusResponse(from *v1alpha2.ImageStatusResponse) *runtimeapi.ImageStatusResponse {
return (*runtimeapi.ImageStatusResponse)(unsafe.Pointer(from))
}
func fromV1alpha2ListImagesResponse(from *v1alpha2.ListImagesResponse) *runtimeapi.ListImagesResponse {
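
Since the verbose Info map lives on the Response messages rather than on the
inner Status/Image types, the converters above now cast the whole response.
The v1 and v1alpha2 generated types are structurally identical, so the existing
zero-copy unsafe.Pointer cast covers both fields at once; a minimal sketch
(alphaResp is a hypothetical *v1alpha2.ContainerStatusResponse):

// One cast converts the structured status and the verbose Info map together.
v1resp := fromV1alpha2ContainerStatusResponse(alphaResp)
_ = v1resp.GetStatus() // *runtimeapi.ContainerStatus
_ = v1resp.GetInfo()   // map[string]string, filled when Verbose was requested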

View File

@ -205,10 +205,10 @@ func TestFromV1alpha2VersionResponse(t *testing.T) {
assertEqual(t, from, to)
}
func TestFromV1alpha2PodSandboxStatus(t *testing.T) {
from := &v1alpha2.PodSandboxStatus{}
func TestFromV1alpha2PodSandboxStatusResponse(t *testing.T) {
from := &v1alpha2.PodSandboxStatusResponse{}
fillFields(from)
to := fromV1alpha2PodSandboxStatus(from)
to := fromV1alpha2PodSandboxStatusResponse(from)
assertEqual(t, from, to)
}
@ -226,10 +226,10 @@ func TestFromV1alpha2ListContainersResponse(t *testing.T) {
assertEqual(t, from, to)
}
func TestFromV1alpha2ContainerStatus(t *testing.T) {
from := &v1alpha2.ContainerStatus{}
func TestFromV1alpha2ContainerStatusResponse(t *testing.T) {
from := &v1alpha2.ContainerStatusResponse{}
fillFields(from)
to := fromV1alpha2ContainerStatus(from)
to := fromV1alpha2ContainerStatusResponse(from)
assertEqual(t, from, to)
}
@ -254,10 +254,10 @@ func TestFromV1alpha2PortForwardResponse(t *testing.T) {
assertEqual(t, from, to)
}
func TestFromV1alpha2RuntimeStatus(t *testing.T) {
from := &v1alpha2.RuntimeStatus{}
func TestFromV1alpha2StatusResponse(t *testing.T) {
from := &v1alpha2.StatusResponse{}
fillFields(from)
to := fromV1alpha2RuntimeStatus(from)
to := fromV1alpha2StatusResponse(from)
assertEqual(t, from, to)
}
@ -296,11 +296,10 @@ func TestFromV1alpha2ListPodSandboxStatsResponse(t *testing.T) {
assertEqual(t, from, to)
}
func TestFromV1alpha2Image(t *testing.T) {
from := &v1alpha2.Image{}
func TestFromV1alpha2ImageStatusResponse(t *testing.T) {
from := &v1alpha2.ImageStatusResponse{}
fillFields(from)
to := fromV1alpha2Image(from)
fmt.Printf(":%+v\n", to)
to := fromV1alpha2ImageStatusResponse(from)
assertEqual(t, from, to)
}

View File

@ -38,12 +38,12 @@ func (f *RemoteRuntime) ListImages(ctx context.Context, req *kubeapi.ListImagesR
// present, returns a response with ImageStatusResponse.Image set to
// nil.
func (f *RemoteRuntime) ImageStatus(ctx context.Context, req *kubeapi.ImageStatusRequest) (*kubeapi.ImageStatusResponse, error) {
status, err := f.ImageService.ImageStatus(req.Image)
resp, err := f.ImageService.ImageStatus(req.Image, false)
if err != nil {
return nil, err
}
return &kubeapi.ImageStatusResponse{Image: status}, nil
return resp, nil
}
// PullImage pulls an image with authentication config.

View File

@ -123,12 +123,12 @@ func (f *RemoteRuntime) RemovePodSandbox(ctx context.Context, req *kubeapi.Remov
// PodSandboxStatus returns the status of the PodSandbox. If the PodSandbox is not
// present, returns an error.
func (f *RemoteRuntime) PodSandboxStatus(ctx context.Context, req *kubeapi.PodSandboxStatusRequest) (*kubeapi.PodSandboxStatusResponse, error) {
podStatus, err := f.RuntimeService.PodSandboxStatus(req.PodSandboxId)
resp, err := f.RuntimeService.PodSandboxStatus(req.PodSandboxId, false)
if err != nil {
return nil, err
}
return &kubeapi.PodSandboxStatusResponse{Status: podStatus}, nil
return resp, nil
}
// ListPodSandbox returns a list of PodSandboxes.
@ -199,12 +199,12 @@ func (f *RemoteRuntime) ListContainers(ctx context.Context, req *kubeapi.ListCon
// ContainerStatus returns status of the container. If the container is not
// present, returns an error.
func (f *RemoteRuntime) ContainerStatus(ctx context.Context, req *kubeapi.ContainerStatusRequest) (*kubeapi.ContainerStatusResponse, error) {
status, err := f.RuntimeService.ContainerStatus(req.ContainerId)
resp, err := f.RuntimeService.ContainerStatus(req.ContainerId, false)
if err != nil {
return nil, err
}
return &kubeapi.ContainerStatusResponse{Status: status}, nil
return resp, nil
}
// ExecSync runs a command in a container synchronously.
@ -295,12 +295,12 @@ func (f *RemoteRuntime) UpdateRuntimeConfig(ctx context.Context, req *kubeapi.Up
// Status returns the status of the runtime.
func (f *RemoteRuntime) Status(ctx context.Context, req *kubeapi.StatusRequest) (*kubeapi.StatusResponse, error) {
status, err := f.RuntimeService.Status()
resp, err := f.RuntimeService.Status(false)
if err != nil {
return nil, err
}
return &kubeapi.StatusResponse{Status: status}, nil
return resp, nil
}
// UpdateContainerResources updates ContainerConfig of the container.

View File

@ -137,7 +137,7 @@ func (r *remoteImageService) listImagesV1(ctx context.Context, filter *runtimeap
}
// ImageStatus returns the status of the image.
func (r *remoteImageService) ImageStatus(image *runtimeapi.ImageSpec) (*runtimeapi.Image, error) {
func (r *remoteImageService) ImageStatus(image *runtimeapi.ImageSpec, verbose bool) (*runtimeapi.ImageStatusResponse, error) {
ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel()
@ -146,15 +146,16 @@ func (r *remoteImageService) ImageStatus(image *runtimeapi.ImageSpec) (*runtimea
// https://github.com/kubernetes/kubernetes/pull/104575/files#r705600987
// https://github.com/kubernetes/kubernetes/pull/104575/files#r696793706
if r.useV1API() {
return r.imageStatusV1(ctx, image)
return r.imageStatusV1(ctx, image, verbose)
}
return r.imageStatusV1alpha2(ctx, image)
return r.imageStatusV1alpha2(ctx, image, verbose)
}
func (r *remoteImageService) imageStatusV1alpha2(ctx context.Context, image *runtimeapi.ImageSpec) (*runtimeapi.Image, error) {
func (r *remoteImageService) imageStatusV1alpha2(ctx context.Context, image *runtimeapi.ImageSpec, verbose bool) (*runtimeapi.ImageStatusResponse, error) {
resp, err := r.imageClientV1alpha2.ImageStatus(ctx, &runtimeapiV1alpha2.ImageStatusRequest{
Image: v1alpha2ImageSpec(image),
Image: v1alpha2ImageSpec(image),
Verbose: verbose,
})
if err != nil {
klog.ErrorS(err, "Get ImageStatus from image service failed", "image", image.Image)
@ -170,12 +171,13 @@ func (r *remoteImageService) imageStatusV1alpha2(ctx context.Context, image *run
}
}
return fromV1alpha2Image(resp.Image), nil
return fromV1alpha2ImageStatusResponse(resp), nil
}
func (r *remoteImageService) imageStatusV1(ctx context.Context, image *runtimeapi.ImageSpec) (*runtimeapi.Image, error) {
func (r *remoteImageService) imageStatusV1(ctx context.Context, image *runtimeapi.ImageSpec, verbose bool) (*runtimeapi.ImageStatusResponse, error) {
resp, err := r.imageClient.ImageStatus(ctx, &runtimeapi.ImageStatusRequest{
Image: image,
Image: image,
Verbose: verbose,
})
if err != nil {
klog.ErrorS(err, "Get ImageStatus from image service failed", "image", image.Image)
@ -191,7 +193,7 @@ func (r *remoteImageService) imageStatusV1(ctx context.Context, image *runtimeap
}
}
return resp.Image, nil
return resp, nil
}
// PullImage pulls an image with authentication config.

View File

@ -55,6 +55,9 @@ const (
// versions.
type CRIVersion string
// ErrContainerStatusNil indicates that the returned container status is nil.
var ErrContainerStatusNil = errors.New("container status is nil")
const (
// CRIVersionV1 references the v1 CRI API.
CRIVersionV1 CRIVersion = "v1"
@ -278,21 +281,22 @@ func (r *remoteRuntimeService) RemovePodSandbox(podSandBoxID string) (err error)
}
// PodSandboxStatus returns the status of the PodSandbox.
func (r *remoteRuntimeService) PodSandboxStatus(podSandBoxID string) (*runtimeapi.PodSandboxStatus, error) {
func (r *remoteRuntimeService) PodSandboxStatus(podSandBoxID string, verbose bool) (*runtimeapi.PodSandboxStatusResponse, error) {
klog.V(10).InfoS("[RemoteRuntimeService] PodSandboxStatus", "podSandboxID", podSandBoxID, "timeout", r.timeout)
ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel()
if r.useV1API() {
return r.podSandboxStatusV1(ctx, podSandBoxID)
return r.podSandboxStatusV1(ctx, podSandBoxID, verbose)
}
return r.podSandboxStatusV1alpha2(ctx, podSandBoxID)
return r.podSandboxStatusV1alpha2(ctx, podSandBoxID, verbose)
}
func (r *remoteRuntimeService) podSandboxStatusV1alpha2(ctx context.Context, podSandBoxID string) (*runtimeapi.PodSandboxStatus, error) {
func (r *remoteRuntimeService) podSandboxStatusV1alpha2(ctx context.Context, podSandBoxID string, verbose bool) (*runtimeapi.PodSandboxStatusResponse, error) {
resp, err := r.runtimeClientV1alpha2.PodSandboxStatus(ctx, &runtimeapiV1alpha2.PodSandboxStatusRequest{
PodSandboxId: podSandBoxID,
Verbose: verbose,
})
if err != nil {
return nil, err
@ -300,19 +304,20 @@ func (r *remoteRuntimeService) podSandboxStatusV1alpha2(ctx context.Context, pod
klog.V(10).InfoS("[RemoteRuntimeService] PodSandboxStatus Response", "podSandboxID", podSandBoxID, "status", resp.Status)
status := fromV1alpha2PodSandboxStatus(resp.Status)
if resp.Status != nil {
if err := verifySandboxStatus(status); err != nil {
res := fromV1alpha2PodSandboxStatusResponse(resp)
if res.Status != nil {
if err := verifySandboxStatus(res.Status); err != nil {
return nil, err
}
}
return status, nil
return res, nil
}
func (r *remoteRuntimeService) podSandboxStatusV1(ctx context.Context, podSandBoxID string) (*runtimeapi.PodSandboxStatus, error) {
func (r *remoteRuntimeService) podSandboxStatusV1(ctx context.Context, podSandBoxID string, verbose bool) (*runtimeapi.PodSandboxStatusResponse, error) {
resp, err := r.runtimeClient.PodSandboxStatus(ctx, &runtimeapi.PodSandboxStatusRequest{
PodSandboxId: podSandBoxID,
Verbose: verbose,
})
if err != nil {
return nil, err
@ -327,7 +332,7 @@ func (r *remoteRuntimeService) podSandboxStatusV1(ctx context.Context, podSandBo
}
}
return status, nil
return resp, nil
}
// ListPodSandbox returns a list of PodSandboxes.
@ -550,21 +555,22 @@ func (r *remoteRuntimeService) listContainersV1(ctx context.Context, filter *run
}
// ContainerStatus returns the container status.
func (r *remoteRuntimeService) ContainerStatus(containerID string) (*runtimeapi.ContainerStatus, error) {
func (r *remoteRuntimeService) ContainerStatus(containerID string, verbose bool) (*runtimeapi.ContainerStatusResponse, error) {
klog.V(10).InfoS("[RemoteRuntimeService] ContainerStatus", "containerID", containerID, "timeout", r.timeout)
ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel()
if r.useV1API() {
return r.containerStatusV1(ctx, containerID)
return r.containerStatusV1(ctx, containerID, verbose)
}
return r.containerStatusV1alpha2(ctx, containerID)
return r.containerStatusV1alpha2(ctx, containerID, verbose)
}
func (r *remoteRuntimeService) containerStatusV1alpha2(ctx context.Context, containerID string) (*runtimeapi.ContainerStatus, error) {
func (r *remoteRuntimeService) containerStatusV1alpha2(ctx context.Context, containerID string, verbose bool) (*runtimeapi.ContainerStatusResponse, error) {
resp, err := r.runtimeClientV1alpha2.ContainerStatus(ctx, &runtimeapiV1alpha2.ContainerStatusRequest{
ContainerId: containerID,
Verbose: verbose,
})
if err != nil {
// Don't spam the log with endless messages about the same failure.
@ -576,20 +582,21 @@ func (r *remoteRuntimeService) containerStatusV1alpha2(ctx context.Context, cont
r.logReduction.ClearID(containerID)
klog.V(10).InfoS("[RemoteRuntimeService] ContainerStatus Response", "containerID", containerID, "status", resp.Status)
status := fromV1alpha2ContainerStatus(resp.Status)
res := fromV1alpha2ContainerStatusResponse(resp)
if resp.Status != nil {
if err := verifyContainerStatus(status); err != nil {
if err := verifyContainerStatus(res.Status); err != nil {
klog.ErrorS(err, "verify ContainerStatus failed", "containerID", containerID)
return nil, err
}
}
return status, nil
return res, nil
}
func (r *remoteRuntimeService) containerStatusV1(ctx context.Context, containerID string) (*runtimeapi.ContainerStatus, error) {
func (r *remoteRuntimeService) containerStatusV1(ctx context.Context, containerID string, verbose bool) (*runtimeapi.ContainerStatusResponse, error) {
resp, err := r.runtimeClient.ContainerStatus(ctx, &runtimeapi.ContainerStatusRequest{
ContainerId: containerID,
Verbose: verbose,
})
if err != nil {
// Don't spam the log with endless messages about the same failure.
@ -609,7 +616,7 @@ func (r *remoteRuntimeService) containerStatusV1(ctx context.Context, containerI
}
}
return status, nil
return resp, nil
}
// UpdateContainerResources updates a containers resource config
@ -898,20 +905,22 @@ func (r *remoteRuntimeService) UpdateRuntimeConfig(runtimeConfig *runtimeapi.Run
}
// Status returns the status of the runtime.
func (r *remoteRuntimeService) Status() (*runtimeapi.RuntimeStatus, error) {
func (r *remoteRuntimeService) Status(verbose bool) (*runtimeapi.StatusResponse, error) {
klog.V(10).InfoS("[RemoteRuntimeService] Status", "timeout", r.timeout)
ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel()
if r.useV1API() {
return r.statusV1(ctx)
return r.statusV1(ctx, verbose)
}
return r.statusV1alpha2(ctx)
return r.statusV1alpha2(ctx, verbose)
}
func (r *remoteRuntimeService) statusV1alpha2(ctx context.Context) (*runtimeapi.RuntimeStatus, error) {
resp, err := r.runtimeClientV1alpha2.Status(ctx, &runtimeapiV1alpha2.StatusRequest{})
func (r *remoteRuntimeService) statusV1alpha2(ctx context.Context, verbose bool) (*runtimeapi.StatusResponse, error) {
resp, err := r.runtimeClientV1alpha2.Status(ctx, &runtimeapiV1alpha2.StatusRequest{
Verbose: verbose,
})
if err != nil {
klog.ErrorS(err, "Status from runtime service failed")
return nil, err
@ -926,11 +935,13 @@ func (r *remoteRuntimeService) statusV1alpha2(ctx context.Context) (*runtimeapi.
return nil, err
}
return fromV1alpha2RuntimeStatus(resp.Status), nil
return fromV1alpha2StatusResponse(resp), nil
}
func (r *remoteRuntimeService) statusV1(ctx context.Context) (*runtimeapi.RuntimeStatus, error) {
resp, err := r.runtimeClient.Status(ctx, &runtimeapi.StatusRequest{})
func (r *remoteRuntimeService) statusV1(ctx context.Context, verbose bool) (*runtimeapi.StatusResponse, error) {
resp, err := r.runtimeClient.Status(ctx, &runtimeapi.StatusRequest{
Verbose: verbose,
})
if err != nil {
klog.ErrorS(err, "Status from runtime service failed")
return nil, err
@ -945,7 +956,7 @@ func (r *remoteRuntimeService) statusV1(ctx context.Context) (*runtimeapi.Runtim
return nil, err
}
return resp.Status, nil
return resp, nil
}
// ContainerStats returns the stats of the container.
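
On the caller side this means status consumers now receive the full response
and unwrap it themselves; a hedged sketch with the updated client interface
(the helper is hypothetical, internalapi is k8s.io/cri-api/pkg/apis):

// Hypothetical usage sketch; assumes imports "errors", "fmt" and
// internalapi "k8s.io/cri-api/pkg/apis".
func logVerboseRuntimeStatus(rs internalapi.RuntimeService) error {
	resp, err := rs.Status(true) // verbose=true asks the runtime to fill resp.Info
	if err != nil {
		return err
	}
	if resp.GetStatus() == nil {
		return errors.New("runtime status is nil") // mirrors the nil checks added below
	}
	for k, v := range resp.GetInfo() {
		fmt.Printf("%s: %s\n", k, v) // runtime-specific debug details
	}
	return nil
}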

View File

@ -120,10 +120,11 @@ func (m *kubeGenericRuntimeManager) sandboxToKubeContainer(s *runtimeapi.PodSand
// getImageUser gets uid or user name that will run the command(s) from image. The function
// guarantees that only one of them is set.
func (m *kubeGenericRuntimeManager) getImageUser(image string) (*int64, string, error) {
imageStatus, err := m.imageService.ImageStatus(&runtimeapi.ImageSpec{Image: image})
resp, err := m.imageService.ImageStatus(&runtimeapi.ImageSpec{Image: image}, false)
if err != nil {
return nil, "", err
}
imageStatus := resp.GetImage()
if imageStatus != nil {
if imageStatus.Uid != nil {

View File

@ -68,11 +68,11 @@ func (in instrumentedRuntimeService) Version(apiVersion string) (*runtimeapi.Ver
return out, err
}
func (in instrumentedRuntimeService) Status() (*runtimeapi.RuntimeStatus, error) {
func (in instrumentedRuntimeService) Status(verbose bool) (*runtimeapi.StatusResponse, error) {
const operation = "status"
defer recordOperation(operation, time.Now())
out, err := in.service.Status()
out, err := in.service.Status(verbose)
recordError(operation, err)
return out, err
}
@ -122,11 +122,11 @@ func (in instrumentedRuntimeService) ListContainers(filter *runtimeapi.Container
return out, err
}
func (in instrumentedRuntimeService) ContainerStatus(containerID string) (*runtimeapi.ContainerStatus, error) {
func (in instrumentedRuntimeService) ContainerStatus(containerID string, verbose bool) (*runtimeapi.ContainerStatusResponse, error) {
const operation = "container_status"
defer recordOperation(operation, time.Now())
out, err := in.service.ContainerStatus(containerID)
out, err := in.service.ContainerStatus(containerID, verbose)
recordError(operation, err)
return out, err
}
@ -208,11 +208,11 @@ func (in instrumentedRuntimeService) RemovePodSandbox(podSandboxID string) error
return err
}
func (in instrumentedRuntimeService) PodSandboxStatus(podSandboxID string) (*runtimeapi.PodSandboxStatus, error) {
func (in instrumentedRuntimeService) PodSandboxStatus(podSandboxID string, verbose bool) (*runtimeapi.PodSandboxStatusResponse, error) {
const operation = "podsandbox_status"
defer recordOperation(operation, time.Now())
out, err := in.service.PodSandboxStatus(podSandboxID)
out, err := in.service.PodSandboxStatus(podSandboxID, verbose)
recordError(operation, err)
return out, err
}
@ -289,11 +289,11 @@ func (in instrumentedImageManagerService) ListImages(filter *runtimeapi.ImageFil
return out, err
}
func (in instrumentedImageManagerService) ImageStatus(image *runtimeapi.ImageSpec) (*runtimeapi.Image, error) {
func (in instrumentedImageManagerService) ImageStatus(image *runtimeapi.ImageSpec, verbose bool) (*runtimeapi.ImageStatusResponse, error) {
const operation = "image_status"
defer recordOperation(operation, time.Now())
out, err := in.service.ImageStatus(image)
out, err := in.service.ImageStatus(image, verbose)
recordError(operation, err)
return out, err
}

View File

@ -86,7 +86,7 @@ func TestStatus(t *testing.T) {
},
}
irs := newInstrumentedRuntimeService(fakeRuntime)
actural, err := irs.Status()
actural, err := irs.Status(false)
assert.NoError(t, err)
expected := &runtimeapi.RuntimeStatus{
Conditions: []*runtimeapi.RuntimeCondition{
@ -94,5 +94,5 @@ func TestStatus(t *testing.T) {
{Type: runtimeapi.NetworkReady, Status: true},
},
}
assert.Equal(t, expected, actural)
assert.Equal(t, expected, actural.Status)
}

View File

@ -48,6 +48,7 @@ import (
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/kubernetes/pkg/features"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/cri/remote"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util/format"
@ -505,12 +506,16 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, n
statuses := make([]*kubecontainer.Status, len(containers))
// TODO: optimization: set maximum number of containers per container name to examine.
for i, c := range containers {
status, err := m.runtimeService.ContainerStatus(c.Id)
resp, err := m.runtimeService.ContainerStatus(c.Id, false)
if err != nil {
// Merely log this here; GetPodStatus will actually report the error out.
klog.V(4).InfoS("ContainerStatus return error", "containerID", c.Id, "err", err)
return nil, err
}
status := resp.GetStatus()
if status == nil {
return nil, remote.ErrContainerStatusNil
}
cStatus := toKubeContainerStatus(status, m.runtimeName)
if status.State == runtimeapi.ContainerState_CONTAINER_EXITED {
// Populate the termination message if needed.
@ -607,10 +612,14 @@ func (m *kubeGenericRuntimeManager) executePreStopHook(pod *v1.Pod, containerID
func (m *kubeGenericRuntimeManager) restoreSpecsFromContainerLabels(containerID kubecontainer.ContainerID) (*v1.Pod, *v1.Container, error) {
var pod *v1.Pod
var container *v1.Container
s, err := m.runtimeService.ContainerStatus(containerID.ID)
resp, err := m.runtimeService.ContainerStatus(containerID.ID, false)
if err != nil {
return nil, nil, err
}
s := resp.GetStatus()
if s == nil {
return nil, nil, remote.ErrContainerStatusNil
}
l := getContainerInfoFromLabels(s.Labels)
a := getContainerInfoFromAnnotations(s.Annotations)
@ -869,11 +878,15 @@ func findNextInitContainerToRun(pod *v1.Pod, podStatus *kubecontainer.PodStatus)
// GetContainerLogs returns logs of a specific container.
func (m *kubeGenericRuntimeManager) GetContainerLogs(ctx context.Context, pod *v1.Pod, containerID kubecontainer.ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error) {
status, err := m.runtimeService.ContainerStatus(containerID.ID)
resp, err := m.runtimeService.ContainerStatus(containerID.ID, false)
if err != nil {
klog.V(4).InfoS("Failed to get container status", "containerID", containerID.String(), "err", err)
return fmt.Errorf("unable to retrieve container logs for %v", containerID.String())
}
status := resp.GetStatus()
if status == nil {
return remote.ErrContainerStatusNil
}
return m.ReadLogs(ctx, status.GetLogPath(), containerID.ID, logOptions, stdout, stderr)
}
@ -950,10 +963,14 @@ func (m *kubeGenericRuntimeManager) removeContainerLog(containerID string) error
return err
}
status, err := m.runtimeService.ContainerStatus(containerID)
resp, err := m.runtimeService.ContainerStatus(containerID, false)
if err != nil {
return fmt.Errorf("failed to get container status %q: %v", containerID, err)
}
status := resp.GetStatus()
if status == nil {
return remote.ErrContainerStatusNil
}
// Remove the legacy container log symlink.
// TODO(random-liu): Remove this after cluster logging supports CRI container log path.
labeledInfo := getContainerInfoFromLabels(status.Labels)

View File

@ -135,10 +135,10 @@ func TestGenerateContainerConfig(t *testing.T) {
assert.Error(t, err)
imageID, _ := imageService.PullImage(&runtimeapi.ImageSpec{Image: "busybox"}, nil, nil)
image, _ := imageService.ImageStatus(&runtimeapi.ImageSpec{Image: imageID})
resp, _ := imageService.ImageStatus(&runtimeapi.ImageSpec{Image: imageID}, false)
image.Uid = nil
image.Username = "test"
resp.Image.Uid = nil
resp.Image.Username = "test"
podWithContainerSecurityContext.Spec.Containers[0].SecurityContext.RunAsUser = nil
podWithContainerSecurityContext.Spec.Containers[0].SecurityContext.RunAsNonRoot = &runAsNonRootTrue

View File

@ -354,25 +354,32 @@ func (cgc *containerGC) evictPodLogsDirectories(allSourcesReady bool) error {
for _, logSymlink := range logSymlinks {
if _, err := osInterface.Stat(logSymlink); os.IsNotExist(err) {
if containerID, err := getContainerIDFromLegacyLogSymlink(logSymlink); err == nil {
status, err := cgc.manager.runtimeService.ContainerStatus(containerID)
resp, err := cgc.manager.runtimeService.ContainerStatus(containerID, false)
if err != nil {
// TODO: we should handle container not found (i.e. container was deleted) case differently
// once https://github.com/kubernetes/kubernetes/issues/63336 is resolved
klog.InfoS("Error getting ContainerStatus for containerID", "containerID", containerID, "err", err)
} else if status.State != runtimeapi.ContainerState_CONTAINER_EXITED {
// Here is how container log rotation works (see containerLogManager#rotateLatestLog):
//
// 1. rename current log to rotated log file whose filename contains current timestamp (fmt.Sprintf("%s.%s", log, timestamp))
// 2. reopen the container log
// 3. if #2 fails, rename rotated log file back to container log
//
// There is small but indeterministic amount of time during which log file doesn't exist (between steps #1 and #2, between #1 and #3).
// Hence the symlink may be deemed unhealthy during that period.
// See https://github.com/kubernetes/kubernetes/issues/52172
//
// We only remove unhealthy symlink for dead containers
klog.V(5).InfoS("Container is still running, not removing symlink", "containerID", containerID, "path", logSymlink)
continue
} else {
status := resp.GetStatus()
if status == nil {
klog.V(4).InfoS("Container status is nil")
continue
}
if status.State != runtimeapi.ContainerState_CONTAINER_EXITED {
// Here is how container log rotation works (see containerLogManager#rotateLatestLog):
//
// 1. rename current log to rotated log file whose filename contains current timestamp (fmt.Sprintf("%s.%s", log, timestamp))
// 2. reopen the container log
// 3. if #2 fails, rename rotated log file back to container log
//
// There is small but indeterministic amount of time during which log file doesn't exist (between steps #1 and #2, between #1 and #3).
// Hence the symlink may be deemed unhealthy during that period.
// See https://github.com/kubernetes/kubernetes/issues/52172
//
// We only remove unhealthy symlink for dead containers
klog.V(5).InfoS("Container is still running, not removing symlink", "containerID", containerID, "path", logSymlink)
continue
}
}
} else {
klog.V(4).InfoS("Unable to obtain container ID", "err", err)

View File

@ -181,9 +181,9 @@ func TestSandboxGC(t *testing.T) {
assert.NoError(t, err)
assert.Len(t, realRemain, len(test.remain))
for _, remain := range test.remain {
status, err := fakeRuntime.PodSandboxStatus(fakeSandboxes[remain].Id)
resp, err := fakeRuntime.PodSandboxStatus(fakeSandboxes[remain].Id, false)
assert.NoError(t, err)
assert.Equal(t, &fakeSandboxes[remain].PodSandboxStatus, status)
assert.Equal(t, &fakeSandboxes[remain].PodSandboxStatus, resp.Status)
}
})
}
@ -409,9 +409,9 @@ func TestContainerGC(t *testing.T) {
assert.NoError(t, err)
assert.Len(t, realRemain, len(test.remain))
for _, remain := range test.remain {
status, err := fakeRuntime.ContainerStatus(fakeContainers[remain].Id)
resp, err := fakeRuntime.ContainerStatus(fakeContainers[remain].Id, false)
assert.NoError(t, err)
assert.Equal(t, &fakeContainers[remain].ContainerStatus, status)
assert.Equal(t, &fakeContainers[remain].ContainerStatus, resp.Status)
}
})
}

View File

@ -81,15 +81,15 @@ func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pul
// GetImageRef gets the ID of the image which has already been in
// the local storage. It returns ("", nil) if the image isn't in the local storage.
func (m *kubeGenericRuntimeManager) GetImageRef(image kubecontainer.ImageSpec) (string, error) {
status, err := m.imageService.ImageStatus(toRuntimeAPIImageSpec(image))
resp, err := m.imageService.ImageStatus(toRuntimeAPIImageSpec(image), false)
if err != nil {
klog.ErrorS(err, "Failed to get image status", "image", image.Image)
return "", err
}
if status == nil {
if resp.Image == nil {
return "", nil
}
return status.Id, nil
return resp.Image.Id, nil
}
// ListImages gets all images currently on the machine.

View File

@ -333,11 +333,14 @@ func (m *kubeGenericRuntimeManager) APIVersion() (kubecontainer.Version, error)
// Status returns the status of the runtime. An error is returned if the Status
// function itself fails, nil otherwise.
func (m *kubeGenericRuntimeManager) Status() (*kubecontainer.RuntimeStatus, error) {
status, err := m.runtimeService.Status()
resp, err := m.runtimeService.Status(false)
if err != nil {
return nil, err
}
return toKubeRuntimeStatus(status), nil
if resp.GetStatus() == nil {
return nil, errors.New("runtime status is nil")
}
return toKubeRuntimeStatus(resp.GetStatus()), nil
}
// GetPods returns a list of containers grouped by pods. The boolean parameter
@ -822,7 +825,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontaine
}
klog.V(4).InfoS("Created PodSandbox for pod", "podSandboxID", podSandboxID, "pod", klog.KObj(pod))
podSandboxStatus, err := m.runtimeService.PodSandboxStatus(podSandboxID)
resp, err := m.runtimeService.PodSandboxStatus(podSandboxID, false)
if err != nil {
ref, referr := ref.GetReference(legacyscheme.Scheme, pod)
if referr != nil {
@ -833,12 +836,16 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontaine
result.Fail(err)
return
}
if resp.GetStatus() == nil {
result.Fail(errors.New("pod sandbox status is nil"))
return
}
// If we ever allow updating a pod from non-host-network to
// host-network, we may use a stale IP.
if !kubecontainer.IsHostNetworkPod(pod) {
// Overwrite the podIPs passed in the pod status, since we just started the pod sandbox.
podIPs = m.determinePodSandboxIPs(pod.Namespace, pod.Name, podSandboxStatus)
podIPs = m.determinePodSandboxIPs(pod.Namespace, pod.Name, resp.GetStatus())
klog.V(4).InfoS("Determined the ip for pod after sandbox changed", "IPs", podIPs, "pod", klog.KObj(pod))
}
}
@ -1035,16 +1042,20 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namesp
sandboxStatuses := make([]*runtimeapi.PodSandboxStatus, len(podSandboxIDs))
podIPs := []string{}
for idx, podSandboxID := range podSandboxIDs {
podSandboxStatus, err := m.runtimeService.PodSandboxStatus(podSandboxID)
resp, err := m.runtimeService.PodSandboxStatus(podSandboxID, false)
if err != nil {
klog.ErrorS(err, "PodSandboxStatus of sandbox for pod", "podSandboxID", podSandboxID, "pod", klog.KObj(pod))
return nil, err
}
sandboxStatuses[idx] = podSandboxStatus
if resp.GetStatus() == nil {
return nil, errors.New("pod sandbox status is nil")
}
sandboxStatuses[idx] = resp.Status
// Only get pod IP from latest sandbox
if idx == 0 && podSandboxStatus.State == runtimeapi.PodSandboxState_SANDBOX_READY {
podIPs = m.determinePodSandboxIPs(namespace, name, podSandboxStatus)
if idx == 0 && resp.Status.State == runtimeapi.PodSandboxState_SANDBOX_READY {
podIPs = m.determinePodSandboxIPs(namespace, name, resp.Status)
}
}

View File

@ -35,6 +35,7 @@ import (
v1 "k8s.io/api/core/v1"
internalapi "k8s.io/cri-api/pkg/apis"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/kubernetes/pkg/kubelet/cri/remote"
"k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/util/tail"
)
@ -412,13 +413,17 @@ func ReadLogs(ctx context.Context, path, containerID string, opts *LogOptions, r
}
func isContainerRunning(id string, r internalapi.RuntimeService) (bool, error) {
s, err := r.ContainerStatus(id)
resp, err := r.ContainerStatus(id, false)
if err != nil {
return false, err
}
status := resp.GetStatus()
if status == nil {
return false, remote.ErrContainerStatusNil
}
// Only keep following container log when it is running.
if s.State != runtimeapi.ContainerState_CONTAINER_RUNNING {
klog.V(5).InfoS("Container is not running", "containerId", id, "state", s.State)
if status.State != runtimeapi.ContainerState_CONTAINER_RUNNING {
klog.V(5).InfoS("Container is not running", "containerId", id, "state", status.State)
// Do not return error because it's normal that the container stops
// during waiting.
return false, nil

View File

@ -189,11 +189,14 @@ func (c *containerLogManager) Start() {
func (c *containerLogManager) Clean(containerID string) error {
c.mutex.Lock()
defer c.mutex.Unlock()
status, err := c.runtimeService.ContainerStatus(containerID)
resp, err := c.runtimeService.ContainerStatus(containerID, false)
if err != nil {
return fmt.Errorf("failed to get container status %q: %v", containerID, err)
}
pattern := fmt.Sprintf("%s*", status.GetLogPath())
if resp.GetStatus() == nil {
return fmt.Errorf("container status is nil for %q", containerID)
}
pattern := fmt.Sprintf("%s*", resp.GetStatus().GetLogPath())
logs, err := c.osInterface.Glob(pattern)
if err != nil {
return fmt.Errorf("failed to list all log files with pattern %q: %v", pattern, err)
@ -225,12 +228,16 @@ func (c *containerLogManager) rotateLogs() error {
}
id := container.GetId()
// Note that we should not block log rotate for an error of a single container.
status, err := c.runtimeService.ContainerStatus(id)
resp, err := c.runtimeService.ContainerStatus(id, false)
if err != nil {
klog.ErrorS(err, "Failed to get container status", "containerID", id)
continue
}
path := status.GetLogPath()
if resp.GetStatus() == nil {
klog.ErrorS(err, "Container status is nil", "containerID", id)
continue
}
path := resp.GetStatus().GetLogPath()
info, err := c.osInterface.Stat(path)
if err != nil {
if !os.IsNotExist(err) {

View File

@ -42,7 +42,7 @@ type ContainerManager interface {
// ListContainers lists all containers by filters.
ListContainers(filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error)
// ContainerStatus returns the status of the container.
ContainerStatus(containerID string) (*runtimeapi.ContainerStatus, error)
ContainerStatus(containerID string, verbose bool) (*runtimeapi.ContainerStatusResponse, error)
// UpdateContainerResources updates the cgroup resources for the container.
UpdateContainerResources(containerID string, resources *runtimeapi.LinuxContainerResources) error
// ExecSync executes a command in the container, and returns the stdout output.
@ -71,7 +71,7 @@ type PodSandboxManager interface {
// sandbox, they should be forcibly removed.
RemovePodSandbox(podSandboxID string) error
// PodSandboxStatus returns the Status of the PodSandbox.
PodSandboxStatus(podSandboxID string) (*runtimeapi.PodSandboxStatus, error)
PodSandboxStatus(podSandboxID string, verbose bool) (*runtimeapi.PodSandboxStatusResponse, error)
// ListPodSandbox returns a list of Sandbox.
ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error)
// PortForward prepares a streaming endpoint to forward ports from a PodSandbox, and returns the address.
@ -104,7 +104,7 @@ type RuntimeService interface {
// UpdateRuntimeConfig updates runtime configuration if specified
UpdateRuntimeConfig(runtimeConfig *runtimeapi.RuntimeConfig) error
// Status returns the status of the runtime.
Status() (*runtimeapi.RuntimeStatus, error)
Status(verbose bool) (*runtimeapi.StatusResponse, error)
}
// ImageManagerService interface should be implemented by a container image
@ -114,7 +114,7 @@ type ImageManagerService interface {
// ListImages lists the existing images.
ListImages(filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error)
// ImageStatus returns the status of the image.
ImageStatus(image *runtimeapi.ImageSpec) (*runtimeapi.Image, error)
ImageStatus(image *runtimeapi.ImageSpec, verbose bool) (*runtimeapi.ImageStatusResponse, error)
// PullImage pulls an image with the authentication config.
PullImage(image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error)
// RemoveImage removes the image.

View File

@ -154,7 +154,7 @@ func (r *FakeImageService) ListImages(filter *runtimeapi.ImageFilter) ([]*runtim
}
// ImageStatus returns the status of the image from the FakeImageService.
func (r *FakeImageService) ImageStatus(image *runtimeapi.ImageSpec) (*runtimeapi.Image, error) {
func (r *FakeImageService) ImageStatus(image *runtimeapi.ImageSpec, verbose bool) (*runtimeapi.ImageStatusResponse, error) {
r.Lock()
defer r.Unlock()
@ -163,7 +163,7 @@ func (r *FakeImageService) ImageStatus(image *runtimeapi.ImageSpec) (*runtimeapi
return nil, err
}
return r.Images[image.Image], nil
return &runtimeapi.ImageStatusResponse{Image: r.Images[image.Image]}, nil
}
// PullImage emulate pulling the image from the FakeImageService.

View File

@ -180,7 +180,7 @@ func (r *FakeRuntimeService) Version(apiVersion string) (*runtimeapi.VersionResp
}
// Status returns runtime status of the FakeRuntimeService.
func (r *FakeRuntimeService) Status() (*runtimeapi.RuntimeStatus, error) {
func (r *FakeRuntimeService) Status(verbose bool) (*runtimeapi.StatusResponse, error) {
r.Lock()
defer r.Unlock()
@ -189,7 +189,7 @@ func (r *FakeRuntimeService) Status() (*runtimeapi.RuntimeStatus, error) {
return nil, err
}
return r.FakeStatus, nil
return &runtimeapi.StatusResponse{Status: r.FakeStatus}, nil
}
// RunPodSandbox emulates the run of the pod sandbox in the FakeRuntimeService.
@ -281,7 +281,7 @@ func (r *FakeRuntimeService) RemovePodSandbox(podSandboxID string) error {
}
// PodSandboxStatus returns pod sandbox status from the FakeRuntimeService.
func (r *FakeRuntimeService) PodSandboxStatus(podSandboxID string) (*runtimeapi.PodSandboxStatus, error) {
func (r *FakeRuntimeService) PodSandboxStatus(podSandboxID string, verbose bool) (*runtimeapi.PodSandboxStatusResponse, error) {
r.Lock()
defer r.Unlock()
@ -296,7 +296,7 @@ func (r *FakeRuntimeService) PodSandboxStatus(podSandboxID string) (*runtimeapi.
}
status := s.PodSandboxStatus
return &status, nil
return &runtimeapi.PodSandboxStatusResponse{Status: &status}, nil
}
// ListPodSandbox returns the list of pod sandboxes in the FakeRuntimeService.
@ -490,7 +490,7 @@ func (r *FakeRuntimeService) ListContainers(filter *runtimeapi.ContainerFilter)
}
// ContainerStatus returns the container status given the container ID in FakeRuntimeService.
func (r *FakeRuntimeService) ContainerStatus(containerID string) (*runtimeapi.ContainerStatus, error) {
func (r *FakeRuntimeService) ContainerStatus(containerID string, verbose bool) (*runtimeapi.ContainerStatusResponse, error) {
r.Lock()
defer r.Unlock()
@ -505,7 +505,7 @@ func (r *FakeRuntimeService) ContainerStatus(containerID string) (*runtimeapi.Co
}
status := c.ContainerStatus
return &status, nil
return &runtimeapi.ContainerStatusResponse{Status: &status}, nil
}
// UpdateContainerResources returns the container resource in the FakeRuntimeService.

View File

@ -19,7 +19,7 @@ package e2enode
import (
"time"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
@ -74,9 +74,9 @@ var _ = SIGDescribe("ContainerLogRotation [Slow] [Serial] [Disruptive]", func()
id := kubecontainer.ParseContainerID(pod.Status.ContainerStatuses[0].ContainerID).ID
r, _, err := getCRIClient()
framework.ExpectNoError(err)
status, err := r.ContainerStatus(id)
resp, err := r.ContainerStatus(id, false)
framework.ExpectNoError(err)
logPath := status.GetLogPath()
logPath := resp.GetStatus().GetLogPath()
ginkgo.By("wait for container log being rotated to max file limit")
gomega.Eventually(func() (int, error) {
logs, err := kubelogs.GetAllLogs(logPath)

View File

@ -125,8 +125,8 @@ func (rp *remotePuller) Name() string {
}
func (rp *remotePuller) Pull(image string) ([]byte, error) {
imageStatus, err := rp.imageService.ImageStatus(&runtimeapi.ImageSpec{Image: image})
if err == nil && imageStatus != nil {
resp, err := rp.imageService.ImageStatus(&runtimeapi.ImageSpec{Image: image}, false)
if err == nil && resp.GetImage() != nil {
return nil, nil
}
_, err = rp.imageService.PullImage(&runtimeapi.ImageSpec{Image: image}, nil, nil)