kubelet/kuberuntime: update cri to protobuf v3

Pengfei Ni 2017-01-20 09:55:56 +08:00
parent d4bfcd1fda
commit 53c20e3630
13 changed files with 217 additions and 203 deletions
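Nearly every hunk below follows one mechanical pattern: under proto2, the gogo-generated Go structs expose optional scalar fields as pointers and callers go through nil-safe GetX() accessors, while under proto3 scalar fields are plain values whose zero value doubles as "unset". A minimal sketch of the before/after calling convention, using hypothetical Message types rather than the real generated CRI bindings:

package main

import "fmt"

// MessageV2 mimics a proto2-generated struct: optional scalars are pointers.
type MessageV2 struct {
    Id *string
}

// GetId is the nil-safe accessor that proto2 codegen emits.
func (m *MessageV2) GetId() string {
    if m != nil && m.Id != nil {
        return *m.Id
    }
    return ""
}

// MessageV3 mimics a proto3-generated struct: scalars are plain values,
// so "" and "unset" are indistinguishable.
type MessageV3 struct {
    Id string
}

func main() {
    id := "abc123"
    v2 := &MessageV2{Id: &id}
    v3 := &MessageV3{Id: "abc123"}
    fmt.Println(v2.GetId()) // proto2 style: getter dereferences the pointer
    fmt.Println(v3.Id)      // proto3 style: direct field access, as in the hunks below
}

This is why the diff replaces both the address-of-local struct literals and most GetX() call sites with plain assignments and field reads.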


@@ -197,14 +197,14 @@ func ConvertPodStatusToRunningPod(runtimeName string, podStatus *PodStatus) Pod
     // Populate sandboxes in kubecontainer.Pod
     for _, sandbox := range podStatus.SandboxStatuses {
         runningPod.Sandboxes = append(runningPod.Sandboxes, &Container{
-            ID: ContainerID{Type: runtimeName, ID: *sandbox.Id},
-            State: SandboxToContainerState(*sandbox.State),
+            ID: ContainerID{Type: runtimeName, ID: sandbox.Id},
+            State: SandboxToContainerState(sandbox.State),
         })
     }
     return runningPod
 }

-// sandboxToContainerState converts runtimeApi.PodSandboxState to
+// SandboxToContainerState converts runtimeapi.PodSandboxState to
 // kubecontainer.ContainerState.
 // This is only needed because we need to return sandboxes as if they were
 // kubecontainer.Containers to avoid substantial changes to PLEG.


@@ -113,7 +113,7 @@ func NewFakeKubeRuntimeManager(runtimeService internalapi.RuntimeService, imageS
     }

     kubeRuntimeManager.containerGC = NewContainerGC(runtimeService, newFakePodGetter(), kubeRuntimeManager)
-    kubeRuntimeManager.runtimeName = typedVersion.GetRuntimeName()
+    kubeRuntimeManager.runtimeName = typedVersion.RuntimeName
     kubeRuntimeManager.imagePuller = images.NewImageManager(
         kubecontainer.FilterEventRecorder(recorder),
         kubeRuntimeManager,


@@ -61,7 +61,7 @@ type podSandboxByCreated []*runtimeapi.PodSandbox

 func (p podSandboxByCreated) Len() int { return len(p) }
 func (p podSandboxByCreated) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
-func (p podSandboxByCreated) Less(i, j int) bool { return p[i].GetCreatedAt() > p[j].GetCreatedAt() }
+func (p podSandboxByCreated) Less(i, j int) bool { return p[i].CreatedAt > p[j].CreatedAt }

 type containerStatusByCreated []*kubecontainer.ContainerStatus

@@ -100,18 +100,18 @@ func toRuntimeProtocol(protocol v1.Protocol) runtimeapi.Protocol {

 // toKubeContainer converts runtimeapi.Container to kubecontainer.Container.
 func (m *kubeGenericRuntimeManager) toKubeContainer(c *runtimeapi.Container) (*kubecontainer.Container, error) {
-    if c == nil || c.Id == nil || c.Image == nil || c.State == nil {
+    if c == nil || c.Id == "" || c.Image == nil {
         return nil, fmt.Errorf("unable to convert a nil pointer to a runtime container")
     }

     labeledInfo := getContainerInfoFromLabels(c.Labels)
     annotatedInfo := getContainerInfoFromAnnotations(c.Annotations)
     return &kubecontainer.Container{
-        ID: kubecontainer.ContainerID{Type: m.runtimeName, ID: c.GetId()},
+        ID: kubecontainer.ContainerID{Type: m.runtimeName, ID: c.Id},
         Name: labeledInfo.ContainerName,
-        Image: c.Image.GetImage(),
+        Image: c.Image.Image,
         Hash: annotatedInfo.Hash,
-        State: toKubeContainerState(c.GetState()),
+        State: toKubeContainerState(c.State),
     }, nil
 }

@@ -120,34 +120,36 @@ func (m *kubeGenericRuntimeManager) toKubeContainer(c *runtimeapi.Container) (*k
 // kubecontainer.Containers to avoid substantial changes to PLEG.
 // TODO: Remove this once it becomes obsolete.
 func (m *kubeGenericRuntimeManager) sandboxToKubeContainer(s *runtimeapi.PodSandbox) (*kubecontainer.Container, error) {
-    if s == nil || s.Id == nil || s.State == nil {
+    if s == nil || s.Id == "" {
         return nil, fmt.Errorf("unable to convert a nil pointer to a runtime container")
     }

     return &kubecontainer.Container{
-        ID: kubecontainer.ContainerID{Type: m.runtimeName, ID: s.GetId()},
-        State: kubecontainer.SandboxToContainerState(s.GetState()),
+        ID: kubecontainer.ContainerID{Type: m.runtimeName, ID: s.Id},
+        State: kubecontainer.SandboxToContainerState(s.State),
     }, nil
 }

 // getImageUser gets uid or user name that will run the command(s) from image. The function
 // guarantees that only one of them is set.
-func (m *kubeGenericRuntimeManager) getImageUser(image string) (*int64, *string, error) {
-    imageStatus, err := m.imageService.ImageStatus(&runtimeapi.ImageSpec{Image: &image})
+func (m *kubeGenericRuntimeManager) getImageUser(image string) (*int64, string, error) {
+    imageStatus, err := m.imageService.ImageStatus(&runtimeapi.ImageSpec{Image: image})
     if err != nil {
-        return nil, nil, err
+        return nil, "", err
     }

-    if imageStatus != nil && imageStatus.Uid != nil {
-        // If uid is set, return uid.
-        return imageStatus.Uid, nil, nil
-    }
-    if imageStatus != nil && imageStatus.Username != nil {
-        // If uid is not set, but user name is set, return user name.
-        return nil, imageStatus.Username, nil
-    }
+    if imageStatus != nil {
+        if imageStatus.Uid != nil {
+            return &imageStatus.GetUid().Value, "", nil
+        }
+
+        if imageStatus.Username != "" {
+            return nil, imageStatus.Username, nil
+        }
+    }

     // If non of them is set, treat it as root.
-    return new(int64), nil, nil
+    return new(int64), "", nil
 }

 // isContainerFailed returns true if container has exited and exitcode is not zero.
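getImageUser keeps *int64 for the uid even after the migration: proto3 cannot distinguish an unset scalar from its zero value, so the CRI wraps the uid in a message type that stays nilable (seen above as imageStatus.GetUid().Value), while the username collapses to a plain string whose empty value means unset. A hedged sketch of that unwrap, with a hypothetical Int64Value standing in for the generated wrapper:

package main

import "fmt"

// Int64Value stands in for the wrapper message that proto3 uses to keep
// an "unset vs. zero" distinction for scalar fields.
type Int64Value struct {
    Value int64
}

// imageStatus mimics the relevant fields after the move to proto3:
// Uid stays a nilable message pointer, Username becomes a plain string.
type imageStatus struct {
    Uid      *Int64Value
    Username string
}

// imageUser mirrors the post-commit getImageUser contract: at most one of
// uid/username is returned, and root (uid 0) is the fallback.
func imageUser(s *imageStatus) (*int64, string) {
    if s != nil {
        if s.Uid != nil {
            return &s.Uid.Value, ""
        }
        if s.Username != "" {
            return nil, s.Username
        }
    }
    return new(int64), "" // neither set: treat as root
}

func main() {
    uid, user := imageUser(&imageStatus{Uid: &Int64Value{Value: 1000}})
    fmt.Println(*uid, user) // 1000 ""
}

The pointer-to-message field preserves the three-way distinction (absent, zero, non-zero) that a bare int64 would lose.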
@@ -226,10 +228,10 @@ func toKubeRuntimeStatus(status *runtimeapi.RuntimeStatus) *kubecontainer.Runtim
     conditions := []kubecontainer.RuntimeCondition{}
     for _, c := range status.GetConditions() {
         conditions = append(conditions, kubecontainer.RuntimeCondition{
-            Type: kubecontainer.RuntimeConditionType(c.GetType()),
-            Status: c.GetStatus(),
-            Reason: c.GetReason(),
-            Message: c.GetMessage(),
+            Type: kubecontainer.RuntimeConditionType(c.Type),
+            Status: c.Status,
+            Reason: c.Reason,
+            Message: c.Message,
         })
     }
     return &kubecontainer.RuntimeStatus{Conditions: conditions}


@@ -102,9 +102,9 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb
     // TODO(random-liu): Remove this after cluster logging supports CRI container log path.
     containerMeta := containerConfig.GetMetadata()
     sandboxMeta := podSandboxConfig.GetMetadata()
-    legacySymlink := legacyLogSymlink(containerID, containerMeta.GetName(), sandboxMeta.GetName(),
-        sandboxMeta.GetNamespace())
-    containerLog := filepath.Join(podSandboxConfig.GetLogDirectory(), containerConfig.GetLogPath())
+    legacySymlink := legacyLogSymlink(containerID, containerMeta.Name, sandboxMeta.Name,
+        sandboxMeta.Namespace)
+    containerLog := filepath.Join(podSandboxConfig.LogDirectory, containerConfig.LogPath)
     if err := m.osInterface.Symlink(containerLog, legacySymlink); err != nil {
         glog.Errorf("Failed to create legacy symbolic link %q to container %q log %q: %v",
             legacySymlink, containerID, containerLog, err)
@@ -144,8 +144,8 @@ func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Contai
         if err := verifyRunAsNonRoot(pod, container, *uid); err != nil {
             return nil, err
         }
-    } else {
-        glog.Warningf("Non-root verification doesn't support non-numeric user (%s)", *username)
+    } else if username != "" {
+        glog.Warningf("Non-root verification doesn't support non-numeric user (%s)", username)
     }

     command, args := kubecontainer.ExpandContainerCommandAndArgs(container, opts.Envs)
@@ -153,21 +153,21 @@ func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Contai
     restartCountUint32 := uint32(restartCount)
     config := &runtimeapi.ContainerConfig{
         Metadata: &runtimeapi.ContainerMetadata{
-            Name: &container.Name,
-            Attempt: &restartCountUint32,
+            Name: container.Name,
+            Attempt: restartCountUint32,
         },
-        Image: &runtimeapi.ImageSpec{Image: &imageRef},
+        Image: &runtimeapi.ImageSpec{Image: imageRef},
         Command: command,
         Args: args,
-        WorkingDir: &container.WorkingDir,
+        WorkingDir: container.WorkingDir,
         Labels: newContainerLabels(container, pod),
         Annotations: newContainerAnnotations(container, pod, restartCount),
         Devices: makeDevices(opts),
         Mounts: m.makeMounts(opts, container),
-        LogPath: &containerLogsPath,
-        Stdin: &container.Stdin,
-        StdinOnce: &container.StdinOnce,
-        Tty: &container.TTY,
+        LogPath: containerLogsPath,
+        Stdin: container.Stdin,
+        StdinOnce: container.StdinOnce,
+        Tty: container.TTY,
         Linux: m.generateLinuxContainerConfig(container, pod, uid, username),
     }
@@ -176,8 +176,8 @@ func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Contai
     for idx := range opts.Envs {
         e := opts.Envs[idx]
         envs[idx] = &runtimeapi.KeyValue{
-            Key: &e.Name,
-            Value: &e.Value,
+            Key: e.Name,
+            Value: e.Value,
         }
     }
     config.Envs = envs
@@ -186,7 +186,7 @@ func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Contai
 }

 // generateLinuxContainerConfig generates linux container config for kubelet runtime v1.
-func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.Container, pod *v1.Pod, uid *int64, username *string) *runtimeapi.LinuxContainerConfig {
+func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.Container, pod *v1.Pod, uid *int64, username string) *runtimeapi.LinuxContainerConfig {
     lc := &runtimeapi.LinuxContainerConfig{
         Resources: &runtimeapi.LinuxContainerResources{},
         SecurityContext: m.determineEffectiveSecurityContext(pod, container, uid, username),
@@ -209,20 +209,20 @@ func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.C
         // of CPU shares.
         cpuShares = milliCPUToShares(cpuRequest.MilliValue())
     }
-    lc.Resources.CpuShares = &cpuShares
+    lc.Resources.CpuShares = cpuShares

     if memoryLimit != 0 {
-        lc.Resources.MemoryLimitInBytes = &memoryLimit
+        lc.Resources.MemoryLimitInBytes = memoryLimit
     }

     // Set OOM score of the container based on qos policy. Processes in lower-priority pods should
     // be killed first if the system runs out of memory.
-    lc.Resources.OomScoreAdj = &oomScoreAdj
+    lc.Resources.OomScoreAdj = oomScoreAdj

     if m.cpuCFSQuota {
         // if cpuLimit.Amount is nil, then the appropriate default value is returned
         // to allow full usage of cpu resource.
         cpuQuota, cpuPeriod := milliCPUToQuota(cpuLimit.MilliValue())
-        lc.Resources.CpuQuota = &cpuQuota
-        lc.Resources.CpuPeriod = &cpuPeriod
+        lc.Resources.CpuQuota = cpuQuota
+        lc.Resources.CpuPeriod = cpuPeriod
     }

     return lc
@@ -235,9 +235,9 @@ func makeDevices(opts *kubecontainer.RunContainerOptions) []*runtimeapi.Device {
     for idx := range opts.Devices {
         device := opts.Devices[idx]
         devices[idx] = &runtimeapi.Device{
-            HostPath: &device.PathOnHost,
-            ContainerPath: &device.PathInContainer,
-            Permissions: &device.Permissions,
+            HostPath: device.PathOnHost,
+            ContainerPath: device.PathInContainer,
+            Permissions: device.Permissions,
         }
     }
@@ -252,10 +252,10 @@ func (m *kubeGenericRuntimeManager) makeMounts(opts *kubecontainer.RunContainerO
         v := opts.Mounts[idx]
         selinuxRelabel := v.SELinuxRelabel && selinux.SELinuxEnabled()
         mount := &runtimeapi.Mount{
-            HostPath: &v.HostPath,
-            ContainerPath: &v.ContainerPath,
-            Readonly: &v.ReadOnly,
-            SelinuxRelabel: &selinuxRelabel,
+            HostPath: v.HostPath,
+            ContainerPath: v.ContainerPath,
+            Readonly: v.ReadOnly,
+            SelinuxRelabel: selinuxRelabel,
         }

         volumeMounts = append(volumeMounts, mount)
@@ -277,9 +277,9 @@ func (m *kubeGenericRuntimeManager) makeMounts(opts *kubecontainer.RunContainerO
             fs.Close()
             selinuxRelabel := selinux.SELinuxEnabled()
             volumeMounts = append(volumeMounts, &runtimeapi.Mount{
-                HostPath: &containerLogPath,
-                ContainerPath: &container.TerminationMessagePath,
-                SelinuxRelabel: &selinuxRelabel,
+                HostPath: containerLogPath,
+                ContainerPath: container.TerminationMessagePath,
+                SelinuxRelabel: selinuxRelabel,
             })
         }
     }
@@ -296,7 +296,9 @@ func (m *kubeGenericRuntimeManager) getKubeletContainers(allContainers bool) ([]
     }
     if !allContainers {
         runningState := runtimeapi.ContainerState_CONTAINER_RUNNING
-        filter.State = &runningState
+        filter.State = &runtimeapi.ContainerStateValue{
+            State: runningState,
+        }
     }

     containers, err := m.getContainersHelper(filter)
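The filter change above is the other recurring proto3 idiom in this commit: a filter's enum field can no longer be a nil pointer to a scalar, so a bare field could not distinguish "no state filter" from "filter on the zero-valued state". The CRI therefore wraps the enum in a ContainerStateValue message and the filter holds a pointer to that wrapper. A rough, self-contained sketch of the semantics (the matches helper is illustrative, not part of the CRI):

package main

import "fmt"

type ContainerState int32

const (
    ContainerState_CONTAINER_CREATED ContainerState = 0
    ContainerState_CONTAINER_RUNNING ContainerState = 1
    ContainerState_CONTAINER_EXITED  ContainerState = 2
)

// ContainerStateValue wraps the enum so a filter can distinguish
// "no state filter" (nil) from "filter on the zero-valued state".
type ContainerStateValue struct {
    State ContainerState
}

type ContainerFilter struct {
    State *ContainerStateValue
}

func matches(state ContainerState, f *ContainerFilter) bool {
    if f == nil || f.State == nil {
        return true // unset filter matches everything
    }
    return state == f.State.State
}

func main() {
    running := &ContainerFilter{State: &ContainerStateValue{State: ContainerState_CONTAINER_RUNNING}}
    fmt.Println(matches(ContainerState_CONTAINER_RUNNING, running)) // true
    fmt.Println(matches(ContainerState_CONTAINER_EXITED, running))  // false
    fmt.Println(matches(ContainerState_CONTAINER_CREATED, nil))     // true
}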
@@ -333,8 +335,8 @@ func getTerminationMessage(status *runtimeapi.ContainerStatus, kubeStatus *kubec
     }

     for _, mount := range status.Mounts {
-        if mount.GetContainerPath() == terminationMessagePath {
-            path := mount.GetHostPath()
+        if mount.ContainerPath == terminationMessagePath {
+            path := mount.HostPath
             if data, err := ioutil.ReadFile(path); err != nil {
                 message = fmt.Sprintf("Error on reading termination-log %s: %v", path, err)
             } else {
@@ -362,9 +364,9 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, n
     statuses := make([]*kubecontainer.ContainerStatus, len(containers))
     // TODO: optimization: set maximum number of containers per container name to examine.
     for i, c := range containers {
-        status, err := m.runtimeService.ContainerStatus(c.GetId())
+        status, err := m.runtimeService.ContainerStatus(c.Id)
         if err != nil {
-            glog.Errorf("ContainerStatus for %s error: %v", c.GetId(), err)
+            glog.Errorf("ContainerStatus for %s error: %v", c.Id, err)
             return nil, err
         }
@@ -373,24 +375,24 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, n
         cStatus := &kubecontainer.ContainerStatus{
             ID: kubecontainer.ContainerID{
                 Type: m.runtimeName,
-                ID: c.GetId(),
+                ID: c.Id,
             },
             Name: labeledInfo.ContainerName,
-            Image: status.Image.GetImage(),
-            ImageID: status.GetImageRef(),
+            Image: status.Image.Image,
+            ImageID: status.ImageRef,
             Hash: annotatedInfo.Hash,
             RestartCount: annotatedInfo.RestartCount,
-            State: toKubeContainerState(c.GetState()),
-            CreatedAt: time.Unix(0, status.GetCreatedAt()),
+            State: toKubeContainerState(c.State),
+            CreatedAt: time.Unix(0, status.CreatedAt),
         }

-        if c.GetState() == runtimeapi.ContainerState_CONTAINER_RUNNING {
-            cStatus.StartedAt = time.Unix(0, status.GetStartedAt())
+        if c.State == runtimeapi.ContainerState_CONTAINER_RUNNING {
+            cStatus.StartedAt = time.Unix(0, status.StartedAt)
         } else {
-            cStatus.Reason = status.GetReason()
-            cStatus.Message = status.GetMessage()
-            cStatus.ExitCode = int(status.GetExitCode())
-            cStatus.FinishedAt = time.Unix(0, status.GetFinishedAt())
+            cStatus.Reason = status.Reason
+            cStatus.Message = status.Message
+            cStatus.ExitCode = int(status.ExitCode)
+            cStatus.FinishedAt = time.Unix(0, status.FinishedAt)
         }

         tMessage := getTerminationMessage(status, cStatus, annotatedInfo.TerminationMessagePath)
@@ -670,31 +672,31 @@ func (m *kubeGenericRuntimeManager) GetContainerLogs(pod *v1.Pod, containerID ku

 // GetExec gets the endpoint the runtime will serve the exec request from.
 func (m *kubeGenericRuntimeManager) GetExec(id kubecontainer.ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) {
     req := &runtimeapi.ExecRequest{
-        ContainerId: &id.ID,
+        ContainerId: id.ID,
         Cmd: cmd,
-        Tty: &tty,
-        Stdin: &stdin,
+        Tty: tty,
+        Stdin: stdin,
     }
     resp, err := m.runtimeService.Exec(req)
     if err != nil {
         return nil, err
     }

-    return url.Parse(resp.GetUrl())
+    return url.Parse(resp.Url)
 }

 // GetAttach gets the endpoint the runtime will serve the attach request from.
 func (m *kubeGenericRuntimeManager) GetAttach(id kubecontainer.ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error) {
     req := &runtimeapi.AttachRequest{
-        ContainerId: &id.ID,
-        Stdin: &stdin,
-        Tty: &tty,
+        ContainerId: id.ID,
+        Stdin: stdin,
+        Tty: tty,
     }
     resp, err := m.runtimeService.Attach(req)
     if err != nil {
         return nil, err
     }

-    return url.Parse(resp.GetUrl())
+    return url.Parse(resp.Url)
 }

 // RunInContainer synchronously executes the command in the container, and returns the output.


@@ -51,7 +51,7 @@ func TestRemoveContainer(t *testing.T) {
     _, fakeContainers := makeAndSetFakePod(t, m, fakeRuntime, pod)
     assert.Equal(t, len(fakeContainers), 1)

-    containerId := fakeContainers[0].GetId()
+    containerId := fakeContainers[0].Id
     fakeOS := m.osInterface.(*containertest.FakeOS)
     err = m.removeContainer(containerId)
     assert.NoError(t, err)
@@ -61,7 +61,7 @@ func TestRemoveContainer(t *testing.T) {
     assert.Equal(t, fakeOS.Removes, []string{expectedContainerLogPath, expectedContainerLogSymlink})
     // Verify container is removed
     fakeRuntime.AssertCalls([]string{"RemoveContainer"})
-    containers, err := fakeRuntime.ListContainers(&runtimeapi.ContainerFilter{Id: &containerId})
+    containers, err := fakeRuntime.ListContainers(&runtimeapi.ContainerFilter{Id: containerId})
     assert.NoError(t, err)
     assert.Empty(t, containers)
 }


@@ -161,21 +161,21 @@ func (cgc *containerGC) evictableContainers(minAge time.Duration) (containersByE
     newestGCTime := time.Now().Add(-minAge)
     for _, container := range containers {
         // Prune out running containers.
-        if container.GetState() == runtimeapi.ContainerState_CONTAINER_RUNNING {
+        if container.State == runtimeapi.ContainerState_CONTAINER_RUNNING {
             continue
         }

-        createdAt := time.Unix(0, container.GetCreatedAt())
+        createdAt := time.Unix(0, container.CreatedAt)
         if newestGCTime.Before(createdAt) {
             continue
         }

         labeledInfo := getContainerInfoFromLabels(container.Labels)
         containerInfo := containerGCInfo{
-            id: container.GetId(),
-            name: container.Metadata.GetName(),
+            id: container.Id,
+            name: container.Metadata.Name,
             createTime: createdAt,
-            sandboxID: container.GetPodSandboxId(),
+            sandboxID: container.PodSandboxId,
         }

         key := evictUnit{
             uid: labeledInfo.PodUID,
@@ -256,15 +256,15 @@ func (cgc *containerGC) evictSandboxes(minAge time.Duration) error {
     newestGCTime := time.Now().Add(-minAge)
     for _, sandbox := range sandboxes {
         // Prune out ready sandboxes.
-        if sandbox.GetState() == runtimeapi.PodSandboxState_SANDBOX_READY {
+        if sandbox.State == runtimeapi.PodSandboxState_SANDBOX_READY {
             continue
         }

         // Prune out sandboxes that still have containers.
         found := false
-        sandboxID := sandbox.GetId()
+        sandboxID := sandbox.Id
         for _, container := range containers {
-            if container.GetPodSandboxId() == sandboxID {
+            if container.PodSandboxId == sandboxID {
                 found = true
                 break
             }
@@ -274,7 +274,7 @@ func (cgc *containerGC) evictSandboxes(minAge time.Duration) error {
         }

         // Only garbage collect sandboxes older than sandboxMinGCAge.
-        createdAt := time.Unix(0, sandbox.GetCreatedAt())
+        createdAt := time.Unix(0, sandbox.CreatedAt)
         if createdAt.After(newestGCTime) {
             continue
         }


@@ -115,7 +115,7 @@ func TestSandboxGC(t *testing.T) {
         assert.NoError(t, err)
         assert.Len(t, realRemain, len(test.remain))
         for _, remain := range test.remain {
-            status, err := fakeRuntime.PodSandboxStatus(fakeSandboxes[remain].GetId())
+            status, err := fakeRuntime.PodSandboxStatus(fakeSandboxes[remain].Id)
             assert.NoError(t, err)
             assert.Equal(t, &fakeSandboxes[remain].PodSandboxStatus, status)
         }
@@ -288,7 +288,7 @@ func TestContainerGC(t *testing.T) {
         assert.NoError(t, err)
         assert.Len(t, realRemain, len(test.remain))
         for _, remain := range test.remain {
-            status, err := fakeRuntime.ContainerStatus(fakeContainers[remain].GetId())
+            status, err := fakeRuntime.ContainerStatus(fakeContainers[remain].Id)
             assert.NoError(t, err)
             assert.Equal(t, &fakeContainers[remain].ContainerStatus, status)
         }


@@ -40,7 +40,7 @@ func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pul
         return "", err
     }

-    imgSpec := &runtimeapi.ImageSpec{Image: &img}
+    imgSpec := &runtimeapi.ImageSpec{Image: img}
     creds, withCredentials := keyring.Lookup(repoToPull)
     if !withCredentials {
         glog.V(3).Infof("Pulling image %q without credentials", img)
@@ -58,12 +58,12 @@ func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pul
     for _, currentCreds := range creds {
         authConfig := credentialprovider.LazyProvide(currentCreds)
         auth := &runtimeapi.AuthConfig{
-            Username: &authConfig.Username,
-            Password: &authConfig.Password,
-            Auth: &authConfig.Auth,
-            ServerAddress: &authConfig.ServerAddress,
-            IdentityToken: &authConfig.IdentityToken,
-            RegistryToken: &authConfig.RegistryToken,
+            Username: authConfig.Username,
+            Password: authConfig.Password,
+            Auth: authConfig.Auth,
+            ServerAddress: authConfig.ServerAddress,
+            IdentityToken: authConfig.IdentityToken,
+            RegistryToken: authConfig.RegistryToken,
         }

         imageRef, err := m.imageService.PullImage(imgSpec, auth)
@@ -81,7 +81,7 @@ func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pul
 // GetImageRef gets the reference (digest or ID) of the image which has already been in
 // the local storage. It returns ("", nil) if the image isn't in the local storage.
 func (m *kubeGenericRuntimeManager) GetImageRef(image kubecontainer.ImageSpec) (string, error) {
-    status, err := m.imageService.ImageStatus(&runtimeapi.ImageSpec{Image: &image.Image})
+    status, err := m.imageService.ImageStatus(&runtimeapi.ImageSpec{Image: image.Image})
     if err != nil {
         glog.Errorf("ImageStatus for image %q failed: %v", image, err)
         return "", err
@@ -90,7 +90,7 @@ func (m *kubeGenericRuntimeManager) GetImageRef(image kubecontainer.ImageSpec) (
         return "", nil
     }

-    imageRef := status.GetId()
+    imageRef := status.Id
     if len(status.RepoDigests) > 0 {
         imageRef = status.RepoDigests[0]
     }
@@ -109,8 +109,8 @@ func (m *kubeGenericRuntimeManager) ListImages() ([]kubecontainer.Image, error)
     for _, img := range allImages {
         images = append(images, kubecontainer.Image{
-            ID: img.GetId(),
-            Size: int64(img.GetSize_()),
+            ID: img.Id,
+            Size: int64(img.Size_),
             RepoTags: img.RepoTags,
             RepoDigests: img.RepoDigests,
         })
@@ -121,7 +121,7 @@ func (m *kubeGenericRuntimeManager) ListImages() ([]kubecontainer.Image, error)

 // RemoveImage removes the specified image.
 func (m *kubeGenericRuntimeManager) RemoveImage(image kubecontainer.ImageSpec) error {
-    err := m.imageService.RemoveImage(&runtimeapi.ImageSpec{Image: &image.Image})
+    err := m.imageService.RemoveImage(&runtimeapi.ImageSpec{Image: image.Image})
     if err != nil {
         glog.Errorf("Remove image %q failed: %v", image.Image, err)
         return err
@@ -142,7 +142,7 @@ func (m *kubeGenericRuntimeManager) ImageStats() (*kubecontainer.ImageStats, err
     }
     stats := &kubecontainer.ImageStats{}
     for _, img := range allImages {
-        stats.TotalStorageBytes += img.GetSize_()
+        stats.TotalStorageBytes += img.Size_
     }
     return stats, nil
 }


@@ -156,18 +156,18 @@ func NewKubeGenericRuntimeManager(
     // Only matching kubeRuntimeAPIVersion is supported now
     // TODO: Runtime API machinery is under discussion at https://github.com/kubernetes/kubernetes/issues/28642
-    if typedVersion.GetVersion() != kubeRuntimeAPIVersion {
+    if typedVersion.Version != kubeRuntimeAPIVersion {
         glog.Errorf("Runtime api version %s is not supported, only %s is supported now",
-            typedVersion.GetVersion(),
+            typedVersion.Version,
             kubeRuntimeAPIVersion)
         return nil, ErrVersionNotSupported
     }

-    kubeRuntimeManager.runtimeName = typedVersion.GetRuntimeName()
+    kubeRuntimeManager.runtimeName = typedVersion.RuntimeName
     glog.Infof("Container runtime %s initialized, version: %s, apiVersion: %s",
-        typedVersion.GetRuntimeName(),
-        typedVersion.GetRuntimeVersion(),
-        typedVersion.GetRuntimeApiVersion())
+        typedVersion.RuntimeName,
+        typedVersion.RuntimeVersion,
+        typedVersion.RuntimeApiVersion)

     // If the container logs directory does not exist, create it.
     // TODO: create podLogsRootDirectory at kubelet.go when kubelet is refactored to
@@ -224,7 +224,7 @@ func (m *kubeGenericRuntimeManager) Version() (kubecontainer.Version, error) {
         return nil, err
     }

-    return newRuntimeVersion(typedVersion.GetVersion())
+    return newRuntimeVersion(typedVersion.Version)
 }

 // APIVersion returns the cached API version information of the container
@@ -237,7 +237,7 @@ func (m *kubeGenericRuntimeManager) APIVersion() (kubecontainer.Version, error)
     }

     typedVersion := versionObject.(*runtimeapi.VersionResponse)

-    return newRuntimeVersion(typedVersion.GetRuntimeApiVersion())
+    return newRuntimeVersion(typedVersion.RuntimeApiVersion)
 }

 // Status returns the status of the runtime. An error is returned if the Status
@@ -265,12 +265,12 @@ func (m *kubeGenericRuntimeManager) GetPods(all bool) ([]*kubecontainer.Pod, err
             glog.V(4).Infof("Sandbox does not have metadata: %+v", s)
             continue
         }
-        podUID := kubetypes.UID(s.Metadata.GetUid())
+        podUID := kubetypes.UID(s.Metadata.Uid)
         if _, ok := pods[podUID]; !ok {
             pods[podUID] = &kubecontainer.Pod{
                 ID: podUID,
-                Name: s.Metadata.GetName(),
-                Namespace: s.Metadata.GetNamespace(),
+                Name: s.Metadata.Name,
+                Namespace: s.Metadata.Namespace,
             }
         }
         p := pods[podUID]
@@ -372,26 +372,26 @@ func (m *kubeGenericRuntimeManager) podSandboxChanged(pod *v1.Pod, podStatus *ku
     readySandboxCount := 0
     for _, s := range podStatus.SandboxStatuses {
-        if s.GetState() == runtimeapi.PodSandboxState_SANDBOX_READY {
+        if s.State == runtimeapi.PodSandboxState_SANDBOX_READY {
             readySandboxCount++
         }
     }

     // Needs to create a new sandbox when readySandboxCount > 1 or the ready sandbox is not the latest one.
     sandboxStatus := podStatus.SandboxStatuses[0]
-    if readySandboxCount > 1 || sandboxStatus.GetState() != runtimeapi.PodSandboxState_SANDBOX_READY {
+    if readySandboxCount > 1 || sandboxStatus.State != runtimeapi.PodSandboxState_SANDBOX_READY {
         glog.V(2).Infof("No ready sandbox for pod %q can be found. Need to start a new one", format.Pod(pod))
-        return true, sandboxStatus.Metadata.GetAttempt() + 1, sandboxStatus.GetId()
+        return true, sandboxStatus.Metadata.Attempt + 1, sandboxStatus.Id
     }

     // Needs to create a new sandbox when network namespace changed.
     if sandboxStatus.Linux != nil && sandboxStatus.Linux.Namespaces.Options != nil &&
-        sandboxStatus.Linux.Namespaces.Options.GetHostNetwork() != kubecontainer.IsHostNetworkPod(pod) {
+        sandboxStatus.Linux.Namespaces.Options.HostNetwork != kubecontainer.IsHostNetworkPod(pod) {
         glog.V(2).Infof("Sandbox for pod %q has changed. Need to start a new one", format.Pod(pod))
-        return true, sandboxStatus.Metadata.GetAttempt() + 1, ""
+        return true, sandboxStatus.Metadata.Attempt + 1, ""
     }

-    return false, sandboxStatus.Metadata.GetAttempt(), sandboxStatus.GetId()
+    return false, sandboxStatus.Metadata.Attempt, sandboxStatus.Id
 }
@@ -794,10 +794,8 @@ func (m *kubeGenericRuntimeManager) isHostNetwork(podSandBoxID string, pod *v1.P
         return false, err
     }

-    if podStatus.Linux != nil && podStatus.Linux.Namespaces != nil && podStatus.Linux.Namespaces.Options != nil {
-        if podStatus.Linux.Namespaces.Options.HostNetwork != nil {
-            return podStatus.Linux.Namespaces.Options.GetHostNetwork(), nil
-        }
+    if nsOpts := podStatus.GetLinux().GetNamespaces().GetOptions(); nsOpts != nil {
+        return nsOpts.HostNetwork, nil
     }

     return false, nil
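The isHostNetwork rewrite above leans on another property of the generated code: message-typed fields remain pointers under proto3, and nil-safe getters are still emitted for them, so a chain of GetX() calls can replace the nested nil checks. A sketch with hypothetical stand-in types:

package main

import "fmt"

// Hypothetical stand-ins for the nested status messages; only message
// fields stay pointers under proto3, scalars like HostNetwork do not.
type NamespaceOption struct{ HostNetwork bool }
type Namespaces struct{ Options *NamespaceOption }
type LinuxStatus struct{ Namespaces *Namespaces }
type PodStatus struct{ Linux *LinuxStatus }

// Nil-safe getters of the kind the codegen emits for message fields.
func (s *PodStatus) GetLinux() *LinuxStatus {
    if s != nil {
        return s.Linux
    }
    return nil
}
func (l *LinuxStatus) GetNamespaces() *Namespaces {
    if l != nil {
        return l.Namespaces
    }
    return nil
}
func (n *Namespaces) GetOptions() *NamespaceOption {
    if n != nil {
        return n.Options
    }
    return nil
}

func main() {
    var status *PodStatus // entirely nil: the getter chain still cannot panic
    if opts := status.GetLinux().GetNamespaces().GetOptions(); opts != nil {
        fmt.Println(opts.HostNetwork)
    } else {
        fmt.Println("host network unknown, default false")
    }
}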
@@ -844,7 +842,7 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namesp
         sandboxStatuses[idx] = podSandboxStatus

         // Only get pod IP from latest sandbox
-        if idx == 0 && podSandboxStatus.GetState() == runtimeapi.PodSandboxState_SANDBOX_READY {
+        if idx == 0 && podSandboxStatus.State == runtimeapi.PodSandboxState_SANDBOX_READY {
             podIP = m.determinePodSandboxIP(namespace, name, podSandboxStatus)
         }
     }
@@ -900,7 +898,7 @@ func (m *kubeGenericRuntimeManager) UpdatePodCIDR(podCIDR string) error {
     return m.runtimeService.UpdateRuntimeConfig(
         &runtimeapi.RuntimeConfig{
             NetworkConfig: &runtimeapi.NetworkConfig{
-                PodCidr: &podCIDR,
+                PodCidr: podCIDR,
             },
         })
 }


@@ -119,12 +119,12 @@ func makeFakePodSandbox(t *testing.T, m *kubeGenericRuntimeManager, template san
     podSandboxID := apitest.BuildSandboxName(config.Metadata)
     return &apitest.FakePodSandbox{
         PodSandboxStatus: runtimeapi.PodSandboxStatus{
-            Id: &podSandboxID,
+            Id: podSandboxID,
             Metadata: config.Metadata,
-            State: &template.state,
-            CreatedAt: &template.createdAt,
+            State: template.state,
+            CreatedAt: template.createdAt,
             Network: &runtimeapi.PodSandboxNetworkStatus{
-                Ip: &apitest.FakePodSandboxIP,
+                Ip: apitest.FakePodSandboxIP,
             },
             Labels: config.Labels,
         },
@@ -151,15 +151,15 @@ func makeFakeContainer(t *testing.T, m *kubeGenericRuntimeManager, template cont
     podSandboxID := apitest.BuildSandboxName(sandboxConfig.Metadata)
     containerID := apitest.BuildContainerName(containerConfig.Metadata, podSandboxID)
-    imageRef := containerConfig.Image.GetImage()
+    imageRef := containerConfig.Image.Image

     return &apitest.FakeContainer{
         ContainerStatus: runtimeapi.ContainerStatus{
-            Id: &containerID,
+            Id: containerID,
             Metadata: containerConfig.Metadata,
             Image: containerConfig.Image,
-            ImageRef: &imageRef,
-            CreatedAt: &template.createdAt,
-            State: &template.state,
+            ImageRef: imageRef,
+            CreatedAt: template.createdAt,
+            State: template.state,
             Labels: containerConfig.Labels,
             Annotations: containerConfig.Annotations,
         },
@@ -223,7 +223,7 @@ func verifyPods(a, b []*kubecontainer.Pod) bool {
 func verifyFakeContainerList(fakeRuntime *apitest.FakeRuntimeService, expected []string) ([]string, bool) {
     actual := []string{}
     for _, c := range fakeRuntime.Containers {
-        actual = append(actual, c.GetId())
+        actual = append(actual, c.Id)
     }
     sort.Sort(sort.StringSlice(actual))
     sort.Sort(sort.StringSlice(expected))
@@ -411,7 +411,7 @@ func TestGetPodContainerID(t *testing.T) {
         Sandboxes: []*kubecontainer.Container{sandbox},
     }

     actual, err := m.GetPodContainerID(expectedPod)
-    assert.Equal(t, fakeSandbox.GetId(), actual.ID)
+    assert.Equal(t, fakeSandbox.Id, actual.ID)
 }

 func TestGetNetNS(t *testing.T) {
@@ -441,7 +441,7 @@ func TestGetNetNS(t *testing.T) {
     // Set fake sandbox and fake containers to fakeRuntime.
     sandbox, _ := makeAndSetFakePod(t, m, fakeRuntime, pod)

-    actual, err := m.GetNetNS(kubecontainer.ContainerID{ID: sandbox.GetId()})
+    actual, err := m.GetNetNS(kubecontainer.ContainerID{ID: sandbox.Id})
     assert.Equal(t, "", actual)
     assert.Equal(t, "not supported", err.Error())
 }
@@ -498,7 +498,7 @@ func TestKillPod(t *testing.T) {
         Sandboxes: []*kubecontainer.Container{
             {
                 ID: kubecontainer.ContainerID{
-                    ID: fakeSandbox.GetId(),
+                    ID: fakeSandbox.Id,
                     Type: apitest.FakeRuntimeName,
                 },
             },
@@ -510,10 +510,10 @@ func TestKillPod(t *testing.T) {
     assert.Equal(t, 2, len(fakeRuntime.Containers))
     assert.Equal(t, 1, len(fakeRuntime.Sandboxes))
     for _, sandbox := range fakeRuntime.Sandboxes {
-        assert.Equal(t, runtimeapi.PodSandboxState_SANDBOX_NOTREADY, sandbox.GetState())
+        assert.Equal(t, runtimeapi.PodSandboxState_SANDBOX_NOTREADY, sandbox.State)
     }
     for _, c := range fakeRuntime.Containers {
-        assert.Equal(t, runtimeapi.ContainerState_CONTAINER_EXITED, c.GetState())
+        assert.Equal(t, runtimeapi.ContainerState_CONTAINER_EXITED, c.State)
     }
 }
@@ -551,10 +551,10 @@ func TestSyncPod(t *testing.T) {
     assert.Equal(t, 2, len(fakeImage.Images))
     assert.Equal(t, 1, len(fakeRuntime.Sandboxes))
     for _, sandbox := range fakeRuntime.Sandboxes {
-        assert.Equal(t, runtimeapi.PodSandboxState_SANDBOX_READY, sandbox.GetState())
+        assert.Equal(t, runtimeapi.PodSandboxState_SANDBOX_READY, sandbox.State)
     }
     for _, c := range fakeRuntime.Containers {
-        assert.Equal(t, runtimeapi.ContainerState_CONTAINER_RUNNING, c.GetState())
+        assert.Equal(t, runtimeapi.ContainerState_CONTAINER_RUNNING, c.State)
     }
 }
@@ -589,7 +589,7 @@ func TestPruneInitContainers(t *testing.T) {
     keep := map[kubecontainer.ContainerID]int{}
     m.pruneInitContainersBeforeStart(pod, podStatus, keep)
-    expectedContainers := []string{fakes[0].GetId(), fakes[2].GetId()}
+    expectedContainers := []string{fakes[0].Id, fakes[2].Id}
     if actual, ok := verifyFakeContainerList(fakeRuntime, expectedContainers); !ok {
         t.Errorf("expected %q, got %q", expectedContainers, actual)
     }
@@ -635,11 +635,11 @@ func TestSyncPodWithInitContainers(t *testing.T) {
     buildContainerID := func(pod *v1.Pod, container v1.Container) string {
         uid := string(pod.UID)
         sandboxID := apitest.BuildSandboxName(&runtimeapi.PodSandboxMetadata{
-            Name: &pod.Name,
-            Uid: &uid,
-            Namespace: &pod.Namespace,
+            Name: pod.Name,
+            Uid: uid,
+            Namespace: pod.Namespace,
         })
-        return apitest.BuildContainerName(&runtimeapi.ContainerMetadata{Name: &container.Name}, sandboxID)
+        return apitest.BuildContainerName(&runtimeapi.ContainerMetadata{Name: container.Name}, sandboxID)
     }
     backOff := flowcontrol.NewBackOff(time.Second, time.Minute)


@@ -41,7 +41,7 @@ func (m *kubeGenericRuntimeManager) createPodSandbox(pod *v1.Pod, attempt uint32
     }

     // Create pod logs directory
-    err = m.osInterface.MkdirAll(podSandboxConfig.GetLogDirectory(), 0755)
+    err = m.osInterface.MkdirAll(podSandboxConfig.LogDirectory, 0755)
     if err != nil {
         message := fmt.Sprintf("Create pod log directory for pod %q failed: %v", format.Pod(pod), err)
         glog.Errorf(message)
@@ -65,10 +65,10 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attemp
     podUID := string(pod.UID)
     podSandboxConfig := &runtimeapi.PodSandboxConfig{
         Metadata: &runtimeapi.PodSandboxMetadata{
-            Name: &pod.Name,
-            Namespace: &pod.Namespace,
-            Uid: &podUID,
-            Attempt: &attempt,
+            Name: pod.Name,
+            Namespace: pod.Namespace,
+            Uid: podUID,
+            Attempt: attempt,
         },
         Labels: newPodLabels(pod),
         Annotations: newPodAnnotations(pod),
@@ -89,11 +89,11 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attemp
         if err != nil {
             return nil, err
         }
-        podSandboxConfig.Hostname = &hostname
+        podSandboxConfig.Hostname = hostname
     }

     logDir := buildPodLogsDirectory(pod.UID)
-    podSandboxConfig.LogDirectory = &logDir
+    podSandboxConfig.LogDirectory = logDir

     cgroupParent := ""
     portMappings := []*runtimeapi.PortMapping{}
@@ -110,10 +110,10 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attemp
             containerPort := int32(port.ContainerPort)
             protocol := toRuntimeProtocol(port.Protocol)
             portMappings = append(portMappings, &runtimeapi.PortMapping{
-                HostIp: &port.HostIP,
-                HostPort: &hostPort,
-                ContainerPort: &containerPort,
-                Protocol: &protocol,
+                HostIp: port.HostIP,
+                HostPort: hostPort,
+                ContainerPort: containerPort,
+                Protocol: protocol,
             })
         }
@@ -131,20 +131,21 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attemp
 // generatePodSandboxLinuxConfig generates LinuxPodSandboxConfig from v1.Pod.
 func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *v1.Pod, cgroupParent string) *runtimeapi.LinuxPodSandboxConfig {
     lc := &runtimeapi.LinuxPodSandboxConfig{
-        SecurityContext: &runtimeapi.LinuxSandboxSecurityContext{},
-    }
-    if cgroupParent != "" {
-        lc.CgroupParent = &cgroupParent
+        CgroupParent: cgroupParent,
+        SecurityContext: &runtimeapi.LinuxSandboxSecurityContext{
+            Privileged: kubecontainer.HasPrivilegedContainer(pod),
+        },
     }

     if pod.Spec.SecurityContext != nil {
         sc := pod.Spec.SecurityContext
-        lc.SecurityContext.RunAsUser = sc.RunAsUser
+        if sc.RunAsUser != nil {
+            lc.SecurityContext.RunAsUser = &runtimeapi.Int64Value{Value: *sc.RunAsUser}
+        }
         lc.SecurityContext.NamespaceOptions = &runtimeapi.NamespaceOption{
-            HostNetwork: &pod.Spec.HostNetwork,
-            HostIpc: &pod.Spec.HostIPC,
-            HostPid: &pod.Spec.HostPID,
+            HostNetwork: pod.Spec.HostNetwork,
+            HostIpc: pod.Spec.HostIPC,
+            HostPid: pod.Spec.HostPID,
         }

         if sc.FSGroup != nil {
@@ -158,19 +159,14 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *v1.Pod, c
         }
         if sc.SELinuxOptions != nil {
             lc.SecurityContext.SelinuxOptions = &runtimeapi.SELinuxOption{
-                User: &sc.SELinuxOptions.User,
-                Role: &sc.SELinuxOptions.Role,
-                Type: &sc.SELinuxOptions.Type,
-                Level: &sc.SELinuxOptions.Level,
+                User: sc.SELinuxOptions.User,
+                Role: sc.SELinuxOptions.Role,
+                Type: sc.SELinuxOptions.Type,
+                Level: sc.SELinuxOptions.Level,
             }
         }
     }

-    if kubecontainer.HasPrivilegedContainer(pod) {
-        privileged := true
-        lc.SecurityContext.Privileged = &privileged
-    }
-
     return lc
 }
@@ -180,7 +176,9 @@ func (m *kubeGenericRuntimeManager) getKubeletSandboxes(all bool) ([]*runtimeapi
     if !all {
         readyState := runtimeapi.PodSandboxState_SANDBOX_READY
         filter = &runtimeapi.PodSandboxFilter{
-            State: &readyState,
+            State: &runtimeapi.PodSandboxStateValue{
+                State: readyState,
+            },
         }
     }
@@ -194,7 +192,7 @@ func (m *kubeGenericRuntimeManager) getKubeletSandboxes(all bool) ([]*runtimeapi
     for _, s := range resp {
         if !isManagedByKubelet(s.Labels) {
             glog.V(5).Infof("Sandbox %s is not managed by kubelet", kubecontainer.BuildPodFullName(
-                s.Metadata.GetName(), s.Metadata.GetNamespace()))
+                s.Metadata.Name, s.Metadata.Namespace))
             continue
         }
@@ -210,7 +208,7 @@ func (m *kubeGenericRuntimeManager) determinePodSandboxIP(podNamespace, podName
         glog.Warningf("Pod Sandbox status doesn't have network information, cannot report IP")
         return ""
     }
-    ip := podSandbox.Network.GetIp()
+    ip := podSandbox.Network.Ip
     if net.ParseIP(ip) == nil {
         glog.Warningf("Pod Sandbox reported an unparseable IP %v", ip)
         return ""
@@ -222,9 +220,13 @@ func (m *kubeGenericRuntimeManager) determinePodSandboxIP(podNamespace, podName
 // Param state could be nil in order to get all sandboxes belonging to same pod.
 func (m *kubeGenericRuntimeManager) getSandboxIDByPodUID(podUID kubetypes.UID, state *runtimeapi.PodSandboxState) ([]string, error) {
     filter := &runtimeapi.PodSandboxFilter{
-        State: state,
         LabelSelector: map[string]string{types.KubernetesPodUIDLabel: string(podUID)},
     }
+    if state != nil {
+        filter.State = &runtimeapi.PodSandboxStateValue{
+            State: *state,
+        }
+    }
     sandboxes, err := m.runtimeService.ListPodSandbox(filter)
     if err != nil {
         glog.Errorf("ListPodSandbox with pod UID %q failed: %v", podUID, err)
@@ -239,7 +241,7 @@ func (m *kubeGenericRuntimeManager) getSandboxIDByPodUID(podUID kubetypes.UID, s
     sandboxIDs := make([]string, len(sandboxes))
     sort.Sort(podSandboxByCreated(sandboxes))
     for i, s := range sandboxes {
-        sandboxIDs[i] = s.GetId()
+        sandboxIDs[i] = s.Id
     }

     return sandboxIDs, nil
@@ -256,11 +258,11 @@ func (m *kubeGenericRuntimeManager) GetPortForward(podName, podNamespace string,
     }

     // TODO: Port is unused for now, but we may need it in the future.
     req := &runtimeapi.PortForwardRequest{
-        PodSandboxId: &sandboxIDs[0],
+        PodSandboxId: sandboxIDs[0],
     }
     resp, err := m.runtimeService.PortForward(req)
     if err != nil {
         return nil, err
     }

-    return url.Parse(resp.GetUrl())
+    return url.Parse(resp.Url)
 }


@@ -58,7 +58,7 @@ func TestCreatePodSandbox(t *testing.T) {
     id, _, err := m.createPodSandbox(pod, 1)
     assert.NoError(t, err)
     fakeRuntime.AssertCalls([]string{"RunPodSandbox"})
-    sandboxes, err := fakeRuntime.ListPodSandbox(&runtimeapi.PodSandboxFilter{Id: &id})
+    sandboxes, err := fakeRuntime.ListPodSandbox(&runtimeapi.PodSandboxFilter{Id: id})
     assert.NoError(t, err)
     assert.Equal(t, len(sandboxes), 1)
     // TODO Check pod sandbox configuration


@@ -25,7 +25,7 @@ import (
 )

 // determineEffectiveSecurityContext gets container's security context from v1.Pod and v1.Container.
-func (m *kubeGenericRuntimeManager) determineEffectiveSecurityContext(pod *v1.Pod, container *v1.Container, uid *int64, username *string) *runtimeapi.LinuxContainerSecurityContext {
+func (m *kubeGenericRuntimeManager) determineEffectiveSecurityContext(pod *v1.Pod, container *v1.Container, uid *int64, username string) *runtimeapi.LinuxContainerSecurityContext {
     effectiveSc := securitycontext.DetermineEffectiveSecurityContext(pod, container)
     synthesized := convertToRuntimeSecurityContext(effectiveSc)
     if synthesized == nil {
@@ -34,7 +34,9 @@ func (m *kubeGenericRuntimeManager) determineEffectiveSecurityContext(pod *v1.Po
     // set RunAsUser.
     if synthesized.RunAsUser == nil {
-        synthesized.RunAsUser = uid
+        if uid != nil {
+            synthesized.RunAsUser = &runtimeapi.Int64Value{Value: *uid}
+        }
         synthesized.RunAsUsername = username
     }
@@ -44,9 +46,9 @@ func (m *kubeGenericRuntimeManager) determineEffectiveSecurityContext(pod *v1.Po
         return synthesized
     }

     synthesized.NamespaceOptions = &runtimeapi.NamespaceOption{
-        HostNetwork: &pod.Spec.HostNetwork,
-        HostIpc: &pod.Spec.HostIPC,
-        HostPid: &pod.Spec.HostPID,
+        HostNetwork: pod.Spec.HostNetwork,
+        HostIpc: pod.Spec.HostIPC,
+        HostPid: pod.Spec.HostPID,
     }
     if podSc.FSGroup != nil {
         synthesized.SupplementalGroups = append(synthesized.SupplementalGroups, *podSc.FSGroup)
@@ -88,13 +90,21 @@ func convertToRuntimeSecurityContext(securityContext *v1.SecurityContext) *runti
         return nil
     }

-    return &runtimeapi.LinuxContainerSecurityContext{
-        RunAsUser: securityContext.RunAsUser,
-        Privileged: securityContext.Privileged,
-        ReadonlyRootfs: securityContext.ReadOnlyRootFilesystem,
+    sc := &runtimeapi.LinuxContainerSecurityContext{
         Capabilities: convertToRuntimeCapabilities(securityContext.Capabilities),
         SelinuxOptions: convertToRuntimeSELinuxOption(securityContext.SELinuxOptions),
     }
+    if securityContext.RunAsUser != nil {
+        sc.RunAsUser = &runtimeapi.Int64Value{Value: *securityContext.RunAsUser}
+    }
+    if securityContext.Privileged != nil {
+        sc.Privileged = *securityContext.Privileged
+    }
+    if securityContext.ReadOnlyRootFilesystem != nil {
+        sc.ReadonlyRootfs = *securityContext.ReadOnlyRootFilesystem
+    }
+
+    return sc
 }

 // convertToRuntimeSELinuxOption converts v1.SELinuxOptions to runtimeapi.SELinuxOption.
@@ -104,10 +114,10 @@ func convertToRuntimeSELinuxOption(opts *v1.SELinuxOptions) *runtimeapi.SELinuxO
     }

     return &runtimeapi.SELinuxOption{
-        User: &opts.User,
-        Role: &opts.Role,
-        Type: &opts.Type,
-        Level: &opts.Level,
+        User: opts.User,
+        Role: opts.Role,
+        Type: opts.Type,
+        Level: opts.Level,
     }
 }