fix golint issues in pkg/kubelet/container

Sergey Kanzhelev 2020-06-19 15:48:08 +00:00
parent 2f2923fc33
commit ee53488f19
28 changed files with 122 additions and 133 deletions
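Note on the renames below: they follow golint's name-stutter check. In a package already named container (usually imported as kubecontainer), exported identifiers such as ContainerStatus, ContainerGCPolicy, or ContainerCommandRunner read as container.ContainerStatus at call sites and repeat the package name, so golint suggests the shorter Status, GCPolicy, CommandRunner. A minimal illustrative sketch of the rule, using a hypothetical package rather than code from this commit:

	// Package container is a hypothetical example of the golint stutter rule.
	package container

	// Before the rename, golint reports roughly:
	//   type name will be used as container.ContainerStatus by other packages,
	//   and that stutters; consider calling this Status
	// type ContainerStatus struct{ Name string }

	// Status is the de-stuttered name; callers now read it as container.Status.
	type Status struct {
		Name string
	}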

View File

@@ -49,7 +49,7 @@ func getTestPodIDAndStatus(numContainers int) (types.UID, *PodStatus) {
 		status = &PodStatus{ID: id}
 	}
 	for i := 0; i < numContainers; i++ {
-		status.ContainerStatuses = append(status.ContainerStatuses, &ContainerStatus{Name: string(i)})
+		status.ContainerStatuses = append(status.ContainerStatuses, &Status{Name: string(i)})
 	}
 	return id, status
 }

View File

@@ -23,8 +23,8 @@ import (
 	"k8s.io/klog/v2"
 )
-// ContainerGCPolicy specifies a policy for garbage collecting containers.
-type ContainerGCPolicy struct {
+// GCPolicy specifies a policy for garbage collecting containers.
+type GCPolicy struct {
 	// Minimum age at which a container can be garbage collected, zero for no limit.
 	MinAge time.Duration
@@ -36,10 +36,10 @@ type ContainerGCPolicy struct {
 	MaxContainers int
 }
-// ContainerGC manages garbage collection of dead containers.
+// GC manages garbage collection of dead containers.
 //
 // Implementation is thread-compatible.
-type ContainerGC interface {
+type GC interface {
 	// Garbage collect containers.
 	GarbageCollect() error
 	// Deletes all unused containers, including containers belonging to pods that are terminated but not deleted
@@ -58,14 +58,14 @@ type realContainerGC struct {
 	runtime Runtime
 	// Policy for garbage collection.
-	policy ContainerGCPolicy
+	policy GCPolicy
 	// sourcesReadyProvider provides the readiness of kubelet configuration sources.
 	sourcesReadyProvider SourcesReadyProvider
 }
-// NewContainerGC creates a new instance of ContainerGC with the specified policy.
-func NewContainerGC(runtime Runtime, policy ContainerGCPolicy, sourcesReadyProvider SourcesReadyProvider) (ContainerGC, error) {
+// NewContainerGC creates a new instance of GC with the specified policy.
+func NewContainerGC(runtime Runtime, policy GCPolicy, sourcesReadyProvider SourcesReadyProvider) (GC, error) {
 	if policy.MinAge < 0 {
 		return nil, fmt.Errorf("invalid minimum garbage collection age: %v", policy.MinAge)
 	}

View File

@@ -107,9 +107,9 @@ func HashContainer(container *v1.Container) uint64 {
 	return uint64(hash.Sum32())
 }
-// EnvVarsToMap constructs a map of environment name to value from a slice
+// envVarsToMap constructs a map of environment name to value from a slice
 // of env vars.
-func EnvVarsToMap(envs []EnvVar) map[string]string {
+func envVarsToMap(envs []EnvVar) map[string]string {
 	result := map[string]string{}
 	for _, env := range envs {
 		result[env.Name] = env.Value
@@ -117,9 +117,9 @@ func EnvVarsToMap(envs []EnvVar) map[string]string {
 	return result
 }
-// V1EnvVarsToMap constructs a map of environment name to value from a slice
+// v1EnvVarsToMap constructs a map of environment name to value from a slice
 // of env vars.
-func V1EnvVarsToMap(envs []v1.EnvVar) map[string]string {
+func v1EnvVarsToMap(envs []v1.EnvVar) map[string]string {
 	result := map[string]string{}
 	for _, env := range envs {
 		result[env.Name] = env.Value
@@ -132,7 +132,7 @@ func V1EnvVarsToMap(envs []v1.EnvVar) map[string]string {
 // container environment definitions. This does *not* include valueFrom substitutions.
 // TODO: callers should use ExpandContainerCommandAndArgs with a fully resolved list of environment.
 func ExpandContainerCommandOnlyStatic(containerCommand []string, envs []v1.EnvVar) (command []string) {
-	mapping := expansion.MappingFuncFor(V1EnvVarsToMap(envs))
+	mapping := expansion.MappingFuncFor(v1EnvVarsToMap(envs))
 	if len(containerCommand) != 0 {
 		for _, cmd := range containerCommand {
 			command = append(command, expansion.Expand(cmd, mapping))
@@ -144,7 +144,7 @@ func ExpandContainerCommandOnlyStatic(containerCommand []string, envs []v1.EnvVa
 // ExpandContainerVolumeMounts expands the subpath of the given VolumeMount by replacing variable references with the values of given EnvVar.
 func ExpandContainerVolumeMounts(mount v1.VolumeMount, envs []EnvVar) (string, error) {
-	envmap := EnvVarsToMap(envs)
+	envmap := envVarsToMap(envs)
 	missingKeys := sets.NewString()
 	expanded := expansion.Expand(mount.SubPathExpr, func(key string) string {
 		value, ok := envmap[key]
@@ -162,7 +162,7 @@ func ExpandContainerVolumeMounts(mount v1.VolumeMount, envs []EnvVar) (string, e
 // ExpandContainerCommandAndArgs expands the given Container's command by replacing variable references `with the values of given EnvVar.
 func ExpandContainerCommandAndArgs(container *v1.Container, envs []EnvVar) (command []string, args []string) {
-	mapping := expansion.MappingFuncFor(EnvVarsToMap(envs))
+	mapping := expansion.MappingFuncFor(envVarsToMap(envs))
 	if len(container.Command) != 0 {
 		for _, cmd := range container.Command {
@@ -262,11 +262,11 @@ func ConvertPodStatusToRunningPod(runtimeName string, podStatus *PodStatus) Pod
 }
 // SandboxToContainerState converts runtimeapi.PodSandboxState to
-// kubecontainer.ContainerState.
+// kubecontainer.State.
 // This is only needed because we need to return sandboxes as if they were
 // kubecontainer.Containers to avoid substantial changes to PLEG.
 // TODO: Remove this once it becomes obsolete.
-func SandboxToContainerState(state runtimeapi.PodSandboxState) ContainerState {
+func SandboxToContainerState(state runtimeapi.PodSandboxState) State {
 	switch state {
 	case runtimeapi.PodSandboxState_SANDBOX_READY:
 		return ContainerStateRunning

View File

@@ -24,7 +24,7 @@ import (
 	"github.com/google/go-cmp/cmp"
 	"github.com/stretchr/testify/assert"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
@@ -43,7 +43,7 @@ func TestEnvVarsToMap(t *testing.T) {
 		},
 	}
-	varMap := EnvVarsToMap(vars)
+	varMap := envVarsToMap(vars)
 	if e, a := len(vars), len(varMap); e != a {
 		t.Errorf("Unexpected map length; expected: %d, got %d", e, a)
@@ -414,7 +414,7 @@ func TestShouldContainerBeRestarted(t *testing.T) {
 		ID: pod.UID,
 		Name: pod.Name,
 		Namespace: pod.Namespace,
-		ContainerStatuses: []*ContainerStatus{
+		ContainerStatuses: []*Status{
 			{
 				Name: "alive",
 				State: ContainerStateRunning,

View File

@@ -94,7 +94,7 @@ type Runtime interface {
 	// If evictNonDeletedPods is set to true, containers and sandboxes belonging to pods
 	// that are terminated, but not deleted will be evicted. Otherwise, only deleted pods will be GC'd.
 	// TODO: Revisit this method and make it cleaner.
-	GarbageCollect(gcPolicy ContainerGCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error
+	GarbageCollect(gcPolicy GCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error
 	// Syncs the running pod into the desired pod.
 	SyncPod(pod *v1.Pod, podStatus *PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) PodSyncResult
 	// KillPod kills all the containers of a pod. Pod may be nil, running pod must not be.
@@ -147,13 +147,13 @@ type ImageService interface {
 	ImageStats() (*ImageStats, error)
 }
-// ContainerAttacher interface allows to attach a container.
-type ContainerAttacher interface {
+// Attacher interface allows to attach a container.
+type Attacher interface {
 	AttachContainer(id ContainerID, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) (err error)
 }
-// ContainerCommandRunner interface allows to run command in a container.
-type ContainerCommandRunner interface {
+// CommandRunner interface allows to run command in a container.
+type CommandRunner interface {
 	// RunInContainer synchronously executes the command in the container, and returns the output.
 	// If the command completes with a non-0 exit code, a k8s.io/utils/exec.ExitError will be returned.
 	RunInContainer(id ContainerID, cmd []string, timeout time.Duration) ([]byte, error)
@@ -250,18 +250,18 @@ func (id DockerID) ContainerID() ContainerID {
 	}
 }
-// ContainerState represents the state of a container
-type ContainerState string
+// State represents the state of a container
+type State string
 const (
 	// ContainerStateCreated indicates a container that has been created (e.g. with docker create) but not started.
-	ContainerStateCreated ContainerState = "created"
+	ContainerStateCreated State = "created"
 	// ContainerStateRunning indicates a currently running container.
-	ContainerStateRunning ContainerState = "running"
+	ContainerStateRunning State = "running"
 	// ContainerStateExited indicates a container that ran and completed ("stopped" in other contexts, although a created container is technically also "stopped").
-	ContainerStateExited ContainerState = "exited"
+	ContainerStateExited State = "exited"
 	// ContainerStateUnknown encompasses all the states that we currently don't care about (like restarting, paused, dead).
-	ContainerStateUnknown ContainerState = "unknown"
+	ContainerStateUnknown State = "unknown"
 )
 // Container provides the runtime information for a container, such as ID, hash,
@@ -282,7 +282,7 @@ type Container struct {
 	// not managed by kubelet.
 	Hash uint64
 	// State is the state of the container.
-	State ContainerState
+	State State
 }
 // PodStatus represents the status of the pod and its containers.
@@ -297,20 +297,20 @@ type PodStatus struct {
 	// All IPs assigned to this pod
 	IPs []string
 	// Status of containers in the pod.
-	ContainerStatuses []*ContainerStatus
+	ContainerStatuses []*Status
 	// Status of the pod sandbox.
 	// Only for kuberuntime now, other runtime may keep it nil.
 	SandboxStatuses []*runtimeapi.PodSandboxStatus
 }
-// ContainerStatus represents the status of a container.
-type ContainerStatus struct {
+// Status represents the status of a container.
+type Status struct {
 	// ID of the container.
 	ID ContainerID
 	// Name of the container.
 	Name string
 	// Status of the container.
-	State ContainerState
+	State State
 	// Creation time of the container.
 	CreatedAt time.Time
 	// Start time of the container.
@@ -337,7 +337,7 @@ type ContainerStatus struct {
 // FindContainerStatusByName returns container status in the pod status with the given name.
 // When there are multiple containers' statuses with the same name, the first match will be returned.
-func (podStatus *PodStatus) FindContainerStatusByName(containerName string) *ContainerStatus {
+func (podStatus *PodStatus) FindContainerStatusByName(containerName string) *Status {
 	for _, containerStatus := range podStatus.ContainerStatuses {
 		if containerStatus.Name == containerName {
 			return containerStatus
@@ -347,8 +347,8 @@ func (podStatus *PodStatus) FindContainerStatusByName(containerName string) *Con
 }
 // GetRunningContainerStatuses returns container status of all the running containers in a pod
-func (podStatus *PodStatus) GetRunningContainerStatuses() []*ContainerStatus {
-	runningContainerStatuses := []*ContainerStatus{}
+func (podStatus *PodStatus) GetRunningContainerStatuses() []*Status {
+	runningContainerStatuses := []*Status{}
 	for _, containerStatus := range podStatus.ContainerStatuses {
 		if containerStatus.State == ContainerStateRunning {
 			runningContainerStatuses = append(runningContainerStatuses, containerStatus)
@@ -643,7 +643,7 @@ func ParsePodFullName(podFullName string) (string, string, error) {
 type Option func(Runtime)
 // SortContainerStatusesByCreationTime sorts the container statuses by creation time.
-type SortContainerStatusesByCreationTime []*ContainerStatus
+type SortContainerStatusesByCreationTime []*Status
 func (s SortContainerStatusesByCreationTime) Len() int { return len(s) }
 func (s SortContainerStatusesByCreationTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

View File

@@ -39,10 +39,6 @@ var (
 	ErrRunContainer = errors.New("RunContainerError")
 	// ErrKillContainer returned when runtime failed to kill any of pod's containers.
 	ErrKillContainer = errors.New("KillContainerError")
-	// ErrVerifyNonRoot returned if the container or image will run as the root user.
-	ErrVerifyNonRoot = errors.New("VerifyNonRootError")
-	// ErrRunInitContainer returned when container init failed.
-	ErrRunInitContainer = errors.New("RunInitContainerError")
 	// ErrCreatePodSandbox returned when runtime failed to create a sandbox for pod.
 	ErrCreatePodSandbox = errors.New("CreatePodSandboxError")
 	// ErrConfigPodSandbox returned when runetime failed to get pod sandbox config from pod.
@@ -51,13 +47,6 @@ var (
 	ErrKillPodSandbox = errors.New("KillPodSandboxError")
 )
-var (
-	// ErrSetupNetwork returned when network setup failed.
-	ErrSetupNetwork = errors.New("SetupNetworkError")
-	// ErrTeardownNetwork returned when network tear down failed.
-	ErrTeardownNetwork = errors.New("TeardownNetworkError")
-)
 // SyncAction indicates different kind of actions in SyncPod() and KillPod(). Now there are only actions
 // about start/kill container and setup/teardown network.
 type SyncAction string

View File

@@ -348,7 +348,7 @@ func (f *FakeRuntime) RemoveImage(image kubecontainer.ImageSpec) error {
 	return f.Err
 }
-func (f *FakeRuntime) GarbageCollect(gcPolicy kubecontainer.ContainerGCPolicy, ready bool, evictNonDeletedPods bool) error {
+func (f *FakeRuntime) GarbageCollect(gcPolicy kubecontainer.GCPolicy, ready bool, evictNonDeletedPods bool) error {
 	f.Lock()
 	defer f.Unlock()
@@ -406,7 +406,7 @@ type FakeContainerCommandRunner struct {
 	Cmd []string
 }
-var _ kubecontainer.ContainerCommandRunner = &FakeContainerCommandRunner{}
+var _ kubecontainer.CommandRunner = &FakeContainerCommandRunner{}
 func (f *FakeContainerCommandRunner) RunInContainer(containerID kubecontainer.ContainerID, cmd []string, timeout time.Duration) ([]byte, error) {
 	// record invoked values

View File

@@ -137,7 +137,7 @@ func (r *Mock) PortForward(pod *kubecontainer.Pod, port uint16, stream io.ReadWr
 	return args.Error(0)
 }
-func (r *Mock) GarbageCollect(gcPolicy kubecontainer.ContainerGCPolicy, ready bool, evictNonDeletedPods bool) error {
+func (r *Mock) GarbageCollect(gcPolicy kubecontainer.GCPolicy, ready bool, evictNonDeletedPods bool) error {
 	args := r.Called(gcPolicy, ready, evictNonDeletedPods)
 	return args.Error(0)
 }

View File

@@ -405,7 +405,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 		}
 	}
-	containerGCPolicy := kubecontainer.ContainerGCPolicy{
+	containerGCPolicy := kubecontainer.GCPolicy{
 		MinAge: minimumGCAge.Duration,
 		MaxPerPodContainer: int(maxPerPodContainerCount),
 		MaxContainers: int(maxContainerCount),
@@ -870,7 +870,7 @@ type Kubelet struct {
 	// Optional, defaults to /logs/ from /var/log
 	logServer http.Handler
 	// Optional, defaults to simple Docker implementation
-	runner kubecontainer.ContainerCommandRunner
+	runner kubecontainer.CommandRunner
 	// cAdvisor used for container information.
 	cadvisor cadvisor.Interface
@@ -921,7 +921,7 @@ type Kubelet struct {
 	recorder record.EventRecorder
 	// Policy for handling garbage collection of dead containers.
-	containerGC kubecontainer.ContainerGC
+	containerGC kubecontainer.GC
 	// Manager for image garbage collection.
 	imageManager images.ImageGCManager

View File

@@ -1482,7 +1482,7 @@ func (kl *Kubelet) convertStatusToAPIStatus(pod *v1.Pod, podStatus *kubecontaine
 // convertToAPIContainerStatuses converts the given internal container
 // statuses into API container statuses.
 func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecontainer.PodStatus, previousStatus []v1.ContainerStatus, containers []v1.Container, hasInitContainers, isInitContainer bool) []v1.ContainerStatus {
-	convertContainerStatus := func(cs *kubecontainer.ContainerStatus) *v1.ContainerStatus {
+	convertContainerStatus := func(cs *kubecontainer.Status) *v1.ContainerStatus {
 		cid := cs.ID.String()
 		status := &v1.ContainerStatus{
 			Name: cs.Name,

View File

@@ -394,7 +394,7 @@ func TestRunInContainer(t *testing.T) {
 		actualOutput, err := kubelet.RunInContainer("podFoo_nsFoo", "", "containerFoo", cmd)
 		assert.Equal(t, containerID, fakeCommandRunner.ContainerID, "(testError=%v) ID", testError)
 		assert.Equal(t, cmd, fakeCommandRunner.Cmd, "(testError=%v) command", testError)
-		// this isn't 100% foolproof as a bug in a real ContainerCommandRunner where it fails to copy to stdout/stderr wouldn't be caught by this test
+		// this isn't 100% foolproof as a bug in a real CommandRunner where it fails to copy to stdout/stderr wouldn't be caught by this test
 		assert.Equal(t, "foo", string(actualOutput), "(testError=%v) output", testError)
 		assert.Equal(t, err, testError, "(testError=%v) err", testError)
 	}

View File

@@ -267,7 +267,7 @@ func newTestKubeletWithImageList(
 		ImageGCManager: imageGCManager,
 	}
 	kubelet.containerLogManager = logs.NewStubContainerLogManager()
-	containerGCPolicy := kubecontainer.ContainerGCPolicy{
+	containerGCPolicy := kubecontainer.GCPolicy{
 		MinAge: time.Duration(0),
 		MaxPerPodContainer: 1,
 		MaxContainers: -1,
@@ -1401,13 +1401,13 @@ func TestGenerateAPIPodStatusWithSortedContainers(t *testing.T) {
 	kubelet := testKubelet.kubelet
 	numContainers := 10
 	expectedOrder := []string{}
-	cStatuses := []*kubecontainer.ContainerStatus{}
+	cStatuses := []*kubecontainer.Status{}
 	specContainerList := []v1.Container{}
 	for i := 0; i < numContainers; i++ {
 		id := fmt.Sprintf("%v", i)
 		containerName := fmt.Sprintf("%vcontainer", id)
 		expectedOrder = append(expectedOrder, containerName)
-		cStatus := &kubecontainer.ContainerStatus{
+		cStatus := &kubecontainer.Status{
 			ID: kubecontainer.BuildContainerID("test", id),
 			Name: containerName,
 		}
@@ -1415,7 +1415,7 @@ func TestGenerateAPIPodStatusWithSortedContainers(t *testing.T) {
 		if i%2 == 0 {
 			cStatuses = append(cStatuses, cStatus)
 		} else {
-			cStatuses = append([]*kubecontainer.ContainerStatus{cStatus}, cStatuses...)
+			cStatuses = append([]*kubecontainer.Status{cStatus}, cStatuses...)
 		}
 		specContainerList = append(specContainerList, v1.Container{Name: containerName})
 	}
@@ -1468,7 +1468,7 @@ func TestGenerateAPIPodStatusWithReasonCache(t *testing.T) {
 	}
 	tests := []struct {
 		containers []v1.Container
-		statuses []*kubecontainer.ContainerStatus
+		statuses []*kubecontainer.Status
 		reasons map[string]error
 		oldStatuses []v1.ContainerStatus
 		expectedState map[string]v1.ContainerState
@@ -1480,7 +1480,7 @@ func TestGenerateAPIPodStatusWithReasonCache(t *testing.T) {
 		// old status from apiserver.
 		{
 			containers: []v1.Container{{Name: "without-old-record"}, {Name: "with-old-record"}},
-			statuses: []*kubecontainer.ContainerStatus{},
+			statuses: []*kubecontainer.Status{},
 			reasons: map[string]error{},
 			oldStatuses: []v1.ContainerStatus{{
 				Name: "with-old-record",
@@ -1509,7 +1509,7 @@ func TestGenerateAPIPodStatusWithReasonCache(t *testing.T) {
 		// For running container, State should be Running, LastTerminationState should be retrieved from latest terminated status.
 		{
 			containers: []v1.Container{{Name: "running"}},
-			statuses: []*kubecontainer.ContainerStatus{
+			statuses: []*kubecontainer.Status{
 				{
 					Name: "running",
 					State: kubecontainer.ContainerStateRunning,
@@ -1545,7 +1545,7 @@ func TestGenerateAPIPodStatusWithReasonCache(t *testing.T) {
 		// terminated status.
 		{
 			containers: []v1.Container{{Name: "without-reason"}, {Name: "with-reason"}},
-			statuses: []*kubecontainer.ContainerStatus{
+			statuses: []*kubecontainer.Status{
 				{
 					Name: "without-reason",
 					State: kubecontainer.ContainerStateExited,
@@ -1650,7 +1650,7 @@ func TestGenerateAPIPodStatusWithDifferentRestartPolicies(t *testing.T) {
 		ID: pod.UID,
 		Name: pod.Name,
 		Namespace: pod.Namespace,
-		ContainerStatuses: []*kubecontainer.ContainerStatus{
+		ContainerStatuses: []*kubecontainer.Status{
 			{
 				Name: "succeed",
 				State: kubecontainer.ContainerStateExited,

View File

@@ -48,14 +48,14 @@ func (p podSandboxByCreated) Len() int { return len(p) }
 func (p podSandboxByCreated) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
 func (p podSandboxByCreated) Less(i, j int) bool { return p[i].CreatedAt > p[j].CreatedAt }
-type containerStatusByCreated []*kubecontainer.ContainerStatus
+type containerStatusByCreated []*kubecontainer.Status
 func (c containerStatusByCreated) Len() int { return len(c) }
 func (c containerStatusByCreated) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
 func (c containerStatusByCreated) Less(i, j int) bool { return c[i].CreatedAt.After(c[j].CreatedAt) }
-// toKubeContainerState converts runtimeapi.ContainerState to kubecontainer.ContainerState.
-func toKubeContainerState(state runtimeapi.ContainerState) kubecontainer.ContainerState {
+// toKubeContainerState converts runtimeapi.ContainerState to kubecontainer.State.
+func toKubeContainerState(state runtimeapi.ContainerState) kubecontainer.State {
 	switch state {
 	case runtimeapi.ContainerState_CONTAINER_CREATED:
 		return kubecontainer.ContainerStateCreated
@@ -141,7 +141,7 @@ func (m *kubeGenericRuntimeManager) getImageUser(image string) (*int64, string,
 // isInitContainerFailed returns true if container has exited and exitcode is not zero
 // or is in unknown state.
-func isInitContainerFailed(status *kubecontainer.ContainerStatus) bool {
+func isInitContainerFailed(status *kubecontainer.Status) bool {
 	if status.State == kubecontainer.ContainerStateExited && status.ExitCode != 0 {
 		return true
 	}

View File

@@ -428,7 +428,7 @@ func (m *kubeGenericRuntimeManager) readLastStringFromContainerLogs(path string)
 }
 // getPodContainerStatuses gets all containers' statuses for the pod.
-func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, name, namespace string) ([]*kubecontainer.ContainerStatus, error) {
+func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, name, namespace string) ([]*kubecontainer.Status, error) {
 	// Select all containers of the given pod.
 	containers, err := m.runtimeService.ListContainers(&runtimeapi.ContainerFilter{
 		LabelSelector: map[string]string{types.KubernetesPodUIDLabel: string(uid)},
@@ -438,7 +438,7 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, n
 		return nil, err
 	}
-	statuses := make([]*kubecontainer.ContainerStatus, len(containers))
+	statuses := make([]*kubecontainer.Status, len(containers))
 	// TODO: optimization: set maximum number of containers per container name to examine.
 	for i, c := range containers {
 		status, err := m.runtimeService.ContainerStatus(c.Id)
@@ -481,10 +481,10 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, n
 	return statuses, nil
 }
-func toKubeContainerStatus(status *runtimeapi.ContainerStatus, runtimeName string) *kubecontainer.ContainerStatus {
+func toKubeContainerStatus(status *runtimeapi.ContainerStatus, runtimeName string) *kubecontainer.Status {
 	annotatedInfo := getContainerInfoFromAnnotations(status.Annotations)
 	labeledInfo := getContainerInfoFromLabels(status.Labels)
-	cStatus := &kubecontainer.ContainerStatus{
+	cStatus := &kubecontainer.Status{
 		ID: kubecontainer.ContainerID{
 			Type: runtimeName,
 			ID: status.Id,
@@ -737,7 +737,7 @@ func (m *kubeGenericRuntimeManager) purgeInitContainers(pod *v1.Pod, podStatus *
 // index of next init container to start, or done if there are no further init containers.
 // Status is only returned if an init container is failed, in which case next will
 // point to the current container.
-func findNextInitContainerToRun(pod *v1.Pod, podStatus *kubecontainer.PodStatus) (status *kubecontainer.ContainerStatus, next *v1.Container, done bool) {
+func findNextInitContainerToRun(pod *v1.Pod, podStatus *kubecontainer.PodStatus) (status *kubecontainer.Status, next *v1.Container, done bool) {
 	if len(pod.Spec.InitContainers) == 0 {
 		return nil, nil, true
 	}

View File

@@ -128,7 +128,7 @@ func TestToKubeContainerStatus(t *testing.T) {
 	for desc, test := range map[string]struct {
 		input *runtimeapi.ContainerStatus
-		expected *kubecontainer.ContainerStatus
+		expected *kubecontainer.Status
 	}{
 		"created container": {
 			input: &runtimeapi.ContainerStatus{
@@ -138,7 +138,7 @@ func TestToKubeContainerStatus(t *testing.T) {
 				State: runtimeapi.ContainerState_CONTAINER_CREATED,
 				CreatedAt: createdAt,
 			},
-			expected: &kubecontainer.ContainerStatus{
+			expected: &kubecontainer.Status{
 				ID: *cid,
 				Image: imageSpec.Image,
 				State: kubecontainer.ContainerStateCreated,
@@ -154,7 +154,7 @@ func TestToKubeContainerStatus(t *testing.T) {
 				CreatedAt: createdAt,
 				StartedAt: startedAt,
 			},
-			expected: &kubecontainer.ContainerStatus{
+			expected: &kubecontainer.Status{
 				ID: *cid,
 				Image: imageSpec.Image,
 				State: kubecontainer.ContainerStateRunning,
@@ -175,7 +175,7 @@ func TestToKubeContainerStatus(t *testing.T) {
 				Reason: "GotKilled",
 				Message: "The container was killed",
 			},
-			expected: &kubecontainer.ContainerStatus{
+			expected: &kubecontainer.Status{
 				ID: *cid,
 				Image: imageSpec.Image,
 				State: kubecontainer.ContainerStateExited,
@@ -196,7 +196,7 @@ func TestToKubeContainerStatus(t *testing.T) {
 				CreatedAt: createdAt,
 				StartedAt: startedAt,
 			},
-			expected: &kubecontainer.ContainerStatus{
+			expected: &kubecontainer.Status{
 				ID: *cid,
 				Image: imageSpec.Image,
 				State: kubecontainer.ContainerStateUnknown,
@@ -316,7 +316,7 @@ func TestLifeCycleHook(t *testing.T) {
 	testPod.Spec.Containers[0].Lifecycle = cmdPostStart
 	testContainer := &testPod.Spec.Containers[0]
 	fakePodStatus := &kubecontainer.PodStatus{
-		ContainerStatuses: []*kubecontainer.ContainerStatus{
+		ContainerStatuses: []*kubecontainer.Status{
 			{
 				ID: kubecontainer.ContainerID{
 					Type: "docker",
@@ -342,7 +342,7 @@ func TestLifeCycleHook(t *testing.T) {
 func TestStartSpec(t *testing.T) {
 	podStatus := &kubecontainer.PodStatus{
-		ContainerStatuses: []*kubecontainer.ContainerStatus{
+		ContainerStatuses: []*kubecontainer.Status{
 			{
 				ID: kubecontainer.ContainerID{
 					Type: "docker",

View File

@@ -220,7 +220,7 @@ func (cgc *containerGC) evictableContainers(minAge time.Duration) (containersByE
 }
 // evict all containers that are evictable
-func (cgc *containerGC) evictContainers(gcPolicy kubecontainer.ContainerGCPolicy, allSourcesReady bool, evictTerminatedPods bool) error {
+func (cgc *containerGC) evictContainers(gcPolicy kubecontainer.GCPolicy, allSourcesReady bool, evictTerminatedPods bool) error {
 	// Separate containers by evict units.
 	evictUnits, err := cgc.evictableContainers(gcPolicy.MinAge)
 	if err != nil {
@@ -397,7 +397,7 @@ func (cgc *containerGC) evictPodLogsDirectories(allSourcesReady bool) error {
 // * removes oldest dead containers by enforcing gcPolicy.MaxContainers.
 // * gets evictable sandboxes which are not ready and contains no containers.
 // * removes evictable sandboxes.
-func (cgc *containerGC) GarbageCollect(gcPolicy kubecontainer.ContainerGCPolicy, allSourcesReady bool, evictTerminatedPods bool) error {
+func (cgc *containerGC) GarbageCollect(gcPolicy kubecontainer.GCPolicy, allSourcesReady bool, evictTerminatedPods bool) error {
 	errors := []error{}
 	// Remove evictable containers
 	if err := cgc.evictContainers(gcPolicy, allSourcesReady, evictTerminatedPods); err != nil {

View File

@@ -202,13 +202,13 @@ func TestContainerGC(t *testing.T) {
 	assert.NoError(t, err)
 	podStateProvider := m.containerGC.podStateProvider.(*fakePodStateProvider)
-	defaultGCPolicy := kubecontainer.ContainerGCPolicy{MinAge: time.Hour, MaxPerPodContainer: 2, MaxContainers: 6}
+	defaultGCPolicy := kubecontainer.GCPolicy{MinAge: time.Hour, MaxPerPodContainer: 2, MaxContainers: 6}
 	for c, test := range []struct {
 		description string // description of the test case
 		containers []containerTemplate // templates of containers
-		policy *kubecontainer.ContainerGCPolicy // container gc policy
+		policy *kubecontainer.GCPolicy // container gc policy
 		remain []int // template indexes of remaining containers
 		evictTerminatedPods bool
 		allSourcesReady bool
 	}{
@@ -217,7 +217,7 @@ func TestContainerGC(t *testing.T) {
 			containers: []containerTemplate{
 				makeGCContainer(podStateProvider, "foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
 			},
-			policy: &kubecontainer.ContainerGCPolicy{MinAge: time.Minute, MaxPerPodContainer: 1, MaxContainers: 0},
+			policy: &kubecontainer.GCPolicy{MinAge: time.Minute, MaxPerPodContainer: 1, MaxContainers: 0},
 			remain: []int{},
 			evictTerminatedPods: false,
 			allSourcesReady: true,
@@ -231,7 +231,7 @@ func TestContainerGC(t *testing.T) {
 				makeGCContainer(podStateProvider, "foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
 				makeGCContainer(podStateProvider, "foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
 			},
-			policy: &kubecontainer.ContainerGCPolicy{MinAge: time.Minute, MaxPerPodContainer: -1, MaxContainers: 4},
+			policy: &kubecontainer.GCPolicy{MinAge: time.Minute, MaxPerPodContainer: -1, MaxContainers: 4},
 			remain: []int{0, 1, 2, 3},
 			evictTerminatedPods: false,
 			allSourcesReady: true,
@@ -243,7 +243,7 @@ func TestContainerGC(t *testing.T) {
 				makeGCContainer(podStateProvider, "foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
 				makeGCContainer(podStateProvider, "foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
 			},
-			policy: &kubecontainer.ContainerGCPolicy{MinAge: time.Minute, MaxPerPodContainer: -1, MaxContainers: -1},
+			policy: &kubecontainer.GCPolicy{MinAge: time.Minute, MaxPerPodContainer: -1, MaxContainers: -1},
 			remain: []int{0, 1, 2},
 			evictTerminatedPods: false,
 			allSourcesReady: true,
@@ -453,7 +453,7 @@ func TestUnknownStateContainerGC(t *testing.T) {
 	assert.NoError(t, err)
 	podStateProvider := m.containerGC.podStateProvider.(*fakePodStateProvider)
-	defaultGCPolicy := kubecontainer.ContainerGCPolicy{MinAge: time.Hour, MaxPerPodContainer: 0, MaxContainers: 0}
+	defaultGCPolicy := kubecontainer.GCPolicy{MinAge: time.Hour, MaxPerPodContainer: 0, MaxContainers: 0}
 	fakeContainers := makeFakeContainers(t, m, []containerTemplate{
 		makeGCContainer(podStateProvider, "foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_UNKNOWN),

View File

@@ -138,7 +138,7 @@ type kubeGenericRuntimeManager struct {
 type KubeGenericRuntime interface {
 	kubecontainer.Runtime
 	kubecontainer.StreamingRuntime
-	kubecontainer.ContainerCommandRunner
+	kubecontainer.CommandRunner
 }
 // LegacyLogProvider gives the ability to use unsupported docker log drivers (e.g. journald)
@@ -453,7 +453,7 @@ func (m *kubeGenericRuntimeManager) podSandboxChanged(pod *v1.Pod, podStatus *ku
 	return false, sandboxStatus.Metadata.Attempt, sandboxStatus.Id
 }
-func containerChanged(container *v1.Container, containerStatus *kubecontainer.ContainerStatus) (uint64, uint64, bool) {
+func containerChanged(container *v1.Container, containerStatus *kubecontainer.Status) (uint64, uint64, bool) {
 	expectedHash := kubecontainer.HashContainer(container)
 	return expectedHash, containerStatus.Hash, containerStatus.Hash != expectedHash
 }
@@ -835,7 +835,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontaine
 // If a container is still in backoff, the function will return a brief backoff error and
 // a detailed error message.
 func (m *kubeGenericRuntimeManager) doBackOff(pod *v1.Pod, container *v1.Container, podStatus *kubecontainer.PodStatus, backOff *flowcontrol.Backoff) (bool, string, error) {
-	var cStatus *kubecontainer.ContainerStatus
+	var cStatus *kubecontainer.Status
 	for _, c := range podStatus.ContainerStatuses {
 		if c.Name == container.Name && c.State == kubecontainer.ContainerStateExited {
 			cStatus = c
@@ -963,7 +963,7 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namesp
 }
 // GarbageCollect removes dead containers using the specified container gc policy.
-func (m *kubeGenericRuntimeManager) GarbageCollect(gcPolicy kubecontainer.ContainerGCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error {
+func (m *kubeGenericRuntimeManager) GarbageCollect(gcPolicy kubecontainer.GCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error {
 	return m.containerGC.GarbageCollect(gcPolicy, allSourcesReady, evictNonDeletedPods)
 }

View File

@@ -695,7 +695,7 @@ func makeBasePodAndStatus() (*v1.Pod, *kubecontainer.PodStatus) {
 				Network: &runtimeapi.PodSandboxNetworkStatus{Ip: "10.0.0.1"},
 			},
 		},
-		ContainerStatuses: []*kubecontainer.ContainerStatus{
+		ContainerStatuses: []*kubecontainer.Status{
 			{
 				ID: kubecontainer.ContainerID{ID: "id1"},
 				Name: "foo1", State: kubecontainer.ContainerStateRunning,
@@ -742,7 +742,7 @@ func TestComputePodActions(t *testing.T) {
 			mutateStatusFn: func(status *kubecontainer.PodStatus) {
 				// No container or sandbox exists.
 				status.SandboxStatuses = []*runtimeapi.PodSandboxStatus{}
-				status.ContainerStatuses = []*kubecontainer.ContainerStatus{}
+				status.ContainerStatuses = []*kubecontainer.Status{}
 			},
 			actions: podActions{
 				KillPod: true,
@@ -917,7 +917,7 @@ func TestComputePodActions(t *testing.T) {
 				status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
 				status.SandboxStatuses[0].Metadata.Attempt = uint32(2)
 				// no visible containers
-				status.ContainerStatuses = []*kubecontainer.ContainerStatus{}
+				status.ContainerStatuses = []*kubecontainer.Status{}
 			},
 			actions: podActions{
 				SandboxID: baseStatus.SandboxStatuses[0].Id,
@@ -1125,7 +1125,7 @@ func TestComputePodActionsWithInitContainers(t *testing.T) {
 			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever },
 			mutateStatusFn: func(status *kubecontainer.PodStatus) {
 				status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
-				status.ContainerStatuses = []*kubecontainer.ContainerStatus{}
+				status.ContainerStatuses = []*kubecontainer.Status{}
 			},
 			actions: podActions{
 				KillPod: true,
@@ -1168,7 +1168,7 @@ func makeBasePodAndStatusWithInitContainers() (*v1.Pod, *kubecontainer.PodStatus
 	}
 	// Replace the original statuses of the containers with those for the init
 	// containers.
-	status.ContainerStatuses = []*kubecontainer.ContainerStatus{
+	status.ContainerStatuses = []*kubecontainer.Status{
 		{
 			ID: kubecontainer.ContainerID{ID: "initid1"},
 			Name: "init1", State: kubecontainer.ContainerStateExited,
@@ -1332,11 +1332,11 @@ func makeBasePodAndStatusWithInitAndEphemeralContainers() (*v1.Pod, *kubecontain
 			},
 		},
 	}
-	status.ContainerStatuses = append(status.ContainerStatuses, &kubecontainer.ContainerStatus{
+	status.ContainerStatuses = append(status.ContainerStatuses, &kubecontainer.Status{
 		ID: kubecontainer.ContainerID{ID: "initid1"},
 		Name: "init1", State: kubecontainer.ContainerStateExited,
 		Hash: kubecontainer.HashContainer(&pod.Spec.InitContainers[0]),
-	}, &kubecontainer.ContainerStatus{
+	}, &kubecontainer.Status{
 		ID: kubecontainer.ContainerID{ID: "debug1"},
 		Name: "debug", State: kubecontainer.ContainerStateRunning,
 		Hash: kubecontainer.HashContainer((*v1.Container)(&pod.Spec.EphemeralContainers[0].EphemeralContainerCommon)),

View File

@@ -39,7 +39,7 @@ const (
 type HandlerRunner struct {
 	httpGetter kubetypes.HTTPGetter
-	commandRunner kubecontainer.ContainerCommandRunner
+	commandRunner kubecontainer.CommandRunner
 	containerManager podStatusProvider
 }
@@ -47,7 +47,7 @@ type podStatusProvider interface {
 	GetPodStatus(uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error)
 }
-func NewHandlerRunner(httpGetter kubetypes.HTTPGetter, commandRunner kubecontainer.ContainerCommandRunner, containerManager podStatusProvider) kubecontainer.HandlerRunner {
+func NewHandlerRunner(httpGetter kubetypes.HTTPGetter, commandRunner kubecontainer.CommandRunner, containerManager podStatusProvider) kubecontainer.HandlerRunner {
 	return &HandlerRunner{
 		httpGetter: httpGetter,
 		commandRunner: commandRunner,

View File

@@ -67,7 +67,7 @@ type GenericPLEG struct {
 }
 // plegContainerState has a one-to-one mapping to the
-// kubecontainer.ContainerState except for the non-existent state. This state
+// kubecontainer.State except for the non-existent state. This state
 // is introduced here to complete the state transition scenarios.
 type plegContainerState string
@@ -83,7 +83,7 @@ const (
 	relistThreshold = 3 * time.Minute
 )
-func convertState(state kubecontainer.ContainerState) plegContainerState {
+func convertState(state kubecontainer.State) plegContainerState {
 	switch state {
 	case kubecontainer.ContainerStateCreated:
 		// kubelet doesn't use the "created" state yet, hence convert it to "unknown".

View File

@@ -76,7 +76,7 @@ func getEventsFromChannel(ch <-chan *PodLifecycleEvent) []*PodLifecycleEvent {
 	return events
 }
-func createTestContainer(ID string, state kubecontainer.ContainerState) *kubecontainer.Container {
+func createTestContainer(ID string, state kubecontainer.State) *kubecontainer.Container {
 	return &kubecontainer.Container{
 		ID: kubecontainer.ContainerID{Type: testContainerRuntimeType, ID: ID},
 		State: state,
@@ -336,7 +336,7 @@ func createTestPodsStatusesAndEvents(num int) ([]*kubecontainer.Pod, []*kubecont
 		}
 		status := &kubecontainer.PodStatus{
 			ID: id,
-			ContainerStatuses: []*kubecontainer.ContainerStatus{{ID: container.ID, State: cState}},
+			ContainerStatuses: []*kubecontainer.Status{{ID: container.ID, State: cState}},
 		}
 		event := &PodLifecycleEvent{ID: pod.ID, Type: ContainerStarted, Data: container.ID.ID}
 		pods = append(pods, pod)
@@ -457,7 +457,7 @@ func TestRelistWithReinspection(t *testing.T) {
 	goodStatus := &kubecontainer.PodStatus{
 		ID: podID,
-		ContainerStatuses: []*kubecontainer.ContainerStatus{{ID: infraContainer.ID, State: infraContainer.State}},
+		ContainerStatuses: []*kubecontainer.Status{{ID: infraContainer.ID, State: infraContainer.State}},
 	}
 	runtimeMock.On("GetPodStatus", podID, "", "").Return(goodStatus, nil).Once()
@@ -482,7 +482,7 @@ func TestRelistWithReinspection(t *testing.T) {
 	badStatus := &kubecontainer.PodStatus{
 		ID: podID,
-		ContainerStatuses: []*kubecontainer.ContainerStatus{},
+		ContainerStatuses: []*kubecontainer.Status{},
 	}
 	runtimeMock.On("GetPodStatus", podID, "", "").Return(badStatus, errors.New("inspection error")).Once()
@@ -607,7 +607,7 @@ func TestRelistIPChange(t *testing.T) {
 		status := &kubecontainer.PodStatus{
 			ID: id,
 			IPs: tc.podIPs,
-			ContainerStatuses: []*kubecontainer.ContainerStatus{{ID: container.ID, State: cState}},
+			ContainerStatuses: []*kubecontainer.Status{{ID: container.ID, State: cState}},
 		}
 		event := &PodLifecycleEvent{ID: pod.ID, Type: ContainerStarted, Data: container.ID.ID}
@@ -629,7 +629,7 @@ func TestRelistIPChange(t *testing.T) {
 		}
 		status = &kubecontainer.PodStatus{
 			ID: id,
-			ContainerStatuses: []*kubecontainer.ContainerStatus{{ID: container.ID, State: kubecontainer.ContainerStateExited}},
+			ContainerStatuses: []*kubecontainer.Status{{ID: container.ID, State: kubecontainer.ContainerStateExited}},
 		}
 		event = &PodLifecycleEvent{ID: pod.ID, Type: ContainerDied, Data: container.ID.ID}
 		runtimeMock.On("GetPods", true).Return([]*kubecontainer.Pod{pod}, nil).Once()

View File

@@ -30,7 +30,7 @@ const (
 	containerDeletorBufferLimit = 50
 )
-type containerStatusbyCreatedList []*kubecontainer.ContainerStatus
+type containerStatusbyCreatedList []*kubecontainer.Status
 type podContainerDeletor struct {
 	worker chan<- kubecontainer.ContainerID
@@ -63,7 +63,7 @@ func newPodContainerDeletor(runtime kubecontainer.Runtime, containersToKeep int)
 // getContainersToDeleteInPod returns the exited containers in a pod whose name matches the name inferred from filterContainerId (if not empty), ordered by the creation time from the latest to the earliest.
 // If filterContainerID is empty, all dead containers in the pod are returned.
 func getContainersToDeleteInPod(filterContainerID string, podStatus *kubecontainer.PodStatus, containersToKeep int) containerStatusbyCreatedList {
-	matchedContainer := func(filterContainerId string, podStatus *kubecontainer.PodStatus) *kubecontainer.ContainerStatus {
+	matchedContainer := func(filterContainerId string, podStatus *kubecontainer.PodStatus) *kubecontainer.Status {
 		if filterContainerId == "" {
 			return nil
 		}

View File

@@ -26,7 +26,7 @@ import (
 func TestGetContainersToDeleteInPodWithFilter(t *testing.T) {
 	pod := kubecontainer.PodStatus{
-		ContainerStatuses: []*kubecontainer.ContainerStatus{
+		ContainerStatuses: []*kubecontainer.Status{
 			{
 				ID: kubecontainer.ContainerID{Type: "test", ID: "1"},
 				Name: "foo",
@@ -66,15 +66,15 @@ func TestGetContainersToDeleteInPodWithFilter(t *testing.T) {
 	}{
 		{
 			0,
-			[]*kubecontainer.ContainerStatus{pod.ContainerStatuses[3], pod.ContainerStatuses[2], pod.ContainerStatuses[1]},
+			[]*kubecontainer.Status{pod.ContainerStatuses[3], pod.ContainerStatuses[2], pod.ContainerStatuses[1]},
 		},
 		{
 			1,
-			[]*kubecontainer.ContainerStatus{pod.ContainerStatuses[2], pod.ContainerStatuses[1]},
+			[]*kubecontainer.Status{pod.ContainerStatuses[2], pod.ContainerStatuses[1]},
 		},
 		{
 			2,
-			[]*kubecontainer.ContainerStatus{pod.ContainerStatuses[1]},
+			[]*kubecontainer.Status{pod.ContainerStatuses[1]},
 		},
 	}
@@ -88,7 +88,7 @@ func TestGetContainersToDeleteInPodWithFilter(t *testing.T) {
 func TestGetContainersToDeleteInPod(t *testing.T) {
 	pod := kubecontainer.PodStatus{
-		ContainerStatuses: []*kubecontainer.ContainerStatus{
+		ContainerStatuses: []*kubecontainer.Status{
 			{
 				ID: kubecontainer.ContainerID{Type: "test", ID: "1"},
 				Name: "foo",
@@ -128,15 +128,15 @@ func TestGetContainersToDeleteInPod(t *testing.T) {
 	}{
 		{
 			0,
-			[]*kubecontainer.ContainerStatus{pod.ContainerStatuses[3], pod.ContainerStatuses[2], pod.ContainerStatuses[1], pod.ContainerStatuses[0]},
+			[]*kubecontainer.Status{pod.ContainerStatuses[3], pod.ContainerStatuses[2], pod.ContainerStatuses[1], pod.ContainerStatuses[0]},
 		},
 		{
 			1,
-			[]*kubecontainer.ContainerStatus{pod.ContainerStatuses[2], pod.ContainerStatuses[1], pod.ContainerStatuses[0]},
+			[]*kubecontainer.Status{pod.ContainerStatuses[2], pod.ContainerStatuses[1], pod.ContainerStatuses[0]},
 		},
 		{
 			2,
-			[]*kubecontainer.ContainerStatus{pod.ContainerStatuses[1], pod.ContainerStatuses[0]},
+			[]*kubecontainer.Status{pod.ContainerStatuses[1], pod.ContainerStatuses[0]},
 		},
 	}
@@ -150,7 +150,7 @@ func TestGetContainersToDeleteInPod(t *testing.T) {
 func TestGetContainersToDeleteInPodWithNoMatch(t *testing.T) {
 	pod := kubecontainer.PodStatus{
-		ContainerStatuses: []*kubecontainer.ContainerStatus{
+		ContainerStatuses: []*kubecontainer.Status{
 			{
 				ID: kubecontainer.ContainerID{Type: "test", ID: "1"},
 				Name: "foo",
@@ -190,7 +190,7 @@ func TestGetContainersToDeleteInPodWithNoMatch(t *testing.T) {
 	}{
 		{
 			"abc",
-			[]*kubecontainer.ContainerStatus{},
+			[]*kubecontainer.Status{},
 		},
 	}

View File

@@ -54,7 +54,7 @@ type prober struct {
 	livenessHTTP httpprobe.Prober
 	startupHTTP httpprobe.Prober
 	tcp tcpprobe.Prober
-	runner kubecontainer.ContainerCommandRunner
+	runner kubecontainer.CommandRunner
 	recorder record.EventRecorder
 }
@@ -62,7 +62,7 @@ type prober struct {
 // NewProber creates a Prober, it takes a command runner and
 // several container info managers.
 func newProber(
-	runner kubecontainer.ContainerCommandRunner,
+	runner kubecontainer.CommandRunner,
 	recorder record.EventRecorder) *prober {
 	const followNonLocalRedirects = false

View File

@@ -103,7 +103,7 @@ func NewManager(
 	statusManager status.Manager,
 	livenessManager results.Manager,
 	startupManager results.Manager,
-	runner kubecontainer.ContainerCommandRunner,
+	runner kubecontainer.CommandRunner,
 	recorder record.EventRecorder) Manager {
 	prober := newProber(runner, recorder)

View File

@@ -390,7 +390,7 @@ func TestNewExecInContainer(t *testing.T) {
 		if e, a := cmd, runner.Cmd; !reflect.DeepEqual(e, a) {
 			t.Errorf("%s: cmd: expected %v, got %v", test.name, e, a)
 		}
-		// this isn't 100% foolproof as a bug in a real ContainerCommandRunner where it fails to copy to stdout/stderr wouldn't be caught by this test
+		// this isn't 100% foolproof as a bug in a real CommandRunner where it fails to copy to stdout/stderr wouldn't be caught by this test
 		if e, a := test.expected, string(actualOutput); e != a {
 			t.Errorf("%s: output: expected %q, got %q", test.name, e, a)
 		}

View File

@@ -157,7 +157,7 @@ func TestRunOnce(t *testing.T) {
 	// because runonce is never used in kubernetes now, we should deprioritize the cleanup work.
 	// TODO(random-liu) Fix the test, make it meaningful.
 	fakeRuntime.PodStatus = kubecontainer.PodStatus{
-		ContainerStatuses: []*kubecontainer.ContainerStatus{
+		ContainerStatuses: []*kubecontainer.Status{
 			{
 				Name: "bar",
 				State: kubecontainer.ContainerStateRunning,