mirror of https://github.com/k3s-io/kubernetes.git
synced 2025-07-23 11:50:44 +00:00
fix golint issues in pkg/kubelet/container
parent 2f2923fc33
commit ee53488f19
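This commit renames identifiers in the kubecontainer package that golint flags as stuttering: a name like kubecontainer.ContainerStatus repeats the package's subject, so it becomes kubecontainer.Status (likewise ContainerGCPolicy to GCPolicy, ContainerGC to GC, ContainerState to State, ContainerCommandRunner to CommandRunner, ContainerAttacher to Attacher), and the EnvVarsToMap/V1EnvVarsToMap helpers are unexported. A minimal sketch, not part of the commit, of what the rename looks like at a call site:

    package main

    import (
    	"fmt"

    	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
    )

    func main() {
    	// Before: kubecontainer.ContainerStatus / kubecontainer.ContainerState
    	// After:  kubecontainer.Status / kubecontainer.State
    	s := &kubecontainer.Status{Name: "app", State: kubecontainer.ContainerStateRunning}
    	fmt.Println(s.Name, s.State)
    }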
@@ -49,7 +49,7 @@ func getTestPodIDAndStatus(numContainers int) (types.UID, *PodStatus) {
 		status = &PodStatus{ID: id}
 	}
 	for i := 0; i < numContainers; i++ {
-		status.ContainerStatuses = append(status.ContainerStatuses, &ContainerStatus{Name: string(i)})
+		status.ContainerStatuses = append(status.ContainerStatuses, &Status{Name: string(i)})
 	}
 	return id, status
 }
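The hunk above only renames the status type. As an aside (nothing the commit changes), string(i) on an int yields the rune with that code point, so these test containers get names like "\x01" rather than "1"; a sketch of the difference, with strconv as the usual alternative:

    package main

    import (
    	"fmt"
    	"strconv"
    )

    func main() {
    	i := 1
    	fmt.Printf("%q\n", string(rune(i))) // "\x01", the behavior the helper relies on
    	fmt.Printf("%q\n", strconv.Itoa(i)) // "1", decimal, if readable names were wanted
    }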
@@ -23,8 +23,8 @@ import (
 	"k8s.io/klog/v2"
 )
 
-// ContainerGCPolicy specifies a policy for garbage collecting containers.
-type ContainerGCPolicy struct {
+// GCPolicy specifies a policy for garbage collecting containers.
+type GCPolicy struct {
 	// Minimum age at which a container can be garbage collected, zero for no limit.
 	MinAge time.Duration
 
@@ -36,10 +36,10 @@ type ContainerGCPolicy struct {
 	MaxContainers int
 }
 
-// ContainerGC manages garbage collection of dead containers.
+// GC manages garbage collection of dead containers.
 //
 // Implementation is thread-compatible.
-type ContainerGC interface {
+type GC interface {
 	// Garbage collect containers.
 	GarbageCollect() error
 	// Deletes all unused containers, including containers belonging to pods that are terminated but not deleted
@@ -58,14 +58,14 @@ type realContainerGC struct {
 	runtime Runtime
 
 	// Policy for garbage collection.
-	policy ContainerGCPolicy
+	policy GCPolicy
 
 	// sourcesReadyProvider provides the readiness of kubelet configuration sources.
 	sourcesReadyProvider SourcesReadyProvider
 }
 
-// NewContainerGC creates a new instance of ContainerGC with the specified policy.
-func NewContainerGC(runtime Runtime, policy ContainerGCPolicy, sourcesReadyProvider SourcesReadyProvider) (ContainerGC, error) {
+// NewContainerGC creates a new instance of GC with the specified policy.
+func NewContainerGC(runtime Runtime, policy GCPolicy, sourcesReadyProvider SourcesReadyProvider) (GC, error) {
 	if policy.MinAge < 0 {
 		return nil, fmt.Errorf("invalid minimum garbage collection age: %v", policy.MinAge)
 	}
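A minimal sketch, not from the commit, of how the renamed GCPolicy and GC types fit together; the runtime and sourcesReady values are assumed to come from the kubelet's existing wiring:

    package main

    import (
    	"log"
    	"time"

    	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
    )

    func newGC(runtime kubecontainer.Runtime, sourcesReady kubecontainer.SourcesReadyProvider) kubecontainer.GC {
    	policy := kubecontainer.GCPolicy{
    		MinAge:             time.Minute, // containers younger than this are never collected
    		MaxPerPodContainer: 2,           // dead containers to keep per container
    		MaxContainers:      100,         // global cap on dead containers
    	}
    	gc, err := kubecontainer.NewContainerGC(runtime, policy, sourcesReady)
    	if err != nil {
    		log.Fatalf("invalid GC policy: %v", err) // NewContainerGC rejects a negative MinAge
    	}
    	return gc
    }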
@@ -107,9 +107,9 @@ func HashContainer(container *v1.Container) uint64 {
 	return uint64(hash.Sum32())
 }
 
-// EnvVarsToMap constructs a map of environment name to value from a slice
+// envVarsToMap constructs a map of environment name to value from a slice
 // of env vars.
-func EnvVarsToMap(envs []EnvVar) map[string]string {
+func envVarsToMap(envs []EnvVar) map[string]string {
 	result := map[string]string{}
 	for _, env := range envs {
 		result[env.Name] = env.Value
@@ -117,9 +117,9 @@ func EnvVarsToMap(envs []EnvVar) map[string]string {
 	return result
 }
 
-// V1EnvVarsToMap constructs a map of environment name to value from a slice
+// v1EnvVarsToMap constructs a map of environment name to value from a slice
 // of env vars.
-func V1EnvVarsToMap(envs []v1.EnvVar) map[string]string {
+func v1EnvVarsToMap(envs []v1.EnvVar) map[string]string {
 	result := map[string]string{}
 	for _, env := range envs {
 		result[env.Name] = env.Value
@@ -132,7 +132,7 @@ func V1EnvVarsToMap(envs []v1.EnvVar) map[string]string {
 // container environment definitions. This does *not* include valueFrom substitutions.
 // TODO: callers should use ExpandContainerCommandAndArgs with a fully resolved list of environment.
 func ExpandContainerCommandOnlyStatic(containerCommand []string, envs []v1.EnvVar) (command []string) {
-	mapping := expansion.MappingFuncFor(V1EnvVarsToMap(envs))
+	mapping := expansion.MappingFuncFor(v1EnvVarsToMap(envs))
 	if len(containerCommand) != 0 {
 		for _, cmd := range containerCommand {
 			command = append(command, expansion.Expand(cmd, mapping))
@@ -144,7 +144,7 @@ func ExpandContainerCommandOnlyStatic(containerCommand []string, envs []v1.EnvVa
 // ExpandContainerVolumeMounts expands the subpath of the given VolumeMount by replacing variable references with the values of given EnvVar.
 func ExpandContainerVolumeMounts(mount v1.VolumeMount, envs []EnvVar) (string, error) {
 
-	envmap := EnvVarsToMap(envs)
+	envmap := envVarsToMap(envs)
 	missingKeys := sets.NewString()
 	expanded := expansion.Expand(mount.SubPathExpr, func(key string) string {
 		value, ok := envmap[key]
@@ -162,7 +162,7 @@ func ExpandContainerVolumeMounts(mount v1.VolumeMount, envs []EnvVar) (string, e
 
 // ExpandContainerCommandAndArgs expands the given Container's command by replacing variable references with the values of given EnvVar.
 func ExpandContainerCommandAndArgs(container *v1.Container, envs []EnvVar) (command []string, args []string) {
-	mapping := expansion.MappingFuncFor(EnvVarsToMap(envs))
+	mapping := expansion.MappingFuncFor(envVarsToMap(envs))
 
 	if len(container.Command) != 0 {
 		for _, cmd := range container.Command {
@@ -262,11 +262,11 @@ func ConvertPodStatusToRunningPod(runtimeName string, podStatus *PodStatus) Pod
 }
 
 // SandboxToContainerState converts runtimeapi.PodSandboxState to
-// kubecontainer.ContainerState.
+// kubecontainer.State.
 // This is only needed because we need to return sandboxes as if they were
 // kubecontainer.Containers to avoid substantial changes to PLEG.
 // TODO: Remove this once it becomes obsolete.
-func SandboxToContainerState(state runtimeapi.PodSandboxState) ContainerState {
+func SandboxToContainerState(state runtimeapi.PodSandboxState) State {
 	switch state {
 	case runtimeapi.PodSandboxState_SANDBOX_READY:
 		return ContainerStateRunning
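The expansion helpers resolve $(VAR) references in commands and subpaths against the env map built by envVarsToMap/v1EnvVarsToMap. A short sketch, assuming the usual Kubernetes behavior that unresolvable references are left intact:

    package main

    import (
    	"fmt"

    	v1 "k8s.io/api/core/v1"
    	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
    )

    func main() {
    	envs := []v1.EnvVar{{Name: "LOG_DIR", Value: "/var/log/app"}}
    	cmd := []string{"/bin/app", "--log=$(LOG_DIR)", "--keep=$(MISSING)"}
    	// Only static env definitions are consulted; valueFrom sources are not resolved here.
    	fmt.Println(kubecontainer.ExpandContainerCommandOnlyStatic(cmd, envs))
    	// expected: [/bin/app --log=/var/log/app --keep=$(MISSING)]
    }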
@@ -24,7 +24,7 @@ import (
 	"github.com/google/go-cmp/cmp"
 	"github.com/stretchr/testify/assert"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
@@ -43,7 +43,7 @@ func TestEnvVarsToMap(t *testing.T) {
 		},
 	}
 
-	varMap := EnvVarsToMap(vars)
+	varMap := envVarsToMap(vars)
 
 	if e, a := len(vars), len(varMap); e != a {
 		t.Errorf("Unexpected map length; expected: %d, got %d", e, a)
@@ -414,7 +414,7 @@ func TestShouldContainerBeRestarted(t *testing.T) {
 		ID:        pod.UID,
 		Name:      pod.Name,
 		Namespace: pod.Namespace,
-		ContainerStatuses: []*ContainerStatus{
+		ContainerStatuses: []*Status{
 			{
 				Name:  "alive",
 				State: ContainerStateRunning,
@@ -94,7 +94,7 @@ type Runtime interface {
 	// If evictNonDeletedPods is set to true, containers and sandboxes belonging to pods
 	// that are terminated, but not deleted will be evicted. Otherwise, only deleted pods will be GC'd.
 	// TODO: Revisit this method and make it cleaner.
-	GarbageCollect(gcPolicy ContainerGCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error
+	GarbageCollect(gcPolicy GCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error
 	// Syncs the running pod into the desired pod.
 	SyncPod(pod *v1.Pod, podStatus *PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) PodSyncResult
 	// KillPod kills all the containers of a pod. Pod may be nil, running pod must not be.
@@ -147,13 +147,13 @@ type ImageService interface {
 	ImageStats() (*ImageStats, error)
 }
 
-// ContainerAttacher interface allows to attach a container.
-type ContainerAttacher interface {
+// Attacher interface allows to attach a container.
+type Attacher interface {
 	AttachContainer(id ContainerID, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) (err error)
 }
 
-// ContainerCommandRunner interface allows to run command in a container.
-type ContainerCommandRunner interface {
+// CommandRunner interface allows to run command in a container.
+type CommandRunner interface {
 	// RunInContainer synchronously executes the command in the container, and returns the output.
 	// If the command completes with a non-0 exit code, a k8s.io/utils/exec.ExitError will be returned.
 	RunInContainer(id ContainerID, cmd []string, timeout time.Duration) ([]byte, error)
@@ -250,18 +250,18 @@ func (id DockerID) ContainerID() ContainerID {
 	}
 }
 
-// ContainerState represents the state of a container
-type ContainerState string
+// State represents the state of a container
+type State string
 
 const (
 	// ContainerStateCreated indicates a container that has been created (e.g. with docker create) but not started.
-	ContainerStateCreated ContainerState = "created"
+	ContainerStateCreated State = "created"
 	// ContainerStateRunning indicates a currently running container.
-	ContainerStateRunning ContainerState = "running"
+	ContainerStateRunning State = "running"
 	// ContainerStateExited indicates a container that ran and completed ("stopped" in other contexts, although a created container is technically also "stopped").
-	ContainerStateExited ContainerState = "exited"
+	ContainerStateExited State = "exited"
 	// ContainerStateUnknown encompasses all the states that we currently don't care about (like restarting, paused, dead).
-	ContainerStateUnknown ContainerState = "unknown"
+	ContainerStateUnknown State = "unknown"
 )
 
 // Container provides the runtime information for a container, such as ID, hash,
@@ -282,7 +282,7 @@ type Container struct {
 	// not managed by kubelet.
 	Hash uint64
 	// State is the state of the container.
-	State ContainerState
+	State State
 }
 
 // PodStatus represents the status of the pod and its containers.
@@ -297,20 +297,20 @@ type PodStatus struct {
 	// All IPs assigned to this pod
 	IPs []string
 	// Status of containers in the pod.
-	ContainerStatuses []*ContainerStatus
+	ContainerStatuses []*Status
 	// Status of the pod sandbox.
 	// Only for kuberuntime now, other runtime may keep it nil.
 	SandboxStatuses []*runtimeapi.PodSandboxStatus
 }
 
-// ContainerStatus represents the status of a container.
-type ContainerStatus struct {
+// Status represents the status of a container.
+type Status struct {
 	// ID of the container.
 	ID ContainerID
 	// Name of the container.
 	Name string
 	// Status of the container.
-	State ContainerState
+	State State
 	// Creation time of the container.
 	CreatedAt time.Time
 	// Start time of the container.
@@ -337,7 +337,7 @@ type ContainerStatus struct {
 
 // FindContainerStatusByName returns container status in the pod status with the given name.
 // When there are multiple containers' statuses with the same name, the first match will be returned.
-func (podStatus *PodStatus) FindContainerStatusByName(containerName string) *ContainerStatus {
+func (podStatus *PodStatus) FindContainerStatusByName(containerName string) *Status {
 	for _, containerStatus := range podStatus.ContainerStatuses {
 		if containerStatus.Name == containerName {
 			return containerStatus
@@ -347,8 +347,8 @@ func (podStatus *PodStatus) FindContainerStatusByName(containerName string) *Con
 }
 
 // GetRunningContainerStatuses returns container status of all the running containers in a pod
-func (podStatus *PodStatus) GetRunningContainerStatuses() []*ContainerStatus {
-	runningContainerStatuses := []*ContainerStatus{}
+func (podStatus *PodStatus) GetRunningContainerStatuses() []*Status {
+	runningContainerStatuses := []*Status{}
 	for _, containerStatus := range podStatus.ContainerStatuses {
 		if containerStatus.State == ContainerStateRunning {
 			runningContainerStatuses = append(runningContainerStatuses, containerStatus)
@@ -643,7 +643,7 @@ func ParsePodFullName(podFullName string) (string, string, error) {
 type Option func(Runtime)
 
 // SortContainerStatusesByCreationTime sorts the container statuses by creation time.
-type SortContainerStatusesByCreationTime []*ContainerStatus
+type SortContainerStatusesByCreationTime []*Status
 
 func (s SortContainerStatusesByCreationTime) Len() int      { return len(s) }
 func (s SortContainerStatusesByCreationTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
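A small sketch of the renamed types at a call site, using only identifiers visible in the hunks above:

    package main

    import (
    	"fmt"

    	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
    )

    func main() {
    	podStatus := &kubecontainer.PodStatus{
    		ContainerStatuses: []*kubecontainer.Status{
    			{Name: "app", State: kubecontainer.ContainerStateRunning},
    			{Name: "sidecar", State: kubecontainer.ContainerStateExited},
    		},
    	}
    	if s := podStatus.FindContainerStatusByName("app"); s != nil {
    		fmt.Println(s.Name, s.State) // app running
    	}
    	fmt.Println(len(podStatus.GetRunningContainerStatuses())) // 1
    }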
@@ -39,10 +39,6 @@ var (
 	ErrRunContainer = errors.New("RunContainerError")
 	// ErrKillContainer returned when runtime failed to kill any of pod's containers.
 	ErrKillContainer = errors.New("KillContainerError")
-	// ErrVerifyNonRoot returned if the container or image will run as the root user.
-	ErrVerifyNonRoot = errors.New("VerifyNonRootError")
-	// ErrRunInitContainer returned when container init failed.
-	ErrRunInitContainer = errors.New("RunInitContainerError")
 	// ErrCreatePodSandbox returned when runtime failed to create a sandbox for pod.
 	ErrCreatePodSandbox = errors.New("CreatePodSandboxError")
 	// ErrConfigPodSandbox returned when runetime failed to get pod sandbox config from pod.
@@ -51,13 +47,6 @@ var (
 	ErrKillPodSandbox = errors.New("KillPodSandboxError")
 )
 
-var (
-	// ErrSetupNetwork returned when network setup failed.
-	ErrSetupNetwork = errors.New("SetupNetworkError")
-	// ErrTeardownNetwork returned when network tear down failed.
-	ErrTeardownNetwork = errors.New("TeardownNetworkError")
-)
-
 // SyncAction indicates different kind of actions in SyncPod() and KillPod(). Now there are only actions
 // about start/kill container and setup/teardown network.
 type SyncAction string
@@ -348,7 +348,7 @@ func (f *FakeRuntime) RemoveImage(image kubecontainer.ImageSpec) error {
 	return f.Err
 }
 
-func (f *FakeRuntime) GarbageCollect(gcPolicy kubecontainer.ContainerGCPolicy, ready bool, evictNonDeletedPods bool) error {
+func (f *FakeRuntime) GarbageCollect(gcPolicy kubecontainer.GCPolicy, ready bool, evictNonDeletedPods bool) error {
 	f.Lock()
 	defer f.Unlock()
 
@@ -406,7 +406,7 @@ type FakeContainerCommandRunner struct {
 	Cmd []string
 }
 
-var _ kubecontainer.ContainerCommandRunner = &FakeContainerCommandRunner{}
+var _ kubecontainer.CommandRunner = &FakeContainerCommandRunner{}
 
 func (f *FakeContainerCommandRunner) RunInContainer(containerID kubecontainer.ContainerID, cmd []string, timeout time.Duration) ([]byte, error) {
 	// record invoked values
@@ -137,7 +137,7 @@ func (r *Mock) PortForward(pod *kubecontainer.Pod, port uint16, stream io.ReadWr
 	return args.Error(0)
 }
 
-func (r *Mock) GarbageCollect(gcPolicy kubecontainer.ContainerGCPolicy, ready bool, evictNonDeletedPods bool) error {
+func (r *Mock) GarbageCollect(gcPolicy kubecontainer.GCPolicy, ready bool, evictNonDeletedPods bool) error {
 	args := r.Called(gcPolicy, ready, evictNonDeletedPods)
 	return args.Error(0)
 }
@@ -405,7 +405,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 		}
 	}
 
-	containerGCPolicy := kubecontainer.ContainerGCPolicy{
+	containerGCPolicy := kubecontainer.GCPolicy{
 		MinAge:             minimumGCAge.Duration,
 		MaxPerPodContainer: int(maxPerPodContainerCount),
 		MaxContainers:      int(maxContainerCount),
@@ -870,7 +870,7 @@ type Kubelet struct {
 	// Optional, defaults to /logs/ from /var/log
 	logServer http.Handler
 	// Optional, defaults to simple Docker implementation
-	runner kubecontainer.ContainerCommandRunner
+	runner kubecontainer.CommandRunner
 
 	// cAdvisor used for container information.
 	cadvisor cadvisor.Interface
@@ -921,7 +921,7 @@ type Kubelet struct {
 	recorder record.EventRecorder
 
 	// Policy for handling garbage collection of dead containers.
-	containerGC kubecontainer.ContainerGC
+	containerGC kubecontainer.GC
 
 	// Manager for image garbage collection.
 	imageManager images.ImageGCManager
@@ -1482,7 +1482,7 @@ func (kl *Kubelet) convertStatusToAPIStatus(pod *v1.Pod, podStatus *kubecontaine
 // convertToAPIContainerStatuses converts the given internal container
 // statuses into API container statuses.
 func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecontainer.PodStatus, previousStatus []v1.ContainerStatus, containers []v1.Container, hasInitContainers, isInitContainer bool) []v1.ContainerStatus {
-	convertContainerStatus := func(cs *kubecontainer.ContainerStatus) *v1.ContainerStatus {
+	convertContainerStatus := func(cs *kubecontainer.Status) *v1.ContainerStatus {
 		cid := cs.ID.String()
 		status := &v1.ContainerStatus{
 			Name: cs.Name,
@@ -394,7 +394,7 @@ func TestRunInContainer(t *testing.T) {
 		actualOutput, err := kubelet.RunInContainer("podFoo_nsFoo", "", "containerFoo", cmd)
 		assert.Equal(t, containerID, fakeCommandRunner.ContainerID, "(testError=%v) ID", testError)
 		assert.Equal(t, cmd, fakeCommandRunner.Cmd, "(testError=%v) command", testError)
-		// this isn't 100% foolproof as a bug in a real ContainerCommandRunner where it fails to copy to stdout/stderr wouldn't be caught by this test
+		// this isn't 100% foolproof as a bug in a real CommandRunner where it fails to copy to stdout/stderr wouldn't be caught by this test
 		assert.Equal(t, "foo", string(actualOutput), "(testError=%v) output", testError)
 		assert.Equal(t, err, testError, "(testError=%v) err", testError)
 	}
@@ -267,7 +267,7 @@ func newTestKubeletWithImageList(
 		ImageGCManager: imageGCManager,
 	}
 	kubelet.containerLogManager = logs.NewStubContainerLogManager()
-	containerGCPolicy := kubecontainer.ContainerGCPolicy{
+	containerGCPolicy := kubecontainer.GCPolicy{
 		MinAge:             time.Duration(0),
 		MaxPerPodContainer: 1,
 		MaxContainers:      -1,
@@ -1401,13 +1401,13 @@ func TestGenerateAPIPodStatusWithSortedContainers(t *testing.T) {
 	kubelet := testKubelet.kubelet
 	numContainers := 10
 	expectedOrder := []string{}
-	cStatuses := []*kubecontainer.ContainerStatus{}
+	cStatuses := []*kubecontainer.Status{}
 	specContainerList := []v1.Container{}
 	for i := 0; i < numContainers; i++ {
 		id := fmt.Sprintf("%v", i)
 		containerName := fmt.Sprintf("%vcontainer", id)
 		expectedOrder = append(expectedOrder, containerName)
-		cStatus := &kubecontainer.ContainerStatus{
+		cStatus := &kubecontainer.Status{
 			ID:   kubecontainer.BuildContainerID("test", id),
 			Name: containerName,
 		}
@@ -1415,7 +1415,7 @@ func TestGenerateAPIPodStatusWithSortedContainers(t *testing.T) {
 		if i%2 == 0 {
 			cStatuses = append(cStatuses, cStatus)
 		} else {
-			cStatuses = append([]*kubecontainer.ContainerStatus{cStatus}, cStatuses...)
+			cStatuses = append([]*kubecontainer.Status{cStatus}, cStatuses...)
 		}
 		specContainerList = append(specContainerList, v1.Container{Name: containerName})
 	}
@@ -1468,7 +1468,7 @@ func TestGenerateAPIPodStatusWithReasonCache(t *testing.T) {
 	}
 	tests := []struct {
 		containers    []v1.Container
-		statuses      []*kubecontainer.ContainerStatus
+		statuses      []*kubecontainer.Status
 		reasons       map[string]error
 		oldStatuses   []v1.ContainerStatus
 		expectedState map[string]v1.ContainerState
@@ -1480,7 +1480,7 @@ func TestGenerateAPIPodStatusWithReasonCache(t *testing.T) {
 		// old status from apiserver.
 		{
 			containers: []v1.Container{{Name: "without-old-record"}, {Name: "with-old-record"}},
-			statuses:   []*kubecontainer.ContainerStatus{},
+			statuses:   []*kubecontainer.Status{},
 			reasons:    map[string]error{},
 			oldStatuses: []v1.ContainerStatus{{
 				Name: "with-old-record",
@@ -1509,7 +1509,7 @@ func TestGenerateAPIPodStatusWithReasonCache(t *testing.T) {
 		// For running container, State should be Running, LastTerminationState should be retrieved from latest terminated status.
 		{
 			containers: []v1.Container{{Name: "running"}},
-			statuses: []*kubecontainer.ContainerStatus{
+			statuses: []*kubecontainer.Status{
 				{
 					Name:  "running",
 					State: kubecontainer.ContainerStateRunning,
@@ -1545,7 +1545,7 @@ func TestGenerateAPIPodStatusWithReasonCache(t *testing.T) {
 		// terminated status.
 		{
 			containers: []v1.Container{{Name: "without-reason"}, {Name: "with-reason"}},
-			statuses: []*kubecontainer.ContainerStatus{
+			statuses: []*kubecontainer.Status{
 				{
 					Name:  "without-reason",
 					State: kubecontainer.ContainerStateExited,
@@ -1650,7 +1650,7 @@ func TestGenerateAPIPodStatusWithDifferentRestartPolicies(t *testing.T) {
 		ID:        pod.UID,
 		Name:      pod.Name,
 		Namespace: pod.Namespace,
-		ContainerStatuses: []*kubecontainer.ContainerStatus{
+		ContainerStatuses: []*kubecontainer.Status{
 			{
 				Name:  "succeed",
 				State: kubecontainer.ContainerStateExited,
@@ -48,14 +48,14 @@ func (p podSandboxByCreated) Len() int { return len(p) }
 func (p podSandboxByCreated) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
 func (p podSandboxByCreated) Less(i, j int) bool { return p[i].CreatedAt > p[j].CreatedAt }
 
-type containerStatusByCreated []*kubecontainer.ContainerStatus
+type containerStatusByCreated []*kubecontainer.Status
 
 func (c containerStatusByCreated) Len() int           { return len(c) }
 func (c containerStatusByCreated) Swap(i, j int)      { c[i], c[j] = c[j], c[i] }
 func (c containerStatusByCreated) Less(i, j int) bool { return c[i].CreatedAt.After(c[j].CreatedAt) }
 
-// toKubeContainerState converts runtimeapi.ContainerState to kubecontainer.ContainerState.
-func toKubeContainerState(state runtimeapi.ContainerState) kubecontainer.ContainerState {
+// toKubeContainerState converts runtimeapi.ContainerState to kubecontainer.State.
+func toKubeContainerState(state runtimeapi.ContainerState) kubecontainer.State {
 	switch state {
 	case runtimeapi.ContainerState_CONTAINER_CREATED:
 		return kubecontainer.ContainerStateCreated
|
||||
|
||||
// isInitContainerFailed returns true if container has exited and exitcode is not zero
|
||||
// or is in unknown state.
|
||||
func isInitContainerFailed(status *kubecontainer.ContainerStatus) bool {
|
||||
func isInitContainerFailed(status *kubecontainer.Status) bool {
|
||||
if status.State == kubecontainer.ContainerStateExited && status.ExitCode != 0 {
|
||||
return true
|
||||
}
|
||||
|
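The hunk shows only the exited-with-nonzero-code branch; per the doc comment, an unknown state also counts as failed. A sketch of the full predicate as the comment describes it (the body beyond the visible lines is an assumption):

    package sketch

    import (
    	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
    )

    // isInitContainerFailedSketch mirrors the doc comment above: an init container
    // has failed if it exited with a non-zero code or is in an unknown state.
    func isInitContainerFailedSketch(status *kubecontainer.Status) bool {
    	if status.State == kubecontainer.ContainerStateExited && status.ExitCode != 0 {
    		return true
    	}
    	return status.State == kubecontainer.ContainerStateUnknown
    }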
@@ -428,7 +428,7 @@ func (m *kubeGenericRuntimeManager) readLastStringFromContainerLogs(path string)
 }
 
 // getPodContainerStatuses gets all containers' statuses for the pod.
-func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, name, namespace string) ([]*kubecontainer.ContainerStatus, error) {
+func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, name, namespace string) ([]*kubecontainer.Status, error) {
 	// Select all containers of the given pod.
 	containers, err := m.runtimeService.ListContainers(&runtimeapi.ContainerFilter{
 		LabelSelector: map[string]string{types.KubernetesPodUIDLabel: string(uid)},
@@ -438,7 +438,7 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, n
 		return nil, err
 	}
 
-	statuses := make([]*kubecontainer.ContainerStatus, len(containers))
+	statuses := make([]*kubecontainer.Status, len(containers))
 	// TODO: optimization: set maximum number of containers per container name to examine.
 	for i, c := range containers {
 		status, err := m.runtimeService.ContainerStatus(c.Id)
@@ -481,10 +481,10 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, n
 	return statuses, nil
 }
 
-func toKubeContainerStatus(status *runtimeapi.ContainerStatus, runtimeName string) *kubecontainer.ContainerStatus {
+func toKubeContainerStatus(status *runtimeapi.ContainerStatus, runtimeName string) *kubecontainer.Status {
 	annotatedInfo := getContainerInfoFromAnnotations(status.Annotations)
 	labeledInfo := getContainerInfoFromLabels(status.Labels)
-	cStatus := &kubecontainer.ContainerStatus{
+	cStatus := &kubecontainer.Status{
 		ID: kubecontainer.ContainerID{
 			Type: runtimeName,
 			ID:   status.Id,
@@ -737,7 +737,7 @@ func (m *kubeGenericRuntimeManager) purgeInitContainers(pod *v1.Pod, podStatus *
 // index of next init container to start, or done if there are no further init containers.
 // Status is only returned if an init container is failed, in which case next will
 // point to the current container.
-func findNextInitContainerToRun(pod *v1.Pod, podStatus *kubecontainer.PodStatus) (status *kubecontainer.ContainerStatus, next *v1.Container, done bool) {
+func findNextInitContainerToRun(pod *v1.Pod, podStatus *kubecontainer.PodStatus) (status *kubecontainer.Status, next *v1.Container, done bool) {
 	if len(pod.Spec.InitContainers) == 0 {
 		return nil, nil, true
 	}
@@ -128,7 +128,7 @@ func TestToKubeContainerStatus(t *testing.T) {
 
 	for desc, test := range map[string]struct {
 		input    *runtimeapi.ContainerStatus
-		expected *kubecontainer.ContainerStatus
+		expected *kubecontainer.Status
 	}{
 		"created container": {
 			input: &runtimeapi.ContainerStatus{
@@ -138,7 +138,7 @@ func TestToKubeContainerStatus(t *testing.T) {
 				State:     runtimeapi.ContainerState_CONTAINER_CREATED,
 				CreatedAt: createdAt,
 			},
-			expected: &kubecontainer.ContainerStatus{
+			expected: &kubecontainer.Status{
 				ID:        *cid,
 				Image:     imageSpec.Image,
 				State:     kubecontainer.ContainerStateCreated,
@@ -154,7 +154,7 @@ func TestToKubeContainerStatus(t *testing.T) {
 				CreatedAt: createdAt,
 				StartedAt: startedAt,
 			},
-			expected: &kubecontainer.ContainerStatus{
+			expected: &kubecontainer.Status{
 				ID:        *cid,
 				Image:     imageSpec.Image,
 				State:     kubecontainer.ContainerStateRunning,
@@ -175,7 +175,7 @@ func TestToKubeContainerStatus(t *testing.T) {
 				Reason:    "GotKilled",
 				Message:   "The container was killed",
 			},
-			expected: &kubecontainer.ContainerStatus{
+			expected: &kubecontainer.Status{
 				ID:        *cid,
 				Image:     imageSpec.Image,
 				State:     kubecontainer.ContainerStateExited,
@@ -196,7 +196,7 @@ func TestToKubeContainerStatus(t *testing.T) {
 				CreatedAt: createdAt,
 				StartedAt: startedAt,
 			},
-			expected: &kubecontainer.ContainerStatus{
+			expected: &kubecontainer.Status{
 				ID:        *cid,
 				Image:     imageSpec.Image,
 				State:     kubecontainer.ContainerStateUnknown,
@@ -316,7 +316,7 @@ func TestLifeCycleHook(t *testing.T) {
 		testPod.Spec.Containers[0].Lifecycle = cmdPostStart
 		testContainer := &testPod.Spec.Containers[0]
 		fakePodStatus := &kubecontainer.PodStatus{
-			ContainerStatuses: []*kubecontainer.ContainerStatus{
+			ContainerStatuses: []*kubecontainer.Status{
 				{
 					ID: kubecontainer.ContainerID{
 						Type: "docker",
@@ -342,7 +342,7 @@ func TestLifeCycleHook(t *testing.T) {
 
 func TestStartSpec(t *testing.T) {
 	podStatus := &kubecontainer.PodStatus{
-		ContainerStatuses: []*kubecontainer.ContainerStatus{
+		ContainerStatuses: []*kubecontainer.Status{
 			{
 				ID: kubecontainer.ContainerID{
 					Type: "docker",
@@ -220,7 +220,7 @@ func (cgc *containerGC) evictableContainers(minAge time.Duration) (containersByE
 }
 
 // evict all containers that are evictable
-func (cgc *containerGC) evictContainers(gcPolicy kubecontainer.ContainerGCPolicy, allSourcesReady bool, evictTerminatedPods bool) error {
+func (cgc *containerGC) evictContainers(gcPolicy kubecontainer.GCPolicy, allSourcesReady bool, evictTerminatedPods bool) error {
 	// Separate containers by evict units.
 	evictUnits, err := cgc.evictableContainers(gcPolicy.MinAge)
 	if err != nil {
@@ -397,7 +397,7 @@ func (cgc *containerGC) evictPodLogsDirectories(allSourcesReady bool) error {
 // * removes oldest dead containers by enforcing gcPolicy.MaxContainers.
 // * gets evictable sandboxes which are not ready and contains no containers.
 // * removes evictable sandboxes.
-func (cgc *containerGC) GarbageCollect(gcPolicy kubecontainer.ContainerGCPolicy, allSourcesReady bool, evictTerminatedPods bool) error {
+func (cgc *containerGC) GarbageCollect(gcPolicy kubecontainer.GCPolicy, allSourcesReady bool, evictTerminatedPods bool) error {
 	errors := []error{}
 	// Remove evictable containers
 	if err := cgc.evictContainers(gcPolicy, allSourcesReady, evictTerminatedPods); err != nil {
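How the GCPolicy knobs interact, as exercised by the tests below; a sketch of the three policies those cases use, reading negative limits as "no cap" (which is how the -1 cases behave in the tests):

    package sketch

    import (
    	"time"

    	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
    )

    var (
    	// Keep at most one dead container per (pod, container name), none globally.
    	perPodOnly = kubecontainer.GCPolicy{MinAge: time.Minute, MaxPerPodContainer: 1, MaxContainers: 0}
    	// No per-pod cap; at most four dead containers overall.
    	globalOnly = kubecontainer.GCPolicy{MinAge: time.Minute, MaxPerPodContainer: -1, MaxContainers: 4}
    	// No caps at all; only MinAge gates eviction.
    	unlimited = kubecontainer.GCPolicy{MinAge: time.Minute, MaxPerPodContainer: -1, MaxContainers: -1}
    )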
@@ -202,13 +202,13 @@ func TestContainerGC(t *testing.T) {
 	assert.NoError(t, err)
 
 	podStateProvider := m.containerGC.podStateProvider.(*fakePodStateProvider)
-	defaultGCPolicy := kubecontainer.ContainerGCPolicy{MinAge: time.Hour, MaxPerPodContainer: 2, MaxContainers: 6}
+	defaultGCPolicy := kubecontainer.GCPolicy{MinAge: time.Hour, MaxPerPodContainer: 2, MaxContainers: 6}
 
 	for c, test := range []struct {
-		description         string                            // description of the test case
-		containers          []containerTemplate               // templates of containers
-		policy              *kubecontainer.ContainerGCPolicy  // container gc policy
-		remain              []int                             // template indexes of remaining containers
+		description         string                   // description of the test case
+		containers          []containerTemplate      // templates of containers
+		policy              *kubecontainer.GCPolicy // container gc policy
+		remain              []int                    // template indexes of remaining containers
 		evictTerminatedPods bool
 		allSourcesReady     bool
 	}{
@@ -217,7 +217,7 @@ func TestContainerGC(t *testing.T) {
 			containers: []containerTemplate{
 				makeGCContainer(podStateProvider, "foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
 			},
-			policy:              &kubecontainer.ContainerGCPolicy{MinAge: time.Minute, MaxPerPodContainer: 1, MaxContainers: 0},
+			policy:              &kubecontainer.GCPolicy{MinAge: time.Minute, MaxPerPodContainer: 1, MaxContainers: 0},
 			remain:              []int{},
 			evictTerminatedPods: false,
 			allSourcesReady:     true,
@@ -231,7 +231,7 @@ func TestContainerGC(t *testing.T) {
 				makeGCContainer(podStateProvider, "foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
 				makeGCContainer(podStateProvider, "foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
 			},
-			policy:              &kubecontainer.ContainerGCPolicy{MinAge: time.Minute, MaxPerPodContainer: -1, MaxContainers: 4},
+			policy:              &kubecontainer.GCPolicy{MinAge: time.Minute, MaxPerPodContainer: -1, MaxContainers: 4},
 			remain:              []int{0, 1, 2, 3},
 			evictTerminatedPods: false,
 			allSourcesReady:     true,
@@ -243,7 +243,7 @@ func TestContainerGC(t *testing.T) {
 				makeGCContainer(podStateProvider, "foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
 				makeGCContainer(podStateProvider, "foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
 			},
-			policy:              &kubecontainer.ContainerGCPolicy{MinAge: time.Minute, MaxPerPodContainer: -1, MaxContainers: -1},
+			policy:              &kubecontainer.GCPolicy{MinAge: time.Minute, MaxPerPodContainer: -1, MaxContainers: -1},
 			remain:              []int{0, 1, 2},
 			evictTerminatedPods: false,
 			allSourcesReady:     true,
@@ -453,7 +453,7 @@ func TestUnknownStateContainerGC(t *testing.T) {
 	assert.NoError(t, err)
 
 	podStateProvider := m.containerGC.podStateProvider.(*fakePodStateProvider)
-	defaultGCPolicy := kubecontainer.ContainerGCPolicy{MinAge: time.Hour, MaxPerPodContainer: 0, MaxContainers: 0}
+	defaultGCPolicy := kubecontainer.GCPolicy{MinAge: time.Hour, MaxPerPodContainer: 0, MaxContainers: 0}
 
 	fakeContainers := makeFakeContainers(t, m, []containerTemplate{
 		makeGCContainer(podStateProvider, "foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_UNKNOWN),
@@ -138,7 +138,7 @@ type kubeGenericRuntimeManager struct {
 type KubeGenericRuntime interface {
 	kubecontainer.Runtime
 	kubecontainer.StreamingRuntime
-	kubecontainer.ContainerCommandRunner
+	kubecontainer.CommandRunner
 }
 
 // LegacyLogProvider gives the ability to use unsupported docker log drivers (e.g. journald)
@@ -453,7 +453,7 @@ func (m *kubeGenericRuntimeManager) podSandboxChanged(pod *v1.Pod, podStatus *ku
 	return false, sandboxStatus.Metadata.Attempt, sandboxStatus.Id
 }
 
-func containerChanged(container *v1.Container, containerStatus *kubecontainer.ContainerStatus) (uint64, uint64, bool) {
+func containerChanged(container *v1.Container, containerStatus *kubecontainer.Status) (uint64, uint64, bool) {
 	expectedHash := kubecontainer.HashContainer(container)
 	return expectedHash, containerStatus.Hash, containerStatus.Hash != expectedHash
 }
@@ -835,7 +835,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontaine
 // If a container is still in backoff, the function will return a brief backoff error and
 // a detailed error message.
 func (m *kubeGenericRuntimeManager) doBackOff(pod *v1.Pod, container *v1.Container, podStatus *kubecontainer.PodStatus, backOff *flowcontrol.Backoff) (bool, string, error) {
-	var cStatus *kubecontainer.ContainerStatus
+	var cStatus *kubecontainer.Status
 	for _, c := range podStatus.ContainerStatuses {
 		if c.Name == container.Name && c.State == kubecontainer.ContainerStateExited {
 			cStatus = c
@@ -963,7 +963,7 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namesp
 }
 
 // GarbageCollect removes dead containers using the specified container gc policy.
-func (m *kubeGenericRuntimeManager) GarbageCollect(gcPolicy kubecontainer.ContainerGCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error {
+func (m *kubeGenericRuntimeManager) GarbageCollect(gcPolicy kubecontainer.GCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error {
 	return m.containerGC.GarbageCollect(gcPolicy, allSourcesReady, evictNonDeletedPods)
 }
@@ -695,7 +695,7 @@ func makeBasePodAndStatus() (*v1.Pod, *kubecontainer.PodStatus) {
 				Network: &runtimeapi.PodSandboxNetworkStatus{Ip: "10.0.0.1"},
 			},
 		},
-		ContainerStatuses: []*kubecontainer.ContainerStatus{
+		ContainerStatuses: []*kubecontainer.Status{
 			{
 				ID:   kubecontainer.ContainerID{ID: "id1"},
 				Name: "foo1", State: kubecontainer.ContainerStateRunning,
@@ -742,7 +742,7 @@ func TestComputePodActions(t *testing.T) {
 			mutateStatusFn: func(status *kubecontainer.PodStatus) {
 				// No container or sandbox exists.
 				status.SandboxStatuses = []*runtimeapi.PodSandboxStatus{}
-				status.ContainerStatuses = []*kubecontainer.ContainerStatus{}
+				status.ContainerStatuses = []*kubecontainer.Status{}
 			},
 			actions: podActions{
 				KillPod: true,
@@ -917,7 +917,7 @@ func TestComputePodActions(t *testing.T) {
 				status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
 				status.SandboxStatuses[0].Metadata.Attempt = uint32(2)
 				// no visible containers
-				status.ContainerStatuses = []*kubecontainer.ContainerStatus{}
+				status.ContainerStatuses = []*kubecontainer.Status{}
 			},
 			actions: podActions{
 				SandboxID: baseStatus.SandboxStatuses[0].Id,
@@ -1125,7 +1125,7 @@ func TestComputePodActionsWithInitContainers(t *testing.T) {
 			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever },
 			mutateStatusFn: func(status *kubecontainer.PodStatus) {
 				status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
-				status.ContainerStatuses = []*kubecontainer.ContainerStatus{}
+				status.ContainerStatuses = []*kubecontainer.Status{}
 			},
 			actions: podActions{
 				KillPod: true,
@@ -1168,7 +1168,7 @@ func makeBasePodAndStatusWithInitContainers() (*v1.Pod, *kubecontainer.PodStatus
 	}
 	// Replace the original statuses of the containers with those for the init
 	// containers.
-	status.ContainerStatuses = []*kubecontainer.ContainerStatus{
+	status.ContainerStatuses = []*kubecontainer.Status{
 		{
 			ID:   kubecontainer.ContainerID{ID: "initid1"},
 			Name: "init1", State: kubecontainer.ContainerStateExited,
@@ -1332,11 +1332,11 @@ func makeBasePodAndStatusWithInitAndEphemeralContainers() (*v1.Pod, *kubecontain
 			},
 		},
 	}
-	status.ContainerStatuses = append(status.ContainerStatuses, &kubecontainer.ContainerStatus{
+	status.ContainerStatuses = append(status.ContainerStatuses, &kubecontainer.Status{
 		ID:   kubecontainer.ContainerID{ID: "initid1"},
 		Name: "init1", State: kubecontainer.ContainerStateExited,
 		Hash: kubecontainer.HashContainer(&pod.Spec.InitContainers[0]),
-	}, &kubecontainer.ContainerStatus{
+	}, &kubecontainer.Status{
 		ID:   kubecontainer.ContainerID{ID: "debug1"},
 		Name: "debug", State: kubecontainer.ContainerStateRunning,
 		Hash: kubecontainer.HashContainer((*v1.Container)(&pod.Spec.EphemeralContainers[0].EphemeralContainerCommon)),
@@ -39,7 +39,7 @@ const (
 
 type HandlerRunner struct {
 	httpGetter       kubetypes.HTTPGetter
-	commandRunner    kubecontainer.ContainerCommandRunner
+	commandRunner    kubecontainer.CommandRunner
 	containerManager podStatusProvider
 }
 
@@ -47,7 +47,7 @@ type podStatusProvider interface {
 	GetPodStatus(uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error)
 }
 
-func NewHandlerRunner(httpGetter kubetypes.HTTPGetter, commandRunner kubecontainer.ContainerCommandRunner, containerManager podStatusProvider) kubecontainer.HandlerRunner {
+func NewHandlerRunner(httpGetter kubetypes.HTTPGetter, commandRunner kubecontainer.CommandRunner, containerManager podStatusProvider) kubecontainer.HandlerRunner {
 	return &HandlerRunner{
 		httpGetter:       httpGetter,
 		commandRunner:    commandRunner,
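A sketch of wiring NewHandlerRunner with the renamed CommandRunner; the stub types are hypothetical stand-ins for the kubelet's real runner and status provider, and http.DefaultClient satisfying kubetypes.HTTPGetter via its Get method is an assumption about that interface's shape:

    package main

    import (
    	"net/http"
    	"time"

    	"k8s.io/apimachinery/pkg/types"
    	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
    	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
    )

    type stubRunner struct{}

    func (stubRunner) RunInContainer(id kubecontainer.ContainerID, cmd []string, timeout time.Duration) ([]byte, error) {
    	return []byte("ok"), nil
    }

    type stubStatusProvider struct{}

    func (stubStatusProvider) GetPodStatus(uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) {
    	return &kubecontainer.PodStatus{}, nil
    }

    func main() {
    	_ = lifecycle.NewHandlerRunner(http.DefaultClient, stubRunner{}, stubStatusProvider{})
    }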
@@ -67,7 +67,7 @@ type GenericPLEG struct {
 }
 
 // plegContainerState has a one-to-one mapping to the
-// kubecontainer.ContainerState except for the non-existent state. This state
+// kubecontainer.State except for the non-existent state. This state
 // is introduced here to complete the state transition scenarios.
 type plegContainerState string
 
@@ -83,7 +83,7 @@ const (
 	relistThreshold = 3 * time.Minute
 )
 
-func convertState(state kubecontainer.ContainerState) plegContainerState {
+func convertState(state kubecontainer.State) plegContainerState {
 	switch state {
 	case kubecontainer.ContainerStateCreated:
 		// kubelet doesn't use the "created" state yet, hence convert it to "unknown".
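Only the "created" case is visible above; a sketch of the full mapping implied by the one-to-one comment, with the pleg-side constant names assumed rather than taken from the diff:

    package sketch

    import (
    	"fmt"

    	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
    )

    type plegContainerState string

    // Assumed constant names; only plegContainerState itself appears in the hunk.
    const (
    	plegContainerRunning plegContainerState = "running"
    	plegContainerExited  plegContainerState = "exited"
    	plegContainerUnknown plegContainerState = "unknown"
    )

    func convertStateSketch(state kubecontainer.State) plegContainerState {
    	switch state {
    	case kubecontainer.ContainerStateCreated:
    		// kubelet doesn't use the "created" state yet, hence convert it to "unknown".
    		return plegContainerUnknown
    	case kubecontainer.ContainerStateRunning:
    		return plegContainerRunning
    	case kubecontainer.ContainerStateExited:
    		return plegContainerExited
    	case kubecontainer.ContainerStateUnknown:
    		return plegContainerUnknown
    	default:
    		panic(fmt.Sprintf("unrecognized container state: %v", state))
    	}
    }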
@@ -76,7 +76,7 @@ func getEventsFromChannel(ch <-chan *PodLifecycleEvent) []*PodLifecycleEvent {
 	return events
 }
 
-func createTestContainer(ID string, state kubecontainer.ContainerState) *kubecontainer.Container {
+func createTestContainer(ID string, state kubecontainer.State) *kubecontainer.Container {
 	return &kubecontainer.Container{
 		ID:    kubecontainer.ContainerID{Type: testContainerRuntimeType, ID: ID},
 		State: state,
@@ -336,7 +336,7 @@ func createTestPodsStatusesAndEvents(num int) ([]*kubecontainer.Pod, []*kubecont
 		}
 		status := &kubecontainer.PodStatus{
 			ID:                id,
-			ContainerStatuses: []*kubecontainer.ContainerStatus{{ID: container.ID, State: cState}},
+			ContainerStatuses: []*kubecontainer.Status{{ID: container.ID, State: cState}},
 		}
 		event := &PodLifecycleEvent{ID: pod.ID, Type: ContainerStarted, Data: container.ID.ID}
 		pods = append(pods, pod)
@@ -457,7 +457,7 @@ func TestRelistWithReinspection(t *testing.T) {
 
 	goodStatus := &kubecontainer.PodStatus{
 		ID:                podID,
-		ContainerStatuses: []*kubecontainer.ContainerStatus{{ID: infraContainer.ID, State: infraContainer.State}},
+		ContainerStatuses: []*kubecontainer.Status{{ID: infraContainer.ID, State: infraContainer.State}},
 	}
 	runtimeMock.On("GetPodStatus", podID, "", "").Return(goodStatus, nil).Once()
 
@@ -482,7 +482,7 @@ func TestRelistWithReinspection(t *testing.T) {
 
 	badStatus := &kubecontainer.PodStatus{
 		ID:                podID,
-		ContainerStatuses: []*kubecontainer.ContainerStatus{},
+		ContainerStatuses: []*kubecontainer.Status{},
 	}
 	runtimeMock.On("GetPodStatus", podID, "", "").Return(badStatus, errors.New("inspection error")).Once()
 
@@ -607,7 +607,7 @@ func TestRelistIPChange(t *testing.T) {
 		status := &kubecontainer.PodStatus{
 			ID:                id,
 			IPs:               tc.podIPs,
-			ContainerStatuses: []*kubecontainer.ContainerStatus{{ID: container.ID, State: cState}},
+			ContainerStatuses: []*kubecontainer.Status{{ID: container.ID, State: cState}},
 		}
 		event := &PodLifecycleEvent{ID: pod.ID, Type: ContainerStarted, Data: container.ID.ID}
 
@@ -629,7 +629,7 @@ func TestRelistIPChange(t *testing.T) {
 		}
 		status = &kubecontainer.PodStatus{
 			ID:                id,
-			ContainerStatuses: []*kubecontainer.ContainerStatus{{ID: container.ID, State: kubecontainer.ContainerStateExited}},
+			ContainerStatuses: []*kubecontainer.Status{{ID: container.ID, State: kubecontainer.ContainerStateExited}},
 		}
 		event = &PodLifecycleEvent{ID: pod.ID, Type: ContainerDied, Data: container.ID.ID}
 		runtimeMock.On("GetPods", true).Return([]*kubecontainer.Pod{pod}, nil).Once()
@@ -30,7 +30,7 @@ const (
 	containerDeletorBufferLimit = 50
 )
 
-type containerStatusbyCreatedList []*kubecontainer.ContainerStatus
+type containerStatusbyCreatedList []*kubecontainer.Status
 
 type podContainerDeletor struct {
 	worker chan<- kubecontainer.ContainerID
@@ -63,7 +63,7 @@ func newPodContainerDeletor(runtime kubecontainer.Runtime, containersToKeep int)
 // getContainersToDeleteInPod returns the exited containers in a pod whose name matches the name inferred from filterContainerId (if not empty), ordered by the creation time from the latest to the earliest.
 // If filterContainerID is empty, all dead containers in the pod are returned.
 func getContainersToDeleteInPod(filterContainerID string, podStatus *kubecontainer.PodStatus, containersToKeep int) containerStatusbyCreatedList {
-	matchedContainer := func(filterContainerId string, podStatus *kubecontainer.PodStatus) *kubecontainer.ContainerStatus {
+	matchedContainer := func(filterContainerId string, podStatus *kubecontainer.PodStatus) *kubecontainer.Status {
 		if filterContainerId == "" {
 			return nil
 		}
@@ -26,7 +26,7 @@ import (
 
 func TestGetContainersToDeleteInPodWithFilter(t *testing.T) {
 	pod := kubecontainer.PodStatus{
-		ContainerStatuses: []*kubecontainer.ContainerStatus{
+		ContainerStatuses: []*kubecontainer.Status{
 			{
 				ID:   kubecontainer.ContainerID{Type: "test", ID: "1"},
 				Name: "foo",
@@ -66,15 +66,15 @@ func TestGetContainersToDeleteInPodWithFilter(t *testing.T) {
 	}{
 		{
 			0,
-			[]*kubecontainer.ContainerStatus{pod.ContainerStatuses[3], pod.ContainerStatuses[2], pod.ContainerStatuses[1]},
+			[]*kubecontainer.Status{pod.ContainerStatuses[3], pod.ContainerStatuses[2], pod.ContainerStatuses[1]},
 		},
 		{
 			1,
-			[]*kubecontainer.ContainerStatus{pod.ContainerStatuses[2], pod.ContainerStatuses[1]},
+			[]*kubecontainer.Status{pod.ContainerStatuses[2], pod.ContainerStatuses[1]},
 		},
 		{
 			2,
-			[]*kubecontainer.ContainerStatus{pod.ContainerStatuses[1]},
+			[]*kubecontainer.Status{pod.ContainerStatuses[1]},
 		},
 	}
 
@@ -88,7 +88,7 @@ func TestGetContainersToDeleteInPodWithFilter(t *testing.T) {
 
 func TestGetContainersToDeleteInPod(t *testing.T) {
 	pod := kubecontainer.PodStatus{
-		ContainerStatuses: []*kubecontainer.ContainerStatus{
+		ContainerStatuses: []*kubecontainer.Status{
 			{
 				ID:   kubecontainer.ContainerID{Type: "test", ID: "1"},
 				Name: "foo",
@@ -128,15 +128,15 @@ func TestGetContainersToDeleteInPod(t *testing.T) {
 	}{
 		{
 			0,
-			[]*kubecontainer.ContainerStatus{pod.ContainerStatuses[3], pod.ContainerStatuses[2], pod.ContainerStatuses[1], pod.ContainerStatuses[0]},
+			[]*kubecontainer.Status{pod.ContainerStatuses[3], pod.ContainerStatuses[2], pod.ContainerStatuses[1], pod.ContainerStatuses[0]},
 		},
 		{
 			1,
-			[]*kubecontainer.ContainerStatus{pod.ContainerStatuses[2], pod.ContainerStatuses[1], pod.ContainerStatuses[0]},
+			[]*kubecontainer.Status{pod.ContainerStatuses[2], pod.ContainerStatuses[1], pod.ContainerStatuses[0]},
 		},
 		{
 			2,
-			[]*kubecontainer.ContainerStatus{pod.ContainerStatuses[1], pod.ContainerStatuses[0]},
+			[]*kubecontainer.Status{pod.ContainerStatuses[1], pod.ContainerStatuses[0]},
 		},
 	}
 
@@ -150,7 +150,7 @@ func TestGetContainersToDeleteInPod(t *testing.T) {
 
 func TestGetContainersToDeleteInPodWithNoMatch(t *testing.T) {
 	pod := kubecontainer.PodStatus{
-		ContainerStatuses: []*kubecontainer.ContainerStatus{
+		ContainerStatuses: []*kubecontainer.Status{
 			{
 				ID:   kubecontainer.ContainerID{Type: "test", ID: "1"},
 				Name: "foo",
@@ -190,7 +190,7 @@ func TestGetContainersToDeleteInPodWithNoMatch(t *testing.T) {
 	}{
 		{
 			"abc",
-			[]*kubecontainer.ContainerStatus{},
+			[]*kubecontainer.Status{},
 		},
 	}
@@ -54,7 +54,7 @@ type prober struct {
 	livenessHTTP httpprobe.Prober
 	startupHTTP  httpprobe.Prober
 	tcp          tcpprobe.Prober
-	runner       kubecontainer.ContainerCommandRunner
+	runner       kubecontainer.CommandRunner
 
 	recorder record.EventRecorder
 }
@@ -62,7 +62,7 @@ type prober struct {
 // NewProber creates a Prober, it takes a command runner and
 // several container info managers.
 func newProber(
-	runner kubecontainer.ContainerCommandRunner,
+	runner kubecontainer.CommandRunner,
 	recorder record.EventRecorder) *prober {
 
 	const followNonLocalRedirects = false
@@ -103,7 +103,7 @@ func NewManager(
 	statusManager status.Manager,
 	livenessManager results.Manager,
 	startupManager results.Manager,
-	runner kubecontainer.ContainerCommandRunner,
+	runner kubecontainer.CommandRunner,
 	recorder record.EventRecorder) Manager {
 
 	prober := newProber(runner, recorder)
@@ -390,7 +390,7 @@ func TestNewExecInContainer(t *testing.T) {
 		if e, a := cmd, runner.Cmd; !reflect.DeepEqual(e, a) {
 			t.Errorf("%s: cmd: expected %v, got %v", test.name, e, a)
 		}
-		// this isn't 100% foolproof as a bug in a real ContainerCommandRunner where it fails to copy to stdout/stderr wouldn't be caught by this test
+		// this isn't 100% foolproof as a bug in a real CommandRunner where it fails to copy to stdout/stderr wouldn't be caught by this test
 		if e, a := test.expected, string(actualOutput); e != a {
 			t.Errorf("%s: output: expected %q, got %q", test.name, e, a)
 		}
@@ -157,7 +157,7 @@ func TestRunOnce(t *testing.T) {
 	// because runonce is never used in kubernetes now, we should deprioritize the cleanup work.
 	// TODO(random-liu) Fix the test, make it meaningful.
 	fakeRuntime.PodStatus = kubecontainer.PodStatus{
-		ContainerStatuses: []*kubecontainer.ContainerStatus{
+		ContainerStatuses: []*kubecontainer.Status{
 			{
 				Name:  "bar",
 				State: kubecontainer.ContainerStateRunning,