Remove ConvertPodStatusToAPIPodStatus from runtime interface

Random-Liu 2016-01-26 19:25:35 -08:00 committed by Lantao Liu
parent bd67b8a5db
commit 2b7d0182ca
7 changed files with 3 additions and 240 deletions

View File

@@ -245,15 +245,6 @@ func (f *FakeRuntime) GetPodStatus(uid types.UID, name, namespace string) (*PodS
return &status, f.Err
}
func (f *FakeRuntime) ConvertPodStatusToAPIPodStatus(_ *api.Pod, _ *PodStatus) (*api.PodStatus, error) {
f.Lock()
defer f.Unlock()
f.CalledFunctions = append(f.CalledFunctions, "ConvertPodStatusToAPIPodStatus")
status := f.APIPodStatus
return &status, f.Err
}
func (f *FakeRuntime) ExecInContainer(containerID ContainerID, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error {
f.Lock()
defer f.Unlock()

View File

@@ -72,12 +72,6 @@ type Runtime interface {
// GetPodStatus retrieves the status of the pod, including the
// information of all containers in the pod that are visible in Runtime.
GetPodStatus(uid types.UID, name, namespace string) (*PodStatus, error)
// ConvertPodStatusToAPIPodStatus converts the PodStatus object to api.PodStatus.
// This function is needed because Docker generates some high-level and/or
// pod-level information for api.PodStatus (e.g., check whether the image
// exists to determine the reason). We should try generalizing the logic
// for all container runtimes in kubelet and remove this function.
ConvertPodStatusToAPIPodStatus(*api.Pod, *PodStatus) (*api.PodStatus, error)
// PullImage pulls an image from the network to local storage using the supplied
// secrets if necessary.
PullImage(image ImageSpec, pullSecrets []api.Secret) error
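
With the conversion method removed, the status-related surface of the interface narrows to GetPodStatus; per the removed doc comment, the conversion logic is presumably to be generalized in kubelet. A hedged sketch of the trimmed interface, showing only the methods visible in this hunk (everything else is elided):

// Sketch of the Runtime interface after this commit; methods not shown
// in the hunk above are elided.
type Runtime interface {
	// GetPodStatus retrieves the status of the pod, including the
	// information of all containers in the pod that are visible in Runtime.
	GetPodStatus(uid types.UID, name, namespace string) (*PodStatus, error)
	// PullImage pulls an image from the network to local storage using the
	// supplied secrets if necessary.
	PullImage(image ImageSpec, pullSecrets []api.Secret) error
	// ... remaining methods unchanged ...
}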

View File

@@ -82,11 +82,6 @@ func (r *Mock) GetPodStatus(uid types.UID, name, namespace string) (*PodStatus,
return args.Get(0).(*PodStatus), args.Error(1)
}
func (r *Mock) ConvertPodStatusToAPIPodStatus(pod *api.Pod, podStatus *PodStatus) (*api.PodStatus, error) {
args := r.Called(pod, podStatus)
return args.Get(0).(*api.PodStatus), args.Error(1)
}
func (r *Mock) ExecInContainer(containerID ContainerID, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error {
args := r.Called(containerID, cmd, stdin, stdout, stderr, tty)
return args.Error(0)
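
The Mock above follows the testify mock pattern (r.Called replays canned return values), so tests that previously stubbed ConvertPodStatusToAPIPodStatus only need GetPodStatus now. An illustrative stub, assuming the Mock embeds testify's mock.Mock; the uid, name, and namespace values are made up:

// Illustrative only: stubbing GetPodStatus on the Mock runtime.
mockRuntime := new(Mock)
expected := &PodStatus{Name: "foo"}
mockRuntime.On("GetPodStatus", types.UID("12345"), "foo", "new").Return(expected, nil)
status, err := mockRuntime.GetPodStatus(types.UID("12345"), "foo", "new")
// status == expected, err == nil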

View File

@@ -21,8 +21,6 @@ import (
"strings"
docker "github.com/fsouza/go-dockerclient"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
@@ -83,31 +81,3 @@ func toRuntimeImage(image *docker.APIImages) (*kubecontainer.Image, error) {
Size: image.VirtualSize,
}, nil
}
// convert ContainerStatus to api.ContainerStatus.
func containerStatusToAPIContainerStatus(containerStatus *kubecontainer.ContainerStatus) *api.ContainerStatus {
containerID := DockerPrefix + containerStatus.ID.ID
status := api.ContainerStatus{
Name: containerStatus.Name,
RestartCount: containerStatus.RestartCount,
Image: containerStatus.Image,
ImageID: containerStatus.ImageID,
ContainerID: containerID,
}
switch containerStatus.State {
case kubecontainer.ContainerStateRunning:
status.State.Running = &api.ContainerStateRunning{StartedAt: unversioned.NewTime(containerStatus.StartedAt)}
case kubecontainer.ContainerStateExited:
status.State.Terminated = &api.ContainerStateTerminated{
ExitCode: containerStatus.ExitCode,
Reason: containerStatus.Reason,
Message: containerStatus.Message,
StartedAt: unversioned.NewTime(containerStatus.StartedAt),
FinishedAt: unversioned.NewTime(containerStatus.FinishedAt),
ContainerID: containerID,
}
default:
status.State.Waiting = &api.ContainerStateWaiting{}
}
return &status
}
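
For reference, the helper deleted above mapped one runtime ContainerStatus to one api.ContainerStatus, with unknown states falling back to Waiting. An illustrative call against the removed code, with made-up field values:

// Illustrative: an exited container becomes a Terminated api state.
cs := &kubecontainer.ContainerStatus{
	ID:       kubecontainer.ContainerID{Type: "docker", ID: "abc123"},
	Name:     "nginx",
	State:    kubecontainer.ContainerStateExited,
	ExitCode: 1,
}
apiStatus := containerStatusToAPIContainerStatus(cs)
// apiStatus.ContainerID == DockerPrefix + "abc123"
// apiStatus.State.Terminated.ExitCode == 1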

View File

@@ -25,7 +25,6 @@ import (
"os"
"os/exec"
"path"
"sort"
"strconv"
"strings"
"sync"
@@ -55,7 +54,6 @@ import (
"k8s.io/kubernetes/pkg/util/oom"
"k8s.io/kubernetes/pkg/util/procfs"
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/util/sets"
utilstrings "k8s.io/kubernetes/pkg/util/strings"
)
@@ -410,112 +408,6 @@ func (dm *DockerManager) inspectContainer(id string, podName, podNamespace strin
return &status, "", nil
}
func (dm *DockerManager) ConvertPodStatusToAPIPodStatus(pod *api.Pod, podStatus *kubecontainer.PodStatus) (*api.PodStatus, error) {
var apiPodStatus api.PodStatus
uid := pod.UID
statuses := make(map[string]*api.ContainerStatus, len(pod.Spec.Containers))
// Create a map of expected containers based on the pod spec.
expectedContainers := make(map[string]api.Container)
for _, container := range pod.Spec.Containers {
expectedContainers[container.Name] = container
}
containerDone := sets.NewString()
apiPodStatus.PodIP = podStatus.IP
for _, containerStatus := range podStatus.ContainerStatuses {
cName := containerStatus.Name
if _, ok := expectedContainers[cName]; !ok {
// This would also ignore the infra container.
continue
}
if containerDone.Has(cName) {
continue
}
status := containerStatusToAPIContainerStatus(containerStatus)
if existing, found := statuses[cName]; found {
existing.LastTerminationState = status.State
containerDone.Insert(cName)
} else {
statuses[cName] = status
}
}
// Handle the containers for which we cannot find any associated active or dead docker containers, or which are in restart backoff
// Fetch old containers statuses from old pod status.
oldStatuses := make(map[string]api.ContainerStatus, len(pod.Spec.Containers))
for _, status := range pod.Status.ContainerStatuses {
oldStatuses[status.Name] = status
}
for _, container := range pod.Spec.Containers {
if containerStatus, found := statuses[container.Name]; found {
reasonInfo, ok := dm.reasonCache.Get(uid, container.Name)
if ok && reasonInfo.reason == kubecontainer.ErrCrashLoopBackOff.Error() {
containerStatus.LastTerminationState = containerStatus.State
containerStatus.State = api.ContainerState{
Waiting: &api.ContainerStateWaiting{
Reason: reasonInfo.reason,
Message: reasonInfo.message,
},
}
}
continue
}
var containerStatus api.ContainerStatus
containerStatus.Name = container.Name
containerStatus.Image = container.Image
if oldStatus, found := oldStatuses[container.Name]; found {
// Some states may be lost due to GC; apply the last observed
// values if possible.
containerStatus.RestartCount = oldStatus.RestartCount
containerStatus.LastTerminationState = oldStatus.LastTerminationState
}
// TODO(dchen1107): docker/docker/issues/8365 to figure out if the image exists
reasonInfo, ok := dm.reasonCache.Get(uid, container.Name)
if !ok {
// Default position for a container.
// At this point there are no active or dead containers and the reasonCache is
// empty (no entry, or the entry has expired); it's reasonable to say the
// container is being created until a more accurate reason is logged.
containerStatus.State = api.ContainerState{
Waiting: &api.ContainerStateWaiting{
Reason: fmt.Sprintf("ContainerCreating"),
Message: fmt.Sprintf("Image: %s is ready, container is creating", container.Image),
},
}
} else if reasonInfo.reason == kubecontainer.ErrImagePullBackOff.Error() ||
reasonInfo.reason == kubecontainer.ErrImageInspect.Error() ||
reasonInfo.reason == kubecontainer.ErrImagePull.Error() ||
reasonInfo.reason == kubecontainer.ErrImageNeverPull.Error() {
// mark it as waiting, reason will be filled below
containerStatus.State = api.ContainerState{Waiting: &api.ContainerStateWaiting{}}
} else if reasonInfo.reason == kubecontainer.ErrRunContainer.Error() {
// mark it as waiting, reason will be filled below
containerStatus.State = api.ContainerState{Waiting: &api.ContainerStateWaiting{}}
}
statuses[container.Name] = &containerStatus
}
apiPodStatus.ContainerStatuses = make([]api.ContainerStatus, 0)
for containerName, status := range statuses {
if status.State.Waiting != nil {
status.State.Running = nil
// For containers in the waiting state, fill in a specific reason if it is recorded.
if reasonInfo, ok := dm.reasonCache.Get(uid, containerName); ok {
status.State.Waiting.Reason = reasonInfo.reason
status.State.Waiting.Message = reasonInfo.message
}
}
apiPodStatus.ContainerStatuses = append(apiPodStatus.ContainerStatuses, *status)
}
// Sort the container statuses since clients of this interface expect the list
// of containers in a pod to behave like the output of `docker list`, which has a
// deterministic order.
sort.Sort(kubetypes.SortedContainerStatuses(apiPodStatus.ContainerStatuses))
return &apiPodStatus, nil
}
// makeEnvList converts EnvVar list to a list of strings, in the form of
// '<key>=<value>', which can be understood by docker.
func makeEnvList(envs []kubecontainer.EnvVar) (result []string) {
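
The doc comment above pins down the '<key>=<value>' form; a small illustration with made-up variables:

// Illustrative input/output for makeEnvList:
envs := []kubecontainer.EnvVar{
	{Name: "PATH", Value: "/usr/local/bin"},
	{Name: "POD_NAME", Value: "nginx"},
}
// makeEnvList(envs) == []string{"PATH=/usr/local/bin", "POD_NAME=nginx"}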

View File

@@ -550,17 +550,14 @@ func runSyncPod(t *testing.T, dm *DockerManager, fakeDocker *FakeDockerClient, p
if err != nil {
t.Errorf("unexpected error: %v", err)
}
var apiPodStatus *api.PodStatus
apiPodStatus, err = dm.ConvertPodStatusToAPIPodStatus(pod, podStatus)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
fakeDocker.ClearCalls()
if backOff == nil {
backOff = util.NewBackOff(time.Second, time.Minute)
}
result := dm.SyncPod(pod, *apiPodStatus, podStatus, []api.Secret{}, backOff)
// TODO(random-liu): Add test for PodSyncResult
// api.PodStatus is no longer used in SyncPod, so pass in an empty one.
result := dm.SyncPod(pod, api.PodStatus{}, podStatus, []api.Secret{}, backOff)
err = result.Error()
if err != nil && !expectErr {
t.Errorf("unexpected error: %v", err)

View File

@@ -25,7 +25,6 @@ import (
"os"
"os/exec"
"path"
"sort"
"strconv"
"strings"
"syscall"
@@ -39,7 +38,6 @@ import (
"golang.org/x/net/context"
"google.golang.org/grpc"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/credentialprovider"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
@@ -1490,77 +1488,3 @@ type sortByRestartCount []*kubecontainer.ContainerStatus
func (s sortByRestartCount) Len() int { return len(s) }
func (s sortByRestartCount) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s sortByRestartCount) Less(i, j int) bool { return s[i].RestartCount < s[j].RestartCount }
// TODO(yifan): Delete this function when the logic is moved to kubelet.
func (r *Runtime) ConvertPodStatusToAPIPodStatus(pod *api.Pod, status *kubecontainer.PodStatus) (*api.PodStatus, error) {
apiPodStatus := &api.PodStatus{PodIP: status.IP}
// Sort in the reverse order of the restart count because the
// latest one will have the largest restart count.
sort.Sort(sort.Reverse(sortByRestartCount(status.ContainerStatuses)))
containerStatuses := make(map[string]*api.ContainerStatus)
for _, c := range status.ContainerStatuses {
var st api.ContainerState
switch c.State {
case kubecontainer.ContainerStateRunning:
st.Running = &api.ContainerStateRunning{
StartedAt: unversioned.NewTime(c.StartedAt),
}
case kubecontainer.ContainerStateExited:
if pod.Spec.RestartPolicy == api.RestartPolicyAlways ||
pod.Spec.RestartPolicy == api.RestartPolicyOnFailure && c.ExitCode != 0 {
// TODO(yifan): Add reason and message.
st.Waiting = &api.ContainerStateWaiting{}
break
}
st.Terminated = &api.ContainerStateTerminated{
ExitCode: c.ExitCode,
StartedAt: unversioned.NewTime(c.StartedAt),
Reason: c.Reason,
Message: c.Message,
// TODO(yifan): Add finishedAt, signal.
ContainerID: c.ID.String(),
}
default:
// Unknown state.
// TODO(yifan): Add reason and message.
st.Waiting = &api.ContainerStateWaiting{}
}
status, ok := containerStatuses[c.Name]
if !ok {
containerStatuses[c.Name] = &api.ContainerStatus{
Name: c.Name,
Image: c.Image,
ImageID: c.ImageID,
ContainerID: c.ID.String(),
RestartCount: c.RestartCount,
State: st,
}
continue
}
// Found multiple container statuses; record this one as the last termination state.
if status.LastTerminationState.Waiting == nil &&
status.LastTerminationState.Running == nil &&
status.LastTerminationState.Terminated == nil {
status.LastTerminationState = st
}
}
for _, c := range pod.Spec.Containers {
cs, ok := containerStatuses[c.Name]
if !ok {
cs = &api.ContainerStatus{
Name: c.Name,
Image: c.Image,
// TODO(yifan): Add reason and message.
State: api.ContainerState{Waiting: &api.ContainerStateWaiting{}},
}
}
apiPodStatus.ContainerStatuses = append(apiPodStatus.ContainerStatuses, *cs)
}
return apiPodStatus, nil
}
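
The reverse sort in the deleted rkt converter is what made the first status seen for each container name the current one, with any later (older) status landing in LastTerminationState. A small illustration of the ordering, with made-up statuses:

// Illustrative: higher restart counts come first after the reverse sort.
statuses := []*kubecontainer.ContainerStatus{
	{Name: "app", RestartCount: 1},
	{Name: "app", RestartCount: 3},
}
sort.Sort(sort.Reverse(sortByRestartCount(statuses)))
// statuses[0].RestartCount == 3: the latest attempt is seen first, becomes
// the current State, and the older attempt fills LastTerminationState.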