CRI: Add missing sandbox in runningPod.

Append containers that represent pod sandboxes when converting the pod status
to a running Pod. The helper moves from the kubecontainer package into
kuberuntime, takes the runtime name so each sandbox ID can be tagged with it,
and maps CRI sandbox states to kubecontainer container states.
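
The converted Pod is what killPodWithSyncResult receives; in the kuberuntime
manager that kill path stops the pod's sandboxes after killing its containers,
so a status converted without sandbox entries could leave the sandbox running.
A simplified sketch of that consumer (not code touched by this commit; the
method name is hypothetical and error bookkeeping is omitted):

    // Sketch of how the sandbox entries appended by ConvertPodStatusToRunningPod
    // are consumed when a pod is killed. The real logic lives in
    // killPodWithSyncResult; this helper name is illustrative only.
    func (m *kubeGenericRuntimeManager) stopPodSandboxes(runningPod kubecontainer.Pod) {
        for _, sandbox := range runningPod.Sandboxes {
            // Each entry was built as ContainerID{Type: runtimeName, ID: <CRI sandbox ID>},
            // so the raw CRI ID to stop is sandbox.ID.ID.
            if err := m.runtimeService.StopPodSandbox(sandbox.ID.ID); err != nil {
                glog.Errorf("Failed to stop sandbox %q: %v", sandbox.ID.ID, err)
            }
        }
    }
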
Yifan Gu 2016-09-16 19:18:18 -07:00
parent af3050dd15
commit 27d4866c4e
6 changed files with 39 additions and 28 deletions


@@ -83,30 +83,6 @@ func ShouldContainerBeRestarted(container *api.Container, pod *api.Pod, podStatu
     return true
 }
 
-// TODO(random-liu): Convert PodStatus to running Pod, should be deprecated soon
-func ConvertPodStatusToRunningPod(podStatus *PodStatus) Pod {
-    runningPod := Pod{
-        ID:        podStatus.ID,
-        Name:      podStatus.Name,
-        Namespace: podStatus.Namespace,
-    }
-    for _, containerStatus := range podStatus.ContainerStatuses {
-        if containerStatus.State != ContainerStateRunning {
-            continue
-        }
-        container := &Container{
-            ID:      containerStatus.ID,
-            Name:    containerStatus.Name,
-            Image:   containerStatus.Image,
-            ImageID: containerStatus.ImageID,
-            Hash:    containerStatus.Hash,
-            State:   containerStatus.State,
-        }
-        runningPod.Containers = append(runningPod.Containers, container)
-    }
-    return runningPod
-}
-
 // HashContainer returns the hash of the container. It is used to compare
 // the running container with its desired spec.
 func HashContainer(container *api.Container) uint64 {


@@ -48,6 +48,7 @@ import (
     kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
     "k8s.io/kubernetes/pkg/kubelet/events"
     "k8s.io/kubernetes/pkg/kubelet/images"
+    "k8s.io/kubernetes/pkg/kubelet/kuberuntime"
     "k8s.io/kubernetes/pkg/kubelet/lifecycle"
     "k8s.io/kubernetes/pkg/kubelet/metrics"
     "k8s.io/kubernetes/pkg/kubelet/network"
@@ -2051,7 +2052,7 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubec
         // Killing phase: if we want to start new infra container, or nothing is running kill everything (including infra container)
         // TODO(random-liu): We'll use pod status directly in the future
-        killResult := dm.killPodWithSyncResult(pod, kubecontainer.ConvertPodStatusToRunningPod(podStatus), nil)
+        killResult := dm.killPodWithSyncResult(pod, kuberuntime.ConvertPodStatusToRunningPod(dm.Type(), podStatus), nil)
         result.AddPodSyncResult(killResult)
         if killResult.Error() != nil {
             return


@@ -1675,7 +1675,7 @@ func (kl *Kubelet) killPod(pod *api.Pod, runningPod *kubecontainer.Pod, status *
     if runningPod != nil {
         p = *runningPod
     } else if status != nil {
-        p = kubecontainer.ConvertPodStatusToRunningPod(status)
+        p = kuberuntime.ConvertPodStatusToRunningPod(kl.GetRuntime().Type(), status)
     }
     return kl.containerRuntime.KillPod(pod, p, gracePeriodOverride)
 }


@@ -92,6 +92,39 @@ func sandboxToKubeContainerState(state runtimeApi.PodSandBoxState) kubecontainer
     return kubecontainer.ContainerStateUnknown
 }
 
+// TODO(random-liu): Convert PodStatus to running Pod, should be deprecated soon
+func ConvertPodStatusToRunningPod(runtimeName string, podStatus *kubecontainer.PodStatus) kubecontainer.Pod {
+    runningPod := kubecontainer.Pod{
+        ID:        podStatus.ID,
+        Name:      podStatus.Name,
+        Namespace: podStatus.Namespace,
+    }
+    for _, containerStatus := range podStatus.ContainerStatuses {
+        if containerStatus.State != kubecontainer.ContainerStateRunning {
+            continue
+        }
+        container := &kubecontainer.Container{
+            ID:      containerStatus.ID,
+            Name:    containerStatus.Name,
+            Image:   containerStatus.Image,
+            ImageID: containerStatus.ImageID,
+            Hash:    containerStatus.Hash,
+            State:   containerStatus.State,
+        }
+        runningPod.Containers = append(runningPod.Containers, container)
+    }
+    // Need to place a sandbox in the Pod as well.
+    for _, sandbox := range podStatus.SandboxStatuses {
+        runningPod.Sandboxes = append(runningPod.Sandboxes, &kubecontainer.Container{
+            ID:    kubecontainer.ContainerID{Type: runtimeName, ID: *sandbox.Id},
+            State: sandboxToKubeContainerState(*sandbox.State),
+        })
+    }
+    return runningPod
+}
+
 // toRuntimeProtocol converts api.Protocol to runtimeApi.Protocol.
 func toRuntimeProtocol(protocol api.Protocol) runtimeApi.Protocol {
     switch protocol {
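
A note on the new runtimeName parameter: each sandbox is wrapped in a
kubecontainer.ContainerID with the runtime name as its Type, so sandbox IDs
render in the same "<runtime>://<id>" string form as ordinary container IDs.
An illustrative snippet (values are made up; not part of this commit):

    package main

    import (
        "fmt"

        kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
    )

    func main() {
        // A sandbox entry as ConvertPodStatusToRunningPod would build it for the
        // docker runtime; the ID here is a made-up example value.
        id := kubecontainer.ContainerID{Type: "docker", ID: "4b2dd0c0a1b2"}
        fmt.Println(id.String()) // prints: docker://4b2dd0c0a1b2
    }
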


@@ -483,7 +483,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *api.Pod, _ api.PodStatus, podSt
             glog.V(4).Infof("Stopping PodSandbox for %q, will start new one", format.Pod(pod))
         }
-        killResult := m.killPodWithSyncResult(pod, kubecontainer.ConvertPodStatusToRunningPod(podStatus), nil)
+        killResult := m.killPodWithSyncResult(pod, ConvertPodStatusToRunningPod(m.runtimeName, podStatus), nil)
         result.AddPodSyncResult(killResult)
         if killResult.Error() != nil {
             glog.Errorf("killPodWithSyncResult failed: %v", killResult.Error())


@@ -47,6 +47,7 @@ import (
     kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
     "k8s.io/kubernetes/pkg/kubelet/events"
     "k8s.io/kubernetes/pkg/kubelet/images"
+    "k8s.io/kubernetes/pkg/kubelet/kuberuntime"
     "k8s.io/kubernetes/pkg/kubelet/leaky"
     "k8s.io/kubernetes/pkg/kubelet/lifecycle"
     "k8s.io/kubernetes/pkg/kubelet/network"
@@ -1710,7 +1711,7 @@ func (r *Runtime) SyncPod(pod *api.Pod, podStatus api.PodStatus, internalPodStat
     // TODO: (random-liu) Stop using running pod in SyncPod()
     // TODO: (random-liu) Rename podStatus to apiPodStatus, rename internalPodStatus to podStatus, and use new pod status as much as possible,
     // we may stop using apiPodStatus someday.
-    runningPod := kubecontainer.ConvertPodStatusToRunningPod(internalPodStatus)
+    runningPod := kuberuntime.ConvertPodStatusToRunningPod(r.Type(), internalPodStatus)
     // Add references to all containers.
     unidentifiedContainers := make(map[kubecontainer.ContainerID]*kubecontainer.Container)
     for _, c := range runningPod.Containers {