Initial support for propagating termination reasons and image failures
to the apiserver. Deprecated docker.Container from the API completely.

Conflicts:
	pkg/api/types.go
	pkg/kubelet/kubelet.go
Dawn Chen 2014-10-02 23:39:02 -07:00
parent ab6065944c
commit 9861eb7c8e
8 changed files with 142 additions and 58 deletions


@@ -18,9 +18,9 @@ package api
import (
"strings"
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/fsouza/go-dockerclient"
)
// Common string formats
@@ -302,12 +302,15 @@ type ContainerStateWaiting struct {
}
type ContainerStateRunning struct {
StartedAt time.Time `json:"startedAt,omitempty" yaml:"startedAt,omitempty"`
}
type ContainerStateTerminated struct {
ExitCode int `json:"exitCode,omitempty" yaml:"exitCode,omitempty"`
Signal int `json:"signal,omitempty" yaml:"signal,omitempty"`
Reason string `json:"reason,omitempty" yaml:"reason,omitempty"`
ExitCode int `json:"exitCode" yaml:"exitCode"`
Signal int `json:"signal,omitempty" yaml:"signal,omitempty"`
Reason string `json:"reason,omitempty" yaml:"reason,omitempty"`
StartedAt time.Time `json:"startedAt,omitempty" yaml:"startedAt,omitempty"`
FinishedAt time.Time `json:"finishedAt,omitempty" yaml:"finishedAt,omitempty"`
}
type ContainerState struct {
@@ -323,12 +326,11 @@ type ContainerStatus struct {
// defined for container?
State ContainerState `json:"state,omitempty" yaml:"state,omitempty"`
RestartCount int `json:"restartCount" yaml:"restartCount"`
// TODO(dchen1107): Introduce our own NetworkSettings struct here?
// TODO(dchen1107): Deprecated this soon once we pull entire PodStatus from node,
// not just PodInfo. Now we need this to remove docker.Container from API
PodIP string `json:"podIP,omitempty" yaml:"podIP,omitempty"`
// TODO(dchen1107): Once we have done with integration with cadvisor, resource
// usage should be included.
// TODO(dchen1107): In long run, I think we should replace this with our own struct to remove
// the dependency on docker.
DetailInfo docker.Container `json:"detailInfo,omitempty" yaml:"detailInfo,omitempty"`
}
// PodInfo contains one entry for every container with available info.

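With the new ContainerStateTerminated fields, a client can read exit codes, reasons, and timestamps from the API object itself instead of digging through docker.Container. A minimal sketch of how a consumer might render them; the types here are pared-down local copies of the ones above, and the names and reason string are illustrative:

```go
package main

import (
	"fmt"
	"time"
)

// Pared-down local copies of the API types above, for illustration only.
type ContainerStateTerminated struct {
	ExitCode   int
	Signal     int
	Reason     string
	StartedAt  time.Time
	FinishedAt time.Time
}

type ContainerState struct {
	Termination *ContainerStateTerminated
}

// describeTermination reads the fields a client can now get from the
// apiserver without touching docker.Container.
func describeTermination(s ContainerState) string {
	t := s.Termination
	if t == nil {
		return "not terminated"
	}
	return fmt.Sprintf("exit=%d reason=%q ran for %s",
		t.ExitCode, t.Reason, t.FinishedAt.Sub(t.StartedAt))
}

func main() {
	now := time.Now()
	state := ContainerState{Termination: &ContainerStateTerminated{
		ExitCode:   137,
		Reason:     "OOM killed", // hypothetical reason string
		StartedAt:  now.Add(-time.Minute),
		FinishedAt: now,
	}}
	fmt.Println(describeTermination(state))
}
```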

@@ -17,8 +17,9 @@ limitations under the License.
package v1beta1
import (
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/fsouza/go-dockerclient"
)
// Common string formats
@@ -286,12 +287,15 @@ type ContainerStateWaiting struct {
}
type ContainerStateRunning struct {
StartedAt time.Time `json:"startedAt,omitempty" yaml:"startedAt,omitempty"`
}
type ContainerStateTerminated struct {
ExitCode int `json:"exitCode,omitempty" yaml:"exitCode,omitempty"`
Signal int `json:"signal,omitempty" yaml:"signal,omitempty"`
Reason string `json:"reason,omitempty" yaml:"reason,omitempty"`
ExitCode int `json:"exitCode" yaml:"exitCode"`
Signal int `json:"signal,omitempty" yaml:"signal,omitempty"`
Reason string `json:"reason,omitempty" yaml:"reason,omitempty"`
StartedAt time.Time `json:"startedAt,omitempty" yaml:"startedAt,omitempty"`
FinishedAt time.Time `json:"finishedAt,omitempty" yaml:"finishedAt,omitempty"`
}
type ContainerState struct {
@@ -307,12 +311,11 @@ type ContainerStatus struct {
// defined for container?
State ContainerState `json:"state,omitempty" yaml:"state,omitempty"`
RestartCount int `json:"restartCount" yaml:"restartCount"`
// TODO(dchen1107): Introduce our own NetworkSettings struct here?
// TODO(dchen1107): Deprecated this soon once we pull entire PodStatus from node,
// not just PodInfo. Now we need this to remove docker.Container from API
PodIP string `json:"podIP,omitempty" yaml:"podIP,omitempty"`
// TODO(dchen1107): Once we have done with integration with cadvisor, resource
// usage should be included.
// TODO(dchen1107): In long run, I think we should replace this with our own struct to remove
// the dependency on docker.
DetailInfo docker.Container `json:"detailInfo,omitempty" yaml:"detailInfo,omitempty"`
}
// PodInfo contains one entry for every container with available info.


@@ -17,8 +17,9 @@ limitations under the License.
package v1beta2
import (
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/fsouza/go-dockerclient"
)
// Common string formats
@@ -282,12 +283,15 @@ type ContainerStateWaiting struct {
}
type ContainerStateRunning struct {
StartedAt time.Time `json:"startedAt,omitempty" yaml:"startedAt,omitempty"`
}
type ContainerStateTerminated struct {
ExitCode int `json:"exitCode,omitempty" yaml:"exitCode,omitempty"`
Signal int `json:"signal,omitempty" yaml:"signal,omitempty"`
Reason string `json:"reason,omitempty" yaml:"reason,omitempty"`
ExitCode int `json:"exitCode" yaml:"exitCode"`
Signal int `json:"signal,omitempty" yaml:"signal,omitempty"`
Reason string `json:"reason,omitempty" yaml:"reason,omitempty"`
StartedAt time.Time `json:"startedAt,omitempty" yaml:"startedAt,omitempty"`
FinishedAt time.Time `json:"finishedAt,omitempty" yaml:"finishedAt,omitempty"`
}
type ContainerState struct {
@@ -303,16 +307,14 @@ type ContainerStatus struct {
// defined for container?
State ContainerState `json:"state,omitempty" yaml:"state,omitempty"`
RestartCount int `json:"restartCount" yaml:"restartCount"`
// TODO(dchen1107): Introduce our own NetworkSettings struct here?
// TODO(dchen1107): Deprecated this soon once we pull entire PodStatus from node,
// not just PodInfo. Now we need this to remove docker.Container from API
PodIP string `json:"podIP,omitempty" yaml:"podIP,omitempty"`
// TODO(dchen1107): Once we have done with integration with cadvisor, resource
// usage should be included.
// TODO(dchen1107): In long run, I think we should replace this with our own struct to remove
// the dependency on docker.
DetailInfo docker.Container `json:"detailInfo,omitempty" yaml:"detailInfo,omitempty"`
}
// PodInfo contains one entry for every container with available info.
// TODO(dchen1107): Replace docker.Container below with ContainerStatus defined above.
type PodInfo map[string]ContainerStatus
type RestartPolicyAlways struct{}


@@ -17,8 +17,9 @@ limitations under the License.
package v1beta3
import (
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/fsouza/go-dockerclient"
)
// Common string formats
@@ -312,12 +313,15 @@ type ContainerStateWaiting struct {
}
type ContainerStateRunning struct {
StartedAt time.Time `json:"startedAt,omitempty" yaml:"startedAt,omitempty"`
}
type ContainerStateTerminated struct {
ExitCode int `json:"exitCode,omitempty" yaml:"exitCode,omitempty"`
Signal int `json:"signal,omitempty" yaml:"signal,omitempty"`
Reason string `json:"reason,omitempty" yaml:"reason,omitempty"`
ExitCode int `json:"exitCode" yaml:"exitCode"`
Signal int `json:"signal,omitempty" yaml:"signal,omitempty"`
Reason string `json:"reason,omitempty" yaml:"reason,omitempty"`
StartedAt time.Time `json:"startedAt,omitempty" yaml:"startedAt,omitempty"`
FinishedAt time.Time `json:"finishedAt,omitempty" yaml:"finishedAt,omitempty"`
}
type ContainerState struct {
@@ -336,14 +340,11 @@ type ContainerStatus struct {
// TODO(dchen1107): Introduce our own NetworkSettings struct here?
// TODO(dchen1107): Once we have done with integration with cadvisor, resource
// usage should be included.
// TODO(dchen1107): In long run, I think we should replace this with our own struct to remove
// the dependency on docker.
DetailInfo docker.Container `json:"detailInfo,omitempty" yaml:"detailInfo,omitempty"`
}
// PodInfo contains one entry for every container with available info.
// TODO(dchen1107): Replace docker.Container below with ContainerStatus defined above.
type PodInfo map[string]docker.Container
type PodInfo map[string]ContainerStatus
type RestartPolicyAlways struct{}

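With PodInfo now mapping container names to ContainerStatus rather than docker.Container, callers iterate API-level fields directly. A self-contained sketch under that assumption; the types are simplified stand-ins for the ones defined above:

```go
package main

import "fmt"

// Simplified stand-ins for the API types above.
type ContainerStatus struct {
	RestartCount int
	PodIP        string
}

// PodInfo now maps container names to ContainerStatus, not docker.Container.
type PodInfo map[string]ContainerStatus

func main() {
	info := PodInfo{
		"net":   {PodIP: "10.0.0.3"},
		"mysql": {RestartCount: 2},
	}
	for name, status := range info {
		fmt.Printf("%s: restarts=%d\n", name, status.RestartCount)
	}
	if net, ok := info["net"]; ok {
		fmt.Println("pod IP:", net.PodIP)
	}
}
```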

@@ -44,6 +44,7 @@ type DockerContainerData struct {
type DockerInterface interface {
ListContainers(options docker.ListContainersOptions) ([]docker.APIContainers, error)
InspectContainer(id string) (*docker.Container, error)
InspectImage(name string) (*docker.Image, error)
CreateContainer(docker.CreateContainerOptions) (*docker.Container, error)
StartContainer(id string, hostConfig *docker.HostConfig) error
StopContainer(id string, timeout uint) error
@@ -270,30 +271,64 @@ func GetKubeletDockerContainerLogs(client DockerInterface, containerID, tail str
return
}
func generateContainerStatus(inspectResult *docker.Container) api.ContainerStatus {
var (
// ErrNoContainersInPod is returned when there are no containers for a given pod
ErrNoContainersInPod = errors.New("no containers exist for this pod")
// ErrNoNetworkContainerInPod is returned when there is no network container for a given pod
ErrNoNetworkContainerInPod = errors.New("No network container exist for this pod")
// ErrContainerCannotRun is returned when a container is created, but cannot run properly
ErrContainerCannotRun = errors.New("Container cannot run")
)
func inspectContainer(client DockerInterface, dockerID, containerName string) (*api.ContainerStatus, error) {
inspectResult, err := client.InspectContainer(dockerID)
if err != nil {
return nil, err
}
if inspectResult == nil {
// Why did we not get an error?
return api.ContainerStatus{}
return &api.ContainerStatus{}, nil
}
glog.V(3).Infof("Container: %s [%s] inspect result %+v", containerName, dockerID, *inspectResult)
var containerStatus api.ContainerStatus
waiting := true
if inspectResult.State.Running {
containerStatus.State.Running = &api.ContainerStateRunning{}
} else {
containerStatus.State.Running = &api.ContainerStateRunning{
StartedAt: inspectResult.State.StartedAt,
}
if containerName == "net" && inspectResult.NetworkSettings != nil {
containerStatus.PodIP = inspectResult.NetworkSettings.IPAddress
}
waiting = false
} else if !inspectResult.State.FinishedAt.IsZero() {
// TODO(dchen1107): Integrate with event to provide a better reason
containerStatus.State.Termination = &api.ContainerStateTerminated{
ExitCode: inspectResult.State.ExitCode,
ExitCode: inspectResult.State.ExitCode,
Reason: "",
StartedAt: inspectResult.State.StartedAt,
FinishedAt: inspectResult.State.FinishedAt,
}
waiting = false
}
if waiting {
// TODO(dchen1107): Separate issue docker/docker#8294 was filed
// TODO(dchen1107): Need to figure out why we are still waiting
// Check any issue to run container
containerStatus.State.Waiting = &api.ContainerStateWaiting{
Reason: ErrContainerCannotRun.Error(),
}
}
containerStatus.DetailInfo = *inspectResult
return containerStatus
return &containerStatus, nil
}
// ErrNoContainersInPod is returned when there are no containers for a given pod
var ErrNoContainersInPod = errors.New("no containers exist for this pod")
// GetDockerPodInfo returns docker info for all containers in the pod/manifest.
func GetDockerPodInfo(client DockerInterface, podFullName, uuid string) (api.PodInfo, error) {
func GetDockerPodInfo(client DockerInterface, manifest api.ContainerManifest, podFullName, uuid string) (api.PodInfo, error) {
info := api.PodInfo{}
containers, err := client.ListContainers(docker.ListContainersOptions{All: true})
@@ -316,16 +351,49 @@ func GetDockerPodInfo(client DockerInterface, podFullName, uuid string) (api.Pod
continue
}
inspectResult, err := client.InspectContainer(value.ID)
containerStatus, err := inspectContainer(client, value.ID, dockerContainerName)
if err != nil {
return nil, err
}
info[dockerContainerName] = generateContainerStatus(inspectResult)
info[dockerContainerName] = *containerStatus
}
if len(info) == 0 {
return nil, ErrNoContainersInPod
}
// First make sure we are not missing network container
if _, found := info["net"]; !found {
return nil, ErrNoNetworkContainerInPod
}
if len(info) < (len(manifest.Containers) + 1) {
var containerStatus api.ContainerStatus
// Not all containers expected are created, verify if there are
// image related issues
for _, container := range manifest.Containers {
if _, found := info[container.Name]; found {
continue
}
image := container.Image
containerStatus.State.Waiting = &api.ContainerStateWaiting{}
// Check image is ready on the node or not
// TODO(dchen1107): docker/docker/issues/8365 to figure out if the image exists
_, err := client.InspectImage(image)
if err != nil {
if err == docker.ErrNoSuchImage {
containerStatus.State.Waiting.Reason = fmt.Sprintf("Image: %s is not ready on the node", image)
} else {
containerStatus.State.Waiting.Reason = err.Error()
}
} else {
containerStatus.State.Waiting.Reason = fmt.Sprintf("Image: %s is ready, container is creating", image)
}
info[container.Name] = containerStatus
}
}
return info, nil
}

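The inspect logic above makes a three-way decision (running; terminated once FinishedAt is set; otherwise waiting), and GetDockerPodInfo falls back to an image inspection to guess why a manifest container never appeared. A rough, self-contained sketch of both decisions; dockerState, errNoSuchImage, and the function names are stand-ins, not the go-dockerclient API:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// dockerState is a pared-down stand-in for the docker inspect result.
type dockerState struct {
	Running    bool
	ExitCode   int
	StartedAt  time.Time
	FinishedAt time.Time
}

// errNoSuchImage stands in for docker.ErrNoSuchImage.
var errNoSuchImage = errors.New("no such image")

// classify mirrors the three-way decision in inspectContainer:
// running, terminated (FinishedAt set), or still waiting.
func classify(s dockerState) string {
	switch {
	case s.Running:
		return fmt.Sprintf("running since %s", s.StartedAt.Format(time.Kitchen))
	case !s.FinishedAt.IsZero():
		return fmt.Sprintf("terminated, exit=%d", s.ExitCode)
	default:
		return "waiting: container cannot run"
	}
}

// waitingReason mirrors the image check in GetDockerPodInfo: when a
// manifest container has no docker counterpart, inspect its image to
// decide whether a missing pull is the likely blocker.
func waitingReason(image string, inspectImage func(string) error) string {
	err := inspectImage(image)
	if err != nil {
		if err == errNoSuchImage {
			return fmt.Sprintf("Image: %s is not ready on the node", image)
		}
		return err.Error()
	}
	return fmt.Sprintf("Image: %s is ready, container is creating", image)
}

func main() {
	fmt.Println(classify(dockerState{Running: true, StartedAt: time.Now()}))
	fmt.Println(classify(dockerState{ExitCode: 1, FinishedAt: time.Now()}))
	fmt.Println(waitingReason("nginx", func(string) error { return errNoSuchImage }))
}
```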

@@ -77,8 +77,8 @@ func (h *httpActionHandler) Run(podFullName, uuid string, container *api.Contain
return err
}
netInfo, found := info[networkContainerName]
if found && netInfo.DetailInfo.NetworkSettings != nil {
host = netInfo.DetailInfo.NetworkSettings.IPAddress
if found {
host = netInfo.PodIP
} else {
return fmt.Errorf("failed to find networking container: %v", info)
}

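The HTTP action handler now reads the pod IP from the net container's status instead of docker's NetworkSettings. A tiny sketch of the new lookup; ContainerStatus and hostForPod are illustrative stand-ins:

```go
package main

import "fmt"

// ContainerStatus is a minimal stand-in; only PodIP matters here.
type ContainerStatus struct{ PodIP string }

// hostForPod mirrors the new lookup: take the pod IP straight from the
// "net" container's status rather than from docker's NetworkSettings.
func hostForPod(info map[string]ContainerStatus) (string, error) {
	netInfo, found := info["net"]
	if !found {
		return "", fmt.Errorf("failed to find networking container: %v", info)
	}
	return netInfo.PodIP, nil
}

func main() {
	host, err := hostForPod(map[string]ContainerStatus{"net": {PodIP: "10.0.0.7"}})
	fmt.Println(host, err)
}
```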

@@ -113,6 +113,7 @@ type Kubelet struct {
networkContainerImage string
podWorkers podWorkers
resyncInterval time.Duration
pods []Pod
// Optional, no events will be sent without it
etcdClient tools.EtcdClient
@@ -482,8 +483,8 @@ func (kl *Kubelet) syncPod(pod *Pod, dockerContainers dockertools.DockerContaine
podFullName, uuid)
}
netInfo, found := info[networkContainerName]
if found && netInfo.DetailInfo.NetworkSettings != nil {
podState.PodIP = netInfo.DetailInfo.NetworkSettings.IPAddress
if found {
podState.PodIP = netInfo.PodIP
}
for _, container := range pod.Manifest.Containers {
@@ -699,15 +700,14 @@ func filterHostPortConflicts(pods []Pod) []Pod {
// no changes are seen to the configuration, will synchronize the last known desired
// state every sync_frequency seconds. Never returns.
func (kl *Kubelet) syncLoop(updates <-chan PodUpdate, handler SyncHandler) {
var pods []Pod
for {
select {
case u := <-updates:
switch u.Op {
case SET:
glog.V(3).Infof("Containers changed [%s]", kl.hostname)
pods = u.Pods
pods = filterHostPortConflicts(pods)
kl.pods = u.Pods
kl.pods = filterHostPortConflicts(kl.pods)
case UPDATE:
//TODO: implement updates of containers
@@ -718,12 +718,12 @@ func (kl *Kubelet) syncLoop(updates <-chan PodUpdate, handler SyncHandler) {
panic("syncLoop does not support incremental changes")
}
case <-time.After(kl.resyncInterval):
if pods == nil {
if kl.pods == nil {
continue
}
}
err := handler.SyncPods(pods)
err := handler.SyncPods(kl.pods)
if err != nil {
glog.Errorf("Couldn't sync containers : %v", err)
}
@@ -769,7 +769,15 @@ func (kl *Kubelet) GetKubeletContainerLogs(podFullName, containerName, tail stri
// GetPodInfo returns information from Docker about the containers in a pod
func (kl *Kubelet) GetPodInfo(podFullName, uuid string) (api.PodInfo, error) {
return dockertools.GetDockerPodInfo(kl.dockerClient, podFullName, uuid)
var manifest api.ContainerManifest
for _, pod := range kl.pods {
fullName := fmt.Sprintf("%s.%s", pod.Name, pod.Namespace)
if fullName == podFullName {
manifest = pod.Manifest
break
}
}
return dockertools.GetDockerPodInfo(kl.dockerClient, manifest, podFullName, uuid)
}
// GetContainerInfo returns stats (from Cadvisor) for a container.

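GetPodInfo now recovers the manifest by matching the pods cached from syncLoop on their "name.namespace" full name. A simplified sketch of that lookup; Pod, ContainerManifest, and manifestFor are illustrative stand-ins for the kubelet's types:

```go
package main

import "fmt"

// Illustrative stand-ins for the kubelet's manifest and pod types.
type ContainerManifest struct{ ID string }

type Pod struct {
	Name, Namespace string
	Manifest        ContainerManifest
}

// manifestFor matches cached pods on "name.namespace", as the new
// GetPodInfo does, returning the zero manifest when nothing matches.
func manifestFor(pods []Pod, podFullName string) ContainerManifest {
	for _, pod := range pods {
		if fmt.Sprintf("%s.%s", pod.Name, pod.Namespace) == podFullName {
			return pod.Manifest
		}
	}
	return ContainerManifest{}
}

func main() {
	pods := []Pod{{Name: "web", Namespace: "default", Manifest: ContainerManifest{ID: "web-1"}}}
	fmt.Printf("%+v\n", manifestFor(pods, "web.default"))
}
```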

@@ -226,8 +226,8 @@ func (rs *REST) fillPodInfo(pod *api.Pod) {
pod.CurrentState.Info = info
netContainerInfo, ok := info["net"]
if ok {
if netContainerInfo.DetailInfo.NetworkSettings != nil {
pod.CurrentState.PodIP = netContainerInfo.DetailInfo.NetworkSettings.IPAddress
if netContainerInfo.PodIP != "" {
pod.CurrentState.PodIP = netContainerInfo.PodIP
} else {
glog.Warningf("No network settings: %#v", netContainerInfo)
}