Add support to capture the application termination message and propagate
it to the upper layer through ContainerStatus.
This commit is contained in:
Dawn Chen 2014-11-06 22:41:16 -08:00
parent 8ffbced280
commit 57454cce81
4 changed files with 87 additions and 3 deletions

View File

@ -23,6 +23,7 @@ import (
"fmt"
"hash/adler32"
"io"
"io/ioutil"
"math/rand"
"os"
"os/exec"
@ -371,8 +372,9 @@ var (
ErrContainerCannotRun = errors.New("Container cannot run")
)
func inspectContainer(client DockerInterface, dockerID, containerName string) (*api.ContainerStatus, error) {
func inspectContainer(client DockerInterface, dockerID, containerName, tPath string) (*api.ContainerStatus, error) {
inspectResult, err := client.InspectContainer(dockerID)
if err != nil {
return nil, err
}
@ -403,6 +405,17 @@ func inspectContainer(client DockerInterface, dockerID, containerName string) (*
StartedAt: inspectResult.State.StartedAt,
FinishedAt: inspectResult.State.FinishedAt,
}
if tPath != "" {
path, found := inspectResult.Volumes[tPath]
if found {
data, err := ioutil.ReadFile(path)
if err != nil {
glog.Errorf("Error on reading termination-log %s(%v)", path, err)
} else {
containerStatus.State.Termination.Message = string(data)
}
}
}
waiting = false
}
@ -421,6 +434,11 @@ func inspectContainer(client DockerInterface, dockerID, containerName string) (*
// GetDockerPodInfo returns docker info for all containers in the pod/manifest.
func GetDockerPodInfo(client DockerInterface, manifest api.PodSpec, podFullName, uuid string) (api.PodInfo, error) {
info := api.PodInfo{}
expectedContainers := make(map[string]api.Container)
for _, container := range manifest.Containers {
expectedContainers[container.Name] = container
}
expectedContainers["net"] = api.Container{}
containers, err := client.ListContainers(docker.ListContainersOptions{All: true})
if err != nil {
@ -435,6 +453,14 @@ func GetDockerPodInfo(client DockerInterface, manifest api.PodSpec, podFullName,
if uuid != "" && dockerUUID != uuid {
continue
}
c, found := expectedContainers[dockerContainerName]
terminationMessagePath := ""
if !found {
// TODO(dchen1107): should figure out why not continue here
// continue
} else {
terminationMessagePath = c.TerminationMessagePath
}
// We assume docker return us a list of containers in time order
if containerStatus, found := info[dockerContainerName]; found {
containerStatus.RestartCount += 1
@ -442,7 +468,7 @@ func GetDockerPodInfo(client DockerInterface, manifest api.PodSpec, podFullName,
continue
}
containerStatus, err := inspectContainer(client, value.ID, dockerContainerName)
containerStatus, err := inspectContainer(client, value.ID, dockerContainerName, terminationMessagePath)
if err != nil {
return nil, err
}

View File

@ -109,6 +109,11 @@ func (f *FakeDockerClient) StartContainer(id string, hostConfig *docker.HostConf
f.Lock()
defer f.Unlock()
f.called = append(f.called, "start")
f.Container = &docker.Container{
ID: id,
Config: &docker.Config{Image: "testimage"},
HostConfig: hostConfig,
}
return f.Err
}

View File

@ -20,6 +20,7 @@ import (
"fmt"
"io"
"net/http"
"os"
"path"
"sort"
"strconv"
@ -295,7 +296,6 @@ func makeBinds(pod *api.BoundPod, container *api.Container, podVolumes volumeMap
}
return binds
}
func makePortsAndBindings(container *api.Container) (map[docker.Port]struct{}, map[docker.Port][]docker.PortBinding) {
exposedPorts := map[docker.Port]struct{}{}
portBindings := map[docker.Port][]docker.PortBinding{}
@ -463,6 +463,21 @@ func (kl *Kubelet) runContainer(pod *api.BoundPod, container *api.Container, pod
if err != nil {
return "", err
}
if len(container.TerminationMessagePath) != 0 {
p := path.Join(kl.rootDirectory, pod.Name, container.Name)
if err := os.MkdirAll(p, 0750); err != nil {
glog.Errorf("Error on creating %s(%v)", p, err)
} else {
containerLogPath := path.Join(p, dockerContainer.ID)
fs, err := os.Create(containerLogPath)
if err != nil {
glog.Errorf("Error on creating termination-log file: %s(%v)", containerLogPath, err)
}
defer fs.Close()
b := fmt.Sprintf("%s:%s", containerLogPath, container.TerminationMessagePath)
binds = append(binds, b)
}
}
privileged := false
if capabilities.Get().AllowPrivileged {
privileged = container.Privileged

View File

@ -186,6 +186,44 @@ func TestSyncPodsDoesNothing(t *testing.T) {
verifyCalls(t, fakeDocker, []string{"list", "list", "inspect_container", "inspect_container"})
}
// TestSyncPodsWithTerminationLog verifies that syncing a pod whose container
// declares a TerminationMessagePath makes the kubelet create a termination-log
// file under its root directory and bind-mount it into the container at the
// requested path (checked via the fake docker client's recorded HostConfig).
func TestSyncPodsWithTerminationLog(t *testing.T) {
	kubelet, _, fakeDocker := newTestKubelet(t)
	container := api.Container{
		Name:                   "bar",
		TerminationMessagePath: "/dev/somepath",
	}
	// Start with no running containers so SyncPods creates everything fresh.
	fakeDocker.ContainerList = []docker.APIContainers{}
	err := kubelet.SyncPods([]api.BoundPod{
		{
			ObjectMeta: api.ObjectMeta{
				Name:        "foo",
				Namespace:   "new",
				Annotations: map[string]string{ConfigSourceAnnotationKey: "test"},
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					container,
				},
			},
		},
	})
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	kubelet.drainWorkers()
	verifyCalls(t, fakeDocker, []string{
		"list", "create", "start", "list", "inspect_container", "list", "create", "start"})
	fakeDocker.Lock()
	defer fakeDocker.Unlock()
	// BUG FIX: check HostConfig for nil (and that a bind exists) BEFORE
	// dereferencing it. The original split Binds[0] first, which would panic
	// on a nil HostConfig or empty Binds instead of failing the test cleanly.
	if fakeDocker.Container.HostConfig == nil || len(fakeDocker.Container.HostConfig.Binds) == 0 {
		t.Fatalf("container created without termination-log bind: %v", fakeDocker.Container)
	}
	// Bind format is "<host path>:<container path>"; the host side must live
	// under <rootDir>/<pod>/<container>/ and be named after the docker ID.
	parts := strings.Split(fakeDocker.Container.HostConfig.Binds[0], ":")
	if !matchString(t, "/tmp/kubelet/foo/bar/k8s_bar\\.[a-f0-9]", parts[0]) ||
		parts[1] != "/dev/somepath" {
		t.Errorf("Unexpected containers created %v", fakeDocker.Container)
	}
}
// drainWorkers waits until all workers are done. Should only used for testing.
func (kl *Kubelet) drainWorkers() {
for {