Merge pull request #2225 from dchen1107/deathrattle

Capture application termination messages/output
bgrant0607 committed 2014-11-10 11:25:14 -08:00
11 changed files with 130 additions and 6 deletions
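In short, this change lets a container leave a final status message for the kubelet: the kubelet bind-mounts a per-container host file at the container's TerminationMessagePath (the fixtures below use /dev/termination-log as the expected default, or an explicit path such as /somepath), and reads the file back when the dead container is inspected. A minimal application-side sketch, assuming only that the container writes plain text to that path before exiting; the message text and exit code here are made up:

package main

import (
	"io/ioutil"
	"os"
)

func main() {
	// Write this process's "last words" to the configured termination
	// message path before exiting; the kubelet picks the content up from
	// the bind-mounted host side once the container has died.
	msg := []byte("failed to load config: permission denied")
	if err := ioutil.WriteFile("/dev/termination-log", msg, 0644); err != nil {
		// Fall back to stderr if the path is not writable.
		os.Stderr.WriteString(err.Error() + "\n")
	}
	os.Exit(1)
}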

View File

@@ -40,6 +40,7 @@ func ExampleManifestAndPod(id string) (api.ContainerManifest, api.BoundPod) {
{
Name: "c" + id,
Image: "foo",
TerminationMessagePath: "/somepath",
},
},
Volumes: []api.Volume{
@@ -62,6 +63,7 @@ func ExampleManifestAndPod(id string) (api.ContainerManifest, api.BoundPod) {
{
Name: "c" + id,
Image: "foo",
TerminationMessagePath: "/somepath",
},
},
Volumes: []api.Volume{
@@ -124,7 +126,7 @@ func TestReadFromFile(t *testing.T) {
Namespace: "default",
},
Spec: api.PodSpec{
Containers: []api.Container{{Image: "test/image"}},
Containers: []api.Container{{Image: "test/image", TerminationMessagePath: "/dev/termination-log"}},
},
})
if !reflect.DeepEqual(expected, update) {
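For orientation, the fixture changes above imply the shape of the field being added to api.Container. The following is a runnable sketch using a local stand-in type, not the real api.Container; only the fields that appear in these fixtures are included:

package main

import "fmt"

// container is a local, illustrative stand-in for the relevant slice of
// api.Container as it can be read off the hunks above.
type container struct {
	Name                   string
	Image                  string
	TerminationMessagePath string
}

func main() {
	// Explicitly configured path, as in ExampleManifestAndPod above.
	explicit := container{Name: "c1", Image: "foo", TerminationMessagePath: "/somepath"}
	// The path the TestReadFromFile fixture expects when nothing is set explicitly.
	defaulted := container{Image: "test/image", TerminationMessagePath: "/dev/termination-log"}
	fmt.Println(explicit.TerminationMessagePath, defaulted.TerminationMessagePath)
}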

View File

@@ -146,14 +146,24 @@ func TestExtractFromHTTP(t *testing.T) {
Name: "1",
Namespace: "default",
},
Spec: api.PodSpec{Containers: []api.Container{{Name: "1", Image: "foo"}}},
Spec: api.PodSpec{
Containers: []api.Container{{
Name: "1",
Image: "foo",
TerminationMessagePath: "/dev/termination-log"}},
},
},
api.BoundPod{
ObjectMeta: api.ObjectMeta{
Name: "bar",
Namespace: "default",
},
Spec: api.PodSpec{Containers: []api.Container{{Name: "1", Image: "foo"}}},
Spec: api.PodSpec{
Containers: []api.Container{{
Name: "1",
Image: "foo",
TerminationMessagePath: "/dev/termination-log"}},
},
}),
},
{

View File

@@ -23,6 +23,7 @@ import (
"fmt"
"hash/adler32"
"io"
"io/ioutil"
"math/rand"
"os"
"os/exec"
@@ -364,8 +365,9 @@ var (
ErrContainerCannotRun = errors.New("Container cannot run")
)
func inspectContainer(client DockerInterface, dockerID, containerName string) (*api.ContainerStatus, error) {
func inspectContainer(client DockerInterface, dockerID, containerName, tPath string) (*api.ContainerStatus, error) {
inspectResult, err := client.InspectContainer(dockerID)
if err != nil {
return nil, err
}
@@ -396,6 +398,17 @@ func inspectContainer(client DockerInterface, dockerID, containerName string) (*
StartedAt: inspectResult.State.StartedAt,
FinishedAt: inspectResult.State.FinishedAt,
}
if tPath != "" {
path, found := inspectResult.Volumes[tPath]
if found {
data, err := ioutil.ReadFile(path)
if err != nil {
glog.Errorf("Error on reading termination-log %s(%v)", path, err)
} else {
containerStatus.State.Termination.Message = string(data)
}
}
}
waiting = false
}
@@ -414,6 +427,11 @@ func inspectContainer(client DockerInterface, dockerID, containerName string) (*
// GetDockerPodInfo returns docker info for all containers in the pod/manifest.
func GetDockerPodInfo(client DockerInterface, manifest api.PodSpec, podFullName, uuid string) (api.PodInfo, error) {
info := api.PodInfo{}
expectedContainers := make(map[string]api.Container)
for _, container := range manifest.Containers {
expectedContainers[container.Name] = container
}
expectedContainers["net"] = api.Container{}
containers, err := client.ListContainers(docker.ListContainersOptions{All: true})
if err != nil {
@@ -428,6 +446,14 @@ func GetDockerPodInfo(client DockerInterface, manifest api.PodSpec, podFullName,
if uuid != "" && dockerUUID != uuid {
continue
}
c, found := expectedContainers[dockerContainerName]
terminationMessagePath := ""
if !found {
// TODO(dchen1107): figure out why we should not just continue here
// continue
} else {
terminationMessagePath = c.TerminationMessagePath
}
// We assume docker returns containers to us in time order
if containerStatus, found := info[dockerContainerName]; found {
containerStatus.RestartCount += 1
@@ -435,7 +461,7 @@ func GetDockerPodInfo(client DockerInterface, manifest api.PodSpec, podFullName,
continue
}
containerStatus, err := inspectContainer(client, value.ID, dockerContainerName)
containerStatus, err := inspectContainer(client, value.ID, dockerContainerName, terminationMessagePath)
if err != nil {
return nil, err
}
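Pulled out of the diff context, the read path added to inspectContainer amounts to a small lookup against Docker's inspect result: bind mounts are reported in Volumes as a containerPath-to-hostPath map, so the host side of the termination log can be read directly. A standalone sketch, assuming the fsouza/go-dockerclient package these docker.* types come from; the function name and the use of *docker.Client instead of the kubelet's DockerInterface wrapper are simplifications:

package termlog

import (
	"io/ioutil"

	docker "github.com/fsouza/go-dockerclient"
)

// readTerminationMessage returns the contents of the file bind-mounted at
// tPath inside the container, or "" if no such mount is reported.
func readTerminationMessage(client *docker.Client, dockerID, tPath string) (string, error) {
	inspectResult, err := client.InspectContainer(dockerID)
	if err != nil {
		return "", err
	}
	// Volumes maps the in-container path to the host path backing it.
	hostPath, found := inspectResult.Volumes[tPath]
	if !found {
		return "", nil
	}
	data, err := ioutil.ReadFile(hostPath)
	if err != nil {
		return "", err
	}
	return string(data), nil
}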

View File

@@ -109,6 +109,11 @@ func (f *FakeDockerClient) StartContainer(id string, hostConfig *docker.HostConf
f.Lock()
defer f.Unlock()
f.called = append(f.called, "start")
f.Container = &docker.Container{
ID: id,
Config: &docker.Config{Image: "testimage"},
HostConfig: hostConfig,
}
return f.Err
}

View File

@@ -20,6 +20,7 @@ import (
"fmt"
"io"
"net/http"
"os"
"path"
"sort"
"strconv"
@@ -295,7 +296,6 @@ func makeBinds(pod *api.BoundPod, container *api.Container, podVolumes volumeMap
}
return binds
}
func makePortsAndBindings(container *api.Container) (map[docker.Port]struct{}, map[docker.Port][]docker.PortBinding) {
exposedPorts := map[docker.Port]struct{}{}
portBindings := map[docker.Port][]docker.PortBinding{}
@@ -463,6 +463,21 @@ func (kl *Kubelet) runContainer(pod *api.BoundPod, container *api.Container, pod
if err != nil {
return "", err
}
if len(container.TerminationMessagePath) != 0 {
p := path.Join(kl.rootDirectory, pod.Name, container.Name)
if err := os.MkdirAll(p, 0750); err != nil {
glog.Errorf("Error on creating %s(%v)", p, err)
} else {
containerLogPath := path.Join(p, dockerContainer.ID)
fs, err := os.Create(containerLogPath)
if err != nil {
glog.Errorf("Error on creating termination-log file: %s(%v)", containerLogPath, err)
}
defer fs.Close()
b := fmt.Sprintf("%s:%s", containerLogPath, container.TerminationMessagePath)
binds = append(binds, b)
}
}
privileged := false
if capabilities.Get().AllowPrivileged {
privileged = container.Privileged
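The write path above mirrors that read path: for each container with a TerminationMessagePath, the kubelet creates an empty log file under its root directory and hands Docker a hostPath:containerPath bind. A self-contained sketch of how that bind string comes together, using the same values the test below asserts on; the docker ID suffix is made up:

package main

import (
	"fmt"
	"path"
)

func main() {
	rootDirectory := "/tmp/kubelet" // kl.rootDirectory in the test kubelet
	podName := "foo"
	containerName := "bar"
	dockerID := "k8s_bar.deadbeef" // stands in for dockerContainer.ID
	terminationMessagePath := "/dev/somepath"

	// Host-side directory and per-container log file, as in runContainer above.
	hostDir := path.Join(rootDirectory, podName, containerName)
	containerLogPath := path.Join(hostDir, dockerID)

	// The bind handed to Docker: host file on the left, in-container path on the right.
	bind := fmt.Sprintf("%s:%s", containerLogPath, terminationMessagePath)
	fmt.Println(bind) // /tmp/kubelet/foo/bar/k8s_bar.deadbeef:/dev/somepath
}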

View File

@@ -186,6 +186,44 @@ func TestSyncPodsDoesNothing(t *testing.T) {
verifyCalls(t, fakeDocker, []string{"list", "list", "inspect_container", "inspect_container"})
}
func TestSyncPodsWithTerminationLog(t *testing.T) {
kubelet, _, fakeDocker := newTestKubelet(t)
container := api.Container{
Name: "bar",
TerminationMessagePath: "/dev/somepath",
}
fakeDocker.ContainerList = []docker.APIContainers{}
err := kubelet.SyncPods([]api.BoundPod{
{
ObjectMeta: api.ObjectMeta{
Name: "foo",
Namespace: "new",
Annotations: map[string]string{ConfigSourceAnnotationKey: "test"},
},
Spec: api.PodSpec{
Containers: []api.Container{
container,
},
},
},
})
if err != nil {
t.Errorf("unexpected error: %v", err)
}
kubelet.drainWorkers()
verifyCalls(t, fakeDocker, []string{
"list", "create", "start", "list", "inspect_container", "list", "create", "start"})
fakeDocker.Lock()
parts := strings.Split(fakeDocker.Container.HostConfig.Binds[0], ":")
if fakeDocker.Container.HostConfig == nil ||
!matchString(t, "/tmp/kubelet/foo/bar/k8s_bar\\.[a-f0-9]", parts[0]) ||
parts[1] != "/dev/somepath" {
t.Errorf("Unexpected containers created %v", fakeDocker.Container)
}
fakeDocker.Unlock()
}
// drainWorkers waits until all workers are done. It should only be used for testing.
func (kl *Kubelet) drainWorkers() {
for {