Add nethealth prepull container output to e2e run logs

Girish Kalele 2016-06-07 18:02:55 -07:00
parent 3ba5816e46
commit ee7ca66dba
4 changed files with 45 additions and 10 deletions


@@ -35,6 +35,18 @@ spec:
       name: socket
     - mountPath: /usr/bin/docker
       name: docker
+  # Add a container that runs a health-check
+  - name: nethealth-check
+    resources:
+      requests:
+        cpu: 100m
+      limits:
+        cpu: 100m
+    image: gcr.io/google_containers/kube-nethealth-amd64:1.0
+    command:
+    - /bin/sh
+    - -c
+    - "/usr/bin/nethealth || true"
   volumes:
   - hostPath:
       path: /var/run/docker.sock
@@ -44,3 +56,6 @@ spec:
     name: docker
   # This pod is really fire-and-forget.
   restartPolicy: Never
+  # This pod needs hostNetworking for true VM perf measurement as well as avoiding cbr0 issues
+  hostNetwork: true
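
Note on the manifest above: "/usr/bin/nethealth || true" makes the shell exit 0 even when the health check fails, so this fire-and-forget pod (restartPolicy: Never) always completes while the check's output stays in the container log for the dump added below. A minimal Go sketch of the same exit-code masking; everything in it is illustrative, nothing is from the commit:

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// "false" exits 1, but "|| true" forces the shell's overall exit
	// status to 0, the same trick as "/usr/bin/nethealth || true".
	err := exec.Command("/bin/sh", "-c", "false || true").Run()
	fmt.Println(err) // <nil>: the nonzero exit is swallowed
}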


@@ -140,6 +140,12 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
 		framework.Logf("WARNING: Image pulling pods failed to enter success in %v: %v", imagePrePullingTimeout, err)
 	}
+
+	// Dump the output of the nethealth containers only once per run
+	if framework.TestContext.DumpLogsOnFailure {
+		framework.Logf("Dumping network health container logs from all nodes")
+		framework.LogContainersInPodsWithLabels(c, api.NamespaceSystem, framework.ImagePullerLabels, "nethealth")
+	}
 	return nil
 }, func(data []byte) {
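
The "only once per run" comment works because Ginkgo's SynchronizedBeforeSuite runs its first function on parallel node 1 only, while the second function runs on every node with the bytes the first returned. A minimal sketch of that pattern, assuming Ginkgo v1 as used here; the body comments are placeholders:

package e2e

import "github.com/onsi/ginkgo"

var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
	// Runs exactly once per suite, on parallel node 1: pre-pull
	// images, then dump the nethealth container logs a single time.
	return nil
}, func(data []byte) {
	// Runs on every parallel node with the data returned above.
})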


@@ -263,7 +263,7 @@ func (f *Framework) AfterEach() {
 	if CurrentGinkgoTestDescription().Failed && TestContext.DumpLogsOnFailure {
 		DumpAllNamespaceInfo(f.Client, f.Namespace.Name)
 		By(fmt.Sprintf("Dumping a list of prepulled images on each node"))
-		LogPodsWithLabels(f.Client, api.NamespaceSystem, ImagePullerLabels)
+		LogContainersInPodsWithLabels(f.Client, api.NamespaceSystem, ImagePullerLabels, "image-puller")
 		if f.federated {
 			// Print logs of federation control plane pods (federation-apiserver and federation-controller-manager)
 			LogPodsWithLabels(f.Client, "federation", map[string]string{"app": "federated-cluster"})


@@ -663,8 +663,10 @@ func RunKubernetesServiceTestContainer(c *client.Client, repoRoot string, ns string) {
 	}
 }
 
-func kubectlLogPod(c *client.Client, pod api.Pod) {
+func kubectlLogPod(c *client.Client, pod api.Pod, containerNameSubstr string) {
 	for _, container := range pod.Spec.Containers {
+		if strings.Contains(container.Name, containerNameSubstr) {
+			// Contains() matches all strings if substr is empty
 			logs, err := GetPodLogs(c, pod.Namespace, pod.Name, container.Name)
 			if err != nil {
 				logs, err = getPreviousPodLogs(c, pod.Namespace, pod.Name, container.Name)
@@ -673,7 +675,8 @@ func kubectlLogPod(c *client.Client, pod api.Pod) {
 				}
 			}
 			By(fmt.Sprintf("Logs of %v/%v:%v on node %v", pod.Namespace, pod.Name, container.Name, pod.Spec.NodeName))
-			Logf(logs)
+			Logf("%s : STARTLOG\n%s\nENDLOG for container %v:%v:%v", containerNameSubstr, logs, pod.Namespace, pod.Name, container.Name)
+		}
 	}
 }
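
The "matches all strings" comment leans on a standard-library guarantee: strings.Contains(s, "") is true for every s, so callers that pass "" keep the old log-every-container behavior. A self-contained check:

package main

import (
	"fmt"
	"strings"
)

func main() {
	fmt.Println(strings.Contains("nethealth-check", ""))          // true: empty substring matches any name
	fmt.Println(strings.Contains("nethealth-check", "nethealth")) // true: container is logged
	fmt.Println(strings.Contains("image-puller", "nethealth"))    // false: container is skipped
}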
@@ -686,7 +689,7 @@ func LogFailedContainers(c *client.Client, ns string) {
 	Logf("Running kubectl logs on non-ready containers in %v", ns)
 	for _, pod := range podList.Items {
 		if res, err := PodRunningReady(&pod); !res || err != nil {
-			kubectlLogPod(c, pod)
+			kubectlLogPod(c, pod, "")
 		}
 	}
 }
@@ -699,7 +702,18 @@ func LogPodsWithLabels(c *client.Client, ns string, match map[string]string) {
 	}
 	Logf("Running kubectl logs on pods with labels %v in %v", match, ns)
 	for _, pod := range podList.Items {
-		kubectlLogPod(c, pod)
+		kubectlLogPod(c, pod, "")
 	}
 }
+
+func LogContainersInPodsWithLabels(c *client.Client, ns string, match map[string]string, containerSubstr string) {
+	podList, err := c.Pods(ns).List(api.ListOptions{LabelSelector: labels.SelectorFromSet(match)})
+	if err != nil {
+		Logf("Error getting pods in namespace %q: %v", ns, err)
+		return
+	}
+	for _, pod := range podList.Items {
+		kubectlLogPod(c, pod, containerSubstr)
+	}
+}
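
For reference, the two call styles this helper enables, repeated from the hunks above: the once-per-run nethealth dump and the failure-path image-puller dump.

// From SynchronizedBeforeSuite: log only containers whose name
// contains "nethealth" in the image-puller pods, once per run.
framework.LogContainersInPodsWithLabels(c, api.NamespaceSystem, framework.ImagePullerLabels, "nethealth")

// From Framework.AfterEach on failure: log only the "image-puller"
// containers, keeping nethealth output out of every failure dump.
LogContainersInPodsWithLabels(f.Client, api.NamespaceSystem, ImagePullerLabels, "image-puller")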