diff --git a/cluster/saltbase/salt/e2e-image-puller/e2e-image-puller.manifest b/cluster/saltbase/salt/e2e-image-puller/e2e-image-puller.manifest
index 3cc7db4081d..e17aefdd49b 100644
--- a/cluster/saltbase/salt/e2e-image-puller/e2e-image-puller.manifest
+++ b/cluster/saltbase/salt/e2e-image-puller/e2e-image-puller.manifest
@@ -35,6 +35,18 @@ spec:
       name: socket
     - mountPath: /usr/bin/docker
       name: docker
+  # Add a container that runs a health-check
+  - name: nethealth-check
+    resources:
+      requests:
+        cpu: 100m
+      limits:
+        cpu: 100m
+    image: gcr.io/google_containers/kube-nethealth-amd64:1.0
+    command:
+    - /bin/sh
+    - -c
+    - "/usr/bin/nethealth || true"
   volumes:
   - hostPath:
       path: /var/run/docker.sock
@@ -44,3 +56,6 @@ spec:
       name: docker
   # This pod is really fire-and-forget.
   restartPolicy: Never
+  # This pod needs hostNetworking for true VM perf measurement as well as avoiding cbr0 issues
+  hostNetwork: true
+
diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go
index 32318267c8e..82e2009f387 100644
--- a/test/e2e/e2e.go
+++ b/test/e2e/e2e.go
@@ -140,6 +140,12 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
 		framework.Logf("WARNING: Image pulling pods failed to enter success in %v: %v", imagePrePullingTimeout, err)
 	}
 
+	// Dump the output of the nethealth containers only once per run
+	if framework.TestContext.DumpLogsOnFailure {
+		framework.Logf("Dumping network health container logs from all nodes")
+		framework.LogContainersInPodsWithLabels(c, api.NamespaceSystem, framework.ImagePullerLabels, "nethealth")
+	}
+
 	return nil
 
 }, func(data []byte) {
diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go
index bf4ea48c2c9..c7c1f0a4584 100644
--- a/test/e2e/framework/framework.go
+++ b/test/e2e/framework/framework.go
@@ -263,7 +263,7 @@ func (f *Framework) AfterEach() {
 	if CurrentGinkgoTestDescription().Failed && TestContext.DumpLogsOnFailure {
 		DumpAllNamespaceInfo(f.Client, f.Namespace.Name)
 		By(fmt.Sprintf("Dumping a list of prepulled images on each node"))
-		LogPodsWithLabels(f.Client, api.NamespaceSystem, ImagePullerLabels)
+		LogContainersInPodsWithLabels(f.Client, api.NamespaceSystem, ImagePullerLabels, "image-puller")
 		if f.federated {
 			// Print logs of federation control plane pods (federation-apiserver and federation-controller-manager)
 			LogPodsWithLabels(f.Client, "federation", map[string]string{"app": "federated-cluster"})
diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go
index 206311a4c31..fd6111e62f1 100644
--- a/test/e2e/framework/util.go
+++ b/test/e2e/framework/util.go
@@ -663,17 +663,20 @@ func RunKubernetesServiceTestContainer(c *client.Client, repoRoot string, ns str
 	}
 }
 
-func kubectlLogPod(c *client.Client, pod api.Pod) {
+func kubectlLogPod(c *client.Client, pod api.Pod, containerNameSubstr string) {
 	for _, container := range pod.Spec.Containers {
-		logs, err := GetPodLogs(c, pod.Namespace, pod.Name, container.Name)
-		if err != nil {
-			logs, err = getPreviousPodLogs(c, pod.Namespace, pod.Name, container.Name)
+		if strings.Contains(container.Name, containerNameSubstr) {
+			// Contains() matches all strings if substr is empty
+			logs, err := GetPodLogs(c, pod.Namespace, pod.Name, container.Name)
 			if err != nil {
-				Logf("Failed to get logs of pod %v, container %v, err: %v", pod.Name, container.Name, err)
+				logs, err = getPreviousPodLogs(c, pod.Namespace, pod.Name, container.Name)
+				if err != nil {
+					Logf("Failed to get logs of pod %v, container %v, err: %v", pod.Name, container.Name, err)
+				}
 			}
+			By(fmt.Sprintf("Logs of %v/%v:%v on node %v", pod.Namespace, pod.Name, container.Name, pod.Spec.NodeName))
+			Logf("%s : STARTLOG\n%s\nENDLOG for container %v:%v:%v", containerNameSubstr, logs, pod.Namespace, pod.Name, container.Name)
 		}
-		By(fmt.Sprintf("Logs of %v/%v:%v on node %v", pod.Namespace, pod.Name, container.Name, pod.Spec.NodeName))
-		Logf(logs)
 	}
 }
 
@@ -686,7 +689,7 @@ func LogFailedContainers(c *client.Client, ns string) {
 	Logf("Running kubectl logs on non-ready containers in %v", ns)
 	for _, pod := range podList.Items {
 		if res, err := PodRunningReady(&pod); !res || err != nil {
-			kubectlLogPod(c, pod)
+			kubectlLogPod(c, pod, "")
 		}
 	}
 }
@@ -699,7 +702,18 @@ func LogPodsWithLabels(c *client.Client, ns string, match map[string]string) {
 	}
 	Logf("Running kubectl logs on pods with labels %v in %v", match, ns)
 	for _, pod := range podList.Items {
-		kubectlLogPod(c, pod)
+		kubectlLogPod(c, pod, "")
+	}
+}
+
+func LogContainersInPodsWithLabels(c *client.Client, ns string, match map[string]string, containerSubstr string) {
+	podList, err := c.Pods(ns).List(api.ListOptions{LabelSelector: labels.SelectorFromSet(match)})
+	if err != nil {
+		Logf("Error getting pods in namespace %q: %v", ns, err)
+		return
+	}
+	for _, pod := range podList.Items {
+		kubectlLogPod(c, pod, containerSubstr)
 	}
 }
 
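Note, not part of the patch above: the new containerNameSubstr argument to kubectlLogPod relies on Go's strings.Contains returning true for an empty substring, which is why the existing callers that now pass "" (LogFailedContainers and LogPodsWithLabels) still log every container in the pod. A minimal standalone sketch of that matching rule, using the container names from the manifest above:

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		// An empty substring matches every container name, so
		// kubectlLogPod(c, pod, "") keeps the old log-all-containers behavior.
		fmt.Println(strings.Contains("image-puller", "")) // true

		// A non-empty substring narrows logging to matching containers only,
		// e.g. "nethealth" selects the nethealth-check container.
		fmt.Println(strings.Contains("nethealth-check", "nethealth")) // true
		fmt.Println(strings.Contains("image-puller", "nethealth"))    // false
	}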