Merge pull request #20108 from yujuhong/get_logs

e2e reboot: print status and logs for not running/ready pods
This commit is contained in:
Dawn Chen 2016-01-25 17:33:49 -08:00
commit 1e68e719c3
3 changed files with 68 additions and 28 deletions

View File

@ -18,11 +18,9 @@ package e2e
import (
"fmt"
"strings"
"time"
"k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/util"
. "github.com/onsi/ginkgo"
@ -32,18 +30,6 @@ import (
// How long to wait for a log pod to be displayed
const podLogTimeout = 45 * time.Second
// utility function for gomega Eventually
//
// getPodLogs fetches the logs of the named container through the pod "log"
// subresource. The apiserver can answer 200 with a body containing
// "Internal Error"; that is treated as a failure rather than log content.
func getPodLogs(c *client.Client, namespace, podName, containerName string) (string, error) {
	logs, err := c.Get().Resource("pods").Namespace(namespace).Name(podName).SubResource("log").Param("container", containerName).Do().Raw()
	if err != nil {
		return "", err
	}
	// err is nil past the early return above; the old `err == nil &&` guard
	// here was dead, and the success return should be an explicit nil.
	if strings.Contains(string(logs), "Internal Error") {
		return "", fmt.Errorf("Internal Error")
	}
	return string(logs), nil
}
var _ = Describe("Downward API volume", func() {
f := NewFramework("downward-api")
It("should provide podname only [Conformance]", func() {

View File

@ -157,6 +157,41 @@ func testReboot(c *client.Client, rebootCmd string) {
}
}
// printStatusAndLogsForNotReadyPods logs the status and container logs of
// every pod in newPods that matches a pod in oldPods (by namespace/name) but
// is not running and ready. Used by the reboot test to help debug pods that
// fail to come back healthy after a node reboot.
func printStatusAndLogsForNotReadyPods(c *client.Client, oldPods, newPods []*api.Pod) {
	printFn := func(id, log string, err error, previous bool) {
		prefix := "Retrieving log for container"
		if previous {
			prefix = "Retrieving log for the last terminated container"
		}
		if err != nil {
			// BUG FIX: the format string has four verbs, so err must be
			// passed; previously only three arguments were supplied.
			Logf("%s %s, err: %v:\n%s\n", prefix, id, err, log)
		} else {
			Logf("%s %s:\n%s\n", prefix, id, log)
		}
	}
	for _, oldPod := range oldPods {
		for _, p := range newPods {
			// Only compare the new pod against its pre-reboot counterpart.
			if p.Namespace != oldPod.Namespace || p.Name != oldPod.Name {
				continue
			}
			if ok, _ := podRunningReady(p); !ok {
				Logf("Status for not ready pod %s/%s: %+v", p.Namespace, p.Name, p.Status)
				// Print the log of the containers if pod is not running and ready.
				for _, container := range p.Status.ContainerStatuses {
					cIdentifier := fmt.Sprintf("%s/%s/%s", p.Namespace, p.Name, container.Name)
					log, err := getPodLogs(c, p.Namespace, p.Name, container.Name)
					printFn(cIdentifier, log, err, false)
					// If the container restarted, the interesting log is that of
					// the previous (terminated) instance — fetch it explicitly
					// instead of re-printing the current container's log.
					if container.RestartCount > 0 {
						prevLog, prevErr := getPreviousPodLogs(c, p.Namespace, p.Name, container.Name)
						printFn(cIdentifier, prevLog, prevErr, true)
					}
				}
			}
			break
		}
	}
}
// rebootNode takes node name on provider through the following steps using c:
// - ensures the node is ready
// - ensures all pods on the node are running and ready
@ -229,6 +264,8 @@ func rebootNode(c *client.Client, provider, name, rebootCmd string) bool {
// Ensure all of the pods that we found on this node before the reboot are
// running / healthy.
if !checkPodsRunningReady(c, ns, podNames, rebootPodReadyAgainTimeout) {
newPods := ps.List()
printStatusAndLogsForNotReadyPods(c, pods, newPods)
return false
}

View File

@ -1339,23 +1339,13 @@ func testContainerOutputMatcher(scenarioName string,
By(fmt.Sprintf("Trying to get logs from node %s pod %s container %s: %v",
podStatus.Spec.NodeName, podStatus.Name, containerName, err))
var logs []byte
var logs string
start := time.Now()
// Sometimes the actual containers take a second to get started, try to get logs for 60s
for time.Now().Sub(start) < (60 * time.Second) {
err = nil
logs, err = c.Get().
Resource("pods").
Namespace(ns).
Name(pod.Name).
SubResource("log").
Param("container", containerName).
Do().
Raw()
if err == nil && strings.Contains(string(logs), "Internal Error") {
err = fmt.Errorf("Fetched log contains \"Internal Error\": %q.", string(logs))
}
logs, err = getPodLogs(c, ns, pod.Name, containerName)
if err != nil {
By(fmt.Sprintf("Warning: Failed to get logs from node %q pod %q container %q. %v",
podStatus.Spec.NodeName, podStatus.Name, containerName, err))
@ -1363,12 +1353,12 @@ func testContainerOutputMatcher(scenarioName string,
continue
}
By(fmt.Sprintf("Successfully fetched pod logs:%v\n", string(logs)))
By(fmt.Sprintf("Successfully fetched pod logs:%v\n", logs))
break
}
for _, m := range expectedOutput {
Expect(string(logs)).To(matcher(m), "%q in container output", m)
Expect(logs).To(matcher(m), "%q in container output", m)
}
}
@ -2716,3 +2706,30 @@ func scaleRCByName(client *client.Client, ns, name string, replicas uint) error
client, ns, labels.SelectorFromSet(labels.Set(rc.Spec.Selector)))
}
}
// getPodLogs returns the logs of the current instance of the named container
// in the given pod.
func getPodLogs(c *client.Client, namespace, podName, containerName string) (string, error) {
	return getPodLogsInternal(c, namespace, podName, containerName, false)
}
// getPreviousPodLogs returns the logs of the previous (last terminated)
// instance of the named container in the given pod.
func getPreviousPodLogs(c *client.Client, namespace, podName, containerName string) (string, error) {
	return getPodLogsInternal(c, namespace, podName, containerName, true)
}
// utility function for gomega Eventually
//
// getPodLogsInternal fetches container logs through the pod "log"
// subresource. When previous is true it sets the "previous" query parameter,
// requesting the log of the last terminated instance of the container
// instead of the currently running one.
func getPodLogsInternal(c *client.Client, namespace, podName, containerName string, previous bool) (string, error) {
	logs, err := c.Get().
		Resource("pods").
		Namespace(namespace).
		Name(podName).SubResource("log").
		Param("container", containerName).
		Param("previous", strconv.FormatBool(previous)).
		Do().
		Raw()
	if err != nil {
		return "", err
	}
	// err is guaranteed nil here (early return above), so the old
	// `err == nil &&` guard was redundant; the apiserver can still report an
	// internal error in a 200 body, which we surface as an error.
	if strings.Contains(string(logs), "Internal Error") {
		return "", fmt.Errorf("Fetched log contains \"Internal Error\": %q.", string(logs))
	}
	return string(logs), nil
}