Merge pull request #20108 from yujuhong/get_logs

e2e reboot: print status and logs for not running/ready pods

Commit 1e68e719c3. The change does three things: it moves the getPodLogs helper out of the Downward API volume test into the shared e2e utilities (now with a getPreviousPodLogs variant backed by the "previous" log query parameter), adds printStatusAndLogsForNotReadyPods to the reboot test, and calls it from rebootNode when pods fail to return to Running/Ready after a reboot.
@@ -18,11 +18,9 @@ package e2e
 
 import (
 	"fmt"
-	"strings"
 	"time"
 
 	"k8s.io/kubernetes/pkg/api"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/util"
 
 	. "github.com/onsi/ginkgo"
@@ -32,18 +30,6 @@ import (
 // How long to wait for a log pod to be displayed
 const podLogTimeout = 45 * time.Second
 
-// utility function for gomega Eventually
-func getPodLogs(c *client.Client, namespace, podName, containerName string) (string, error) {
-	logs, err := c.Get().Resource("pods").Namespace(namespace).Name(podName).SubResource("log").Param("container", containerName).Do().Raw()
-	if err != nil {
-		return "", err
-	}
-	if err == nil && strings.Contains(string(logs), "Internal Error") {
-		return "", fmt.Errorf("Internal Error")
-	}
-	return string(logs), err
-}
-
 var _ = Describe("Downward API volume", func() {
 	f := NewFramework("downward-api")
 	It("should provide podname only [Conformance]", func() {
@@ -157,6 +157,41 @@ func testReboot(c *client.Client, rebootCmd string) {
 	}
 }
 
+func printStatusAndLogsForNotReadyPods(c *client.Client, oldPods, newPods []*api.Pod) {
+	printFn := func(id, log string, err error, previous bool) {
+		prefix := "Retrieving log for container"
+		if previous {
+			prefix = "Retrieving log for the last terminated container"
+		}
+		if err != nil {
+			Logf("%s %s, err: %v:\n%s\n", prefix, id, err, log)
+		} else {
+			Logf("%s %s:\n%s\n", prefix, id, log)
+		}
+	}
+	for _, oldPod := range oldPods {
+		for _, p := range newPods {
+			if p.Namespace != oldPod.Namespace || p.Name != oldPod.Name {
+				continue
+			}
+			if ok, _ := podRunningReady(p); !ok {
+				Logf("Status for not ready pod %s/%s: %+v", p.Namespace, p.Name, p.Status)
+				// Print the log of the containers if pod is not running and ready.
+				for _, container := range p.Status.ContainerStatuses {
+					cIdentifer := fmt.Sprintf("%s/%s/%s", p.Namespace, p.Name, container.Name)
+					log, err := getPodLogs(c, p.Namespace, p.Name, container.Name)
+					printFn(cIdentifer, log, err, false)
+					// Get log from the previous container.
+					if container.RestartCount > 0 {
+						printFn(cIdentifer, log, err, true)
+					}
+				}
+			}
+			break
+		}
+	}
+}
+
 // rebootNode takes node name on provider through the following steps using c:
 // - ensures the node is ready
 // - ensures all pods on the node are running and ready
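A note on the hunk above: in the RestartCount branch, printFn is called with the same log and err just fetched from the current container, so the "last terminated container" message reprints the current log. The commit adds getPreviousPodLogs to the shared utilities (last hunk below) but never calls it; the branch was presumably meant to fetch the previous log first, along these lines (an assumed correction, not part of this commit):

	// Assumed fix: fetch the last terminated container's log before printing it.
	if container.RestartCount > 0 {
		log, err := getPreviousPodLogs(c, p.Namespace, p.Name, container.Name)
		printFn(cIdentifer, log, err, true)
	}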
@@ -229,6 +264,8 @@ func rebootNode(c *client.Client, provider, name, rebootCmd string) bool {
 	// Ensure all of the pods that we found on this node before the reboot are
 	// running / healthy.
 	if !checkPodsRunningReady(c, ns, podNames, rebootPodReadyAgainTimeout) {
+		newPods := ps.List()
+		printStatusAndLogsForNotReadyPods(c, pods, newPods)
 		return false
 	}
 
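For context, pods and ps come from earlier in rebootNode: the function snapshots the pods scheduled to the node before triggering the reboot and keeps watching them through a pod store, so ps.List() reflects their state afterwards. A rough sketch of that flow; the store constructor and field selector shown here are assumptions about the surrounding e2e utilities, not part of this diff:

	// Sketch (assumed helpers): track the node's pods across the reboot.
	ps := newPodStore(c, ns, labels.Everything(), fields.OneTermEqualSelector("spec.nodeName", name))
	defer ps.Stop()
	pods := ps.List() // snapshot before the reboot
	podNames := make([]string, 0, len(pods))
	for _, p := range pods {
		podNames = append(podNames, p.Name)
	}
	// ... reboot the node and wait for it to become ready again ...
	if !checkPodsRunningReady(c, ns, podNames, rebootPodReadyAgainTimeout) {
		printStatusAndLogsForNotReadyPods(c, pods, ps.List())
		return false
	}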
@@ -1339,23 +1339,13 @@ func testContainerOutputMatcher(scenarioName string,
 
 	By(fmt.Sprintf("Trying to get logs from node %s pod %s container %s: %v",
 		podStatus.Spec.NodeName, podStatus.Name, containerName, err))
-	var logs []byte
+	var logs string
 	start := time.Now()
 
 	// Sometimes the actual containers take a second to get started, try to get logs for 60s
 	for time.Now().Sub(start) < (60 * time.Second) {
 		err = nil
-		logs, err = c.Get().
-			Resource("pods").
-			Namespace(ns).
-			Name(pod.Name).
-			SubResource("log").
-			Param("container", containerName).
-			Do().
-			Raw()
-		if err == nil && strings.Contains(string(logs), "Internal Error") {
-			err = fmt.Errorf("Fetched log contains \"Internal Error\": %q.", string(logs))
-		}
+		logs, err = getPodLogs(c, ns, pod.Name, containerName)
 		if err != nil {
 			By(fmt.Sprintf("Warning: Failed to get logs from node %q pod %q container %q. %v",
 				podStatus.Spec.NodeName, podStatus.Name, containerName, err))
@@ -1363,12 +1353,12 @@ func testContainerOutputMatcher(scenarioName string,
 			continue
 
 		}
-		By(fmt.Sprintf("Successfully fetched pod logs:%v\n", string(logs)))
+		By(fmt.Sprintf("Successfully fetched pod logs:%v\n", logs))
 		break
 	}
 
 	for _, m := range expectedOutput {
-		Expect(string(logs)).To(matcher(m), "%q in container output", m)
+		Expect(logs).To(matcher(m), "%q in container output", m)
 	}
 }
 
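The var logs []byte → var logs string change in the two hunks above falls out of the refactor: getPodLogs already converts the raw response bytes to a string, so the call sites drop their string(logs) conversions in the By and Expect calls. With a matcher factory such as ContainSubstring, the final check now reads directly (illustrative value, not from the commit):

	// logs is a string now, so it feeds the matcher without conversion.
	Expect(logs).To(ContainSubstring("podname"), "%q in container output", "podname")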
@@ -2716,3 +2706,30 @@ func scaleRCByName(client *client.Client, ns, name string, replicas uint) error
 		client, ns, labels.SelectorFromSet(labels.Set(rc.Spec.Selector)))
 	}
 }
+
+func getPodLogs(c *client.Client, namespace, podName, containerName string) (string, error) {
+	return getPodLogsInternal(c, namespace, podName, containerName, false)
+}
+
+func getPreviousPodLogs(c *client.Client, namespace, podName, containerName string) (string, error) {
+	return getPodLogsInternal(c, namespace, podName, containerName, true)
+}
+
+// utility function for gomega Eventually
+func getPodLogsInternal(c *client.Client, namespace, podName, containerName string, previous bool) (string, error) {
+	logs, err := c.Get().
+		Resource("pods").
+		Namespace(namespace).
+		Name(podName).SubResource("log").
+		Param("container", containerName).
+		Param("previous", strconv.FormatBool(previous)).
+		Do().
+		Raw()
+	if err != nil {
+		return "", err
+	}
+	if err == nil && strings.Contains(string(logs), "Internal Error") {
+		return "", fmt.Errorf("Fetched log contains \"Internal Error\": %q.", string(logs))
+	}
+	return string(logs), err
+}
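The "utility function for gomega Eventually" comment refers to the (string, error) return shape: gomega's Eventually can poll a function returning a value and an error, retrying until the error is nil and the matcher is satisfied. A minimal usage sketch with placeholder pod and container names (podLogTimeout is the constant defined in the first file, visible here because both files share the e2e package):

	// Poll the container log until it contains the expected output.
	Eventually(func() (string, error) {
		return getPodLogs(c, ns, "example-pod", "example-container")
	}, podLogTimeout, time.Second).Should(ContainSubstring("expected output"))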