From dbac2a369a38fe5a4659d06b42f898b4fdfe9071 Mon Sep 17 00:00:00 2001
From: Patrick Ohly
Date: Fri, 3 Apr 2020 14:07:56 +0200
Subject: [PATCH 1/2] podlogs: adapt to modified error message

Commit 8a495cb5e44e changed the spelling of the error message that we
want to ignore. In case of version skew we suppress both the old and
new spelling.
---
 test/e2e/storage/podlogs/podlogs.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/test/e2e/storage/podlogs/podlogs.go b/test/e2e/storage/podlogs/podlogs.go
index b8e992d8fe6..f4609757d9c 100644
--- a/test/e2e/storage/podlogs/podlogs.go
+++ b/test/e2e/storage/podlogs/podlogs.go
@@ -170,6 +170,7 @@ func CopyAllLogs(ctx context.Context, cs clientset.Interface, ns string, to LogO
 							// Same for attempts to read logs from a container that
 							// isn't ready (yet?!).
 							if !strings.HasPrefix(line, "rpc error: code = Unknown desc = Error: No such container:") &&
+								!strings.HasPrefix(line, "unable to retrieve container logs for ") &&
 								!strings.HasPrefix(line, "Unable to retrieve container logs for ") {
 								if first {
 									if to.LogWriter == nil {

From b9c5c55c09e00a78f28fb3de9cd535242e729b00 Mon Sep 17 00:00:00 2001
From: Patrick Ohly
Date: Fri, 3 Apr 2020 14:11:16 +0200
Subject: [PATCH 2/2] podlogs: avoid dumping a terminated container more than once

The original logic was that dumping can stop (for example, due to
losing the connection to the apiserver) and then will start again as
long as the container exists. That it duplicates output on restarts is
better than skipping output that might not have been dumped yet.

But that logic also ended up dumping the output of containers that had
already terminated more than once:
- logging is started, dumps all output and stops because the container
  has terminated
- the next check finds the container again, sees no active logger, and
  repeats

This wasn't a problem for short-lived logging in a custom namespace
(the way it is done for CSI drivers in Kubernetes E2E), but other test
suites (like the one from PMEM-CSI) keep logging running for the
entire test suite duration: there, duplicate output became a problem
when driver redeployment was added as part of the suite's run.

To avoid duplicated output for terminated containers, which containers
have already been handled is now recorded permanently, and dumping is
not restarted for containers that have terminated. This comes with the
risk that if the previous dumping ended before capturing all output,
some output will get lost.

Marking the start and stop of the log is also useful when streaming to
a single writer and thus now gets enabled there as well.
---
 test/e2e/storage/podlogs/podlogs.go | 50 +++++++++++++++++++++--------
 1 file changed, 36 insertions(+), 14 deletions(-)

diff --git a/test/e2e/storage/podlogs/podlogs.go b/test/e2e/storage/podlogs/podlogs.go
index f4609757d9c..1c69231a547 100644
--- a/test/e2e/storage/podlogs/podlogs.go
+++ b/test/e2e/storage/podlogs/podlogs.go
@@ -76,7 +76,11 @@ func CopyAllLogs(ctx context.Context, cs clientset.Interface, ns string, to LogO
 	go func() {
 		var m sync.Mutex
-		logging := map[string]bool{}
+		// Key is pod/container name, true if currently logging it.
+		active := map[string]bool{}
+		// Key is pod/container/container-id, true if we have ever started to capture its output.
+		started := map[string]bool{}
+
 		check := func() {
 			m.Lock()
 			defer m.Unlock()

@@ -91,10 +95,17 @@ func CopyAllLogs(ctx context.Context, cs clientset.Interface, ns string, to LogO
 			for _, pod := range pods.Items {
 				for i, c := range pod.Spec.Containers {
+					// sanity check, array should have entry for each container
+					if len(pod.Status.ContainerStatuses) <= i {
+						continue
+					}
 					name := pod.ObjectMeta.Name + "/" + c.Name
-					if logging[name] ||
-						// sanity check, array should have entry for each container
-						len(pod.Status.ContainerStatuses) <= i ||
+					id := name + "/" + pod.Status.ContainerStatuses[i].ContainerID
+					if active[name] ||
+						// If we have worked on a container before and it has now terminated, then
+						// there cannot be any new output and we can ignore it.
+						(pod.Status.ContainerStatuses[i].State.Terminated != nil &&
+							started[id]) ||
 						// Don't attempt to get logs for a container unless it is running or has terminated.
 						// Trying to get a log would just end up with an error that we would have to suppress.
 						(pod.Status.ContainerStatuses[i].State.Running == nil &&
 							pod.Status.ContainerStatuses[i].State.Terminated == nil) {
@@ -117,7 +128,7 @@ func CopyAllLogs(ctx context.Context, cs clientset.Interface, ns string, to LogO
 					}

 					// Determine where we write. If this fails, we intentionally return without clearing
-					// the logging[name] flag, which prevents trying over and over again to
+					// the active[name] flag, which prevents trying over and over again to
 					// create the output file.
 					var out io.Writer
 					var closer io.Closer
@@ -155,14 +166,22 @@ func CopyAllLogs(ctx context.Context, cs clientset.Interface, ns string, to LogO
 						if closer != nil {
 							defer closer.Close()
 						}
+						first := true
 						defer func() {
 							m.Lock()
-							logging[name] = false
+							// If we never printed anything, then also skip the final message.
+							if !first {
+								if prefix != "" {
+									fmt.Fprintf(out, "%s==== end of pod log ====\n", prefix)
+								} else {
+									fmt.Fprintf(out, "==== end of pod log for container %s ====\n", name)
+								}
+							}
+							active[name] = false
 							m.Unlock()
 							readCloser.Close()
 						}()
 						scanner := bufio.NewScanner(readCloser)
-						first := true
 						for scanner.Scan() {
 							line := scanner.Text()
 							// Filter out the expected "end of stream" error message,
@@ -173,19 +192,22 @@ func CopyAllLogs(ctx context.Context, cs clientset.Interface, ns string, to LogO
 								!strings.HasPrefix(line, "unable to retrieve container logs for ") &&
 								!strings.HasPrefix(line, "Unable to retrieve container logs for ") {
 								if first {
-									if to.LogWriter == nil {
-										// Because the same log might be written to multiple times
-										// in different test instances, log an extra line to separate them.
-										// Also provides some useful extra information.
-										fmt.Fprintf(out, "==== start of log for container %s ====\n", name)
+									// Because the same log might be written to multiple times
+									// in different test instances, log an extra line to separate them.
+									// Also provides some useful extra information.
+									if prefix == "" {
+										fmt.Fprintf(out, "==== start of pod log for container %s ====\n", name)
+									} else {
+										fmt.Fprintf(out, "%s==== start of pod log ====\n", prefix)
 									}
 									first = false
 								}
-								fmt.Fprintf(out, "%s%s\n", prefix, scanner.Text())
+								fmt.Fprintf(out, "%s%s\n", prefix, line)
 							}
 						}
 					}()
-					logging[name] = true
+					active[name] = true
+					started[id] = true
 				}
 			}
 		}