podlogs: avoid dumping a terminated container more than once

The original logic was that dumping can stop (for example, due to
losing the connection to the apiserver) and then will start again as
long as the container exists. Duplicating output on restarts was
considered better than skipping output that might not have been
dumped yet.

But that logic also caused the output of a terminated container to be
dumped multiple times:
- logging starts, dumps all output, and stops because the
  container has terminated
- the next check finds the container again, sees no active logger,
  and the cycle repeats

This wasn't a problem for short-lived logging in a custom namespace
(the way it is done for CSI drivers in Kubernetes E2E), but other
test suites (like the one from PMEM-CSI) keep logging running for the
entire test suite duration: there, duplicate output became a problem
once driver redeployment was added as part of the suite's run.

To avoid duplicate output for terminated containers, the set of
containers that have already been handled is now recorded permanently,
and dumping is not restarted for a container that has terminated. This
comes with the risk that some output will be lost if the previous
dumping ended before capturing all of it.

Marking the start and end of a log also turned out to be useful when
streaming to a single writer, so it now gets enabled in that case, too.
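
Below is a minimal, self-contained sketch of the bookkeeping described
above. The containerStatus helper type and its terminated flag are
stand-ins for the pod status returned by the apiserver and are not part
of the actual patch; only the two maps and the skip conditions mirror
the change.

package main

import "fmt"

// containerStatus is a stand-in for the relevant parts of v1.ContainerStatus.
type containerStatus struct {
	pod, container, containerID string
	terminated                  bool
}

// shouldStartDumping implements the bookkeeping from the patch:
// active is keyed by pod/container name and is true while a logger runs;
// started is keyed by pod/container/container-id and stays true forever
// once output capturing has been started for that container instance.
func shouldStartDumping(c containerStatus, active, started map[string]bool) bool {
	name := c.pod + "/" + c.container
	id := name + "/" + c.containerID
	if active[name] {
		// Already being dumped right now.
		return false
	}
	if c.terminated && started[id] {
		// A terminated container cannot produce new output once it has
		// been handled, so dumping is never restarted for it.
		return false
	}
	active[name] = true
	started[id] = true
	return true
}

func main() {
	active := map[string]bool{}
	started := map[string]bool{}
	c := containerStatus{pod: "my-pod", container: "app", containerID: "containerd://1234"}

	fmt.Println(shouldStartDumping(c, active, started)) // true: start dumping
	active[c.pod+"/"+c.container] = false               // dumping stopped, e.g. connection lost
	fmt.Println(shouldStartDumping(c, active, started)) // true: container still exists, resume
	active[c.pod+"/"+c.container] = false
	c.terminated = true
	fmt.Println(shouldStartDumping(c, active, started)) // false: already captured, container terminated
}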
Patrick Ohly 2020-04-03 14:11:16 +02:00
parent dbac2a369a
commit b9c5c55c09

@@ -76,7 +76,11 @@ func CopyAllLogs(ctx context.Context, cs clientset.Interface, ns string, to LogO
go func() {
var m sync.Mutex
logging := map[string]bool{}
// Key is pod/container name, true if currently logging it.
active := map[string]bool{}
// Key is pod/container/container-id, true if we have ever started to capture its output.
started := map[string]bool{}
check := func() {
m.Lock()
defer m.Unlock()
@@ -91,10 +95,17 @@ func CopyAllLogs(ctx context.Context, cs clientset.Interface, ns string, to LogO
for _, pod := range pods.Items {
for i, c := range pod.Spec.Containers {
name := pod.ObjectMeta.Name + "/" + c.Name
if logging[name] ||
// sanity check, array should have entry for each container
len(pod.Status.ContainerStatuses) <= i ||
if len(pod.Status.ContainerStatuses) <= i {
continue
}
name := pod.ObjectMeta.Name + "/" + c.Name
id := name + "/" + pod.Status.ContainerStatuses[i].ContainerID
if active[name] ||
// If we have worked on a container before and it has now terminated, then
// there cannot be any new output and we can ignore it.
(pod.Status.ContainerStatuses[i].State.Terminated != nil &&
started[id]) ||
// Don't attempt to get logs for a container unless it is running or has terminated.
// Trying to get a log would just end up with an error that we would have to suppress.
(pod.Status.ContainerStatuses[i].State.Running == nil &&
@@ -117,7 +128,7 @@ func CopyAllLogs(ctx context.Context, cs clientset.Interface, ns string, to LogO
}
// Determine where we write. If this fails, we intentionally return without clearing
// the logging[name] flag, which prevents trying over and over again to
// the active[name] flag, which prevents trying over and over again to
// create the output file.
var out io.Writer
var closer io.Closer
@@ -155,14 +166,22 @@ func CopyAllLogs(ctx context.Context, cs clientset.Interface, ns string, to LogO
if closer != nil {
defer closer.Close()
}
first := true
defer func() {
m.Lock()
logging[name] = false
// If we never printed anything, then also skip the final message.
if !first {
if prefix != "" {
fmt.Fprintf(out, "%s==== end of pod log ====\n", prefix)
} else {
fmt.Fprintf(out, "==== end of pod log for container %s ====\n", name)
}
}
active[name] = false
m.Unlock()
readCloser.Close()
}()
scanner := bufio.NewScanner(readCloser)
first := true
for scanner.Scan() {
line := scanner.Text()
// Filter out the expected "end of stream" error message,
@@ -173,19 +192,22 @@ func CopyAllLogs(ctx context.Context, cs clientset.Interface, ns string, to LogO
!strings.HasPrefix(line, "unable to retrieve container logs for ") &&
!strings.HasPrefix(line, "Unable to retrieve container logs for ") {
if first {
if to.LogWriter == nil {
// Because the same log might be written to multiple times
// in different test instances, log an extra line to separate them.
// Also provides some useful extra information.
fmt.Fprintf(out, "==== start of log for container %s ====\n", name)
if prefix == "" {
fmt.Fprintf(out, "==== start of pod log for container %s ====\n", name)
} else {
fmt.Fprintf(out, "%s==== start of pod log ====\n", prefix)
}
first = false
}
fmt.Fprintf(out, "%s%s\n", prefix, scanner.Text())
fmt.Fprintf(out, "%s%s\n", prefix, line)
}
}
}()
logging[name] = true
active[name] = true
started[id] = true
}
}
}
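
For context, here is a hedged sketch of how a test suite might call this
helper to stream all pod logs in a namespace to a single writer, the mode
the new start/end markers are aimed at. The podlogs import path, the
LogOutput.LogWriter field, and the error return value are taken from the
Kubernetes test/e2e/storage tree around the time of this commit and may
differ in other versions.

package example

import (
	"context"
	"os"

	"k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/storage/podlogs"
)

// startPodLogs streams the logs of all containers in the given namespace to
// stdout until ctx is cancelled. With the change above, a container that has
// terminated and whose output has already been captured is not dumped again
// by the periodic check.
func startPodLogs(ctx context.Context, cs kubernetes.Interface, namespace string) error {
	to := podlogs.LogOutput{
		// Stream everything to one writer; the start/end markers keep the
		// interleaved per-container output readable.
		LogWriter: os.Stdout,
	}
	return podlogs.CopyAllLogs(ctx, cs, namespace, to)
}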