Merge pull request #85913 from jkaniuk/log-dump-fix

Fix waiting for logexporter log fetching processes
commit bcc6b22121
Author: Kubernetes Prow Robot
Date:   2019-12-05 01:48:32 -08:00 (committed via GitHub)


@@ -504,10 +504,10 @@ function dump_nodes_with_logexporter() {
   # Store logs from logexporter pods to allow debugging log exporting process
   # itself.
   proc=${max_dump_processes}
-  "${KUBECTL}" get pods -n "${logexporter_namespace}" -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.nodeName}{"\n"}{end}' | while read pod node; do
+  "${KUBECTL}" get pods -n "${logexporter_namespace}" -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.nodeName}{"\n"}{end}' | (while read -r pod node; do
     echo "Fetching logs from ${pod} running on ${node}"
-    mkdir -p ${report_dir}/${node}
-    "${KUBECTL}" logs -n "${logexporter_namespace}" ${pod} > ${report_dir}/${node}/${pod}.log &
+    mkdir -p "${report_dir}/${node}"
+    "${KUBECTL}" logs -n "${logexporter_namespace}" "${pod}" > "${report_dir}/${node}/${pod}.log" &
     # We don't want to run more than ${max_dump_processes} at a time, so
     # wait once we hit that many nodes. This isn't ideal, since one might
@@ -517,11 +517,8 @@ function dump_nodes_with_logexporter() {
       proc=${max_dump_processes}
       wait
     fi
-  done
-  # Wait for any remaining processes.
-  if [[ proc -gt 0 && proc -lt ${max_dump_processes} ]]; then
-    wait
-  fi
+  done; wait)
   # List registry of marker files (of nodes whose logexporter succeeded) from GCS.
   local nodes_succeeded
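
Why the fix works: in bash, every element of a pipeline runs in its own subshell, so the background jobs forked by "kubectl logs ... &" inside "... | while read ..." are children of that subshell, not of the parent script. The old final wait ran in the parent shell, had no such children, and returned immediately, so the script could move on while log fetches were still in flight, which is what the PR title calls out. Wrapping the loop as "(while ...; done; wait)" keeps the wait in the same subshell that forked the jobs. A minimal standalone sketch of the difference, not taken from the commit:

#!/usr/bin/env bash
# Sketch only: shows why "wait" must run inside the pipeline's subshell.

broken() {
  printf '%s\n' a b c | while read -r item; do
    sleep 2 &        # background job is a child of the pipeline subshell
  done
  wait               # parent shell: no children here, returns immediately
}

fixed() {
  printf '%s\n' a b c | (while read -r item; do
    sleep 2 &
  done; wait)        # same subshell that forked the jobs, so this blocks
}

time broken          # ~0s: returns while the sleeps are still running
time fixed           # ~2s: returns only after every sleep has finished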
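
For context, the surrounding throttling logic (partially visible in the two hunks) caps concurrent fetches at max_dump_processes by counting down proc and waiting whenever a full batch is in flight. Below is a self-contained sketch of the post-fix pattern, with assumptions labeled: the counter bookkeeping elided between the hunks is reconstructed, not quoted from the commit, and the namespace, report directory, and cap of 25 are placeholders.

#!/usr/bin/env bash
# Hedged sketch of the post-fix pattern; not a verbatim copy of log-dump.sh.
KUBECTL=${KUBECTL:-kubectl}
logexporter_namespace=logexporter   # placeholder namespace
report_dir=/tmp/logs                # placeholder output directory
max_dump_processes=25               # assumed concurrency cap
proc=${max_dump_processes}

"${KUBECTL}" get pods -n "${logexporter_namespace}" \
    -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.nodeName}{"\n"}{end}' \
  | (while read -r pod node; do
      echo "Fetching logs from ${pod} running on ${node}"
      mkdir -p "${report_dir}/${node}"
      "${KUBECTL}" logs -n "${logexporter_namespace}" "${pod}" \
        > "${report_dir}/${node}/${pod}.log" &
      proc=$((proc - 1))
      if [[ proc -eq 0 ]]; then     # a full batch is in flight: drain it
        proc=${max_dump_processes}
        wait
      fi
    done; wait)                     # drain the final, possibly partial batch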