From 46e8c08fe8bd8c7691146a98565d3ac5bf548200 Mon Sep 17 00:00:00 2001 From: Jeff Grafton Date: Wed, 25 Mar 2015 15:57:14 -0700 Subject: [PATCH] Update kube::log::{error,status} to include timestamp. Additionally, make the build scripts call these rather than calling 'echo' directly. --- build/common.sh | 66 ++++++++++++++++++++++----------------------- hack/lib/logging.sh | 6 +++-- 2 files changed, 37 insertions(+), 35 deletions(-) diff --git a/build/common.sh b/build/common.sh index 91b1a49c4cf..365639d56be 100644 --- a/build/common.sh +++ b/build/common.sh @@ -109,7 +109,7 @@ readonly RELEASE_DIR="${LOCAL_OUTPUT_ROOT}/release-tars" # KUBE_BUILD_DATA_CONTAINER_NAME # DOCKER_MOUNT_ARGS function kube::build::verify_prereqs() { - echo "+++ Verifying Prerequisites...." + kube::log::status "Verifying Prerequisites...." if [[ "${1-}" != "clean" ]]; then if [[ -z "$(which docker)" ]]; then @@ -130,10 +130,10 @@ function kube::build::verify_prereqs() { exit 1 else # Reach over and set the clock. After sleep/resume the clock will skew. - echo "+++ Setting boot2docker clock" + kube::log::status "Setting boot2docker clock" boot2docker ssh sudo date -u -D "%Y%m%d%H%M.%S" --set "$(date -u +%Y%m%d%H%M.%S)" >/dev/null if [[ -z "$DOCKER_HOST" ]]; then - echo "+++ Setting boot2docker env variables" + kube::log::status "Setting boot2docker env variables" $(boot2docker shellinit) fi fi @@ -162,7 +162,7 @@ function kube::build::verify_prereqs() { if [[ ! -z "$(which boot2docker)" ]]; then if [[ $(boot2docker status) == "running" ]]; then if [[ -z "$DOCKER_HOST" ]]; then - echo "+++ Setting boot2docker env variables" + kube::log::status "Setting boot2docker env variables" $(boot2docker shellinit) fi fi @@ -190,17 +190,17 @@ function kube::build::clean_output() { # Clean out the output directory if it exists. if kube::build::has_docker ; then if kube::build::build_image_built ; then - echo "+++ Cleaning out _output/dockerized/bin/ via docker build image" + kube::log::status "Cleaning out _output/dockerized/bin/ via docker build image" kube::build::run_build_command bash -c "rm -rf '${REMOTE_OUTPUT_BINPATH}'/*" else - echo "!!! Build image not built. Cannot clean via docker build image." + kube::log::error "Build image not built. Cannot clean via docker build image." fi - echo "+++ Removing data container" + kube::log::status "Removing data container" "${DOCKER[@]}" rm -v "${KUBE_BUILD_DATA_CONTAINER_NAME}" >/dev/null 2>&1 || true fi - echo "+++ Cleaning out local _output directory" + kube::log::status "Cleaning out local _output directory" rm -rf "${LOCAL_OUTPUT_ROOT}" } @@ -216,7 +216,7 @@ function kube::build::prepare_output() { selinuxenabled && \ which chcon >/dev/null ; then if [[ ! $(ls -Zd "${LOCAL_OUTPUT_ROOT}") =~ svirt_sandbox_file_t ]] ; then - echo "+++ Applying SELinux policy to '_output' directory." + kube::log::status "Applying SELinux policy to '_output' directory." if ! chcon -Rt svirt_sandbox_file_t "${LOCAL_OUTPUT_ROOT}"; then echo " ***Failed***. This may be because you have root owned files under _output." echo " Continuing, but this build may fail later if SELinux prevents access." @@ -236,7 +236,7 @@ function kube::build::has_docker() { # #2 - image tag function kube::build::docker_image_exists() { [[ -n $1 && -n $2 ]] || { - echo "!!! Internal error. Image not specified in docker_image_exists." >&2 + kube::log::error "Internal error. Image not specified in docker_image_exists." 
exit 2 } @@ -248,7 +248,7 @@ function kube::build::docker_image_exists() { # Takes $1 and computes a short has for it. Useful for unique tag generation function kube::build::short_hash() { [[ $# -eq 1 ]] || { - echo "!!! Internal error. No data based to short_hash." >&2 + kube::log::error "Internal error. No data based to short_hash." exit 2 } @@ -291,7 +291,7 @@ function kube::build::ensure_golang() { } } - echo "+++ Pulling docker image: golang:${KUBE_BUILD_GOLANG_VERSION}" + kube::log::status "Pulling docker image: golang:${KUBE_BUILD_GOLANG_VERSION}" "${DOCKER[@]}" pull golang:${KUBE_BUILD_GOLANG_VERSION} } } @@ -346,7 +346,7 @@ function kube::build::docker_build() { local -r context_dir=$2 local -ra build_cmd=("${DOCKER[@]}" build -t "${image}" "${context_dir}") - echo "+++ Building Docker image ${image}." + kube::log::status "Building Docker image ${image}." local docker_output docker_output=$("${build_cmd[@]}" 2>&1) || { cat <&2 @@ -366,7 +366,7 @@ EOF function kube::build::clean_image() { local -r image=$1 - echo "+++ Deleting docker image ${image}" + kube::log::status "Deleting docker image ${image}" "${DOCKER[@]}" rmi ${image} 2> /dev/null || true } @@ -375,13 +375,13 @@ function kube::build::clean_images() { kube::build::clean_image "${KUBE_BUILD_IMAGE}" - echo "+++ Cleaning all other untagged docker images" + kube::log::status "Cleaning all other untagged docker images" "${DOCKER[@]}" rmi $("${DOCKER[@]}" images -q --filter 'dangling=true') 2> /dev/null || true } function kube::build::ensure_data_container() { if ! "${DOCKER[@]}" inspect "${KUBE_BUILD_DATA_CONTAINER_NAME}" >/dev/null 2>&1; then - echo "+++ Creating data container" + kube::log::status "Creating data container" local -ra docker_cmd=( "${DOCKER[@]}" run "${DOCKER_DATA_MOUNT_ARGS[@]}" @@ -396,7 +396,7 @@ function kube::build::ensure_data_container() { # Run a command in the kube-build image. This assumes that the image has # already been built. This will sync out all output data from the build. function kube::build::run_build_command() { - echo "+++ Running build command...." + kube::log::status "Running build command...." [[ $# != 0 ]] || { echo "Invalid input." >&2; return 4; } kube::build::ensure_data_container @@ -455,7 +455,7 @@ function kube::build::copy_output() { "${DOCKER[@]}" run "${docker_run_opts[@]}" "${KUBE_BUILD_IMAGE}" ) - echo "+++ Syncing back _output/dockerized/bin directory from remote Docker" + kube::log::status "Syncing back _output/dockerized/bin directory from remote Docker" rm -rf "${LOCAL_OUTPUT_BINPATH}" mkdir -p "${LOCAL_OUTPUT_BINPATH}" @@ -473,7 +473,7 @@ function kube::build::copy_output() { let count=count+1 if [[ $count -eq 60 ]]; then # break after 5m - echo "!!! Timed out waiting for binaries..." + kube::log::error "Timed out waiting for binaries..." break fi sleep 5 @@ -481,7 +481,7 @@ function kube::build::copy_output() { "${DOCKER[@]}" rm -f -v "${KUBE_BUILD_CONTAINER_NAME}" >/dev/null 2>&1 || true else - echo "+++ Output directory is local. No need to copy results out." + kube::log::status "Output directory is local. No need to copy results out." 
fi } @@ -513,7 +513,7 @@ function kube::release::package_client_tarballs() { platforms=($(cd "${LOCAL_OUTPUT_BINPATH}" ; echo */*)) for platform in "${platforms[@]}" ; do local platform_tag=${platform/\//-} # Replace a "/" for a "-" - echo "+++ Building tarball: client $platform_tag" + kube::log::status "Building tarball: client $platform_tag" local release_stage="${RELEASE_STAGE}/client/${platform_tag}/kubernetes" rm -rf "${release_stage}" @@ -542,7 +542,7 @@ function kube::release::package_server_tarballs() { local platform for platform in "${KUBE_SERVER_PLATFORMS[@]}" ; do local platform_tag=${platform/\//-} # Replace a "/" for a "-" - echo "+++ Building tarball: server $platform_tag" + kube::log::status "Building tarball: server $platform_tag" local release_stage="${RELEASE_STAGE}/server/${platform_tag}/kubernetes" rm -rf "${release_stage}" @@ -572,7 +572,7 @@ function kube::release::package_server_tarballs() { # Package up the salt configuration tree. This is an optional helper to getting # a cluster up and running. function kube::release::package_salt_tarball() { - echo "+++ Building tarball: salt" + kube::log::status "Building tarball: salt" local release_stage="${RELEASE_STAGE}/salt/kubernetes" rm -rf "${release_stage}" @@ -596,7 +596,7 @@ function kube::release::package_salt_tarball() { # This is the stuff you need to run tests from the binary distribution. function kube::release::package_test_tarball() { - echo "+++ Building tarball: test" + kube::log::status "Building tarball: test" local release_stage="${RELEASE_STAGE}/test/kubernetes" rm -rf "${release_stage}" @@ -627,7 +627,7 @@ function kube::release::package_test_tarball() { # - tarballs for server binary and salt configs that are ready to be uploaded # to master by whatever means appropriate. function kube::release::package_full_tarball() { - echo "+++ Building tarball: full" + kube::log::status "Building tarball: full" local release_stage="${RELEASE_STAGE}/full/kubernetes" rm -rf "${release_stage}" @@ -764,11 +764,11 @@ function kube::release::gcs::copy_release_artifacts() { gcs_options=("-h" "Cache-Control:private, max-age=0") fi - echo "+++ Copying release artifacts to ${gcs_destination}" + kube::log::status "Copying release artifacts to ${gcs_destination}" # First delete all objects at the destination if gsutil ls "${gcs_destination}" >/dev/null 2>&1; then - echo "!!! ${gcs_destination} not empty." + kube::log::error "${gcs_destination} not empty." read -p "Delete everything under ${gcs_destination}? [y/n] " -r || { echo "EOF on prompt. Skipping upload" return @@ -810,7 +810,7 @@ function kube::release::gcs::copy_release_artifacts() { # extra credit. if [[ ${KUBE_GCS_MAKE_PUBLIC} =~ ^[yY]$ ]]; then - echo "+++ Marking all uploaded objects public" + kube::log::status "Marking all uploaded objects public" gsutil acl ch -R -g all:R "${gcs_destination}" >/dev/null 2>&1 fi @@ -830,7 +830,7 @@ function kube::release::gcs::publish_latest() { gsutil acl ch -R -g all:R "${latest_file_dst}" >/dev/null 2>&1 fi - echo "+++ gsutil cat ${latest_file_dst}:" + kube::log::status "gsutil cat ${latest_file_dst}:" gsutil cat ${latest_file_dst} } @@ -842,7 +842,7 @@ function kube::release::gcs::publish_latest_official() { local -r version_regex="^v(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)$" [[ ${new_version} =~ ${version_regex} ]] || { - echo "!!! 
publish_latest_official passed bogus value: '${new_version}'" >&2 + kube::log::error "publish_latest_official passed bogus value: '${new_version}'" return 1 } @@ -854,7 +854,7 @@ function kube::release::gcs::publish_latest_official() { gcs_version=$(gsutil cat "${latest_file_dst}") [[ ${gcs_version} =~ ${version_regex} ]] || { - echo "!!! ${latest_file_dst} contains invalid release version, can't compare: '${gcs_version}'" >&2 + kube::log::error "${latest_file_dst} contains invalid release version, can't compare: '${gcs_version}'" return 1 } @@ -876,10 +876,10 @@ function kube::release::gcs::publish_latest_official() { fi if [[ "${greater}" != "true" ]]; then - echo "+++ ${gcs_version} (latest on GCS) >= ${new_version} (just uploaded), not updating ${latest_file_dst}" + kube::log::status "${gcs_version} (latest on GCS) >= ${new_version} (just uploaded), not updating ${latest_file_dst}" return 0 fi - echo "+++ ${new_version} (just uploaded) > ${gcs_version} (latest on GCS), updating ${latest_file_dst}" + kube::log::status "${new_version} (just uploaded) > ${gcs_version} (latest on GCS), updating ${latest_file_dst}" kube::release::gcs::publish_latest } diff --git a/hack/lib/logging.sh b/hack/lib/logging.sh index e8de9efe941..55394cb4349 100644 --- a/hack/lib/logging.sh +++ b/hack/lib/logging.sh @@ -85,7 +85,8 @@ kube::log::error_exit() { # Log an error but keep going. Don't dump the stack or exit. kube::log::error() { - echo "!!! ${1-}" >&2 + timestamp=$(date +"[%m%d %H:%M:%S]") + echo "!!! $timestamp ${1-}" >&2 shift for message; do echo " $message" >&2 @@ -129,7 +130,8 @@ kube::log::info_from_stdin() { # Print a status line. Formatted to show up in a stream of output. kube::log::status() { - echo "+++ $1" + timestamp=$(date +"[%m%d %H:%M:%S]") + echo "+++ $timestamp $1" shift for message; do echo " $message"
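
For reference, here is a minimal standalone sketch of the two logging helpers as they behave after this patch, followed by a sample invocation. The function bodies mirror the hack/lib/logging.sh hunk above; the sample calls reuse messages from build/common.sh, and the timestamps shown in the expected-output comments are illustrative only.

    #!/usr/bin/env bash
    # Sketch of the timestamped logging helpers introduced by this patch
    # (mirrors the hack/lib/logging.sh hunk above; not an exact copy of the file).

    # Log an error (with timestamp) to stderr, but keep going.
    kube::log::error() {
      timestamp=$(date +"[%m%d %H:%M:%S]")
      echo "!!! $timestamp ${1-}" >&2
      shift
      for message; do
        echo "    $message" >&2
      done
    }

    # Print a status line (with timestamp) to stdout.
    kube::log::status() {
      timestamp=$(date +"[%m%d %H:%M:%S]")
      echo "+++ $timestamp $1"
      shift
      for message; do
        echo "    $message"
      done
    }

    # Example usage, as in build/common.sh after this patch:
    kube::log::status "Verifying Prerequisites...."
    kube::log::error "Build image not built. Cannot clean via docker build image."

    # Expected output (actual timestamps will vary):
    #   +++ [0325 15:57:14] Verifying Prerequisites....                                   (stdout)
    #   !!! [0325 15:57:14] Build image not built. Cannot clean via docker build image.   (stderr)

Because kube::log::error already writes to stderr, the call sites converted from "echo ... >&2" in build/common.sh drop the explicit redirection, as seen in the hunks above.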