Merge pull request #24623 from ixdy/kubemark-cluster-logs

Automatic merge from submit-queue

Use cluster/log-dump.sh to collect base cluster logs in kubemark

Fixes #24415.

cc @fejta @spxtr @wojtek-t
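
The fix replaces the duplicated inline log-dump blocks with a single dump_cluster_logs_and_exit helper, invoked as "command || dump_cluster_logs_and_exit". Because the helper reads $? on its first line, it still sees the exit status of the command that just failed, and it re-exits with that code after collecting logs, so the runner's original failure status is preserved. A minimal sketch of the pattern (dump_and_exit and flaky_step are illustrative names, not part of this PR):

dump_and_exit() {
  local -r exit_status=$?   # status of the command that failed on the left of '||'
  echo "collecting logs before exiting with status ${exit_status}"
  exit "${exit_status}"
}

flaky_step() { return 42; }   # stand-in for kube-up / start-kubemark

flaky_step || dump_and_exit   # prints the message, then exits with status 42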
k8s-merge-robot 2016-04-26 09:13:42 -07:00
commit 9b08bc62d1


@@ -139,6 +139,14 @@ function install_google_cloud_sdk_tarball() {
   export PATH=${install_dir}/google-cloud-sdk/bin:${PATH}
 }
 
+function dump_cluster_logs_and_exit() {
+  local -r exit_status=$?
+  if [[ -x "cluster/log-dump.sh" ]]; then
+    ./cluster/log-dump.sh "${ARTIFACTS}"
+  fi
+  exit ${exit_status}
+}
+
 ### Pre Set Up ###
 if running_in_docker; then
   curl -fsSL --retry 3 -o "${WORKSPACE}/google-cloud-sdk.tar.gz" 'https://dl.google.com/dl/cloudsdk/channels/rapid/google-cloud-sdk.tar.gz'
@@ -291,13 +299,7 @@ fi
 if [[ "${E2E_UP,,}" == "true" ]]; then
   # We want to try to gather logs even if kube-up fails, so collect the
   # result here and fail after dumping logs if it's nonzero.
-  go run ./hack/e2e.go ${E2E_OPT:-} -v --up && up_result="$?" || up_result="$?"
-  if [[ "${up_result}" -ne 0 ]]; then
-    if [[ -x "cluster/log-dump.sh" ]]; then
-      ./cluster/log-dump.sh "${ARTIFACTS}"
-    fi
-    exit "${up_result}"
-  fi
+  go run ./hack/e2e.go ${E2E_OPT:-} -v --up || dump_cluster_logs_and_exit
   go run ./hack/e2e.go -v --ctl="version --match-server-version=false"
   if [[ "${gcp_list_resources}" == "true" ]]; then
     ${gcp_list_resources_script} > "${gcp_resources_cluster_up}"
@@ -335,17 +337,9 @@ if [[ "${USE_KUBEMARK:-}" == "true" ]]; then
   NUM_NODES=${KUBEMARK_NUM_NODES:-$NUM_NODES}
   MASTER_SIZE=${KUBEMARK_MASTER_SIZE:-$MASTER_SIZE}
   # If start-kubemark fails, we trigger empty set of tests that would trigger storing logs from the base cluster.
-  ./test/kubemark/start-kubemark.sh && kubemark_started="$?" || kubemark_started="$?"
-  if [[ "${kubemark_started}" != "0" ]]; then
-    go run ./hack/e2e.go -v --test --test_args="--ginkgo.focus=DO\sNOT\sMATCH\sANYTHING"
-    exit 1
-  fi
+  ./test/kubemark/start-kubemark.sh || dump_cluster_logs_and_exit
   # Similarly, if tests fail, we trigger empty set of tests that would trigger storing logs from the base cluster.
-  ./test/kubemark/run-e2e-tests.sh --ginkgo.focus="${KUBEMARK_TESTS}" --gather-resource-usage="false" && kubemark_succeeded="$?" || kubemark_succeeded="$?"
-  if [[ "${kubemark_succeeded}" != "0" ]]; then
-    go run ./hack/e2e.go -v --test --test_args="--ginkgo.focus=DO\sNOT\sMATCH\sANYTHING"
-    exit 1
-  fi
+  ./test/kubemark/run-e2e-tests.sh --ginkgo.focus="${KUBEMARK_TESTS}" --gather-resource-usage="false" || dump_cluster_logs_and_exit
   ./test/kubemark/stop-kubemark.sh
   NUM_NODES=${NUM_NODES_BKP}
   MASTER_SIZE=${MASTER_SIZE_BKP}