Merge pull request #19153 from spxtr/split-e2e
Split e2e.sh into e2e.sh and e2e-runner.sh on master branch.
commit adb0da005e
hack/jenkins/e2e-jobs.sh (deleted)
@@ -1,22 +0,0 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

echo "e2e-jobs.sh not implemented yet. See #18981 for details."
196
hack/jenkins/e2e-runner.sh
Normal file → Executable file
@@ -14,9 +14,203 @@
# See the License for the specific language governing permissions and
# limitations under the License.

# Run e2e tests using environment variables exported in e2e.sh.

set -o errexit
set -o nounset
set -o pipefail

echo "e2e-runner.sh not implemented yet. See #18981 for details."
echo "--------------------------------------------------------------------------------"
echo "Test Environment:"
printenv | sort
echo "--------------------------------------------------------------------------------"

# We get the Kubernetes tarballs on either cluster creation or when we want to
# replace existing ones in a multi-step job (e.g. a cluster upgrade).
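# Note: "${E2E_UP,,}" below is bash 4's lowercase parameter expansion, so
# values like "True" or "TRUE" also match "true".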
if [[ "${E2E_UP,,}" == "true" || "${JENKINS_FORCE_GET_TARS:-}" =~ ^[yY]$ ]]; then
    if [[ ${KUBE_RUN_FROM_OUTPUT:-} =~ ^[yY]$ ]]; then
        echo "Found KUBE_RUN_FROM_OUTPUT=y; will use binaries from _output"
        cp _output/release-tars/kubernetes*.tar.gz .
    else
        echo "Pulling binaries from GCS"
        # In a multi-step job, clean up just the kubernetes build files.
        # Otherwise, we want a completely empty directory.
        if [[ "${JENKINS_FORCE_GET_TARS:-}" =~ ^[yY]$ ]]; then
            rm -rf kubernetes*
        # .config and its children are created by the gcloud call that we use to
        # get the GCE service account.
        # console-log.txt is created by Jenkins, but is usually not flushed out
        # this early in the script.
        elif [[ $(find . -not -path "./.config*" -not -name "console-log.txt" \
            | wc -l) != 1 ]]; then
            echo "${PWD} not empty, bailing!"
            find .
            exit 1
        fi

        if [[ ${JENKINS_USE_SERVER_VERSION:-} =~ ^[yY]$ ]]; then
            # For GKE, we can use the server's default version.
            bucket="release"
            msg=$(gcloud ${CMD_GROUP} container get-server-config --project=${PROJECT} --zone=${ZONE} | grep defaultClusterVersion)
            # msg will look like "defaultClusterVersion: 1.0.1". Strip
            # everything up to and including ": ".
            build_version="v${msg##*: }"
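            # e.g. msg="defaultClusterVersion: 1.0.1" yields build_version="v1.0.1".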
            echo "Using server version $bucket/$build_version"
        else # use JENKINS_PUBLISHED_VERSION
            # Use a published version like "ci/latest" (default),
            # "release/latest", "release/latest-1", or "release/stable"
            IFS='/' read -a varr <<< "${JENKINS_PUBLISHED_VERSION}"
            bucket="${varr[0]}"
            build_version=$(gsutil cat gs://kubernetes-release/${JENKINS_PUBLISHED_VERSION}.txt)
            echo "Using published version $bucket/$build_version (from ${JENKINS_PUBLISHED_VERSION})"
        fi
        # At this point, we want to have the following vars set:
        # - bucket
        # - build_version
        gsutil -m cp gs://kubernetes-release/${bucket}/${build_version}/kubernetes.tar.gz gs://kubernetes-release/${bucket}/${build_version}/kubernetes-test.tar.gz .

        # Set by GKE-CI to change the CLUSTER_API_VERSION to the git version
        if [[ ! -z ${E2E_SET_CLUSTER_API_VERSION:-} ]]; then
            export CLUSTER_API_VERSION=$(echo ${build_version} | cut -c 2-)
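            # (cut -c 2- drops the leading "v": "v1.0.1" becomes "1.0.1".)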
        fi
    fi

    if [[ ! "${CIRCLECI:-}" == "true" ]]; then
        # Copy GCE keys so we don't keep cycling them.
        # To set this up, you must know the <project>, <zone>, and <instance>
        # on which your jenkins jobs are running. Then do:
        #
        #   # SSH from your computer into the instance.
        #   $ gcloud compute ssh --project="<prj>" --zone="<zone>" <instance>
        #
        #   # Generate a key by ssh'ing from the instance into itself, then exit.
        #   $ gcloud compute ssh --project="<prj>" --zone="<zone>" <instance>
        #   $ ^D
        #
        #   # Copy the keys to the desired location (e.g. /var/lib/jenkins/gce_keys/).
        #   $ sudo mkdir -p /var/lib/jenkins/gce_keys/
        #   $ sudo cp ~/.ssh/google_compute_engine /var/lib/jenkins/gce_keys/
        #   $ sudo cp ~/.ssh/google_compute_engine.pub /var/lib/jenkins/gce_keys/
        #
        #   # Move ownership of the keys to Jenkins.
        #   $ sudo chown -R jenkins /var/lib/jenkins/gce_keys/
        #   $ sudo chgrp -R jenkins /var/lib/jenkins/gce_keys/
        if [[ "${KUBERNETES_PROVIDER}" == "aws" ]]; then
            echo "Skipping SSH key copying for AWS"
        else
            mkdir -p ${WORKSPACE}/.ssh/
            cp /var/lib/jenkins/gce_keys/google_compute_engine ${WORKSPACE}/.ssh/
            cp /var/lib/jenkins/gce_keys/google_compute_engine.pub ${WORKSPACE}/.ssh/
        fi
    fi

    md5sum kubernetes*.tar.gz
    tar -xzf kubernetes.tar.gz
    tar -xzf kubernetes-test.tar.gz
fi

cd kubernetes

# Have cmd/e2e run by goe2e.sh generate a JUnit report in ${WORKSPACE}/junit*.xml
ARTIFACTS=${WORKSPACE}/_artifacts
mkdir -p ${ARTIFACTS}
export E2E_REPORT_DIR=${ARTIFACTS}
declare -r gcp_list_resources_script="./cluster/gce/list-resources.sh"
declare -r gcp_resources_before="${ARTIFACTS}/gcp-resources-before.txt"
declare -r gcp_resources_cluster_up="${ARTIFACTS}/gcp-resources-cluster-up.txt"
declare -r gcp_resources_after="${ARTIFACTS}/gcp-resources-after.txt"
# TODO(15492): figure out some way to run this script even if it doesn't exist
# in the Kubernetes tarball.
if [[ ( ${KUBERNETES_PROVIDER} == "gce" || ${KUBERNETES_PROVIDER} == "gke" ) && -x "${gcp_list_resources_script}" ]]; then
    gcp_list_resources="true"
else
    gcp_list_resources="false"
fi

### Pre Set Up ###
# Install gcloud from a custom path if provided. Used to test GKE with gcloud
# at HEAD or a release candidate.
if [[ ! -z "${CLOUDSDK_BUCKET:-}" ]]; then
    gsutil -m cp -r "${CLOUDSDK_BUCKET}" ~
    rm -rf ~/repo ~/cloudsdk
    mv ~/$(basename "${CLOUDSDK_BUCKET}") ~/repo
    mkdir ~/cloudsdk
    tar zvxf ~/repo/google-cloud-sdk.tar.gz -C ~/cloudsdk
    export CLOUDSDK_CORE_DISABLE_PROMPTS=1
    export CLOUDSDK_COMPONENT_MANAGER_SNAPSHOT_URL=file://${HOME}/repo/components-2.json
    ~/cloudsdk/google-cloud-sdk/install.sh --disable-installation-options --bash-completion=false --path-update=false --usage-reporting=false
    export PATH=${HOME}/cloudsdk/google-cloud-sdk/bin:${PATH}
    export CLOUDSDK_CONFIG=/var/lib/jenkins/.config/gcloud
fi

### Set up ###
if [[ "${E2E_UP,,}" == "true" ]]; then
    go run ./hack/e2e.go ${E2E_OPT} -v --down
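    # (The --down above tears down any cluster left over from a previous run,
    # so the --up below starts from a clean slate.)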
fi
if [[ "${gcp_list_resources}" == "true" ]]; then
    ${gcp_list_resources_script} > "${gcp_resources_before}"
fi
if [[ "${E2E_UP,,}" == "true" ]]; then
    go run ./hack/e2e.go ${E2E_OPT} -v --up
    go run ./hack/e2e.go -v --ctl="version --match-server-version=false"
    if [[ "${gcp_list_resources}" == "true" ]]; then
        ${gcp_list_resources_script} > "${gcp_resources_cluster_up}"
    fi
fi

### Run tests ###
# Jenkins will look at the junit*.xml files for test failures, so don't exit
# with a nonzero error code if it was only tests that failed.
if [[ "${E2E_TEST,,}" == "true" ]]; then
    # Check to make sure the cluster is up before running tests, and fail if it's not.
    go run ./hack/e2e.go ${E2E_OPT} -v --isup
    go run ./hack/e2e.go ${E2E_OPT} -v --test --test_args="${GINKGO_TEST_ARGS}" && exitcode=0 || exitcode=$?
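    # ("cmd && exitcode=0 || exitcode=$?" records the test exit status without
    # letting "set -o errexit" abort the script; failing tests are reported
    # through the junit*.xml files rather than this script's exit code.)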
    if [[ "${E2E_PUBLISH_GREEN_VERSION:-}" == "true" && ${exitcode} == 0 && -n ${build_version:-} ]]; then
        echo "publish build_version to ci/latest-green.txt: ${build_version}"
        echo "${build_version}" > ${WORKSPACE}/build_version.txt
        gsutil cp ${WORKSPACE}/build_version.txt gs://kubernetes-release/ci/latest-green.txt
    fi
fi

### Start Kubemark ###
if [[ "${USE_KUBEMARK:-}" == "true" ]]; then
    export RUN_FROM_DISTRO=true
    NUM_NODES_BKP=${NUM_NODES}
    MASTER_SIZE_BKP=${MASTER_SIZE}
    ./test/kubemark/stop-kubemark.sh
    NUM_NODES=${KUBEMARK_NUM_NODES:-$NUM_NODES}
    MASTER_SIZE=${KUBEMARK_MASTER_SIZE:-$MASTER_SIZE}
    ./test/kubemark/start-kubemark.sh
    ./test/kubemark/run-e2e-tests.sh --ginkgo.focus="should\sallow\sstarting\s30\spods\sper\snode" --delete-namespace="false" --gather-resource-usage="false"
    ./test/kubemark/stop-kubemark.sh
    NUM_NODES=${NUM_NODES_BKP}
    MASTER_SIZE=${MASTER_SIZE_BKP}
    unset RUN_FROM_DISTRO
    unset NUM_NODES_BKP
    unset MASTER_SIZE_BKP
fi

### Clean up ###
if [[ "${E2E_DOWN,,}" == "true" ]]; then
    # Sleep before deleting the cluster to give the controller manager time to
    # delete any cloudprovider resources still around from the last test.
    # This is calibrated to allow enough time for 3 attempts to delete the
    # resources. Each attempt is allocated 5 seconds for requests to the
    # cloudprovider plus the processingRetryInterval from servicecontroller.go
    # for the wait between attempts.
    sleep 30
    go run ./hack/e2e.go ${E2E_OPT} -v --down
fi
if [[ "${gcp_list_resources}" == "true" ]]; then
    ${gcp_list_resources_script} > "${gcp_resources_after}"
fi

# Compare resources if the cluster was either
# * started and destroyed (normal e2e), or
# * neither started nor destroyed (soak test)
if [[ "${E2E_UP:-}" == "${E2E_DOWN:-}" && -f "${gcp_resources_before}" && -f "${gcp_resources_after}" ]]; then
    if ! diff -sw -U0 -F'^\[.*\]$' "${gcp_resources_before}" "${gcp_resources_after}" && [[ "${FAIL_ON_GCP_RESOURCE_LEAK:-}" == "true" ]]; then
        echo "!!! FAIL: Google Cloud Platform resources leaked while running tests!"
        exit 1
    fi
fi
hack/jenkins/e2e.sh
@@ -14,14 +14,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.

# kubernetes-e2e-{gce, gke, gke-ci} jobs: This script is triggered by
# the kubernetes-build job, or runs every half hour. We abort this job
# if it takes more than 75m. As of initial commit, it typically runs
# in about half an hour.
#
# The "Workspace Cleanup Plugin" is installed and in use for this job,
# so the ${WORKSPACE} directory (the current directory) is currently
# empty.
# Sets up environment variables for an e2e test specified in JOB_NAME, then
# runs e2e-runner.sh.

set -o errexit
set -o nounset
@@ -1117,204 +1111,18 @@ export KUBE_SKIP_UPDATE=y
export KUBE_SKIP_CONFIRMATIONS=y

# E2E Control Variables
export E2E_OPT="${E2E_OPT:-}"
export E2E_UP="${E2E_UP:-true}"
export E2E_TEST="${E2E_TEST:-true}"
export E2E_DOWN="${E2E_DOWN:-true}"
export E2E_CLEAN_START="${E2E_CLEAN_START:-}"
# Used by hack/ginkgo-e2e.sh to enable ginkgo's parallel test runner.
export GINKGO_PARALLEL=${GINKGO_PARALLEL:-}
export GINKGO_TEST_ARGS="${GINKGO_TEST_ARGS:-}"
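# (These ${VAR:-default} expansions only supply fallbacks; the job-specific
# configuration earlier in this script, keyed off JOB_NAME, typically overrides
# them. For example, a job might set GINKGO_TEST_ARGS="--ginkgo.focus=\[Conformance\]"
# to run only conformance tests, or E2E_UP=false E2E_DOWN=false for a soak job
# that reuses an existing cluster.)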

echo "--------------------------------------------------------------------------------"
echo "Test Environment:"
printenv | sort
echo "--------------------------------------------------------------------------------"

# We get the Kubernetes tarballs on either cluster creation or when we want to
# replace existing ones in a multi-step job (e.g. a cluster upgrade).
if [[ "${E2E_UP,,}" == "true" || "${JENKINS_FORCE_GET_TARS:-}" =~ ^[yY]$ ]]; then
    if [[ ${KUBE_RUN_FROM_OUTPUT:-} =~ ^[yY]$ ]]; then
        echo "Found KUBE_RUN_FROM_OUTPUT=y; will use binaries from _output"
        cp _output/release-tars/kubernetes*.tar.gz .
    else
        echo "Pulling binaries from GCS"
        # In a multi-step job, clean up just the kubernetes build files.
        # Otherwise, we want a completely empty directory.
        if [[ "${JENKINS_FORCE_GET_TARS:-}" =~ ^[yY]$ ]]; then
            rm -rf kubernetes*
        # .config and its children are created by the gcloud call that we use to
        # get the GCE service account.
        # console-log.txt is created by Jenkins, but is usually not flushed out
        # this early in the script.
        elif [[ $(find . -not -path "./.config*" -not -name "console-log.txt" \
            | wc -l) != 1 ]]; then
            echo "${PWD} not empty, bailing!"
            find .
            exit 1
        fi

        if [[ ${JENKINS_USE_SERVER_VERSION:-} =~ ^[yY]$ ]]; then
            # For GKE, we can use the server's default version.
            bucket="release"
            msg=$(gcloud ${CMD_GROUP} container get-server-config --project=${PROJECT} --zone=${ZONE} | grep defaultClusterVersion)
            # msg will look like "defaultClusterVersion: 1.0.1". Strip
            # everything up to and including ": ".
            build_version="v${msg##*: }"
            echo "Using server version $bucket/$build_version"
        else # use JENKINS_PUBLISHED_VERSION
            # Use a published version like "ci/latest" (default),
            # "release/latest", "release/latest-1", or "release/stable"
            IFS='/' read -a varr <<< "${JENKINS_PUBLISHED_VERSION}"
            bucket="${varr[0]}"
            build_version=$(gsutil cat gs://kubernetes-release/${JENKINS_PUBLISHED_VERSION}.txt)
            echo "Using published version $bucket/$build_version (from ${JENKINS_PUBLISHED_VERSION})"
        fi
        # At this point, we want to have the following vars set:
        # - bucket
        # - build_version
        gsutil -m cp gs://kubernetes-release/${bucket}/${build_version}/kubernetes.tar.gz gs://kubernetes-release/${bucket}/${build_version}/kubernetes-test.tar.gz .

        # Set by GKE-CI to change the CLUSTER_API_VERSION to the git version
        if [[ ! -z ${E2E_SET_CLUSTER_API_VERSION:-} ]]; then
            export CLUSTER_API_VERSION=$(echo ${build_version} | cut -c 2-)
        fi
    fi

    if [[ ! "${CIRCLECI:-}" == "true" ]]; then
        # Copy GCE keys so we don't keep cycling them.
        # To set this up, you must know the <project>, <zone>, and <instance>
        # on which your jenkins jobs are running. Then do:
        #
        #   # SSH from your computer into the instance.
        #   $ gcloud compute ssh --project="<prj>" --zone="<zone>" <instance>
        #
        #   # Generate a key by ssh'ing from the instance into itself, then exit.
        #   $ gcloud compute ssh --project="<prj>" --zone="<zone>" <instance>
        #   $ ^D
        #
        #   # Copy the keys to the desired location (e.g. /var/lib/jenkins/gce_keys/).
        #   $ sudo mkdir -p /var/lib/jenkins/gce_keys/
        #   $ sudo cp ~/.ssh/google_compute_engine /var/lib/jenkins/gce_keys/
        #   $ sudo cp ~/.ssh/google_compute_engine.pub /var/lib/jenkins/gce_keys/
        #
        #   # Move ownership of the keys to Jenkins.
        #   $ sudo chown -R jenkins /var/lib/jenkins/gce_keys/
        #   $ sudo chgrp -R jenkins /var/lib/jenkins/gce_keys/
        if [[ "${KUBERNETES_PROVIDER}" == "aws" ]]; then
            echo "Skipping SSH key copying for AWS"
        else
            mkdir -p ${WORKSPACE}/.ssh/
            cp /var/lib/jenkins/gce_keys/google_compute_engine ${WORKSPACE}/.ssh/
            cp /var/lib/jenkins/gce_keys/google_compute_engine.pub ${WORKSPACE}/.ssh/
        fi
    fi

    md5sum kubernetes*.tar.gz
    tar -xzf kubernetes.tar.gz
    tar -xzf kubernetes-test.tar.gz
fi

cd kubernetes

# Have cmd/e2e run by goe2e.sh generate a JUnit report in ${WORKSPACE}/junit*.xml
ARTIFACTS=${WORKSPACE}/_artifacts
mkdir -p ${ARTIFACTS}
export E2E_REPORT_DIR=${ARTIFACTS}
declare -r gcp_list_resources_script="./cluster/gce/list-resources.sh"
declare -r gcp_resources_before="${ARTIFACTS}/gcp-resources-before.txt"
declare -r gcp_resources_cluster_up="${ARTIFACTS}/gcp-resources-cluster-up.txt"
declare -r gcp_resources_after="${ARTIFACTS}/gcp-resources-after.txt"
# TODO(15492): figure out some way to run this script even if it doesn't exist
# in the Kubernetes tarball.
if [[ ( ${KUBERNETES_PROVIDER} == "gce" || ${KUBERNETES_PROVIDER} == "gke" ) && -x "${gcp_list_resources_script}" ]]; then
    gcp_list_resources="true"
# If we are on PR Jenkins merging into master, use the local e2e-runner.sh. Otherwise, use the latest on github.
if [[ "${ghprbTargetBranch:-}" == "master" ]]; then
    source "hack/jenkins/e2e-runner.sh"
else
    gcp_list_resources="false"
fi

### Pre Set Up ###
# Install gcloud from a custom path if provided. Used to test GKE with gcloud
# at HEAD or a release candidate.
if [[ ! -z "${CLOUDSDK_BUCKET:-}" ]]; then
    gsutil -m cp -r "${CLOUDSDK_BUCKET}" ~
    rm -rf ~/repo ~/cloudsdk
    mv ~/$(basename "${CLOUDSDK_BUCKET}") ~/repo
    mkdir ~/cloudsdk
    tar zvxf ~/repo/google-cloud-sdk.tar.gz -C ~/cloudsdk
    export CLOUDSDK_CORE_DISABLE_PROMPTS=1
    export CLOUDSDK_COMPONENT_MANAGER_SNAPSHOT_URL=file://${HOME}/repo/components-2.json
    ~/cloudsdk/google-cloud-sdk/install.sh --disable-installation-options --bash-completion=false --path-update=false --usage-reporting=false
    export PATH=${HOME}/cloudsdk/google-cloud-sdk/bin:${PATH}
    export CLOUDSDK_CONFIG=/var/lib/jenkins/.config/gcloud
fi

### Set up ###
if [[ "${E2E_UP,,}" == "true" ]]; then
    go run ./hack/e2e.go ${E2E_OPT} -v --down
fi
if [[ "${gcp_list_resources}" == "true" ]]; then
    ${gcp_list_resources_script} > "${gcp_resources_before}"
fi
if [[ "${E2E_UP,,}" == "true" ]]; then
    go run ./hack/e2e.go ${E2E_OPT} -v --up
    go run ./hack/e2e.go -v --ctl="version --match-server-version=false"
    if [[ "${gcp_list_resources}" == "true" ]]; then
        ${gcp_list_resources_script} > "${gcp_resources_cluster_up}"
    fi
fi

### Run tests ###
# Jenkins will look at the junit*.xml files for test failures, so don't exit
# with a nonzero error code if it was only tests that failed.
if [[ "${E2E_TEST,,}" == "true" ]]; then
    # Check to make sure the cluster is up before running tests, and fail if it's not.
    go run ./hack/e2e.go ${E2E_OPT} -v --isup
    go run ./hack/e2e.go ${E2E_OPT} -v --test --test_args="${GINKGO_TEST_ARGS}" && exitcode=0 || exitcode=$?
    if [[ "${E2E_PUBLISH_GREEN_VERSION:-}" == "true" && ${exitcode} == 0 && -n ${build_version:-} ]]; then
        echo "publish build_version to ci/latest-green.txt: ${build_version}"
        echo "${build_version}" > ${WORKSPACE}/build_version.txt
        gsutil cp ${WORKSPACE}/build_version.txt gs://kubernetes-release/ci/latest-green.txt
    fi
fi

### Start Kubemark ###
if [[ "${USE_KUBEMARK:-}" == "true" ]]; then
    export RUN_FROM_DISTRO=true
    NUM_NODES_BKP=${NUM_NODES}
    MASTER_SIZE_BKP=${MASTER_SIZE}
    ./test/kubemark/stop-kubemark.sh
    NUM_NODES=${KUBEMARK_NUM_NODES:-$NUM_NODES}
    MASTER_SIZE=${KUBEMARK_MASTER_SIZE:-$MASTER_SIZE}
    ./test/kubemark/start-kubemark.sh
    ./test/kubemark/run-e2e-tests.sh --ginkgo.focus="should\sallow\sstarting\s30\spods\sper\snode" --delete-namespace="false" --gather-resource-usage="false"
    ./test/kubemark/stop-kubemark.sh
    NUM_NODES=${NUM_NODES_BKP}
    MASTER_SIZE=${MASTER_SIZE_BKP}
    unset RUN_FROM_DISTRO
    unset NUM_NODES_BKP
    unset MASTER_SIZE_BKP
fi

### Clean up ###
if [[ "${E2E_DOWN,,}" == "true" ]]; then
    # Sleep before deleting the cluster to give the controller manager time to
    # delete any cloudprovider resources still around from the last test.
    # This is calibrated to allow enough time for 3 attempts to delete the
    # resources. Each attempt is allocated 5 seconds for requests to the
    # cloudprovider plus the processingRetryInterval from servicecontroller.go
    # for the wait between attempts.
    sleep 30
    go run ./hack/e2e.go ${E2E_OPT} -v --down
fi
if [[ "${gcp_list_resources}" == "true" ]]; then
    ${gcp_list_resources_script} > "${gcp_resources_after}"
fi

# Compare resources if the cluster was either
# * started and destroyed (normal e2e), or
# * neither started nor destroyed (soak test)
if [[ "${E2E_UP:-}" == "${E2E_DOWN:-}" && -f "${gcp_resources_before}" && -f "${gcp_resources_after}" ]]; then
    if ! diff -sw -U0 -F'^\[.*\]$' "${gcp_resources_before}" "${gcp_resources_after}" && [[ "${FAIL_ON_GCP_RESOURCE_LEAK:-}" == "true" ]]; then
        echo "!!! FAIL: Google Cloud Platform resources leaked while running tests!"
        exit 1
    fi
source <(curl -fsS --retry 3 "https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/jenkins/e2e-runner.sh")
fi
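Stitched together from the added lines in this hunk (the removed lines above are the code that moved into e2e-runner.sh), the new tail of e2e.sh presumably reduces to:
# If we are on PR Jenkins merging into master, use the local e2e-runner.sh. Otherwise, use the latest on github.
if [[ "${ghprbTargetBranch:-}" == "master" ]]; then
    source "hack/jenkins/e2e-runner.sh"
else
    source <(curl -fsS --retry 3 "https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/jenkins/e2e-runner.sh")
fi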