From 83bfe2cfda0f90d1d1b9ed04684c8fe115f64fe2 Mon Sep 17 00:00:00 2001 From: Isaac Hollander McCreery Date: Thu, 11 Feb 2016 14:08:59 -0800 Subject: [PATCH] Refactor e2e.sh in preparation for YAML-ifying configs Remove E2E_CLUSTER_NAME, E2E_NETWORK, and KUBE_GCE_INSTANCE_PREFIX from jobs that don't need them, because they are in their own project, and add defaults Fail on leaked resources by default Kill E2E_SET_CLUSTER_API_VERSION Rely on defaults for E2E_UP, E2E_TEST, E2E_DOWN Only PUBLISH_GREEN_VERSION for kubernetes-e2e-gce Directly export ZONE vars rather than chaining and setting locals Collapse E2E_CLUSTER_NAME, E2E_NETWORK, and KUBE_GCE_INSTANCE_PREFIX into E2E_NAME (always take the shortest of the three, to avoid name length problems Factor out soak vars Remove incorrect feature enablement ENV vars export GINKGO_TEST_ARGS directly, rather than looping through export at the bottom Directly export E2E Control Variables Export KUBEMARK ENV vars directly export KUBERNETES_PROVIDER directly Export JENKINS_PUBLISHED_VERSION directly, and don't use it where unnecessary; remove old CURRENT_RELEASE_PUBLISHED_VERSION export PROJECT and NETWORK_PROVIDER directly export node ENV vars directly export other shared cluster ENV vars directly Directly export CLOUDSDK ENV vars export GCE and AWS ENVs directly Various cleanup, including refactoring upgrades Fixup: no PERFORMANCE env for AWS Address comment Fixes for comments up to cebf501 Address comments through 81f39b8 Fix: default E2E_UP, E2E_TEST, E2E_DOWN Use JENKINS_FORCE_GET_TARS for upgrade jobs; fixes through 140ea2b Fix ubernetes-lite config Address rebased 2 comments Remove tab --- hack/jenkins/e2e-runner.sh | 22 +- hack/jenkins/e2e.sh | 826 +++++++++++++------------------------ 2 files changed, 287 insertions(+), 561 deletions(-) diff --git a/hack/jenkins/e2e-runner.sh b/hack/jenkins/e2e-runner.sh index f9a71d690a1..11b0f6aba0e 100755 --- a/hack/jenkins/e2e-runner.sh +++ b/hack/jenkins/e2e-runner.sh @@ -49,6 
+49,8 @@ if [[ "${E2E_UP,,}" == "true" || "${JENKINS_FORCE_GET_TARS:-}" =~ ^[yY]$ ]]; the exit 1 fi + # This is for test, staging, and prod jobs on GKE, where we want to + # test what's running in GKE by default rather than some CI build. if [[ ${JENKINS_USE_SERVER_VERSION:-} =~ ^[yY]$ ]]; then # for GKE we can use server default version. bucket="release" @@ -57,13 +59,16 @@ if [[ "${E2E_UP,,}" == "true" || "${JENKINS_FORCE_GET_TARS:-}" =~ ^[yY]$ ]]; the # everything up to, including ": " build_version="v${msg##*: }" echo "Using server version $bucket/$build_version" - else # use JENKINS_PUBLISHED_VERSION + else # use JENKINS_PUBLISHED_VERSION, for CI # Use a published version like "ci/latest" (default), # "release/latest", "release/latest-1", or "release/stable" + JENKINS_PUBLISHED_VERSION=${JENKINS_PUBLISHED_VERSION:-'ci/latest'} IFS='/' read -a varr <<< "${JENKINS_PUBLISHED_VERSION}" bucket="${varr[0]}" build_version=$(gsutil cat gs://kubernetes-release/${JENKINS_PUBLISHED_VERSION}.txt) echo "Using published version $bucket/$build_version (from ${JENKINS_PUBLISHED_VERSION})" + # Set CLUSTER_API_VERSION for GKE CI + export CLUSTER_API_VERSION=$(echo ${build_version} | cut -c 2-) fi # At this point, we want to have the following vars set: # - bucket @@ -72,11 +77,6 @@ if [[ "${E2E_UP,,}" == "true" || "${JENKINS_FORCE_GET_TARS:-}" =~ ^[yY]$ ]]; the "gs://kubernetes-release/${bucket}/${build_version}/kubernetes.tar.gz" \ "gs://kubernetes-release/${bucket}/${build_version}/kubernetes-test.tar.gz" \ . - - # Set by GKE-CI to change the CLUSTER_API_VERSION to the git version - if [[ ! -z ${E2E_SET_CLUSTER_API_VERSION:-} ]]; then - export CLUSTER_API_VERSION=$(echo ${build_version} | cut -c 2-) - fi fi if [[ ! 
"${CIRCLECI:-}" == "true" ]]; then @@ -154,13 +154,13 @@ fi ### Set up ### if [[ "${E2E_UP,,}" == "true" ]]; then - go run ./hack/e2e.go ${E2E_OPT} -v --down + go run ./hack/e2e.go ${E2E_OPT:-} -v --down fi if [[ "${gcp_list_resources}" == "true" ]]; then ${gcp_list_resources_script} > "${gcp_resources_before}" fi if [[ "${E2E_UP,,}" == "true" ]]; then - go run ./hack/e2e.go ${E2E_OPT} -v --up + go run ./hack/e2e.go ${E2E_OPT:-} -v --up go run ./hack/e2e.go -v --ctl="version --match-server-version=false" if [[ "${gcp_list_resources}" == "true" ]]; then ${gcp_list_resources_script} > "${gcp_resources_cluster_up}" @@ -172,8 +172,8 @@ fi # with a nonzero error code if it was only tests that failed. if [[ "${E2E_TEST,,}" == "true" ]]; then # Check to make sure the cluster is up before running tests, and fail if it's not. - go run ./hack/e2e.go ${E2E_OPT} -v --isup - go run ./hack/e2e.go ${E2E_OPT} -v --test --test_args="${GINKGO_TEST_ARGS}" && exitcode=0 || exitcode=$? + go run ./hack/e2e.go ${E2E_OPT:-} -v --isup + go run ./hack/e2e.go ${E2E_OPT:-} -v --test --test_args="${GINKGO_TEST_ARGS}" && exitcode=0 || exitcode=$? if [[ "${E2E_PUBLISH_GREEN_VERSION:-}" == "true" && ${exitcode} == 0 && -n ${build_version:-} ]]; then echo "publish build_version to ci/latest-green.txt: ${build_version}" echo "${build_version}" > ${WORKSPACE}/build_version.txt @@ -208,7 +208,7 @@ if [[ "${E2E_DOWN,,}" == "true" ]]; then # cloudprovider plus the processingRetryInterval from servicecontroller.go # for the wait between attempts. 
sleep 30 - go run ./hack/e2e.go ${E2E_OPT} -v --down + go run ./hack/e2e.go ${E2E_OPT:-} -v --down fi if [[ "${gcp_list_resources}" == "true" ]]; then ${gcp_list_resources_script} > "${gcp_resources_after}" diff --git a/hack/jenkins/e2e.sh b/hack/jenkins/e2e.sh index 1c963ab7e61..e0d60b77be4 100755 --- a/hack/jenkins/e2e.sh +++ b/hack/jenkins/e2e.sh @@ -21,196 +21,98 @@ set -o errexit set -o nounset set -o pipefail -# Properly configure globals for an upgrade step in a GKE or GCE upgrade suite -# -# These suites: -# step1: launch a cluster at $old_version, -# step2: runs $new_version Kubectl e2es, -# step3: upgrades the master to $new_version, -# step4: runs $old_version e2es, -# step5: upgrades the rest of the cluster, -# step6: runs $old_version e2es again, then -# step7: runs $new_version e2es and tears down the cluster. -# -# Assumes globals: -# $JOB_NAME -# $KUBERNETES_PROVIDER -# -# Args: -# $1 old_version: the version to deploy a cluster at, and old e2e tests to run -# against the upgraded cluster (should be something like -# 'release/latest', to work with JENKINS_PUBLISHED_VERSION logic) -# $2 new_version: the version to upgrade the cluster to, and new e2e tests to run -# against the upgraded cluster (should be something like -# 'ci/latest', to work with JENKINS_PUBLISHED_VERSION logic) -# $3 cluster_name: determines E2E_CLUSTER_NAME and E2E_NETWORK -# $4 project: determines PROJECT - -function configure_upgrade_step() { - local -r old_version="$1" - local -r new_version="$2" - local -r cluster_name="$3" - local -r project="$4" - - [[ "${JOB_NAME}" =~ .*-(step[1-7])-.* ]] || { - echo "JOB_NAME ${JOB_NAME} is not a valid upgrade job name, could not parse" - exit 1 - } - local -r step="${BASH_REMATCH[1]}" - - if [[ "${KUBERNETES_PROVIDER}" == "gce" ]]; then - KUBE_GCE_INSTANCE_PREFIX="$cluster_name" - NUM_NODES=5 - KUBE_ENABLE_DAEMONSETS=true - fi - - E2E_CLUSTER_NAME="$cluster_name" - E2E_NETWORK="$cluster_name" - PROJECT="$project" - - case $step in - 
step1) - # Deploy at old version - JENKINS_PUBLISHED_VERSION="${old_version}" - - E2E_UP="true" - E2E_TEST="false" - E2E_DOWN="false" - - if [[ "${KUBERNETES_PROVIDER}" == "gke" ]]; then - E2E_SET_CLUSTER_API_VERSION=y - fi - ;; - - step2) - # Run new e2e kubectl tests - JENKINS_PUBLISHED_VERSION="${new_version}" - JENKINS_FORCE_GET_TARS=y - - E2E_OPT="--check_version_skew=false" - E2E_UP="false" - E2E_TEST="true" - E2E_DOWN="false" - GINKGO_TEST_ARGS="--ginkgo.focus=Kubectl" - ;; - - step3) - # Use upgrade logic of version we're upgrading to. - JENKINS_PUBLISHED_VERSION="${new_version}" - JENKINS_FORCE_GET_TARS=y - - E2E_OPT="--check_version_skew=false" - E2E_UP="false" - E2E_TEST="true" - E2E_DOWN="false" - GINKGO_TEST_ARGS="--ginkgo.focus=\[Feature:Upgrade\].*upgrade-master --upgrade-target=${new_version}" - ;; - - step4) - # Run old e2es - JENKINS_PUBLISHED_VERSION="${old_version}" - JENKINS_FORCE_GET_TARS=y - - E2E_OPT="--check_version_skew=false" - E2E_UP="false" - E2E_TEST="true" - E2E_DOWN="false" - ;; - - step5) - # Use upgrade logic of version we're upgrading to. - JENKINS_PUBLISHED_VERSION="${new_version}" - JENKINS_FORCE_GET_TARS=y - - E2E_OPT="--check_version_skew=false" - E2E_UP="false" - E2E_TEST="true" - E2E_DOWN="false" - GINKGO_TEST_ARGS="--ginkgo.focus=\[Feature:Upgrade\].*upgrade-cluster --upgrade-target=${new_version}" - ;; - - step6) - # Run old e2es - JENKINS_PUBLISHED_VERSION="${old_version}" - JENKINS_FORCE_GET_TARS=y - - E2E_OPT="--check_version_skew=false" - E2E_UP="false" - E2E_TEST="true" - E2E_DOWN="false" - ;; - - step7) - # Run new e2es - JENKINS_PUBLISHED_VERSION="${new_version}" - JENKINS_FORCE_GET_TARS=y - - # TODO(15011): these really shouldn't be (very) version skewed, but - # because we have to get ci/latest again, it could get slightly out of - # whack. 
- E2E_OPT="--check_version_skew=false" - E2E_UP="false" - E2E_TEST="true" - E2E_DOWN="true" - ;; - esac -} - echo "--------------------------------------------------------------------------------" echo "Initial Environment:" printenv | sort echo "--------------------------------------------------------------------------------" -if [[ "${CIRCLECI:-}" == "true" ]]; then - JOB_NAME="circleci-${CIRCLE_PROJECT_USERNAME}-${CIRCLE_PROJECT_REPONAME}" - BUILD_NUMBER=${CIRCLE_BUILD_NUM} - WORKSPACE=`pwd` -else - # Jenkins? - export HOME=${WORKSPACE} # Nothing should want Jenkins $HOME -fi +# Nothing should want Jenkins $HOME +export HOME=${WORKSPACE} -# Additional parameters that are passed to hack/e2e.go -E2E_OPT=${E2E_OPT:-""} - -# Set environment variables shared for all of the GCE Jenkins projects. +# Set environment variables based on provider if [[ ${JOB_NAME} =~ ^kubernetes-.*-gce ]]; then - KUBERNETES_PROVIDER="gce" - : ${E2E_MIN_STARTUP_PODS:="1"} - : ${E2E_ZONE:="us-central1-f"} - : ${NUM_NODES_PARALLEL:="6"} # Number of nodes required to run all of the tests in parallel - + export KUBERNETES_PROVIDER="gce" + export E2E_MIN_STARTUP_PODS="1" + export KUBE_GCE_ZONE="us-central1-f" + export FAIL_ON_GCP_RESOURCE_LEAK="true" elif [[ ${JOB_NAME} =~ ^kubernetes-.*-gke ]]; then - KUBERNETES_PROVIDER="gke" - : ${E2E_ZONE:="us-central1-f"} + export KUBERNETES_PROVIDER="gke" + export ZONE="us-central1-f" # By default, GKE tests run against the GKE test endpoint using CI Cloud SDK. # Release jobs (e.g. prod, staging, and test) override these two variables. 
- : ${CLOUDSDK_BUCKET:="gs://cloud-sdk-build/testing/staging"} - : ${GKE_API_ENDPOINT:="https://test-container.sandbox.googleapis.com/"} + export CLOUDSDK_BUCKET="gs://cloud-sdk-build/testing/staging" + export CLOUDSDK_API_ENDPOINT_OVERRIDES_CONTAINER="https://test-container.sandbox.googleapis.com/" + export FAIL_ON_GCP_RESOURCE_LEAK="true" elif [[ ${JOB_NAME} =~ ^kubernetes-.*-aws ]]; then - KUBERNETES_PROVIDER="aws" - : ${E2E_MIN_STARTUP_PODS:="1"} - : ${E2E_ZONE:="us-east-1a"} - : ${NUM_NODES_PARALLEL:="6"} # Number of nodes required to run all of the tests in parallel + export KUBERNETES_PROVIDER="aws" + export E2E_MIN_STARTUP_PODS="1" + export KUBE_AWS_ZONE="us-west-2a" + export MASTER_SIZE="m3.medium" + export NODE_SIZE="m3.medium" + export NUM_NODES="3" fi -if [[ "${KUBERNETES_PROVIDER}" == "aws" ]]; then - if [[ "${PERFORMANCE:-}" == "true" ]]; then - : ${MASTER_SIZE:="m3.xlarge"} - : ${NUM_NODES:="100"} - : ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Feature:Performance\]"} - else - : ${MASTER_SIZE:="m3.medium"} - : ${NODE_SIZE:="m3.medium"} - : ${NUM_NODES:="3"} +# Set environment variables based on soak jobs +if [[ ${JOB_NAME} =~ soak-weekly ]]; then + export FAIL_ON_GCP_RESOURCE_LEAK="false" + export E2E_TEST="false" + export E2E_DOWN="false" +elif [[ ${JOB_NAME} =~ soak-continuous ]]; then + export FAIL_ON_GCP_RESOURCE_LEAK="false" + export E2E_UP="false" + export E2E_DOWN="false" + # Clear out any orphaned namespaces in case previous run was interrupted. + export E2E_CLEAN_START="true" + # We should be testing the reliability of a long-running cluster. The + # [Disruptive] tests kill/restart components or nodes in the cluster, + # defeating the purpose of a soak cluster. 
(#15722) + export GINKGO_TEST_ARGS="--ginkgo.skip=\[Disruptive\]|\[Flaky\]|\[Feature:.+\]" +fi + +# Set environment variables based on upgrade jobs +if [[ ${JOB_NAME} =~ ^kubernetes-upgrade ]]; then + # Upgrade jobs bounce back and forth between versions; just force + # it to always get the tars of the version it wants to test. + export JENKINS_FORCE_GET_TARS="y" + export FAIL_ON_GCP_RESOURCE_LEAK="false" + if [[ "${KUBERNETES_PROVIDER}" == "gce" ]]; then + export NUM_NODES=5 + fi + if [[ "${JOB_NAME}" =~ step1 ]]; then + export E2E_TEST="false" + export E2E_DOWN="false" + elif [[ "${JOB_NAME}" =~ step2 ]]; then + export E2E_OPT="--check_version_skew=false" + export E2E_UP="false" + export E2E_DOWN="false" + export GINKGO_TEST_ARGS="--ginkgo.focus=Kubectl" + elif [[ "${JOB_NAME}" =~ step3 ]]; then + export E2E_OPT="--check_version_skew=false" + export E2E_UP="false" + export E2E_DOWN="false" + export GINKGO_TEST_ARGS="--ginkgo.focus=\[Feature:Upgrade\].*upgrade-master --upgrade-target=${JENKINS_PUBLISHED_VERSION}" + elif [[ "${JOB_NAME}" =~ step4 ]]; then + export E2E_OPT="--check_version_skew=false" + export E2E_UP="false" + export E2E_DOWN="false" + elif [[ "${JOB_NAME}" =~ step5 ]]; then + export E2E_OPT="--check_version_skew=false" + export E2E_UP="false" + export E2E_DOWN="false" + export GINKGO_TEST_ARGS="--ginkgo.focus=\[Feature:Upgrade\].*upgrade-cluster --upgrade-target=${JENKINS_PUBLISHED_VERSION}" + elif [[ "${JOB_NAME}" =~ step6 ]]; then + export E2E_OPT="--check_version_skew=false" + export E2E_UP="false" + export E2E_DOWN="false" + elif [[ "${JOB_NAME}" =~ step7 ]]; then + # TODO(15011): these really shouldn't be (very) version skewed, but + # because we have to get ci/latest again, it could get slightly out of + # whack. + export E2E_OPT="--check_version_skew=false" + export E2E_UP="false" fi fi -# CURRENT_RELEASE_PUBLISHED_VERSION is the JENKINS_PUBLISHED_VERSION for the -# release we are currently pointing our release testing infrastructure at. 
-# When 1.2.0-beta.0 comes out, e.g., this will become "ci/latest-1.2" -CURRENT_RELEASE_PUBLISHED_VERSION="ci/latest-1.1" - # Define environment variables based on the Jenkins project name. # NOTE: Not all jobs are defined here. The hack/jenkins/e2e.sh in master and # release branches defines relevant jobs for that particular version of @@ -221,462 +123,333 @@ case ${JOB_NAME} in # Runs a subset of tests on GCE in parallel. Run against all pending PRs. kubernetes-pull-build-test-e2e-gce) - : ${E2E_CLUSTER_NAME:="jnks-e2e-gce-${NODE_NAME}-${EXECUTOR_NUMBER}"} - : ${E2E_NETWORK:="e2e-gce-${NODE_NAME}-${EXECUTOR_NUMBER}"} - : ${GINKGO_PARALLEL:="y"} + # XXX Not a unique project + export E2E_NAME="e2e-gce-${NODE_NAME}-${EXECUTOR_NUMBER}" + export GINKGO_PARALLEL="y" # This list should match the list in kubernetes-e2e-gce. - : ${GINKGO_TEST_ARGS:="--ginkgo.skip=\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]"} - : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-gce-${NODE_NAME}-${EXECUTOR_NUMBER}"} - : ${PROJECT:="kubernetes-jenkins-pull"} + export GINKGO_TEST_ARGS="--ginkgo.skip=\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]" + export FAIL_ON_GCP_RESOURCE_LEAK="false" + export PROJECT="kubernetes-jenkins-pull" # Override GCE defaults - NUM_NODES=${NUM_NODES_PARALLEL} + export NUM_NODES="6" ;; # GCE core jobs # Runs all non-slow, non-serial, non-flaky, tests on GCE in parallel. kubernetes-e2e-gce) - : ${E2E_CLUSTER_NAME:="jenkins-gce-e2e"} - : ${E2E_PUBLISH_GREEN_VERSION:="true"} - : ${E2E_NETWORK:="e2e-gce"} + # This is the *only* job that should publish the last green version. + export E2E_PUBLISH_GREEN_VERSION="true" # This list should match the list in kubernetes-pull-build-test-e2e-gce. 
- : ${GINKGO_TEST_ARGS:="--ginkgo.skip=\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]"} - : ${GINKGO_PARALLEL:="y"} - : ${KUBE_GCE_INSTANCE_PREFIX="e2e-gce"} - : ${PROJECT:="k8s-jkns-e2e-gce"} - : ${FAIL_ON_GCP_RESOURCE_LEAK:="true"} + export GINKGO_TEST_ARGS="--ginkgo.skip=\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]" + export GINKGO_PARALLEL="y" + export PROJECT="k8s-jkns-e2e-gce" ;; # Runs slow tests on GCE, sequentially. kubernetes-e2e-gce-slow) - : ${E2E_CLUSTER_NAME:="jenkins-gce-e2e-slow"} - : ${E2E_NETWORK:="e2e-slow"} - : ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Slow\] \ - --ginkgo.skip=\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]"} - : ${GINKGO_PARALLEL:="y"} - : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-slow"} - : ${PROJECT:="k8s-jkns-e2e-gce-slow"} - : ${FAIL_ON_GCP_RESOURCE_LEAK:="true"} + export GINKGO_TEST_ARGS="--ginkgo.focus=\[Slow\] \ + --ginkgo.skip=\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]" + export GINKGO_PARALLEL="y" + export PROJECT="k8s-jkns-e2e-gce-slow" ;; # Runs all non-flaky, non-slow tests on GCE, sequentially, # and in a multi-zone ("Ubernetes Lite") cluster. 
kubernetes-e2e-gce-ubernetes-lite) - : ${E2E_CLUSTER_NAME:="jenkins-gce-e2e-ubelite"} - : ${E2E_PUBLISH_GREEN_VERSION:="true"} - : ${E2E_NETWORK:="e2e-gce-ubelite"} - : ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \ - ${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \ - ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \ - ${GCE_SLOW_TESTS[@]:+${GCE_SLOW_TESTS[@]}} \ - )"} - : ${KUBE_GCE_INSTANCE_PREFIX="e2e-gce-ubelite"} - : ${PROJECT:="k8s-jkns-e2e-gce-ubelite"} - : ${ENABLE_DEPLOYMENTS:=true} - : ${FAIL_ON_GCP_RESOURCE_LEAK:="true"} - : ${E2E_MULTIZONE:="true"} - : ${E2E_ZONE:=""} - : ${E2E_ZONES:=us-central1-a us-central1-b us-central1-f} + export PROJECT="k8s-jkns-e2e-gce-ubelite" + export E2E_MULTIZONE="true" + export KUBE_GCE_ZONE="" + export E2E_ZONES="us-central1-a us-central1-b us-central1-f" ;; # Run the [Serial], [Disruptive], and [Feature:Restart] tests on GCE. kubernetes-e2e-gce-serial) - : ${E2E_CLUSTER_NAME:="jenkins-gce-e2e-serial"} - : ${E2E_NETWORK:="jenkins-gce-e2e-serial"} - : ${FAIL_ON_GCP_RESOURCE_LEAK:="true"} - : ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Serial\]|\[Disruptive\] \ - --ginkgo.skip=\[Flaky\]|\[Feature:.+\]"} - : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-serial"} - : ${PROJECT:="kubernetes-jkns-e2e-gce-serial"} + export GINKGO_TEST_ARGS="--ginkgo.focus=\[Serial\]|\[Disruptive\] \ + --ginkgo.skip=\[Flaky\]|\[Feature:.+\]" + export PROJECT="kubernetes-jkns-e2e-gce-serial" ;; # Runs only the ingress tests on GCE. kubernetes-e2e-gce-ingress) - : ${E2E_CLUSTER_NAME:="jenkins-gce-e2e-ingress"} - : ${E2E_NETWORK:="e2e-ingress"} - : ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Feature:Ingress\]"} - : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-ingress"} - : ${FAIL_ON_GCP_RESOURCE_LEAK:="true"} + # XXX Not a unique project + export E2E_NAME="e2e-ingress" + export GINKGO_TEST_ARGS="--ginkgo.focus=\[Feature:Ingress\]" # TODO: Move this into a different project. 
Currently, since this test # shares resources with various other networking tests, so it's easier # to zero in on the source of a leak if it's run in isolation. - : ${PROJECT:="kubernetes-flannel"} + export PROJECT="kubernetes-flannel" ;; # Runs only the ingress tests on GKE. kubernetes-e2e-gke-ingress) - : ${E2E_CLUSTER_NAME:="jenkins-gke-e2e-ingress"} - : ${E2E_NETWORK:="e2e-gke-ingress"} - : ${E2E_SET_CLUSTER_API_VERSION:=y} - : ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Feature:Ingress\]"} - : ${FAIL_ON_GCP_RESOURCE_LEAK:="true"} - : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-gke-ingress"} + # XXX Not a unique project + export E2E_NAME="e2e-gke-ingress" + export GINKGO_TEST_ARGS="--ginkgo.focus=\[Feature:Ingress\]" # TODO: Move this into a different project. Currently, since this test # shares resources with various other networking tests, it's easier to # zero in on the source of a leak if it's run in isolation. - : ${PROJECT:="kubernetes-flannel"} + export PROJECT="kubernetes-flannel" ;; # Runs the flaky tests on GCE, sequentially. kubernetes-e2e-gce-flaky) - : ${E2E_CLUSTER_NAME:="jenkins-gce-e2e-flaky"} - : ${E2E_NETWORK:="e2e-flaky"} - : ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Flaky\] \ - --ginkgo.skip=\[Feature:.+\]"} - : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-flaky"} - : ${PROJECT:="k8s-jkns-e2e-gce-flaky"} - : ${FAIL_ON_GCP_RESOURCE_LEAK:="true"} - : ${E2E_DOWN:="true"} + export GINKGO_TEST_ARGS="--ginkgo.focus=\[Flaky\] \ + --ginkgo.skip=\[Feature:.+\]" + export PROJECT="k8s-jkns-e2e-gce-flaky" ;; # GKE core jobs # Runs all non-slow, non-serial, non-flaky, tests on GKE in parallel. 
kubernetes-e2e-gke) - : ${E2E_CLUSTER_NAME:="jkns-gke-e2e-ci"} - : ${E2E_NETWORK:="e2e-gke-ci"} - : ${E2E_SET_CLUSTER_API_VERSION:=y} - : ${PROJECT:="k8s-jkns-e2e-gke-ci"} - : ${FAIL_ON_GCP_RESOURCE_LEAK:="true"} - : ${GINKGO_TEST_ARGS:="--ginkgo.skip=\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]"} - : ${GINKGO_PARALLEL:="y"} + export PROJECT="k8s-jkns-e2e-gke-ci" + export GINKGO_TEST_ARGS="--ginkgo.skip=\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]" + export GINKGO_PARALLEL="y" ;; kubernetes-e2e-gke-slow) - : ${E2E_CLUSTER_NAME:="jkns-gke-e2e-slow"} - : ${E2E_NETWORK:="e2e-gke-slow"} - : ${E2E_SET_CLUSTER_API_VERSION:=y} - : ${PROJECT:="k8s-jkns-e2e-gke-slow"} - : ${FAIL_ON_GCP_RESOURCE_LEAK:="true"} - : ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Slow\] \ - --ginkgo.skip=\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]"} - : ${GINKGO_PARALLEL:="y"} + export PROJECT="k8s-jkns-e2e-gke-slow" + export GINKGO_TEST_ARGS="--ginkgo.focus=\[Slow\] \ + --ginkgo.skip=\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]" + export GINKGO_PARALLEL="y" ;; # Run the [Serial], [Disruptive], and [Feature:Restart] tests on GKE. 
kubernetes-e2e-gke-serial) - : ${E2E_CLUSTER_NAME:="jenkins-gke-e2e-serial"} - : ${E2E_NETWORK:="jenkins-gke-e2e-serial"} - : ${E2E_SET_CLUSTER_API_VERSION:=y} - : ${FAIL_ON_GCP_RESOURCE_LEAK:="true"} - : ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Serial\]|\[Disruptive\] \ - --ginkgo.skip=\[Flaky\]|\[Feature:.+\]"} - : ${PROJECT:="jenkins-gke-e2e-serial"} + export GINKGO_TEST_ARGS="--ginkgo.focus=\[Serial\]|\[Disruptive\] \ + --ginkgo.skip=\[Flaky\]|\[Feature:.+\]" + export PROJECT="jenkins-gke-e2e-serial" ;; kubernetes-e2e-gke-flaky) - : ${E2E_CLUSTER_NAME:="kubernetes-gke-e2e-flaky"} - : ${E2E_NETWORK:="gke-e2e-flaky"} - : ${E2E_SET_CLUSTER_API_VERSION:=y} - : ${PROJECT:="k8s-jkns-e2e-gke-ci-flaky"} - : ${FAIL_ON_GCP_RESOURCE_LEAK:="true"} - : ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Flaky\] \ - --ginkgo.skip=\[Feature:.+\]"} + export PROJECT="k8s-jkns-e2e-gke-ci-flaky" + export GINKGO_TEST_ARGS="--ginkgo.focus=\[Flaky\] \ + --ginkgo.skip=\[Feature:.+\]" ;; # AWS core jobs # Runs all non-flaky, non-slow tests on AWS, sequentially. 
kubernetes-e2e-aws) - : ${E2E_PUBLISH_GREEN_VERSION:=true} - : ${E2E_CLUSTER_NAME:="jenkins-aws-e2e"} - : ${E2E_ZONE:="us-west-2a"} - : ${ZONE:="us-west-2a"} - : ${E2E_NETWORK:="e2e-aws"} - : ${GINKGO_TEST_ARGS:="--ginkgo.skip=\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]"} - : ${GINKGO_PARALLEL:="y"} - : ${KUBE_GCE_INSTANCE_PREFIX="e2e-aws"} - : ${PROJECT:="k8s-jkns-e2e-aws"} - : ${AWS_CONFIG_FILE:='/var/lib/jenkins/.aws/credentials'} - : ${AWS_SSH_KEY:='/var/lib/jenkins/.ssh/kube_aws_rsa'} - : ${KUBE_SSH_USER:='ubuntu'} + export GINKGO_TEST_ARGS="--ginkgo.skip=\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]" + export GINKGO_PARALLEL="y" + export PROJECT="k8s-jkns-e2e-aws" + export AWS_CONFIG_FILE='/var/lib/jenkins/.aws/credentials' + export AWS_SSH_KEY='/var/lib/jenkins/.ssh/kube_aws_rsa' + export KUBE_SSH_USER='ubuntu' # This is needed to be able to create PD from the e2e test - : ${AWS_SHARED_CREDENTIALS_FILE:='/var/lib/jenkins/.aws/credentials'} + export AWS_SHARED_CREDENTIALS_FILE='/var/lib/jenkins/.aws/credentials' ;; # Feature jobs # Runs only the reboot tests on GCE. kubernetes-e2e-gce-reboot) - : ${E2E_CLUSTER_NAME:="jenkins-gce-e2e-reboot"} - : ${E2E_NETWORK:="e2e-reboot"} - : ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Feature:Reboot\]"} - : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-reboot"} - : ${PROJECT:="kubernetes-jenkins"} + export GINKGO_TEST_ARGS="--ginkgo.focus=\[Feature:Reboot\]" + export PROJECT="k8s-jkns-e2e-gce-ci-reboot" ;; kubernetes-e2e-gke-reboot) - : ${E2E_CLUSTER_NAME:="jkns-gke-e2e-ci-reboot"} - : ${E2E_NETWORK:="e2e-gke-ci-reboot"} - : ${E2E_SET_CLUSTER_API_VERSION:=y} - : ${PROJECT:="k8s-jkns-e2e-gke-ci-reboot"} - : ${FAIL_ON_GCP_RESOURCE_LEAK:="true"} - : ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Feature:Reboot\]"} + export PROJECT="k8s-jkns-e2e-gke-ci-reboot" + export GINKGO_TEST_ARGS="--ginkgo.focus=\[Feature:Reboot\]" ;; # Runs only the examples tests on GCE. 
kubernetes-e2e-gce-examples) - : ${E2E_CLUSTER_NAME:="jenkins-gce-e2e-examples"} - : ${E2E_NETWORK:="e2e-examples"} - : ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Feature:Example\]"} - : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-examples"} - : ${PROJECT:="kubernetes-jenkins"} + export GINKGO_TEST_ARGS="--ginkgo.focus=\[Feature:Example\]" + export PROJECT="k8s-jkns-e2e-examples" ;; # Runs only the autoscaling tests on GCE. kubernetes-e2e-gce-autoscaling) - : ${E2E_CLUSTER_NAME:="jenkins-gce-e2e-autoscaling"} - : ${E2E_NETWORK:="e2e-autoscaling"} - : ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Feature:ClusterSizeAutoscaling\]|\[Feature:InitialResources\] \ - --ginkgo.skip=\[Flaky\]"} - : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-autoscaling"} - : ${PROJECT:="k8s-jnks-e2e-gce-autoscaling"} - : ${FAIL_ON_GCP_RESOURCE_LEAK:="true"} + export GINKGO_TEST_ARGS="--ginkgo.focus=\[Feature:ClusterSizeAutoscaling\]|\[Feature:InitialResources\] \ + --ginkgo.skip=\[Flaky\]" + export PROJECT="k8s-jnks-e2e-gce-autoscaling" # Override GCE default for cluster size autoscaling purposes. - ENABLE_CLUSTER_MONITORING="googleinfluxdb" - ADMISSION_CONTROL="NamespaceLifecycle,InitialResources,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota" + export KUBE_ENABLE_CLUSTER_MONITORING="googleinfluxdb" + export KUBE_ADMISSION_CONTROL="NamespaceLifecycle,InitialResources,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota" ;; # Runs the performance/scalability tests on GCE. A larger cluster is used. 
kubernetes-e2e-gce-scalability) - : ${E2E_CLUSTER_NAME:="jenkins-gce-e2e-scalability"} - : ${E2E_NETWORK:="e2e-scalability"} - : ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Feature:Performance\] \ - --gather-resource-usage=true \ - --gather-metrics-at-teardown=true \ - --gather-logs-sizes=true \ - --output-print-type=json"} - : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-scalability"} - : ${PROJECT:="kubernetes-jenkins"} + # XXX Not a unique project + export E2E_NAME="e2e-scalability" + export GINKGO_TEST_ARGS="--ginkgo.focus=\[Feature:Performance\] \ + --gather-resource-usage=true \ + --gather-metrics-at-teardown=true \ + --gather-logs-sizes=true \ + --output-print-type=json" + export PROJECT="kubernetes-jenkins" + export FAIL_ON_GCP_RESOURCE_LEAK="false" # Override GCE defaults. - MASTER_SIZE="n1-standard-4" - NODE_SIZE="n1-standard-2" - NODE_DISK_SIZE="50GB" - NUM_NODES="100" + export MASTER_SIZE="n1-standard-4" + export NODE_SIZE="n1-standard-2" + export NODE_DISK_SIZE="50GB" + export NUM_NODES="100" # Reduce logs verbosity - TEST_CLUSTER_LOG_LEVEL="--v=2" + export TEST_CLUSTER_LOG_LEVEL="--v=2" # TODO: Remove when we figure out the reason for ocassional failures #19048 - KUBELET_TEST_LOG_LEVEL="--v=4" + export KUBELET_TEST_LOG_LEVEL="--v=4" # Increase resync period to simulate production - TEST_CLUSTER_RESYNC_PERIOD="--min-resync-period=12h" + export TEST_CLUSTER_RESYNC_PERIOD="--min-resync-period=12h" ;; # Runs e2e on GCE with flannel and VXLAN. kubernetes-e2e-gce-flannel) - : ${E2E_CLUSTER_NAME:="jenkins-gce-e2e-flannel"} - : ${E2E_PUBLISH_GREEN_VERSION:="true"} - : ${E2E_NETWORK:="e2e-gce-flannel"} - : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-flannel"} - : ${PROJECT:="kubernetes-flannel"} + # XXX Not a unique project + export E2E_NAME="e2e-flannel" + export PROJECT="kubernetes-flannel" + export FAIL_ON_GCP_RESOURCE_LEAK="false" # Override GCE defaults. 
- NETWORK_PROVIDER="flannel" + export NETWORK_PROVIDER="flannel" ;; # Runs the performance/scalability test on huge 1000-node cluster on GCE. # Flannel is used as network provider. # Allows a couple of nodes to be NotReady during startup kubernetes-e2e-gce-enormous-cluster) - : ${E2E_CLUSTER_NAME:="jenkins-gce-enormous-cluster"} - : ${E2E_NETWORK:="e2e-enormous-cluster"} + # XXX Not a unique project + export E2E_NAME="e2e-enormous-cluster" # TODO: Currently run only density test. # Once this is stable, run the whole [Performance] suite. - : ${GINKGO_TEST_ARGS:="--ginkgo.focus=starting\s30\spods\sper\snode"} - : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-enormous-cluster"} - : ${PROJECT:="kubernetes-scale"} + export GINKGO_TEST_ARGS="--ginkgo.focus=starting\s30\spods\sper\snode" + export PROJECT="kubernetes-scale" + export FAIL_ON_GCP_RESOURCE_LEAK="false" # Override GCE defaults. - NETWORK_PROVIDER="flannel" + export NETWORK_PROVIDER="flannel" # Temporarily switch of Heapster, as this will not schedule anywhere. # TODO: Think of a solution to enable it. - ENABLE_CLUSTER_MONITORING="none" - E2E_ZONE="asia-east1-a" - MASTER_SIZE="n1-standard-32" - NODE_SIZE="n1-standard-1" - NODE_DISK_SIZE="50GB" - NUM_NODES="1000" - ALLOWED_NOTREADY_NODES="2" - EXIT_ON_WEAK_ERROR="false" + export KUBE_ENABLE_CLUSTER_MONITORING="none" + export KUBE_GCE_ZONE="asia-east1-a" + export MASTER_SIZE="n1-standard-32" + export NODE_SIZE="n1-standard-1" + export NODE_DISK_SIZE="50GB" + export NUM_NODES="1000" + export ALLOWED_NOTREADY_NODES="2" + export EXIT_ON_WEAK_ERROR="false" # Reduce logs verbosity - TEST_CLUSTER_LOG_LEVEL="--v=1" + export TEST_CLUSTER_LOG_LEVEL="--v=1" # Increase resync period to simulate production - TEST_CLUSTER_RESYNC_PERIOD="--min-resync-period=12h" + export TEST_CLUSTER_RESYNC_PERIOD="--min-resync-period=12h" ;; # Starts and tears down 1000-node cluster on GCE using flannel networking # Requires all 1000 nodes to come up. 
kubernetes-e2e-gce-enormous-startup) - : ${E2E_CLUSTER_NAME:="jenkins-gce-enormous-startup"} + # XXX Not a unique project # TODO: increase a quota for networks in kubernetes-scale and move this test to its own network - : ${E2E_NETWORK:="e2e-enormous-cluster"} - : ${E2E_TEST:="false"} - : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-enormous-startup"} - : ${PROJECT:="kubernetes-scale"} + export E2E_NAME="e2e-enormous-cluster" + export E2E_TEST="false" + export PROJECT="kubernetes-scale" + export FAIL_ON_GCP_RESOURCE_LEAK="false" # Override GCE defaults. - NETWORK_PROVIDER="flannel" + export NETWORK_PROVIDER="flannel" # Temporarily switch of Heapster, as this will not schedule anywhere. # TODO: Think of a solution to enable it. - ENABLE_CLUSTER_MONITORING="none" - E2E_ZONE="us-east1-b" - MASTER_SIZE="n1-standard-32" - NODE_SIZE="n1-standard-1" - NODE_DISK_SIZE="50GB" - NUM_NODES="1000" + export KUBE_ENABLE_CLUSTER_MONITORING="none" + export KUBE_GCE_ZONE="us-east1-b" + export MASTER_SIZE="n1-standard-32" + export NODE_SIZE="n1-standard-1" + export NODE_DISK_SIZE="50GB" + export NUM_NODES="1000" # Reduce logs verbosity - TEST_CLUSTER_LOG_LEVEL="--v=1" + export TEST_CLUSTER_LOG_LEVEL="--v=1" # Increase resync period to simulate production - TEST_CLUSTER_RESYNC_PERIOD="--min-resync-period=12h" + export TEST_CLUSTER_RESYNC_PERIOD="--min-resync-period=12h" ;; # Run Kubemark test on a fake 100 node cluster to have a comparison # to the real results from scalability suite kubernetes-kubemark-gce) - : ${E2E_CLUSTER_NAME:="kubernetes-kubemark"} - : ${E2E_NETWORK:="kubernetes-kubemark"} - : ${PROJECT:="k8s-jenkins-kubemark"} - : ${E2E_UP:="true"} - : ${E2E_DOWN:="true"} - : ${E2E_TEST:="false"} - : ${USE_KUBEMARK:="true"} - : ${KUBEMARK_TESTS:="\[Feature:Performance\]"} + export PROJECT="k8s-jenkins-kubemark" + export E2E_TEST="false" + export USE_KUBEMARK="true" + export KUBEMARK_TESTS="\[Feature:Performance\]" # Override defaults to be indpendent from GCE defaults and set kubemark 
parameters - KUBE_GCE_INSTANCE_PREFIX="kubemark100" - NUM_NODES="10" - MASTER_SIZE="n1-standard-2" - NODE_SIZE="n1-standard-1" - E2E_ZONE="us-central1-b" - KUBEMARK_MASTER_SIZE="n1-standard-4" - KUBEMARK_NUM_NODES="100" + export NUM_NODES="10" + export MASTER_SIZE="n1-standard-2" + export NODE_SIZE="n1-standard-1" + export KUBE_GCE_ZONE="us-central1-b" + export KUBEMARK_MASTER_SIZE="n1-standard-4" + export KUBEMARK_NUM_NODES="100" ;; # Run Kubemark test on a fake 500 node cluster to test for regressions on # bigger clusters kubernetes-kubemark-500-gce) - : ${E2E_CLUSTER_NAME:="kubernetes-kubemark-500"} - : ${E2E_NETWORK:="kubernetes-kubemark-500"} - : ${PROJECT:="kubernetes-scale"} - : ${E2E_UP:="true"} - : ${E2E_DOWN:="true"} - : ${E2E_TEST:="false"} - : ${USE_KUBEMARK:="true"} - : ${KUBEMARK_TESTS:="\[Feature:Performance\]"} + # XXX Not a unique project + export E2E_NAME="kubemark-500" + export PROJECT="kubernetes-scale" + export E2E_TEST="false" + export USE_KUBEMARK="true" + export KUBEMARK_TESTS="\[Feature:Performance\]" + export FAIL_ON_GCP_RESOURCE_LEAK="false" # Override defaults to be indpendent from GCE defaults and set kubemark parameters - NUM_NODES="6" - MASTER_SIZE="n1-standard-4" - NODE_SIZE="n1-standard-8" - KUBE_GCE_INSTANCE_PREFIX="kubemark500" - E2E_ZONE="us-east1-b" - KUBEMARK_MASTER_SIZE="n1-standard-16" - KUBEMARK_NUM_NODES="500" + export NUM_NODES="6" + export MASTER_SIZE="n1-standard-4" + export NODE_SIZE="n1-standard-8" + export KUBE_GCE_ZONE="us-east1-b" + export KUBEMARK_MASTER_SIZE="n1-standard-16" + export KUBEMARK_NUM_NODES="500" ;; # Run big Kubemark test, this currently means a 1000 node cluster and 16 core master kubernetes-kubemark-gce-scale) - : ${E2E_CLUSTER_NAME:="kubernetes-kubemark-scale"} - : ${E2E_NETWORK:="kubernetes-kubemark-scale"} - : ${PROJECT:="kubernetes-scale"} - : ${E2E_UP:="true"} - : ${E2E_DOWN:="true"} - : ${E2E_TEST:="false"} - : ${USE_KUBEMARK:="true"} - : ${KUBEMARK_TESTS:="\[Feature:Performance\]"} + # XXX 
Not a unique project + export E2E_NAME="kubemark-1000" + export PROJECT="kubernetes-scale" + export E2E_TEST="false" + export USE_KUBEMARK="true" + export KUBEMARK_TESTS="\[Feature:Performance\]" + export FAIL_ON_GCP_RESOURCE_LEAK="false" # Override defaults to be indpendent from GCE defaults and set kubemark parameters # We need 11 so that we won't hit max-pods limit (set to 100). TODO: do it in a nicer way. - NUM_NODES="11" - MASTER_SIZE="n1-standard-4" - NODE_SIZE="n1-standard-8" # Note: can fit about 17 hollow nodes per core - # so NUM_NODES x cores_per_node should - # be set accordingly. - KUBE_GCE_INSTANCE_PREFIX="kubemark1000" - E2E_ZONE="us-east1-b" - KUBEMARK_MASTER_SIZE="n1-standard-16" - KUBEMARK_NUM_NODES="1000" + export NUM_NODES="11" + export MASTER_SIZE="n1-standard-4" + # Note: can fit about 17 hollow nodes per core so NUM_NODES x + # cores_per_node should be set accordingly. + export NODE_SIZE="n1-standard-8" + export KUBEMARK_MASTER_SIZE="n1-standard-16" + export KUBEMARK_NUM_NODES="1000" + export KUBE_GCE_ZONE="us-east1-b" ;; # Soak jobs # Sets up the GCE soak cluster weekly using the latest CI release. kubernetes-soak-weekly-deploy-gce) - : ${E2E_CLUSTER_NAME:="gce-soak-weekly"} - : ${E2E_DOWN:="false"} - : ${E2E_NETWORK:="gce-soak-weekly"} - : ${E2E_TEST:="false"} - : ${E2E_UP:="true"} - : ${KUBE_GCE_INSTANCE_PREFIX:="gce-soak-weekly"} - : ${HAIRPIN_MODE:="false"} - : ${PROJECT:="kubernetes-jenkins"} + export HAIRPIN_MODE="false" + export PROJECT="k8s-jkns-gce-soak" ;; # Runs tests on GCE soak cluster. kubernetes-soak-continuous-e2e-gce) - : ${E2E_CLUSTER_NAME:="gce-soak-weekly"} - : ${E2E_DOWN:="false"} - : ${E2E_NETWORK:="gce-soak-weekly"} - : ${E2E_UP:="false"} - # Clear out any orphaned namespaces in case previous run was interrupted. - : ${E2E_CLEAN_START:="true"} - # We should be testing the reliability of a long-running cluster. 
The - # [Disruptive] tests kill/restart components or nodes in the cluster, - # defeating the purpose of a soak cluster. (#15722) - : ${GINKGO_TEST_ARGS:="--ginkgo.skip=\[Disruptive\]|\[Flaky\]|\[Feature:.+\]"} - : ${KUBE_GCE_INSTANCE_PREFIX:="gce-soak-weekly"} - : ${HAIRPIN_MODE:="false"} - : ${PROJECT:="kubernetes-jenkins"} + export HAIRPIN_MODE="false" + export PROJECT="k8s-jkns-gce-soak" ;; # Clone of kubernetes-soak-weekly-deploy-gce. Issue #20832. kubernetes-soak-weekly-deploy-gce-2) - : ${E2E_CLUSTER_NAME:="gce-soak-weekly-2"} - : ${E2E_DOWN:="false"} - : ${E2E_NETWORK:="gce-soak-weekly-2"} - : ${E2E_TEST:="false"} - : ${E2E_UP:="true"} - : ${KUBE_GCE_INSTANCE_PREFIX:="gce-soak-weekly-2"} - : ${PROJECT:="kubernetes-jenkins"} + export PROJECT="k8s-jkns-gce-soak-2" ;; # Clone of kubernetes-soak-continuous-e2e-gce. Issue #20832. kubernetes-soak-continuous-e2e-gce-2) - : ${E2E_CLUSTER_NAME:="gce-soak-weekly-2"} - : ${E2E_DOWN:="false"} - : ${E2E_NETWORK:="gce-soak-weekly-2"} - : ${E2E_UP:="false"} - # Clear out any orphaned namespaces in case previous run was interrupted. - : ${E2E_CLEAN_START:="true"} - # We should be testing the reliability of a long-running cluster. The - # [Disruptive] tests kill/restart components or nodes in the cluster, - # defeating the purpose of a soak cluster. (#15722) - : ${GINKGO_TEST_ARGS:="--ginkgo.skip=\[Disruptive\]|\[Flaky\]|\[Feature:.+\]"} - : ${KUBE_GCE_INSTANCE_PREFIX:="gce-soak-weekly-2"} - : ${PROJECT:="kubernetes-jenkins"} + export PROJECT="k8s-jkns-gce-soak-2" ;; # Sets up the GKE soak cluster weekly using the latest CI release. 
kubernetes-soak-weekly-deploy-gke) - : ${E2E_CLUSTER_NAME:="jenkins-gke-soak-weekly"} - : ${E2E_DOWN:="false"} - : ${E2E_NETWORK:="gke-soak-weekly"} - : ${E2E_SET_CLUSTER_API_VERSION:=y} - : ${JENKINS_PUBLISHED_VERSION:="ci/latest"} - : ${E2E_TEST:="false"} - : ${E2E_UP:="true"} - : ${PROJECT:="kubernetes-jenkins"} + export PROJECT="k8s-jkns-gke-soak" # Need at least n1-standard-2 nodes to run kubelet_perf tests - NODE_SIZE="n1-standard-2" + export MACHINE_TYPE="n1-standard-2" ;; # Runs tests on GKE soak cluster. kubernetes-soak-continuous-e2e-gke) - : ${E2E_CLUSTER_NAME:="jenkins-gke-soak-weekly"} - : ${E2E_NETWORK:="gke-soak-weekly"} - : ${E2E_DOWN:="false"} - : ${E2E_UP:="false"} - # Clear out any orphaned namespaces in case previous run was interrupted. - : ${E2E_CLEAN_START:="true"} - : ${PROJECT:="kubernetes-jenkins"} - : ${E2E_OPT:="--check_version_skew=false"} - # We should be testing the reliability of a long-running cluster. The - # [Disruptive] tests kill/restart components or nodes in the cluster, - # defeating the purpose of a soak cluster. (#15722) - : ${GINKGO_TEST_ARGS:="--ginkgo.skip=\[Disruptive\]|\[Flaky\]|\[Feature:.+\]"} + export PROJECT="k8s-jkns-gke-soak" + export E2E_OPT="--check_version_skew=false" ;; # Upgrade jobs @@ -688,19 +461,23 @@ case ${JOB_NAME} in # Configurations for step1, step4, and step6 live in the release-1.0 branch. 
kubernetes-upgrade-gke-1.0-master-step2-kubectl-e2e-new) - configure_upgrade_step 'configured-in-release-1.0' 'ci/latest' 'upgrade-gke-1-0-master' 'kubernetes-jenkins-gke-upgrade' + export E2E_NAME="upgrade-gke-1-0-master" + export PROJECT="kubernetes-jenkins-gke-upgrade" ;; kubernetes-upgrade-gke-1.0-master-step3-upgrade-master) - configure_upgrade_step 'configured-in-release-1.0' 'ci/latest' 'upgrade-gke-1-0-master' 'kubernetes-jenkins-gke-upgrade' + export E2E_NAME="upgrade-gke-1-0-master" + export PROJECT="kubernetes-jenkins-gke-upgrade" ;; kubernetes-upgrade-gke-1.0-master-step5-upgrade-cluster) - configure_upgrade_step 'configured-in-release-1.0' 'ci/latest' 'upgrade-gke-1-0-master' 'kubernetes-jenkins-gke-upgrade' + export E2E_NAME="upgrade-gke-1-0-master" + export PROJECT="kubernetes-jenkins-gke-upgrade" ;; kubernetes-upgrade-gke-1.0-master-step7-e2e-new) - configure_upgrade_step 'configured-in-release-1.0' 'ci/latest' 'upgrade-gke-1-0-master' 'kubernetes-jenkins-gke-upgrade' + export E2E_NAME="upgrade-gke-1-0-master" + export PROJECT="kubernetes-jenkins-gke-upgrade" ;; # kubernetes-upgrade-gke-1.1-master @@ -710,19 +487,23 @@ case ${JOB_NAME} in # Configurations for step1, step4, and step6 live in the release-1.1 branch. 
kubernetes-upgrade-gke-1.1-master-step2-kubectl-e2e-new) - configure_upgrade_step 'configured-in-release-1.1' 'ci/latest' 'upgrade-gke-1-1-master' 'kubernetes-jenkins-gke-upgrade' + export E2E_NAME="upgrade-gke-1-1-master" + export PROJECT="kubernetes-jenkins-gke-upgrade" ;; kubernetes-upgrade-gke-1.1-master-step3-upgrade-master) - configure_upgrade_step 'configured-in-release-1.1' 'ci/latest' 'upgrade-gke-1-1-master' 'kubernetes-jenkins-gke-upgrade' + export E2E_NAME="upgrade-gke-1-1-master" + export PROJECT="kubernetes-jenkins-gke-upgrade" ;; kubernetes-upgrade-gke-1.1-master-step5-upgrade-cluster) - configure_upgrade_step 'configured-in-release-1.1' 'ci/latest' 'upgrade-gke-1-1-master' 'kubernetes-jenkins-gke-upgrade' + export E2E_NAME="upgrade-gke-1-1-master" + export PROJECT="kubernetes-jenkins-gke-upgrade" ;; kubernetes-upgrade-gke-1.1-master-step7-e2e-new) - configure_upgrade_step 'configured-in-release-1.1' 'ci/latest' 'upgrade-gke-1-1-master' 'kubernetes-jenkins-gke-upgrade' + export E2E_NAME="upgrade-gke-1-1-master" + export PROJECT="kubernetes-jenkins-gke-upgrade" ;; # kubernetes-upgrade-gce-1.1-master @@ -732,104 +513,49 @@ case ${JOB_NAME} in # Configurations for step1, step4, and step6 live in the release-1.1 branch. 
kubernetes-upgrade-gce-1.1-master-step2-kubectl-e2e-new) - configure_upgrade_step 'configured-in-release-1.1' 'ci/latest' 'upgrade-gce-1-1-master' 'k8s-jkns-gce-upgrade' + export E2E_NAME="upgrade-gce-1-1-master" + export PROJECT="k8s-jkns-gce-upgrade" ;; kubernetes-upgrade-gce-1.1-master-step3-upgrade-master) - configure_upgrade_step 'configured-in-release-1.1' 'ci/latest' 'upgrade-gce-1-1-master' 'k8s-jkns-gce-upgrade' + export E2E_NAME="upgrade-gce-1-1-master" + export PROJECT="k8s-jkns-gce-upgrade" ;; kubernetes-upgrade-gce-1.1-master-step5-upgrade-cluster) - configure_upgrade_step 'configured-in-release-1.1' 'ci/latest' 'upgrade-gce-1-1-master' 'k8s-jkns-gce-upgrade' + export E2E_NAME="upgrade-gce-1-1-master" + export PROJECT="k8s-jkns-gce-upgrade" ;; kubernetes-upgrade-gce-1.1-master-step7-e2e-new) - configure_upgrade_step 'configured-in-release-1.1' 'ci/latest' 'upgrade-gce-1-1-master' 'k8s-jkns-gce-upgrade' + export E2E_NAME="upgrade-gce-1-1-master" + export PROJECT="k8s-jkns-gce-upgrade" ;; esac +# Assume we're upping, testing, and downing a cluster +export E2E_UP="${E2E_UP:-true}" +export E2E_TEST="${E2E_TEST:-true}" +export E2E_DOWN="${E2E_DOWN:-true}" + # Skip gcloud update checking export CLOUDSDK_COMPONENT_MANAGER_DISABLE_UPDATE_CHECK=true # AWS variables -export KUBE_AWS_INSTANCE_PREFIX=${E2E_CLUSTER_NAME} -export KUBE_AWS_ZONE=${E2E_ZONE} -export AWS_CONFIG_FILE=${AWS_CONFIG_FILE:-} -export AWS_SSH_KEY=${AWS_SSH_KEY:-} -export KUBE_SSH_USER=${KUBE_SSH_USER:-} -export AWS_SHARED_CREDENTIALS_FILE=${AWS_SHARED_CREDENTIALS_FILE:-} +export KUBE_AWS_INSTANCE_PREFIX=${E2E_NAME:-'jenkins-e2e'} # GCE variables -export INSTANCE_PREFIX=${E2E_CLUSTER_NAME} -export KUBE_GCE_ZONE=${E2E_ZONE} -export MULTIZONE=${E2E_MULTIZONE:-} # for building multi-zone Ubernetes Lite clusters -export KUBE_GCE_NETWORK=${E2E_NETWORK} -export KUBE_GCE_INSTANCE_PREFIX=${KUBE_GCE_INSTANCE_PREFIX:-} -export KUBE_GCE_NODE_PROJECT=${KUBE_GCE_NODE_PROJECT:-} -export 
KUBE_GCE_NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-} -export KUBE_OS_DISTRIBUTION=${KUBE_OS_DISTRIBUTION:-} +export INSTANCE_PREFIX=${E2E_NAME:-'jenkins-e2e'} +export KUBE_GCE_NETWORK=${E2E_NAME:-'jenkins-e2e'} +export KUBE_GCE_INSTANCE_PREFIX=${E2E_NAME:-'jenkins-e2e'} export GCE_SERVICE_ACCOUNT=$(gcloud auth list 2> /dev/null | grep active | cut -f3 -d' ') -export FAIL_ON_GCP_RESOURCE_LEAK="${FAIL_ON_GCP_RESOURCE_LEAK:-false}" -export ALLOWED_NOTREADY_NODES=${ALLOWED_NOTREADY_NODES:-} -export EXIT_ON_WEAK_ERROR=${EXIT_ON_WEAK_ERROR:-} -export HAIRPIN_MODE=${HAIRPIN_MODE:-} # GKE variables -export CLUSTER_NAME=${E2E_CLUSTER_NAME} -export ZONE=${E2E_ZONE} -export KUBE_GKE_NETWORK=${E2E_NETWORK} -export E2E_SET_CLUSTER_API_VERSION=${E2E_SET_CLUSTER_API_VERSION:-} -export CMD_GROUP=${CMD_GROUP:-} -export MACHINE_TYPE=${NODE_SIZE:-} # GKE scripts use MACHINE_TYPE for the node vm size -export CLOUDSDK_BUCKET="${CLOUDSDK_BUCKET:-}" +export CLUSTER_NAME=${E2E_NAME:-'jenkins-e2e'} +export KUBE_GKE_NETWORK=${E2E_NAME:-'jenkins-e2e'} -if [[ ! 
-z "${GKE_API_ENDPOINT:-}" ]]; then - export CLOUDSDK_API_ENDPOINT_OVERRIDES_CONTAINER=${GKE_API_ENDPOINT} -fi - -# Shared cluster variables -export E2E_MIN_STARTUP_PODS=${E2E_MIN_STARTUP_PODS:-} -export KUBE_ENABLE_CLUSTER_MONITORING=${ENABLE_CLUSTER_MONITORING:-} -export KUBE_ENABLE_CLUSTER_REGISTRY=${ENABLE_CLUSTER_REGISTRY:-} -export KUBE_ENABLE_HORIZONTAL_POD_AUTOSCALER=${ENABLE_HORIZONTAL_POD_AUTOSCALER:-} -export KUBE_ENABLE_DEPLOYMENTS=${ENABLE_DEPLOYMENTS:-} -export KUBE_ENABLE_EXPERIMENTAL_API=${ENABLE_EXPERIMENTAL_API:-} -export MASTER_SIZE=${MASTER_SIZE:-} -export NODE_SIZE=${NODE_SIZE:-} -export NODE_DISK_SIZE=${NODE_DISK_SIZE:-} -export NUM_NODES=${NUM_NODES:-} -export TEST_CLUSTER_LOG_LEVEL=${TEST_CLUSTER_LOG_LEVEL:-} -export KUBELET_TEST_LOG_LEVEL=${KUBELET_TEST_LOG_LEVEL:-} -export TEST_CLUSTER_RESYNC_PERIOD=${TEST_CLUSTER_RESYNC_PERIOD:-} -export PROJECT=${PROJECT:-} -export NETWORK_PROVIDER=${NETWORK_PROVIDER:-} -export JENKINS_PUBLISHED_VERSION=${JENKINS_PUBLISHED_VERSION:-'ci/latest'} - -export KUBE_ADMISSION_CONTROL=${ADMISSION_CONTROL:-} - -export KUBERNETES_PROVIDER=${KUBERNETES_PROVIDER} +# Get golang into our PATH so we can run e2e.go export PATH=${PATH}:/usr/local/go/bin -export KUBE_SKIP_UPDATE=y -export KUBE_SKIP_CONFIRMATIONS=y - -# Kubemark -export USE_KUBEMARK="${USE_KUBEMARK:-false}" -export KUBEMARK_TESTS="${KUBEMARK_TESTS:-}" -export KUBEMARK_MASTER_SIZE="${KUBEMARK_MASTER_SIZE:-$MASTER_SIZE}" -export KUBEMARK_NUM_NODES="${KUBEMARK_NUM_NODES:-$NUM_NODES}" - -# E2E Control Variables -export E2E_OPT="${E2E_OPT:-}" -export E2E_UP="${E2E_UP:-true}" -export E2E_TEST="${E2E_TEST:-true}" -export E2E_DOWN="${E2E_DOWN:-true}" -export E2E_CLEAN_START="${E2E_CLEAN_START:-}" -export E2E_PUBLISH_GREEN_VERSION="${E2E_PUBLISH_GREEN_VERSION:-false}" -export E2E_ZONES=${E2E_ZONES:-} # for building multi-zone Ubernetes Lite clusters -# Used by hack/ginkgo-e2e.sh to enable ginkgo's parallel test runner. 
-export GINKGO_PARALLEL=${GINKGO_PARALLEL:-} -export GINKGO_PARALLEL_NODES=${GINKGO_PARALLEL_NODES:-} -export GINKGO_TEST_ARGS="${GINKGO_TEST_ARGS:-}" # If we are on PR Jenkins merging into master, use the local e2e.sh. Otherwise, use the latest on github. if [[ "${ghprbTargetBranch:-}" == "master" ]]; then