diff --git a/cluster/gke/config-common.sh b/cluster/gke/config-common.sh
index 18015ec92d3..51982057f19 100644
--- a/cluster/gke/config-common.sh
+++ b/cluster/gke/config-common.sh
@@ -18,7 +18,13 @@
 # Specifically, the following environment variables are assumed:
 #   - CLUSTER_NAME (the name of the cluster)
 
-ZONE="${ZONE:-us-central1-f}"
+if [ ! -z "${REGION:-}" ] && [ ! -z "${ZONE:-}" ]; then
+  echo "Only one of REGION and ZONE can be set." >&2
+  exit 1
+fi
+if [ -z "${REGION:-}" ]; then
+  ZONE="${ZONE:-us-central1-f}"
+fi
 NUM_NODES="${NUM_NODES:-3}"
 ADDITIONAL_ZONES="${ADDITIONAL_ZONES:-}"
 CLUSTER_API_VERSION="${CLUSTER_API_VERSION:-}"
diff --git a/cluster/gke/util.sh b/cluster/gke/util.sh
index e3b8f03f259..57c8f634f84 100755
--- a/cluster/gke/util.sh
+++ b/cluster/gke/util.sh
@@ -68,6 +68,7 @@ function prepare-e2e() {
 #   GCLOUD
 # Vars set:
 #   PROJECT
+#   SCOPE_ARGS
 function detect-project() {
   echo "... in gke:detect-project()" >&2
   if [[ -z "${PROJECT:-}" ]]; then
@@ -79,6 +80,18 @@ function detect-project() {
     echo "'gcloud config set project '" >&2
     exit 1
   fi
+
+  SCOPE_ARGS=(
+    "--project=${PROJECT}"
+  )
+
+  if [[ ! -z "${ZONE:-}" ]]; then
+    SCOPE_ARGS+=("--zone=${ZONE}")
+  fi
+
+  if [[ ! -z "${REGION:-}" ]]; then
+    SCOPE_ARGS+=("--region=${REGION}")
+  fi
 }
 
 # Execute prior to running tests to build a release if required for env.
@@ -116,6 +129,9 @@ function validate-cluster {
   # Simply override the NUM_NODES variable if we've spread nodes across multiple
   # zones before calling into the generic validate-cluster logic.
   local EXPECTED_NUM_NODES="${NUM_NODES}"
+  if [ ! -z "${REGION:-}" ]; then
+    (( EXPECTED_NUM_NODES *= 3 ))
+  fi
   for zone in $(echo "${ADDITIONAL_ZONES}" | sed "s/,/ /g")
   do
     (( EXPECTED_NUM_NODES += NUM_NODES ))
@@ -128,7 +144,8 @@ function validate-cluster {
 # Assumed vars:
 #   GCLOUD
 #   CLUSTER_NAME
-#   ZONE
+#   ZONE (optional)
+#   REGION (optional)
 #   CLUSTER_API_VERSION (optional)
 #   NUM_NODES
 #   ADDITIONAL_ZONES (optional)
@@ -164,8 +181,7 @@ function kube-up() {
   fi
 
   local shared_args=(
-    "--zone=${ZONE}"
-    "--project=${PROJECT}"
+    ${SCOPE_ARGS[@]}
     "--scopes=${NODE_SCOPES}"
   )
 
@@ -185,6 +201,7 @@ function kube-up() {
     "--network=${NETWORK}"
     "--cluster-version=${CLUSTER_API_VERSION}"
     "--machine-type=${MACHINE_TYPE}"
+    "--quiet"
   )
 
   if [[ ! -z "${ENABLE_KUBERNETES_ALPHA:-}" ]]; then
@@ -239,7 +256,7 @@ function test-setup() {
   detect-nodes >&2
 
   # At this point, CLUSTER_NAME should have been used, so its value is final.
-  NODE_TAG=$($GCLOUD compute instances describe ${NODE_NAMES[0]} --project="${PROJECT}" --zone="${ZONE}" --format='value(tags.items)' | grep -o "gke-${CLUSTER_NAME}-.\{8\}-node")
+  NODE_TAG=$($GCLOUD compute instances list ${NODE_NAMES[0]} --project="${PROJECT}" --format='value(tags.items)' | grep -o "gke-${CLUSTER_NAME}-.\{8\}-node")
   OLD_NODE_TAG="k8s-${CLUSTER_NAME}-node"
 
   # Open up port 80 & 8080 so common containers on minions can be reached.
@@ -276,7 +293,7 @@ function detect-master() {
   echo "... in gke:detect-master()" >&2
   detect-project >&2
   KUBE_MASTER_IP=$("${GCLOUD}" ${CMD_GROUP:-} container clusters describe \
-    --project="${PROJECT}" --zone="${ZONE}" --format='value(endpoint)' \
+    ${SCOPE_ARGS[@]} --format='value(endpoint)' \
     "${CLUSTER_NAME}")
 }
 
@@ -291,11 +308,16 @@ function detect-nodes() {
 
 # Detect minions created in the minion group
 #
-# Note that this will only select nodes in the same zone as the
+# Note that for zonal clusters, this will only select nodes in the same zone as the
 # cluster, meaning that it won't include all nodes in a multi-zone cluster.
+# For regional clusters, this will select nodes only from an arbitrarily chosen node instance group.
 #
 # Assumed vars:
-#   none
+#   GCLOUD
+#   PROJECT
+#   ZONE (optional)
+#   REGION (optional)
+#   CLUSTER_NAME
 # Vars set:
 #   NODE_NAMES
 function detect-node-names {
@@ -305,8 +327,10 @@ function detect-node-names {
 
   NODE_NAMES=()
   for group in "${NODE_INSTANCE_GROUPS[@]:-}"; do
+    # We can't simply use --zone "${ZONE}" as ZONE may not be set (e.g. when REGION is set).
+    local igm_zone=$(gcloud compute instance-groups managed list "${group}" --format='value(zone)')
     NODE_NAMES+=($(gcloud compute instance-groups managed list-instances \
-      "${group}" --zone "${ZONE}" \
+      "${group}" --zone "${igm_zone}" \
       --project "${PROJECT}" --format='value(instance)'))
   done
   echo "NODE_NAMES=${NODE_NAMES[*]:-}"
@@ -314,15 +338,18 @@ function detect-node-names {
 
 # Detect instance group name generated by gke.
 #
-# Note that the NODE_INSTANCE_GROUPS var will only have instance groups in the
+# Note that for zonal clusters, the NODE_INSTANCE_GROUPS var will only have instance groups in the
 # same zone as the cluster, meaning that it won't include all groups in a
-# multi-zone cluster. The ALL_INSTANCE_GROUP_URLS will contain all the
-# instance group URLs, which include multi-zone groups.
+# multi-zone cluster.
+# For regional clusters, NODE_INSTANCE_GROUPS is set to an arbitrarily chosen node instance group.
+# The ALL_INSTANCE_GROUP_URLS will contain all the instance group URLs,
+# which include multi-zone groups.
 #
 # Assumed vars:
 #   GCLOUD
-#   PROJECT
-#   ZONE
+#   SCOPE_ARGS
+#   ZONE (optional)
+#   REGION (optional)
 #   CLUSTER_NAME
 # Vars set:
 #   NODE_INSTANCE_GROUPS
@@ -330,17 +357,21 @@ function detect-node-names {
 function detect-node-instance-groups {
   echo "... in gke:detect-node-instance-groups()" >&2
   local urls=$("${GCLOUD}" ${CMD_GROUP:-} container clusters describe \
-    --project="${PROJECT}" --zone="${ZONE}" \
-    --format='value(instanceGroupUrls)' "${CLUSTER_NAME}")
+    ${SCOPE_ARGS[@]} --format='value(instanceGroupUrls)' "${CLUSTER_NAME}")
   urls=(${urls//;/ })
   ALL_INSTANCE_GROUP_URLS=${urls[*]}
   NODE_INSTANCE_GROUPS=()
-  for url in "${urls[@]:-}"; do
-    local igm_zone=$(expr ${url} : '.*/zones/\([a-z0-9-]*\)/')
-    if [[ "${igm_zone}" == "${ZONE}" ]]; then
-      NODE_INSTANCE_GROUPS+=("${url##*/}")
-    fi
-  done
+  if [[ ! -z "${ZONE:-}" ]]; then
+    for url in "${urls[@]:-}"; do
+      local igm_zone=$(expr ${url} : '.*/zones/\([a-z0-9-]*\)/')
+      if [[ "${igm_zone}" == "${ZONE}" ]]; then
+        NODE_INSTANCE_GROUPS+=("${url##*/}")
+      fi
+    done
+  fi
+  if [[ ! -z "${REGION:-}" ]]; then
+    NODE_INSTANCE_GROUPS+=("${urls[0]}")
+  fi
 }
 
 # SSH to a node by name ($1) and run a command ($2).
@@ -413,13 +444,15 @@ function test-teardown() {
 #
 # Assumed vars:
 #   GCLOUD
-#   ZONE
+#   SCOPE_ARGS
+#   ZONE (optional)
+#   REGION (optional)
 #   CLUSTER_NAME
 function kube-down() {
   echo "... in gke:kube-down()" >&2
   detect-project >&2
-  if "${GCLOUD}" ${CMD_GROUP:-} container clusters describe --project="${PROJECT}" --zone="${ZONE}" "${CLUSTER_NAME}" --quiet &>/dev/null; then
-    with-retry 3 "${GCLOUD}" ${CMD_GROUP:-} container clusters delete --project="${PROJECT}" \
-      --zone="${ZONE}" "${CLUSTER_NAME}" --quiet
+  if "${GCLOUD}" ${CMD_GROUP:-} container clusters describe ${SCOPE_ARGS[@]} "${CLUSTER_NAME}" --quiet &>/dev/null; then
+    with-retry 3 "${GCLOUD}" ${CMD_GROUP:-} container clusters delete ${SCOPE_ARGS[@]} \
+      "${CLUSTER_NAME}" --quiet
   fi
 }
diff --git a/hack/ginkgo-e2e.sh b/hack/ginkgo-e2e.sh
index c6dd4a693eb..051db280e58 100755
--- a/hack/ginkgo-e2e.sh
+++ b/hack/ginkgo-e2e.sh
@@ -136,6 +136,7 @@ export PATH=$(dirname "${e2e_test}"):"${PATH}"
   --provider="${KUBERNETES_PROVIDER}" \
   --gce-project="${PROJECT:-}" \
   --gce-zone="${ZONE:-}" \
+  --gce-region="${REGION:-}" \
   --gce-multizone="${MULTIZONE:-false}" \
   --gke-cluster="${CLUSTER_NAME:-}" \
   --kube-master="${KUBE_MASTER:-}" \
diff --git a/hack/verify-flags/known-flags.txt b/hack/verify-flags/known-flags.txt
index b06f95b318a..3d29834e7f2 100644
--- a/hack/verify-flags/known-flags.txt
+++ b/hack/verify-flags/known-flags.txt
@@ -303,6 +303,7 @@ gather-suite-metrics-at-teardown
 gce-api-endpoint
 gce-multizone
 gce-project
+gce-region
 gce-service-account
 gce-upgrade-script
 gce-zone
diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go
index 7eb8a578637..6400fc544ff 100644
--- a/test/e2e/e2e.go
+++ b/test/e2e/e2e.go
@@ -65,9 +65,13 @@ func setupProviderConfig() error {
 		var err error
 		framework.Logf("Fetching cloud provider for %q\r\n", framework.TestContext.Provider)
 		zone := framework.TestContext.CloudConfig.Zone
-		region, err := gcecloud.GetGCERegion(zone)
-		if err != nil {
-			return fmt.Errorf("error parsing GCE/GKE region from zone %q: %v", zone, err)
+		region := framework.TestContext.CloudConfig.Region
+
+		if region == "" {
+			region, err = gcecloud.GetGCERegion(zone)
+			if err != nil {
+				return fmt.Errorf("error parsing GCE/GKE region from zone %q: %v", zone, err)
+			}
 		}
 		managedZones := []string{} // Manage all zones in the region
 		if !framework.TestContext.CloudConfig.MultiZone {
diff --git a/test/e2e/framework/test_context.go b/test/e2e/framework/test_context.go
index 9969fc2f9e6..6099e75cdc3 100644
--- a/test/e2e/framework/test_context.go
+++ b/test/e2e/framework/test_context.go
@@ -145,6 +145,7 @@ type CloudConfig struct {
 	ApiEndpoint       string
 	ProjectID         string
 	Zone              string
+	Region            string
 	MultiZone         bool
 	Cluster           string
 	MasterName        string
@@ -221,6 +222,7 @@ func RegisterClusterFlags() {
 	flag.StringVar(&cloudConfig.ApiEndpoint, "gce-api-endpoint", "", "The GCE ApiEndpoint being used, if applicable")
 	flag.StringVar(&cloudConfig.ProjectID, "gce-project", "", "The GCE project being used, if applicable")
 	flag.StringVar(&cloudConfig.Zone, "gce-zone", "", "GCE zone being used, if applicable")
+	flag.StringVar(&cloudConfig.Region, "gce-region", "", "GCE region being used, if applicable")
 	flag.BoolVar(&cloudConfig.MultiZone, "gce-multizone", false, "If true, start GCE cloud provider with multizone support.")
 	flag.StringVar(&cloudConfig.Cluster, "gke-cluster", "", "GKE name of cluster being used, if applicable")
 	flag.StringVar(&cloudConfig.NodeInstanceGroup, "node-instance-group", "", "Name of the managed instance group for nodes. Valid only for gce, gke or aws. If there is more than one group: comma separated list of groups.")
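
For reference, a minimal sketch of how the regional path above would be exercised end to end. All values here are hypothetical; `REGION` and `ZONE` are mutually exclusive per the new check in `config-common.sh`, and `validate-cluster` now expects `3 * NUM_NODES` nodes because a regional cluster replicates its node pool across three zones.

```sh
# Hypothetical regional e2e run (example cluster name and region).
export KUBERNETES_PROVIDER=gke
export CLUSTER_NAME=regional-e2e   # hypothetical name
export REGION=us-central1          # leave ZONE unset; setting both now exits with an error
export NUM_NODES=3                 # per-zone count; validate-cluster expects 9 nodes total

cluster/kube-up.sh                 # kube-up() passes --region via SCOPE_ARGS
hack/ginkgo-e2e.sh                 # forwards --gce-region="${REGION}" to the e2e framework
cluster/kube-down.sh               # kube-down() deletes with the same SCOPE_ARGS
```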