Merge pull request #49708 from mborsz/regional

Automatic merge from submit-queue (batch tested with PRs 49538, 49708, 47665, 49750, 49528)

Add support for GKE regional clusters in e2e tests.

**What this PR does / why we need it**:
Add support for GKE regional clusters in e2e tests.
**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: fixes #

**Special notes for your reviewer**:

**Release note**:
```release-note
NONE
```
This commit is contained in:
Kubernetes Submit Queue 2017-07-29 05:28:51 -07:00 committed by GitHub
commit ba8b26d47a
6 changed files with 76 additions and 29 deletions

View File

@ -18,7 +18,13 @@
# Specifically, the following environment variables are assumed:
# - CLUSTER_NAME (the name of the cluster)
ZONE="${ZONE:-us-central1-f}"
if [ ! -z "${REGION:-}" ] && [ ! -z "${ZONE:-}" ]; then
echo "Only one of REGION and ZONE can be set." >&2
exit 1
fi
if [ -z "${REGION:-}" ]; then
ZONE="${ZONE:-us-central1-f}"
fi
NUM_NODES="${NUM_NODES:-3}"
ADDITIONAL_ZONES="${ADDITIONAL_ZONES:-}"
CLUSTER_API_VERSION="${CLUSTER_API_VERSION:-}"

View File

@ -68,6 +68,7 @@ function prepare-e2e() {
# GCLOUD
# Vars set:
# PROJECT
# SCOPE_ARGS
function detect-project() {
echo "... in gke:detect-project()" >&2
if [[ -z "${PROJECT:-}" ]]; then
@ -79,6 +80,18 @@ function detect-project() {
echo "'gcloud config set project <PROJECT>'" >&2
exit 1
fi
SCOPE_ARGS=(
"--project=${PROJECT}"
)
if [[ ! -z "${ZONE:-}" ]]; then
SCOPE_ARGS+=("--zone=${ZONE}")
fi
if [[ ! -z "${REGION:-}" ]]; then
SCOPE_ARGS+=("--region=${REGION}")
fi
}
# Execute prior to running tests to build a release if required for env.
@ -116,6 +129,9 @@ function validate-cluster {
# Simply override the NUM_NODES variable if we've spread nodes across multiple
# zones before calling into the generic validate-cluster logic.
local EXPECTED_NUM_NODES="${NUM_NODES}"
if [ ! -z "${REGION:-}" ]; then
(( EXPECTED_NUM_NODES *= 3 ))
fi
for zone in $(echo "${ADDITIONAL_ZONES}" | sed "s/,/ /g")
do
(( EXPECTED_NUM_NODES += NUM_NODES ))
@ -128,7 +144,8 @@ function validate-cluster {
# Assumed vars:
# GCLOUD
# CLUSTER_NAME
# ZONE
# ZONE (optional)
# REGION (optional)
# CLUSTER_API_VERSION (optional)
# NUM_NODES
# ADDITIONAL_ZONES (optional)
@ -164,8 +181,7 @@ function kube-up() {
fi
local shared_args=(
"--zone=${ZONE}"
"--project=${PROJECT}"
${SCOPE_ARGS[@]}
"--scopes=${NODE_SCOPES}"
)
@ -185,6 +201,7 @@ function kube-up() {
"--network=${NETWORK}"
"--cluster-version=${CLUSTER_API_VERSION}"
"--machine-type=${MACHINE_TYPE}"
"--quiet"
)
if [[ ! -z "${ENABLE_KUBERNETES_ALPHA:-}" ]]; then
@ -239,7 +256,7 @@ function test-setup() {
detect-nodes >&2
# At this point, CLUSTER_NAME should have been used, so its value is final.
NODE_TAG=$($GCLOUD compute instances describe ${NODE_NAMES[0]} --project="${PROJECT}" --zone="${ZONE}" --format='value(tags.items)' | grep -o "gke-${CLUSTER_NAME}-.\{8\}-node")
NODE_TAG=$($GCLOUD compute instances list ${NODE_NAMES[0]} --project="${PROJECT}" --format='value(tags.items)' | grep -o "gke-${CLUSTER_NAME}-.\{8\}-node")
OLD_NODE_TAG="k8s-${CLUSTER_NAME}-node"
# Open up port 80 & 8080 so common containers on minions can be reached.
@ -276,7 +293,7 @@ function detect-master() {
echo "... in gke:detect-master()" >&2
detect-project >&2
KUBE_MASTER_IP=$("${GCLOUD}" ${CMD_GROUP:-} container clusters describe \
--project="${PROJECT}" --zone="${ZONE}" --format='value(endpoint)' \
${SCOPE_ARGS[@]} --format='value(endpoint)' \
"${CLUSTER_NAME}")
}
@ -291,11 +308,16 @@ function detect-nodes() {
# Detect minions created in the minion group
#
# Note that this will only select nodes in the same zone as the
# Note that for zonal clusters this will only select nodes in the same zone as the
# cluster, meaning that it won't include all nodes in a multi-zone cluster.
# For regional clusters, this will select nodes only from arbitrarily chosen node instance group.
#
# Assumed vars:
# none
# GCLOUD
# PROJECT
# ZONE (optional)
# REGION (optional)
# CLUSTER_NAME
# Vars set:
# NODE_NAMES
function detect-node-names {
@ -305,8 +327,10 @@ function detect-node-names {
NODE_NAMES=()
for group in "${NODE_INSTANCE_GROUPS[@]:-}"; do
# We can't simply use --zone "${ZONE}" as ZONE may not be set (e.g. when REGION is set).
local igm_zone=$(gcloud compute instance-groups managed list "${group}" --format='value(zone)')
NODE_NAMES+=($(gcloud compute instance-groups managed list-instances \
"${group}" --zone "${ZONE}" \
"${group}" --zone "${igm_zone}" \
--project "${PROJECT}" --format='value(instance)'))
done
echo "NODE_NAMES=${NODE_NAMES[*]:-}"
@ -314,15 +338,18 @@ function detect-node-names {
# Detect instance group name generated by gke.
#
# Note that the NODE_INSTANCE_GROUPS var will only have instance groups in the
# Note that for zonal clusters the NODE_INSTANCE_GROUPS var will only have instance groups in the
# same zone as the cluster, meaning that it won't include all groups in a
# multi-zone cluster. The ALL_INSTANCE_GROUP_URLS will contain all the
# instance group URLs, which include multi-zone groups.
# multi-zone cluster.
# For regional clusters, NODE_INSTANCE_GROUPS is set to arbitrarily chosen node instance group.
# The ALL_INSTANCE_GROUP_URLS will contain all the instance group URLs,
# which include multi-zone groups.
#
# Assumed vars:
# GCLOUD
# PROJECT
# ZONE
# SCOPE_ARGS
# ZONE (optional)
# REGION (optional)
# CLUSTER_NAME
# Vars set:
# NODE_INSTANCE_GROUPS
@ -330,17 +357,21 @@ function detect-node-names {
function detect-node-instance-groups {
echo "... in gke:detect-node-instance-groups()" >&2
local urls=$("${GCLOUD}" ${CMD_GROUP:-} container clusters describe \
--project="${PROJECT}" --zone="${ZONE}" \
--format='value(instanceGroupUrls)' "${CLUSTER_NAME}")
${SCOPE_ARGS[@]} --format='value(instanceGroupUrls)' "${CLUSTER_NAME}")
urls=(${urls//;/ })
ALL_INSTANCE_GROUP_URLS=${urls[*]}
NODE_INSTANCE_GROUPS=()
for url in "${urls[@]:-}"; do
local igm_zone=$(expr ${url} : '.*/zones/\([a-z0-9-]*\)/')
if [[ "${igm_zone}" == "${ZONE}" ]]; then
NODE_INSTANCE_GROUPS+=("${url##*/}")
fi
done
if [[ ! -z "${ZONE:-}" ]]; then
for url in "${urls[@]:-}"; do
local igm_zone=$(expr ${url} : '.*/zones/\([a-z0-9-]*\)/')
if [[ "${igm_zone}" == "${ZONE}" ]]; then
NODE_INSTANCE_GROUPS+=("${url##*/}")
fi
done
fi
if [[ ! -z "${REGION:-}" ]]; then
NODE_INSTANCE_GROUPS+=("${urls[0]}")
fi
}
# SSH to a node by name ($1) and run a command ($2).
@ -413,13 +444,15 @@ function test-teardown() {
#
# Assumed vars:
# GCLOUD
# ZONE
# SCOPE_ARGS
# ZONE (optional)
# REGION (optional)
# CLUSTER_NAME
function kube-down() {
echo "... in gke:kube-down()" >&2
detect-project >&2
if "${GCLOUD}" ${CMD_GROUP:-} container clusters describe --project="${PROJECT}" --zone="${ZONE}" "${CLUSTER_NAME}" --quiet &>/dev/null; then
with-retry 3 "${GCLOUD}" ${CMD_GROUP:-} container clusters delete --project="${PROJECT}" \
--zone="${ZONE}" "${CLUSTER_NAME}" --quiet
if "${GCLOUD}" ${CMD_GROUP:-} container clusters describe ${SCOPE_ARGS[@]} "${CLUSTER_NAME}" --quiet &>/dev/null; then
with-retry 3 "${GCLOUD}" ${CMD_GROUP:-} container clusters delete ${SCOPE_ARGS[@]} \
"${CLUSTER_NAME}" --quiet
fi
}

View File

@ -136,6 +136,7 @@ export PATH=$(dirname "${e2e_test}"):"${PATH}"
--provider="${KUBERNETES_PROVIDER}" \
--gce-project="${PROJECT:-}" \
--gce-zone="${ZONE:-}" \
--gce-region="${REGION:-}" \
--gce-multizone="${MULTIZONE:-false}" \
--gke-cluster="${CLUSTER_NAME:-}" \
--kube-master="${KUBE_MASTER:-}" \

View File

@ -303,6 +303,7 @@ gather-suite-metrics-at-teardown
gce-api-endpoint
gce-multizone
gce-project
gce-region
gce-service-account
gce-upgrade-script
gce-zone

View File

@ -65,9 +65,13 @@ func setupProviderConfig() error {
var err error
framework.Logf("Fetching cloud provider for %q\r\n", framework.TestContext.Provider)
zone := framework.TestContext.CloudConfig.Zone
region, err := gcecloud.GetGCERegion(zone)
if err != nil {
return fmt.Errorf("error parsing GCE/GKE region from zone %q: %v", zone, err)
region := framework.TestContext.CloudConfig.Region
if region == "" {
region, err = gcecloud.GetGCERegion(zone)
if err != nil {
return fmt.Errorf("error parsing GCE/GKE region from zone %q: %v", zone, err)
}
}
managedZones := []string{} // Manage all zones in the region
if !framework.TestContext.CloudConfig.MultiZone {

View File

@ -145,6 +145,7 @@ type CloudConfig struct {
ApiEndpoint string
ProjectID string
Zone string
Region string
MultiZone bool
Cluster string
MasterName string
@ -221,6 +222,7 @@ func RegisterClusterFlags() {
flag.StringVar(&cloudConfig.ApiEndpoint, "gce-api-endpoint", "", "The GCE ApiEndpoint being used, if applicable")
flag.StringVar(&cloudConfig.ProjectID, "gce-project", "", "The GCE project being used, if applicable")
flag.StringVar(&cloudConfig.Zone, "gce-zone", "", "GCE zone being used, if applicable")
flag.StringVar(&cloudConfig.Region, "gce-region", "", "GCE region being used, if applicable")
flag.BoolVar(&cloudConfig.MultiZone, "gce-multizone", false, "If true, start GCE cloud provider with multizone support.")
flag.StringVar(&cloudConfig.Cluster, "gke-cluster", "", "GKE name of cluster being used, if applicable")
flag.StringVar(&cloudConfig.NodeInstanceGroup, "node-instance-group", "", "Name of the managed instance group for nodes. Valid only for gce, gke or aws. If there is more than one group: comma separated list of groups.")