Merge pull request #90405 from joakimr-axis/joakimr-axis_util.sh

Fix shellcheck warnings/errors in cluster/gce/util.sh
This commit is contained in:
Kubernetes Prow Robot 2021-02-10 10:02:16 -08:00 committed by GitHub
commit 0f3aada51e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 101 additions and 97 deletions

View File

@@ -41,8 +41,8 @@ else
exit 1
fi
if [[ ${NODE_LOCAL_SSDS:-} -ge 1 ]] && [[ ! -z ${NODE_LOCAL_SSDS_EXT:-} ]] ; then
echo -e "${color_red}Local SSD: Only one of NODE_LOCAL_SSDS and NODE_LOCAL_SSDS_EXT can be specified at once${color_norm}" >&2
if [[ ${NODE_LOCAL_SSDS:-} -ge 1 ]] && [[ -n ${NODE_LOCAL_SSDS_EXT:-} ]] ; then
echo -e "${color_red:-}Local SSD: Only one of NODE_LOCAL_SSDS and NODE_LOCAL_SSDS_EXT can be specified at once${color_norm:-}" >&2
exit 2
fi
@@ -166,7 +166,7 @@ function verify-prereqs() {
local resp="n"
if [[ "${KUBE_PROMPT_FOR_UPDATE}" == "y" ]]; then
echo "Can't find ${cmd} in PATH. Do you wish to install the Google Cloud SDK? [Y/n]"
read resp
read -r resp
fi
if [[ "${resp}" != "n" && "${resp}" != "N" ]]; then
curl https://sdk.cloud.google.com | bash
@@ -216,7 +216,7 @@ function gsutil_get_tar_md5() {
local -r tar_location=$1
#parse the output and return the md5 hash
#the sed command at the end removes whitespace
local -r tar_md5=$(gsutil hash -h -m ${tar_location} 2>/dev/null | grep "Hash (md5):" | awk -F ':' '{print $2}' | sed 's/^[[:space:]]*//g')
local -r tar_md5=$(gsutil hash -h -m "${tar_location}" 2>/dev/null | grep "Hash (md5):" | awk -F ':' '{print $2}' | sed 's/^[[:space:]]*//g')
echo "${tar_md5}"
}
@@ -226,7 +226,7 @@ function copy-to-staging() {
local -r gs_url=$2
local -r tar=$3
local -r hash=$4
local -r basename_tar=$(basename ${tar})
local -r basename_tar=$(basename "${tar}")
#check whether this tar alread exists and has the same hash
#if it matches, then don't bother uploading it again
@@ -392,22 +392,22 @@ function upload-tars() {
function detect-node-names() {
detect-project
INSTANCE_GROUPS=()
INSTANCE_GROUPS+=($(gcloud compute instance-groups managed list \
kube::util::read-array INSTANCE_GROUPS < <(gcloud compute instance-groups managed list \
--project "${PROJECT}" \
--filter "name ~ '${NODE_INSTANCE_PREFIX}-.+' AND zone:(${ZONE})" \
--format='value(name)' || true))
--format='value(name)' || true)
WINDOWS_INSTANCE_GROUPS=()
WINDOWS_INSTANCE_GROUPS+=($(gcloud compute instance-groups managed list \
kube::util::read-array WINDOWS_INSTANCE_GROUPS < <(gcloud compute instance-groups managed list \
--project "${PROJECT}" \
--filter "name ~ '${WINDOWS_NODE_INSTANCE_PREFIX}-.+' AND zone:(${ZONE})" \
--format='value(name)' || true))
--format='value(name)' || true)
NODE_NAMES=()
if [[ -n "${INSTANCE_GROUPS[@]:-}" ]]; then
if [[ -n "${INSTANCE_GROUPS[*]:-}" ]]; then
for group in "${INSTANCE_GROUPS[@]}"; do
NODE_NAMES+=($(gcloud compute instance-groups managed list-instances \
kube::util::read-array NODE_NAMES < <(gcloud compute instance-groups managed list-instances \
"${group}" --zone "${ZONE}" --project "${PROJECT}" \
--format='value(instance)'))
--format='value(instance)')
done
fi
# Add heapster node name to the list too (if it exists).
@@ -416,11 +416,11 @@ function detect-node-names() {
fi
export NODE_NAMES
WINDOWS_NODE_NAMES=()
if [[ -n "${WINDOWS_INSTANCE_GROUPS[@]:-}" ]]; then
if [[ -n "${WINDOWS_INSTANCE_GROUPS[*]:-}" ]]; then
for group in "${WINDOWS_INSTANCE_GROUPS[@]}"; do
WINDOWS_NODE_NAMES+=($(gcloud compute instance-groups managed \
kube::util::read-array WINDOWS_NODE_NAMES < <(gcloud compute instance-groups managed \
list-instances "${group}" --zone "${ZONE}" --project "${PROJECT}" \
--format='value(instance)'))
--format='value(instance)')
done
fi
export WINDOWS_NODE_NAMES
@@ -494,7 +494,7 @@ function detect-master() {
}
function load-or-gen-kube-bearertoken() {
if [[ ! -z "${KUBE_CONTEXT:-}" ]]; then
if [[ -n "${KUBE_CONTEXT:-}" ]]; then
get-kubeconfig-bearertoken
fi
if [[ -z "${KUBE_BEARER_TOKEN:-}" ]]; then
@@ -559,10 +559,10 @@ function tars_from_version() {
# ZONE
function get-master-env() {
# TODO(zmerlynn): Make this more reliable with retries.
gcloud compute --project ${PROJECT} ssh --zone ${ZONE} ${KUBE_MASTER} --command \
gcloud compute --project "${PROJECT}" ssh --zone "${ZONE}" "${KUBE_MASTER}" --command \
"curl --fail --silent -H 'Metadata-Flavor: Google' \
'http://metadata/computeMetadata/v1/instance/attributes/kube-env'" 2>/dev/null
gcloud compute --project ${PROJECT} ssh --zone ${ZONE} ${KUBE_MASTER} --command \
gcloud compute --project "${PROJECT}" ssh --zone "${ZONE}" "${KUBE_MASTER}" --command \
"curl --fail --silent -H 'Metadata-Flavor: Google' \
'http://metadata/computeMetadata/v1/instance/attributes/kube-master-certs'" 2>/dev/null
}
@@ -646,7 +646,7 @@ function build-linux-node-labels {
if [[ -n "${MASTER_NODE_LABELS:-}" && "${node_type}" == "master" ]]; then
node_labels="${node_labels:+${node_labels},}${MASTER_NODE_LABELS}"
fi
echo $node_labels
echo "$node_labels"
}
function build-windows-node-labels {
@@ -657,7 +657,7 @@ function build-windows-node-labels {
if [[ -n "${WINDOWS_NON_MASTER_NODE_LABELS:-}" ]]; then
node_labels="${node_labels:+${node_labels},}${WINDOWS_NON_MASTER_NODE_LABELS}"
fi
echo $node_labels
echo "$node_labels"
}
# yaml-map-string-stringarray converts the encoded structure to yaml format, and echoes the result
@@ -748,7 +748,7 @@ function construct-common-kubelet-flags {
if [[ -n "${MAX_PODS_PER_NODE:-}" ]]; then
flags+=" --max-pods=${MAX_PODS_PER_NODE}"
fi
echo $flags
echo "$flags"
}
# Sets KUBELET_ARGS with the kubelet flags for Linux nodes.
@@ -1509,9 +1509,12 @@ EOF
# TODO(kubernetes/autoscaler#718): AUTOSCALER_ENV_VARS is a hotfix for cluster autoscaler,
# which reads the kube-env to determine the shape of a node and was broken by #60020.
# This should be removed as soon as a more reliable source of information is available!
local node_labels="$(build-linux-node-labels node)"
local node_taints="${NODE_TAINTS:-}"
local autoscaler_env_vars="node_labels=${node_labels};node_taints=${node_taints}"
local node_labels
local node_taints
local autoscaler_env_vars
node_labels="$(build-linux-node-labels node)"
node_taints="${NODE_TAINTS:-}"
autoscaler_env_vars="node_labels=${node_labels};node_taints=${node_taints}"
cat >>"$file" <<EOF
AUTOSCALER_ENV_VARS: $(yaml-quote "${autoscaler_env_vars}")
EOF
@@ -1528,17 +1531,17 @@ MAX_PODS_PER_NODE: $(yaml-quote "${MAX_PODS_PER_NODE}")
EOF
fi
if [[ "${PREPARE_KONNECTIVITY_SERVICE:-false}" == "true" ]]; then
cat >>$file <<EOF
cat >>"$file" <<EOF
PREPARE_KONNECTIVITY_SERVICE: $(yaml-quote "${PREPARE_KONNECTIVITY_SERVICE}")
EOF
fi
if [[ "${EGRESS_VIA_KONNECTIVITY:-false}" == "true" ]]; then
cat >>$file <<EOF
cat >>"$file" <<EOF
EGRESS_VIA_KONNECTIVITY: $(yaml-quote "${EGRESS_VIA_KONNECTIVITY}")
EOF
fi
if [[ "${RUN_KONNECTIVITY_PODS:-false}" == "true" ]]; then
cat >>$file <<EOF
cat >>"$file" <<EOF
RUN_KONNECTIVITY_PODS: $(yaml-quote "${RUN_KONNECTIVITY_PODS}")
EOF
fi
@@ -1634,7 +1637,7 @@ function create-certs {
service_ip=${SERVICE_CLUSTER_IP_RANGE%/*}
service_ip="${service_ip%.*}.$((${service_ip##*.} + 1))"
local sans=""
for extra in $@; do
for extra in "$@"; do
if [[ -n "${extra}" ]]; then
sans="${sans}IP:${extra},"
fi
@@ -1687,10 +1690,10 @@ function setup-easyrsa {
mkdir easy-rsa-master/kubelet
cp -r easy-rsa-master/easyrsa3/* easy-rsa-master/kubelet
mkdir easy-rsa-master/aggregator
cp -r easy-rsa-master/easyrsa3/* easy-rsa-master/aggregator) &>${cert_create_debug_output} || true
cp -r easy-rsa-master/easyrsa3/* easy-rsa-master/aggregator) &>"${cert_create_debug_output}" || true
CERT_DIR="${KUBE_TEMP}/easy-rsa-master/easyrsa3"
AGGREGATOR_CERT_DIR="${KUBE_TEMP}/easy-rsa-master/aggregator"
if [ ! -x "${CERT_DIR}/easyrsa" -o ! -x "${AGGREGATOR_CERT_DIR}/easyrsa" ]; then
if [ ! -x "${CERT_DIR}/easyrsa" ] || [ ! -x "${AGGREGATOR_CERT_DIR}/easyrsa" ]; then
# TODO(roberthbailey,porridge): add better error handling here,
# see https://github.com/kubernetes/kubernetes/issues/55229
cat "${cert_create_debug_output}" >&2
@@ -1739,7 +1742,7 @@ function generate-certs {
./easyrsa --dn-mode=org \
--req-cn=kubecfg --req-org=system:masters \
--req-c= --req-st= --req-city= --req-email= --req-ou= \
build-client-full kubecfg nopass) &>${cert_create_debug_output} || true
build-client-full kubecfg nopass) &>"${cert_create_debug_output}" || true
local output_file_missing=0
local output_file
for output_file in \
@@ -1759,7 +1762,7 @@ function generate-certs {
output_file_missing=1
fi
done
if (( $output_file_missing )); then
if [ $output_file_missing -ne 0 ]; then
# TODO(roberthbailey,porridge): add better error handling here,
# see https://github.com/kubernetes/kubernetes/issues/55229
cat "${cert_create_debug_output}" >&2
@@ -1804,7 +1807,7 @@ function generate-aggregator-certs {
./easyrsa --dn-mode=org \
--req-cn=proxy-clientcfg --req-org=system:aggregator \
--req-c= --req-st= --req-city= --req-email= --req-ou= \
build-client-full proxy-clientcfg nopass) &>${cert_create_debug_output} || true
build-client-full proxy-clientcfg nopass) &>"${cert_create_debug_output}" || true
local output_file_missing=0
local output_file
for output_file in \
@@ -1818,7 +1821,7 @@ function generate-aggregator-certs {
output_file_missing=1
fi
done
if (( $output_file_missing )); then
if [ $output_file_missing -ne 0 ]; then
# TODO(roberthbailey,porridge): add better error handling here,
# see https://github.com/kubernetes/kubernetes/issues/55229
cat "${cert_create_debug_output}" >&2
@@ -1833,11 +1836,12 @@ function generate-aggregator-certs {
# $1 master env (kube-env of master; result of calling get-master-env)
# $2 env key to use
function get-env-val() {
local match=`(echo "${1}" | grep -E "^${2}:") || echo ""`
if [[ -z ${match} ]]; then
local match
match=$( (echo "${1}" | grep -E "^${2}:") || echo '')
if [[ -z "${match}" ]]; then
echo ""
fi
echo ${match} | cut -d : -f 2 | cut -d \' -f 2
echo "${match}" | cut -d : -f 2 | cut -d \' -f 2
}
# Load the master env by calling get-master-env, and extract important values
@@ -1875,7 +1879,7 @@ function parse-master-env() {
# KUBE_PROMPT_FOR_UPDATE
function update-or-verify-gcloud() {
local sudo_prefix=""
if [ ! -w $(dirname `which gcloud`) ]; then
if [ ! -w "$(dirname "$(which gcloud)")" ]; then
sudo_prefix="sudo"
fi
# update and install components as needed
@@ -1886,20 +1890,21 @@ function update-or-verify-gcloud() {
${sudo_prefix} gcloud ${gcloud_prompt:-} components install beta
${sudo_prefix} gcloud ${gcloud_prompt:-} components update
else
local version=$(gcloud version --format=json)
python -c'
local version
version=$(gcloud version --format=json)
python -c"
import json,sys
from distutils import version
minVersion = version.LooseVersion("1.3.0")
required = [ "alpha", "beta", "core" ]
minVersion = version.LooseVersion('1.3.0')
required = [ 'alpha', 'beta', 'core' ]
data = json.loads(sys.argv[1])
rel = data.get("Google Cloud SDK")
if "CL @" in rel:
print("Using dev version of gcloud: %s" %rel)
rel = data.get('Google Cloud SDK')
if 'CL @' in rel:
print('Using dev version of gcloud: %s' %rel)
exit(0)
if rel != "HEAD" and version.LooseVersion(rel) < minVersion:
print("gcloud version out of date ( < %s )" % minVersion)
if rel != 'HEAD' and version.LooseVersion(rel) < minVersion:
print('gcloud version out of date ( < %s )' % minVersion)
exit(1)
missing = []
for c in required:
@@ -1907,10 +1912,10 @@ for c in required:
missing += [c]
if missing:
for c in missing:
print ("missing required gcloud component \"{0}\"".format(c))
print ("Try running `gcloud components install {0}`".format(c))
print ('missing required gcloud component \"{0}\"'.format(c))
print ('Try running \$(gcloud components install {0})'.format(c))
exit(1)
' """${version}"""
" "${version}"
fi
}
@@ -2267,7 +2272,7 @@ function check-existing() {
echo "${KUBE_RESOURCE_FOUND} found." >&2
# Get user input only if running in terminal.
if [[ ${running_in_terminal} == "true" && ${KUBE_UP_AUTOMATIC_CLEANUP} == "false" ]]; then
read -p "Would you like to shut down the old cluster (call kube-down)? [y/N] " run_kube_down
read -r -p "Would you like to shut down the old cluster (call kube-down)? [y/N] " run_kube_down
fi
if [[ ${run_kube_down} == "y" || ${run_kube_down} == "Y" || ${KUBE_UP_AUTOMATIC_CLEANUP} == "true" ]]; then
echo "... calling kube-down" >&2
@@ -2279,7 +2284,7 @@ function check-existing() {
function check-network-mode() {
local mode
mode="$(gcloud compute networks list --filter="name=('${NETWORK}')" --project "${NETWORK_PROJECT}" --format='value(x_gcloud_subnet_mode)' || true)"
mode=$(gcloud compute networks list --filter="name=('${NETWORK}')" --project "${NETWORK_PROJECT}" --format='value(x_gcloud_subnet_mode)' || true)
# The deprecated field uses lower case. Convert to upper case for consistency.
echo "$mode" | tr '[:lower:]' '[:upper:]'
}
@@ -2374,22 +2379,22 @@ function create-subnetworks() {
local subnet
subnet=$(gcloud compute networks subnets describe \
--project "${NETWORK_PROJECT}" \
--region ${REGION} \
${IP_ALIAS_SUBNETWORK} 2>/dev/null || true)
if [[ -z ${subnet} ]]; then
--region "${REGION}" \
"${IP_ALIAS_SUBNETWORK}" 2>/dev/null || true)
if [[ -z "${subnet}" ]]; then
echo "Creating subnet ${NETWORK}:${IP_ALIAS_SUBNETWORK}"
gcloud compute networks subnets create \
${IP_ALIAS_SUBNETWORK} \
"${IP_ALIAS_SUBNETWORK}" \
--description "Automatically generated subnet for ${INSTANCE_PREFIX} cluster. This will be removed on cluster teardown." \
--project "${NETWORK_PROJECT}" \
--network ${NETWORK} \
--region ${REGION} \
--range ${NODE_IP_RANGE} \
--network "${NETWORK}" \
--region "${REGION}" \
--range "${NODE_IP_RANGE}" \
--secondary-range "pods-default=${CLUSTER_IP_RANGE}" \
--secondary-range "services-default=${SERVICE_CLUSTER_IP_RANGE}"
echo "Created subnetwork ${IP_ALIAS_SUBNETWORK}"
else
if ! echo ${subnet} | grep --quiet secondaryIpRanges; then
if ! echo "${subnet}" | grep --quiet secondaryIpRanges; then
echo "${color_red}Subnet ${IP_ALIAS_SUBNETWORK} does not have a secondary range${color_norm}"
exit 1
fi
@@ -2418,9 +2423,9 @@ function detect-subnetworks() {
fi
SUBNETWORK=$(gcloud compute networks subnets list \
--network=${NETWORK} \
--regions=${REGION} \
--project=${NETWORK_PROJECT} \
--network="${NETWORK}" \
--regions="${REGION}" \
--project="${NETWORK_PROJECT}" \
--limit=1 \
--format='value(name)' 2>/dev/null)
@@ -2439,17 +2444,17 @@ function detect-subnetworks() {
# NETWORK
function create-cloud-nat-router() {
if [[ ${GCE_PRIVATE_CLUSTER:-} == "true" ]]; then
if gcloud compute routers describe "$NETWORK-nat-router" --project $NETWORK_PROJECT --region $REGION &>/dev/null; then
if gcloud compute routers describe "$NETWORK-nat-router" --project "$NETWORK_PROJECT" --region "$REGION" &>/dev/null; then
echo "Cloud nat already exists"
return 0
fi
gcloud compute routers create "$NETWORK-nat-router" \
--project $NETWORK_PROJECT \
--region $REGION \
--network $NETWORK
--project "$NETWORK_PROJECT" \
--region "$REGION" \
--network "$NETWORK"
gcloud compute routers nats create "$NETWORK-nat-config" \
--project $NETWORK_PROJECT \
--router-region $REGION \
--project "$NETWORK_PROJECT" \
--router-region "$REGION" \
--router "$NETWORK-nat-router" \
--nat-primary-subnet-ip-ranges \
--auto-allocate-nat-external-ips \
@@ -2523,12 +2528,12 @@ function delete-subnetworks() {
echo "Removing auto-created subnet ${NETWORK}:${IP_ALIAS_SUBNETWORK}"
if [[ -n $(gcloud compute networks subnets describe \
--project "${NETWORK_PROJECT}" \
--region ${REGION} \
${IP_ALIAS_SUBNETWORK} 2>/dev/null) ]]; then
--region "${REGION}" \
"${IP_ALIAS_SUBNETWORK}" 2>/dev/null) ]]; then
gcloud --quiet compute networks subnets delete \
--project "${NETWORK_PROJECT}" \
--region ${REGION} \
${IP_ALIAS_SUBNETWORK}
--region "${REGION}" \
"${IP_ALIAS_SUBNETWORK}"
fi
fi
fi
@@ -2669,7 +2674,7 @@ function create-master() {
MASTER_INTERNAL_IP=""
if [[ ${GCE_PRIVATE_CLUSTER:-} == "true" ]]; then
gcloud compute addresses create "${MASTER_NAME}-internal-ip" --project "${PROJECT}" --region $REGION --subnet $SUBNETWORK
gcloud compute addresses create "${MASTER_NAME}-internal-ip" --project "${PROJECT}" --region "$REGION" --subnet "$SUBNETWORK"
MASTER_INTERNAL_IP=$(gcloud compute addresses describe "${MASTER_NAME}-internal-ip" --project "${PROJECT}" --region "${REGION}" -q --format='value(address)')
echo "Master internal ip is: $MASTER_INTERNAL_IP"
KUBERNETES_MASTER_NAME="${MASTER_INTERNAL_IP}"
@@ -2677,8 +2682,8 @@ function create-master() {
fi
create-certs "${MASTER_RESERVED_IP}" "${MASTER_INTERNAL_IP}"
create-etcd-certs ${MASTER_NAME}
create-etcd-apiserver-certs "etcd-${MASTER_NAME}" ${MASTER_NAME}
create-etcd-certs "${MASTER_NAME}"
create-etcd-apiserver-certs "etcd-${MASTER_NAME}" "${MASTER_NAME}"
if [[ "$(get-num-nodes)" -ge "50" ]]; then
# We block on master creation for large clusters to avoid doing too much
@@ -2790,7 +2795,7 @@ function attach-external-ip() {
gcloud compute instances delete-access-config "${NAME}" \
--project "${PROJECT}" --zone "${ZONE}" \
--access-config-name "${ACCESS_CONFIG_NAME}"
if [[ -z ${IP_ADDR} ]]; then
if [[ -z "${IP_ADDR}" ]]; then
gcloud compute instances add-access-config "${NAME}" \
--project "${PROJECT}" --zone "${ZONE}" \
--access-config-name "${ACCESS_CONFIG_NAME}"
@@ -2812,8 +2817,8 @@ function attach-external-ip() {
# REGION
function create-loadbalancer() {
# Step 0: Return early if LB is already configured.
if gcloud compute forwarding-rules describe ${MASTER_NAME} \
--project "${PROJECT}" --region ${REGION} > /dev/null 2>&1; then
if gcloud compute forwarding-rules describe "${MASTER_NAME}" \
--project "${PROJECT}" --region "${REGION}" > /dev/null 2>&1; then
echo "Load balancer already exists"
return
fi
@@ -2836,13 +2841,13 @@ function create-loadbalancer() {
# Step 3: Create forwarding rule.
# TODO: This step can take up to 20 min. We need to speed this up...
gcloud compute forwarding-rules create ${MASTER_NAME} \
--project "${PROJECT}" --region ${REGION} \
--target-pool ${MASTER_NAME} --address=${KUBE_MASTER_IP} --ports=443
gcloud compute forwarding-rules create "${MASTER_NAME}" \
--project "${PROJECT}" --region "${REGION}" \
--target-pool "${MASTER_NAME}" --address="${KUBE_MASTER_IP}" --ports=443
echo -n "Waiting for the load balancer configuration to propagate..."
local counter=0
until $(curl -k -m1 https://${KUBE_MASTER_IP} &> /dev/null); do
until curl -k -m1 "https://${KUBE_MASTER_IP}" &> /dev/null; do
counter=$((counter+1))
echo -n .
if [[ ${counter} -ge 1800 ]]; then
@@ -2904,7 +2909,7 @@ function detach-internal-master-ip() {
# * REGION
function create-internal-loadbalancer() {
if gcloud compute forwarding-rules describe "${MASTER_NAME}-internal" \
--project "${PROJECT}" --region ${REGION} > /dev/null 2>&1; then
--project "${PROJECT}" --region "${REGION}" > /dev/null 2>&1; then
echo "Load balancer already exists"
return
fi
@@ -3033,11 +3038,11 @@ function create-nodes-template() {
# create-[linux,windows]-nodes() as well as get-template()!
local linux_template_name="${NODE_INSTANCE_PREFIX}-template"
local windows_template_name="${WINDOWS_NODE_INSTANCE_PREFIX}-template"
create-linux-node-instance-template $linux_template_name
create-windows-node-instance-template $windows_template_name "${scope_flags[*]}"
create-linux-node-instance-template "$linux_template_name"
create-windows-node-instance-template "$windows_template_name" "${scope_flags[*]}"
if [[ -n "${ADDITIONAL_MACHINE_TYPE:-}" ]]; then
local linux_extra_template_name="${NODE_INSTANCE_PREFIX}-extra-template"
create-linux-node-instance-template $linux_extra_template_name "${ADDITIONAL_MACHINE_TYPE}"
create-linux-node-instance-template "$linux_extra_template_name" "${ADDITIONAL_MACHINE_TYPE}"
fi
}
@@ -3070,7 +3075,7 @@ function create-linux-nodes() {
local extra_template_name="${NODE_INSTANCE_PREFIX}-extra-template"
local nodes="${NUM_NODES}"
if [[ ! -z "${HEAPSTER_MACHINE_TYPE:-}" ]]; then
if [[ -n "${HEAPSTER_MACHINE_TYPE:-}" ]]; then
echo "Creating a special node for heapster with machine-type ${HEAPSTER_MACHINE_TYPE}"
create-heapster-node
nodes=$(( nodes - 1 ))
@@ -3355,7 +3360,7 @@ function check-cluster() {
fi
echo
echo -e "${color_green}Kubernetes cluster is running. The master is running at:"
echo -e "${color_green:-}Kubernetes cluster is running. The master is running at:"
echo
echo -e "${color_yellow} https://${KUBE_MASTER_IP}"
echo
@@ -3674,10 +3679,10 @@ function kube-down() {
function get-replica-name() {
# Ignore if gcloud compute fails and successfully echo any outcome
# shellcheck disable=SC2005
echo $(gcloud compute instances list \
echo "$(gcloud compute instances list \
--project "${PROJECT}" \
--filter="name ~ '$(get-replica-name-regexp)' AND zone:(${ZONE})" \
--format "value(name)" | head -n1)
--format "value(name)" | head -n1)"
}
# Prints comma-separated names of all of the master replicas in all zones.
@@ -3690,10 +3695,10 @@ function get-replica-name() {
function get-all-replica-names() {
# Ignore if gcloud compute fails and successfully echo any outcome
# shellcheck disable=SC2005
echo $(gcloud compute instances list \
echo "$(gcloud compute instances list \
--project "${PROJECT}" \
--filter="name ~ '$(get-replica-name-regexp)'" \
--format "value(name)" | tr "\n" "," | sed 's/,$//')
--format "value(name)" | tr "\n" "," | sed 's/,$//')"
}
# Prints the number of all of the master replicas in all zones.
@@ -3882,7 +3887,7 @@ function test-setup() {
# wait some additional time (20 minutes altogether).
while ! gcloud compute firewall-rules describe --project "${NETWORK_PROJECT}" "${NODE_TAG}-http-alt" 2> /dev/null; do
if [[ $((start + 1200)) -lt $(date +%s) ]]; then
echo -e "${color_red}Failed to create firewall ${NODE_TAG}-http-alt in ${NETWORK_PROJECT}" >&2
echo -e "${color_red:-}Failed to create firewall ${NODE_TAG}-http-alt in ${NETWORK_PROJECT}" >&2
exit 1
fi
sleep 5
@@ -3890,7 +3895,7 @@ function test-setup() {
# Open up the NodePort range
# TODO(justinsb): Move to main setup, if we decide whether we want to do this by default.
start=`date +%s`
start=$(date +%s)
gcloud compute firewall-rules create \
--project "${NETWORK_PROJECT}" \
--target-tags "${NODE_TAG}" \
@@ -3934,7 +3939,7 @@ function ssh-to-node() {
local cmd="$2"
# Loop until we can successfully ssh into the box
for (( i=0; i<5; i++)); do
if gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --ssh-flag="-o ConnectTimeout=30" --project "${PROJECT}" --zone="${ZONE}" "${node}" --command "echo test > /dev/null"; then
if gcloud compute ssh --ssh-flag='-o LogLevel=quiet' --ssh-flag='-o ConnectTimeout=30' --project "${PROJECT}" --zone="${ZONE}" "${node}" --command 'echo test > /dev/null'; then
break
fi
sleep 5

View File

@@ -1,2 +1 @@
./cluster/gce/gci/configure.sh
./cluster/gce/util.sh