Fix remaining shellcheck issues in cluster/gce/util.sh
Signed-off-by: Joakim Roubert <joakimr@axis.com>
parent 6b9379eae4
commit 8bf2836e41
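The changes below are almost entirely mechanical shellcheck cleanups. As a rough cheat sheet for the recurring patterns (an illustrative sketch, not code taken from this commit; `items` and `fetch_names` are placeholder names), the fixes fall into a handful of classes:

# SC2086: quote parameter expansions so they are not word-split or globbed
gsutil hash -h -m ${tar_location}                 # before
gsutil hash -h -m "${tar_location}"               # after

# SC2162: read without -r mangles backslashes
read resp                                         # before
read -r resp                                      # after

# SC2236: prefer -n over ! -z
[[ ! -z "${KUBE_CONTEXT:-}" ]]                    # before
[[ -n "${KUBE_CONTEXT:-}" ]]                      # after

# SC2006: prefer $(...) over legacy backticks
start=`date +%s`                                  # before
start=$(date +%s)                                 # after

# SC2207: don't build arrays by word-splitting command output;
# read them line by line instead (kube::util::read-array wraps this)
items+=($(fetch_names))                           # before
kube::util::read-array items < <(fetch_names)     # after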
@@ -41,8 +41,8 @@ else
 exit 1
 fi
 
-if [[ ${NODE_LOCAL_SSDS:-} -ge 1 ]] && [[ ! -z ${NODE_LOCAL_SSDS_EXT:-} ]] ; then
-echo -e "${color_red}Local SSD: Only one of NODE_LOCAL_SSDS and NODE_LOCAL_SSDS_EXT can be specified at once${color_norm}" >&2
+if [[ ${NODE_LOCAL_SSDS:-} -ge 1 ]] && [[ -n ${NODE_LOCAL_SSDS_EXT:-} ]] ; then
+echo -e "${color_red:-}Local SSD: Only one of NODE_LOCAL_SSDS and NODE_LOCAL_SSDS_EXT can be specified at once${color_norm:-}" >&2
 exit 2
 fi
 
@@ -166,7 +166,7 @@ function verify-prereqs() {
 local resp="n"
 if [[ "${KUBE_PROMPT_FOR_UPDATE}" == "y" ]]; then
 echo "Can't find ${cmd} in PATH. Do you wish to install the Google Cloud SDK? [Y/n]"
-read resp
+read -r resp
 fi
 if [[ "${resp}" != "n" && "${resp}" != "N" ]]; then
 curl https://sdk.cloud.google.com | bash
@@ -216,7 +216,7 @@ function gsutil_get_tar_md5() {
 local -r tar_location=$1
 #parse the output and return the md5 hash
 #the sed command at the end removes whitespace
-local -r tar_md5=$(gsutil hash -h -m ${tar_location} 2>/dev/null | grep "Hash (md5):" | awk -F ':' '{print $2}' | sed 's/^[[:space:]]*//g')
+local -r tar_md5=$(gsutil hash -h -m "${tar_location}" 2>/dev/null | grep "Hash (md5):" | awk -F ':' '{print $2}' | sed 's/^[[:space:]]*//g')
 echo "${tar_md5}"
 }
 
@@ -226,7 +226,7 @@ function copy-to-staging() {
 local -r gs_url=$2
 local -r tar=$3
 local -r hash=$4
-local -r basename_tar=$(basename ${tar})
+local -r basename_tar=$(basename "${tar}")
 
 #check whether this tar alread exists and has the same hash
 #if it matches, then don't bother uploading it again
@@ -392,22 +392,22 @@ function upload-tars() {
 function detect-node-names() {
 detect-project
 INSTANCE_GROUPS=()
-INSTANCE_GROUPS+=($(gcloud compute instance-groups managed list \
+kube::util::read-array INSTANCE_GROUPS < <(gcloud compute instance-groups managed list \
 --project "${PROJECT}" \
 --filter "name ~ '${NODE_INSTANCE_PREFIX}-.+' AND zone:(${ZONE})" \
---format='value(name)' || true))
+--format='value(name)' || true)
 WINDOWS_INSTANCE_GROUPS=()
-WINDOWS_INSTANCE_GROUPS+=($(gcloud compute instance-groups managed list \
+kube::util::read-array WINDOWS_INSTANCE_GROUPS < <(gcloud compute instance-groups managed list \
 --project "${PROJECT}" \
 --filter "name ~ '${WINDOWS_NODE_INSTANCE_PREFIX}-.+' AND zone:(${ZONE})" \
---format='value(name)' || true))
+--format='value(name)' || true)
 
 NODE_NAMES=()
-if [[ -n "${INSTANCE_GROUPS[@]:-}" ]]; then
+if [[ -n "${INSTANCE_GROUPS[*]:-}" ]]; then
 for group in "${INSTANCE_GROUPS[@]}"; do
-NODE_NAMES+=($(gcloud compute instance-groups managed list-instances \
+kube::util::read-array NODE_NAMES < <(gcloud compute instance-groups managed list-instances \
 "${group}" --zone "${ZONE}" --project "${PROJECT}" \
---format='value(instance)'))
+--format='value(instance)')
 done
 fi
 # Add heapster node name to the list too (if it exists).
@@ -416,11 +416,11 @@ function detect-node-names() {
 fi
 export NODE_NAMES
 WINDOWS_NODE_NAMES=()
-if [[ -n "${WINDOWS_INSTANCE_GROUPS[@]:-}" ]]; then
+if [[ -n "${WINDOWS_INSTANCE_GROUPS[*]:-}" ]]; then
 for group in "${WINDOWS_INSTANCE_GROUPS[@]}"; do
-WINDOWS_NODE_NAMES+=($(gcloud compute instance-groups managed \
+kube::util::read-array WINDOWS_NODE_NAMES < <(gcloud compute instance-groups managed \
 list-instances "${group}" --zone "${ZONE}" --project "${PROJECT}" \
---format='value(instance)'))
+--format='value(instance)')
 done
 fi
 export WINDOWS_NODE_NAMES
@@ -494,7 +494,7 @@ function detect-master() {
 }
 
 function load-or-gen-kube-bearertoken() {
-if [[ ! -z "${KUBE_CONTEXT:-}" ]]; then
+if [[ -n "${KUBE_CONTEXT:-}" ]]; then
 get-kubeconfig-bearertoken
 fi
 if [[ -z "${KUBE_BEARER_TOKEN:-}" ]]; then
@@ -559,10 +559,10 @@ function tars_from_version() {
 # ZONE
 function get-master-env() {
 # TODO(zmerlynn): Make this more reliable with retries.
-gcloud compute --project ${PROJECT} ssh --zone ${ZONE} ${KUBE_MASTER} --command \
+gcloud compute --project "${PROJECT}" ssh --zone "${ZONE}" "${KUBE_MASTER}" --command \
 "curl --fail --silent -H 'Metadata-Flavor: Google' \
 'http://metadata/computeMetadata/v1/instance/attributes/kube-env'" 2>/dev/null
-gcloud compute --project ${PROJECT} ssh --zone ${ZONE} ${KUBE_MASTER} --command \
+gcloud compute --project "${PROJECT}" ssh --zone "${ZONE}" "${KUBE_MASTER}" --command \
 "curl --fail --silent -H 'Metadata-Flavor: Google' \
 'http://metadata/computeMetadata/v1/instance/attributes/kube-master-certs'" 2>/dev/null
 }
@@ -646,7 +646,7 @@ function build-linux-node-labels {
 if [[ -n "${MASTER_NODE_LABELS:-}" && "${node_type}" == "master" ]]; then
 node_labels="${node_labels:+${node_labels},}${MASTER_NODE_LABELS}"
 fi
-echo $node_labels
+echo "$node_labels"
 }
 
 function build-windows-node-labels {
@@ -657,7 +657,7 @@ function build-windows-node-labels {
 if [[ -n "${WINDOWS_NON_MASTER_NODE_LABELS:-}" ]]; then
 node_labels="${node_labels:+${node_labels},}${WINDOWS_NON_MASTER_NODE_LABELS}"
 fi
-echo $node_labels
+echo "$node_labels"
 }
 
 # yaml-map-string-stringarray converts the encoded structure to yaml format, and echoes the result
@@ -748,7 +748,7 @@ function construct-common-kubelet-flags {
 if [[ -n "${MAX_PODS_PER_NODE:-}" ]]; then
 flags+=" --max-pods=${MAX_PODS_PER_NODE}"
 fi
-echo $flags
+echo "$flags"
 }
 
 # Sets KUBELET_ARGS with the kubelet flags for Linux nodes.
@@ -1509,9 +1509,12 @@ EOF
 # TODO(kubernetes/autoscaler#718): AUTOSCALER_ENV_VARS is a hotfix for cluster autoscaler,
 # which reads the kube-env to determine the shape of a node and was broken by #60020.
 # This should be removed as soon as a more reliable source of information is available!
-local node_labels="$(build-linux-node-labels node)"
-local node_taints="${NODE_TAINTS:-}"
-local autoscaler_env_vars="node_labels=${node_labels};node_taints=${node_taints}"
+local node_labels
+local node_taints
+local autoscaler_env_vars
+node_labels="$(build-linux-node-labels node)"
+node_taints="${NODE_TAINTS:-}"
+autoscaler_env_vars="node_labels=${node_labels};node_taints=${node_taints}"
 cat >>"$file" <<EOF
 AUTOSCALER_ENV_VARS: $(yaml-quote "${autoscaler_env_vars}")
 EOF
@@ -1528,17 +1531,17 @@ MAX_PODS_PER_NODE: $(yaml-quote "${MAX_PODS_PER_NODE}")
 EOF
 fi
 if [[ "${PREPARE_KONNECTIVITY_SERVICE:-false}" == "true" ]]; then
-cat >>$file <<EOF
+cat >>"$file" <<EOF
 PREPARE_KONNECTIVITY_SERVICE: $(yaml-quote "${PREPARE_KONNECTIVITY_SERVICE}")
 EOF
 fi
 if [[ "${EGRESS_VIA_KONNECTIVITY:-false}" == "true" ]]; then
-cat >>$file <<EOF
+cat >>"$file" <<EOF
 EGRESS_VIA_KONNECTIVITY: $(yaml-quote "${EGRESS_VIA_KONNECTIVITY}")
 EOF
 fi
 if [[ "${RUN_KONNECTIVITY_PODS:-false}" == "true" ]]; then
-cat >>$file <<EOF
+cat >>"$file" <<EOF
 RUN_KONNECTIVITY_PODS: $(yaml-quote "${RUN_KONNECTIVITY_PODS}")
 EOF
 fi
@@ -1634,7 +1637,7 @@ function create-certs {
 service_ip=${SERVICE_CLUSTER_IP_RANGE%/*}
 service_ip="${service_ip%.*}.$((${service_ip##*.} + 1))"
 local sans=""
-for extra in $@; do
+for extra in "$@"; do
 if [[ -n "${extra}" ]]; then
 sans="${sans}IP:${extra},"
 fi
@@ -1687,10 +1690,10 @@ function setup-easyrsa {
 mkdir easy-rsa-master/kubelet
 cp -r easy-rsa-master/easyrsa3/* easy-rsa-master/kubelet
 mkdir easy-rsa-master/aggregator
-cp -r easy-rsa-master/easyrsa3/* easy-rsa-master/aggregator) &>${cert_create_debug_output} || true
+cp -r easy-rsa-master/easyrsa3/* easy-rsa-master/aggregator) &>"${cert_create_debug_output}" || true
 CERT_DIR="${KUBE_TEMP}/easy-rsa-master/easyrsa3"
 AGGREGATOR_CERT_DIR="${KUBE_TEMP}/easy-rsa-master/aggregator"
-if [ ! -x "${CERT_DIR}/easyrsa" -o ! -x "${AGGREGATOR_CERT_DIR}/easyrsa" ]; then
+if [ ! -x "${CERT_DIR}/easyrsa" ] || [ ! -x "${AGGREGATOR_CERT_DIR}/easyrsa" ]; then
 # TODO(roberthbailey,porridge): add better error handling here,
 # see https://github.com/kubernetes/kubernetes/issues/55229
 cat "${cert_create_debug_output}" >&2
@@ -1739,7 +1742,7 @@ function generate-certs {
 ./easyrsa --dn-mode=org \
 --req-cn=kubecfg --req-org=system:masters \
 --req-c= --req-st= --req-city= --req-email= --req-ou= \
-build-client-full kubecfg nopass) &>${cert_create_debug_output} || true
+build-client-full kubecfg nopass) &>"${cert_create_debug_output}" || true
 local output_file_missing=0
 local output_file
 for output_file in \
@@ -1759,7 +1762,7 @@ function generate-certs {
 output_file_missing=1
 fi
 done
-if (( $output_file_missing )); then
+if [ $output_file_missing -ne 0 ]; then
 # TODO(roberthbailey,porridge): add better error handling here,
 # see https://github.com/kubernetes/kubernetes/issues/55229
 cat "${cert_create_debug_output}" >&2
@@ -1804,7 +1807,7 @@ function generate-aggregator-certs {
 ./easyrsa --dn-mode=org \
 --req-cn=proxy-clientcfg --req-org=system:aggregator \
 --req-c= --req-st= --req-city= --req-email= --req-ou= \
-build-client-full proxy-clientcfg nopass) &>${cert_create_debug_output} || true
+build-client-full proxy-clientcfg nopass) &>"${cert_create_debug_output}" || true
 local output_file_missing=0
 local output_file
 for output_file in \
@@ -1818,7 +1821,7 @@ function generate-aggregator-certs {
 output_file_missing=1
 fi
 done
-if (( $output_file_missing )); then
+if [ $output_file_missing -ne 0 ]; then
 # TODO(roberthbailey,porridge): add better error handling here,
 # see https://github.com/kubernetes/kubernetes/issues/55229
 cat "${cert_create_debug_output}" >&2
@@ -1833,11 +1836,12 @@ function generate-aggregator-certs {
 # $1 master env (kube-env of master; result of calling get-master-env)
 # $2 env key to use
 function get-env-val() {
-local match=`(echo "${1}" | grep -E "^${2}:") || echo ""`
-if [[ -z ${match} ]]; then
+local match
+match=$( (echo "${1}" | grep -E "^${2}:") || echo '')
+if [[ -z "${match}" ]]; then
 echo ""
 fi
-echo ${match} | cut -d : -f 2 | cut -d \' -f 2
+echo "${match}" | cut -d : -f 2 | cut -d \' -f 2
 }
 
 # Load the master env by calling get-master-env, and extract important values
@@ -1875,7 +1879,7 @@ function parse-master-env() {
 # KUBE_PROMPT_FOR_UPDATE
 function update-or-verify-gcloud() {
 local sudo_prefix=""
-if [ ! -w $(dirname `which gcloud`) ]; then
+if [ ! -w "$(dirname "$(which gcloud)")" ]; then
 sudo_prefix="sudo"
 fi
 # update and install components as needed
@@ -1886,20 +1890,21 @@ function update-or-verify-gcloud() {
 ${sudo_prefix} gcloud ${gcloud_prompt:-} components install beta
 ${sudo_prefix} gcloud ${gcloud_prompt:-} components update
 else
-local version=$(gcloud version --format=json)
-python -c'
+local version
+version=$(gcloud version --format=json)
+python -c"
 import json,sys
 from distutils import version
 
-minVersion = version.LooseVersion("1.3.0")
-required = [ "alpha", "beta", "core" ]
+minVersion = version.LooseVersion('1.3.0')
+required = [ 'alpha', 'beta', 'core' ]
 data = json.loads(sys.argv[1])
-rel = data.get("Google Cloud SDK")
-if "CL @" in rel:
-print("Using dev version of gcloud: %s" %rel)
+rel = data.get('Google Cloud SDK')
+if 'CL @' in rel:
+print('Using dev version of gcloud: %s' %rel)
 exit(0)
-if rel != "HEAD" and version.LooseVersion(rel) < minVersion:
-print("gcloud version out of date ( < %s )" % minVersion)
+if rel != 'HEAD' and version.LooseVersion(rel) < minVersion:
+print('gcloud version out of date ( < %s )' % minVersion)
 exit(1)
 missing = []
 for c in required:
@@ -1907,10 +1912,10 @@ for c in required:
 missing += [c]
 if missing:
 for c in missing:
-print ("missing required gcloud component \"{0}\"".format(c))
-print ("Try running `gcloud components install {0}`".format(c))
+print ('missing required gcloud component \"{0}\"'.format(c))
+print ('Try running \$(gcloud components install {0})'.format(c))
 exit(1)
-' """${version}"""
+" "${version}"
 fi
 }
 
@@ -2267,7 +2272,7 @@ function check-existing() {
 echo "${KUBE_RESOURCE_FOUND} found." >&2
 # Get user input only if running in terminal.
 if [[ ${running_in_terminal} == "true" && ${KUBE_UP_AUTOMATIC_CLEANUP} == "false" ]]; then
-read -p "Would you like to shut down the old cluster (call kube-down)? [y/N] " run_kube_down
+read -r -p "Would you like to shut down the old cluster (call kube-down)? [y/N] " run_kube_down
 fi
 if [[ ${run_kube_down} == "y" || ${run_kube_down} == "Y" || ${KUBE_UP_AUTOMATIC_CLEANUP} == "true" ]]; then
 echo "... calling kube-down" >&2
@@ -2279,7 +2284,7 @@ function check-existing() {
 
 function check-network-mode() {
 local mode
-mode="$(gcloud compute networks list --filter="name=('${NETWORK}')" --project "${NETWORK_PROJECT}" --format='value(x_gcloud_subnet_mode)' || true)"
+mode=$(gcloud compute networks list --filter="name=('${NETWORK}')" --project "${NETWORK_PROJECT}" --format='value(x_gcloud_subnet_mode)' || true)
 # The deprecated field uses lower case. Convert to upper case for consistency.
 echo "$mode" | tr '[:lower:]' '[:upper:]'
 }
@@ -2374,22 +2379,22 @@ function create-subnetworks() {
 local subnet
 subnet=$(gcloud compute networks subnets describe \
 --project "${NETWORK_PROJECT}" \
---region ${REGION} \
-${IP_ALIAS_SUBNETWORK} 2>/dev/null || true)
-if [[ -z ${subnet} ]]; then
+--region "${REGION}" \
+"${IP_ALIAS_SUBNETWORK}" 2>/dev/null || true)
+if [[ -z "${subnet}" ]]; then
 echo "Creating subnet ${NETWORK}:${IP_ALIAS_SUBNETWORK}"
 gcloud compute networks subnets create \
-${IP_ALIAS_SUBNETWORK} \
+"${IP_ALIAS_SUBNETWORK}" \
 --description "Automatically generated subnet for ${INSTANCE_PREFIX} cluster. This will be removed on cluster teardown." \
 --project "${NETWORK_PROJECT}" \
---network ${NETWORK} \
---region ${REGION} \
---range ${NODE_IP_RANGE} \
+--network "${NETWORK}" \
+--region "${REGION}" \
+--range "${NODE_IP_RANGE}" \
 --secondary-range "pods-default=${CLUSTER_IP_RANGE}" \
 --secondary-range "services-default=${SERVICE_CLUSTER_IP_RANGE}"
 echo "Created subnetwork ${IP_ALIAS_SUBNETWORK}"
 else
-if ! echo ${subnet} | grep --quiet secondaryIpRanges; then
+if ! echo "${subnet}" | grep --quiet secondaryIpRanges; then
 echo "${color_red}Subnet ${IP_ALIAS_SUBNETWORK} does not have a secondary range${color_norm}"
 exit 1
 fi
@@ -2418,9 +2423,9 @@ function detect-subnetworks() {
 fi
 
 SUBNETWORK=$(gcloud compute networks subnets list \
---network=${NETWORK} \
---regions=${REGION} \
---project=${NETWORK_PROJECT} \
+--network="${NETWORK}" \
+--regions="${REGION}" \
+--project="${NETWORK_PROJECT}" \
 --limit=1 \
 --format='value(name)' 2>/dev/null)
 
@@ -2439,17 +2444,17 @@ function detect-subnetworks() {
 # NETWORK
 function create-cloud-nat-router() {
 if [[ ${GCE_PRIVATE_CLUSTER:-} == "true" ]]; then
-if gcloud compute routers describe "$NETWORK-nat-router" --project $NETWORK_PROJECT --region $REGION &>/dev/null; then
+if gcloud compute routers describe "$NETWORK-nat-router" --project "$NETWORK_PROJECT" --region "$REGION" &>/dev/null; then
 echo "Cloud nat already exists"
 return 0
 fi
 gcloud compute routers create "$NETWORK-nat-router" \
---project $NETWORK_PROJECT \
---region $REGION \
---network $NETWORK
+--project "$NETWORK_PROJECT" \
+--region "$REGION" \
+--network "$NETWORK"
 gcloud compute routers nats create "$NETWORK-nat-config" \
---project $NETWORK_PROJECT \
---router-region $REGION \
+--project "$NETWORK_PROJECT" \
+--router-region "$REGION" \
 --router "$NETWORK-nat-router" \
 --nat-primary-subnet-ip-ranges \
 --auto-allocate-nat-external-ips \
@@ -2523,12 +2528,12 @@ function delete-subnetworks() {
 echo "Removing auto-created subnet ${NETWORK}:${IP_ALIAS_SUBNETWORK}"
 if [[ -n $(gcloud compute networks subnets describe \
 --project "${NETWORK_PROJECT}" \
---region ${REGION} \
-${IP_ALIAS_SUBNETWORK} 2>/dev/null) ]]; then
+--region "${REGION}" \
+"${IP_ALIAS_SUBNETWORK}" 2>/dev/null) ]]; then
 gcloud --quiet compute networks subnets delete \
 --project "${NETWORK_PROJECT}" \
---region ${REGION} \
-${IP_ALIAS_SUBNETWORK}
+--region "${REGION}" \
+"${IP_ALIAS_SUBNETWORK}"
 fi
 fi
 fi
@@ -2669,7 +2674,7 @@ function create-master() {
 
 MASTER_INTERNAL_IP=""
 if [[ ${GCE_PRIVATE_CLUSTER:-} == "true" ]]; then
-gcloud compute addresses create "${MASTER_NAME}-internal-ip" --project "${PROJECT}" --region $REGION --subnet $SUBNETWORK
+gcloud compute addresses create "${MASTER_NAME}-internal-ip" --project "${PROJECT}" --region "$REGION" --subnet "$SUBNETWORK"
 MASTER_INTERNAL_IP=$(gcloud compute addresses describe "${MASTER_NAME}-internal-ip" --project "${PROJECT}" --region "${REGION}" -q --format='value(address)')
 echo "Master internal ip is: $MASTER_INTERNAL_IP"
 KUBERNETES_MASTER_NAME="${MASTER_INTERNAL_IP}"
@@ -2677,8 +2682,8 @@ function create-master() {
 fi
 
 create-certs "${MASTER_RESERVED_IP}" "${MASTER_INTERNAL_IP}"
-create-etcd-certs ${MASTER_NAME}
-create-etcd-apiserver-certs "etcd-${MASTER_NAME}" ${MASTER_NAME}
+create-etcd-certs "${MASTER_NAME}"
+create-etcd-apiserver-certs "etcd-${MASTER_NAME}" "${MASTER_NAME}"
 
 if [[ "$(get-num-nodes)" -ge "50" ]]; then
 # We block on master creation for large clusters to avoid doing too much
@@ -2790,7 +2795,7 @@ function attach-external-ip() {
 gcloud compute instances delete-access-config "${NAME}" \
 --project "${PROJECT}" --zone "${ZONE}" \
 --access-config-name "${ACCESS_CONFIG_NAME}"
-if [[ -z ${IP_ADDR} ]]; then
+if [[ -z "${IP_ADDR}" ]]; then
 gcloud compute instances add-access-config "${NAME}" \
 --project "${PROJECT}" --zone "${ZONE}" \
 --access-config-name "${ACCESS_CONFIG_NAME}"
@@ -2812,8 +2817,8 @@ function attach-external-ip() {
 # REGION
 function create-loadbalancer() {
 # Step 0: Return early if LB is already configured.
-if gcloud compute forwarding-rules describe ${MASTER_NAME} \
---project "${PROJECT}" --region ${REGION} > /dev/null 2>&1; then
+if gcloud compute forwarding-rules describe "${MASTER_NAME}" \
+--project "${PROJECT}" --region "${REGION}" > /dev/null 2>&1; then
 echo "Load balancer already exists"
 return
 fi
@@ -2836,13 +2841,13 @@ function create-loadbalancer() {
 
 # Step 3: Create forwarding rule.
 # TODO: This step can take up to 20 min. We need to speed this up...
-gcloud compute forwarding-rules create ${MASTER_NAME} \
---project "${PROJECT}" --region ${REGION} \
---target-pool ${MASTER_NAME} --address=${KUBE_MASTER_IP} --ports=443
+gcloud compute forwarding-rules create "${MASTER_NAME}" \
+--project "${PROJECT}" --region "${REGION}" \
+--target-pool "${MASTER_NAME}" --address="${KUBE_MASTER_IP}" --ports=443
 
 echo -n "Waiting for the load balancer configuration to propagate..."
 local counter=0
-until $(curl -k -m1 https://${KUBE_MASTER_IP} &> /dev/null); do
+until curl -k -m1 "https://${KUBE_MASTER_IP}" &> /dev/null; do
 counter=$((counter+1))
 echo -n .
 if [[ ${counter} -ge 1800 ]]; then
@@ -2904,7 +2909,7 @@ function detach-internal-master-ip() {
 # * REGION
 function create-internal-loadbalancer() {
 if gcloud compute forwarding-rules describe "${MASTER_NAME}-internal" \
---project "${PROJECT}" --region ${REGION} > /dev/null 2>&1; then
+--project "${PROJECT}" --region "${REGION}" > /dev/null 2>&1; then
 echo "Load balancer already exists"
 return
 fi
@@ -3033,11 +3038,11 @@ function create-nodes-template() {
 # create-[linux,windows]-nodes() as well as get-template()!
 local linux_template_name="${NODE_INSTANCE_PREFIX}-template"
 local windows_template_name="${WINDOWS_NODE_INSTANCE_PREFIX}-template"
-create-linux-node-instance-template $linux_template_name
-create-windows-node-instance-template $windows_template_name "${scope_flags[*]}"
+create-linux-node-instance-template "$linux_template_name"
+create-windows-node-instance-template "$windows_template_name" "${scope_flags[*]}"
 if [[ -n "${ADDITIONAL_MACHINE_TYPE:-}" ]]; then
 local linux_extra_template_name="${NODE_INSTANCE_PREFIX}-extra-template"
-create-linux-node-instance-template $linux_extra_template_name "${ADDITIONAL_MACHINE_TYPE}"
+create-linux-node-instance-template "$linux_extra_template_name" "${ADDITIONAL_MACHINE_TYPE}"
 fi
 }
 
@@ -3070,7 +3075,7 @@ function create-linux-nodes() {
 local extra_template_name="${NODE_INSTANCE_PREFIX}-extra-template"
 
 local nodes="${NUM_NODES}"
-if [[ ! -z "${HEAPSTER_MACHINE_TYPE:-}" ]]; then
+if [[ -n "${HEAPSTER_MACHINE_TYPE:-}" ]]; then
 echo "Creating a special node for heapster with machine-type ${HEAPSTER_MACHINE_TYPE}"
 create-heapster-node
 nodes=$(( nodes - 1 ))
@@ -3355,7 +3360,7 @@ function check-cluster() {
 fi
 
 echo
-echo -e "${color_green}Kubernetes cluster is running. The master is running at:"
+echo -e "${color_green:-}Kubernetes cluster is running. The master is running at:"
 echo
 echo -e "${color_yellow} https://${KUBE_MASTER_IP}"
 echo
@@ -3674,10 +3679,10 @@ function kube-down() {
 function get-replica-name() {
 # Ignore if gcloud compute fails and successfully echo any outcome
 # shellcheck disable=SC2005
-echo $(gcloud compute instances list \
+echo "$(gcloud compute instances list \
 --project "${PROJECT}" \
 --filter="name ~ '$(get-replica-name-regexp)' AND zone:(${ZONE})" \
---format "value(name)" | head -n1)
+--format "value(name)" | head -n1)"
 }
 
 # Prints comma-separated names of all of the master replicas in all zones.
@@ -3690,10 +3695,10 @@ function get-replica-name() {
 function get-all-replica-names() {
 # Ignore if gcloud compute fails and successfully echo any outcome
 # shellcheck disable=SC2005
-echo $(gcloud compute instances list \
+echo "$(gcloud compute instances list \
 --project "${PROJECT}" \
 --filter="name ~ '$(get-replica-name-regexp)'" \
---format "value(name)" | tr "\n" "," | sed 's/,$//')
+--format "value(name)" | tr "\n" "," | sed 's/,$//')"
 }
 
 # Prints the number of all of the master replicas in all zones.
@@ -3882,7 +3887,7 @@ function test-setup() {
 # wait some additional time (20 minutes altogether).
 while ! gcloud compute firewall-rules describe --project "${NETWORK_PROJECT}" "${NODE_TAG}-http-alt" 2> /dev/null; do
 if [[ $((start + 1200)) -lt $(date +%s) ]]; then
-echo -e "${color_red}Failed to create firewall ${NODE_TAG}-http-alt in ${NETWORK_PROJECT}" >&2
+echo -e "${color_red:-}Failed to create firewall ${NODE_TAG}-http-alt in ${NETWORK_PROJECT}" >&2
 exit 1
 fi
 sleep 5
@@ -3890,7 +3895,7 @@ function test-setup() {
 
 # Open up the NodePort range
 # TODO(justinsb): Move to main setup, if we decide whether we want to do this by default.
-start=`date +%s`
+start=$(date +%s)
 gcloud compute firewall-rules create \
 --project "${NETWORK_PROJECT}" \
 --target-tags "${NODE_TAG}" \
@@ -3934,7 +3939,7 @@ function ssh-to-node() {
 local cmd="$2"
 # Loop until we can successfully ssh into the box
 for (( i=0; i<5; i++)); do
-if gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --ssh-flag="-o ConnectTimeout=30" --project "${PROJECT}" --zone="${ZONE}" "${node}" --command "echo test > /dev/null"; then
+if gcloud compute ssh --ssh-flag='-o LogLevel=quiet' --ssh-flag='-o ConnectTimeout=30' --project "${PROJECT}" --zone="${ZONE}" "${node}" --command 'echo test > /dev/null'; then
 break
 fi
 sleep 5
@@ -1,2 +1 @@
 ./cluster/gce/gci/configure.sh
-./cluster/gce/util.sh
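With the fixes in place, the final hunk drops ./cluster/gce/util.sh from what appears to be the shellcheck exception list. A minimal sketch of re-checking the script locally, assuming shellcheck is installed and that the repository ships its usual verification wrapper:

# lint the touched script directly; -x lets shellcheck follow sourced files
shellcheck -x cluster/gce/util.sh

# or run the repo-wide check, if hack/verify-shellcheck.sh exists in this checkout
hack/verify-shellcheck.sh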