Fix shellcheck warnings/errors in cluster/gce/upgrade.sh (#88576)

* Fix shellcheck warnings/errors in cluster/gce/upgrade.sh

* Update cluster/gce/upgrade.sh

Co-authored-by: Slava Semushin <slava.semushin@gmail.com>
Jie Shen 2020-05-01 12:26:06 +08:00 committed by GitHub
parent f372c5417b
commit c71a25e912
2 changed files with 50 additions and 38 deletions


@@ -165,7 +165,7 @@ function prepare-upgrade() {
 # ZONE
 function get-node-env() {
   # TODO(zmerlynn): Make this more reliable with retries.
-  gcloud compute --project ${PROJECT} ssh --zone ${ZONE} ${NODE_NAMES[0]} --command \
+  gcloud compute --project "${PROJECT}" ssh --zone "${ZONE}" "${NODE_NAMES[0]}" --command \
   "curl --fail --silent -H 'Metadata-Flavor: Google' \
   'http://metadata/computeMetadata/v1/instance/attributes/kube-env'" 2>/dev/null
 }
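
Note on the change above: unquoted expansions like ${ZONE} undergo word splitting and globbing (shellcheck SC2086). A minimal sketch of the failure mode, using a hypothetical malformed value:

    zone='us-central1 a'       # hypothetical value containing a space
    printf '<%s>\n' ${zone}    # unquoted: splits into <us-central1> and <a>
    printf '<%s>\n' "${zone}"  # quoted: one argument, <us-central1 a>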
@@ -241,24 +241,31 @@ function prepare-node-upgrade() {
   echo "== Preparing node upgrade (to ${KUBE_VERSION}). ==" >&2
   setup-base-image
-  SANITIZED_VERSION=$(echo ${KUBE_VERSION} | sed 's/[\.\+]/-/g')
+  SANITIZED_VERSION="${KUBE_VERSION//[\.\+]/-}"
   # TODO(zmerlynn): Refactor setting scope flags.
   local scope_flags=
   if [ -n "${NODE_SCOPES}" ]; then
     scope_flags="--scopes ${NODE_SCOPES}"
   else
+    # shellcheck disable=SC2034 # 'scope_flags' is used by upstream
     scope_flags="--no-scopes"
   fi
   # Get required node env vars from exiting template.
-  local node_env=$(get-node-env)
+  local node_env
+  node_env=$(get-node-env)
   KUBE_PROXY_TOKEN=$(get-env-val "${node_env}" "KUBE_PROXY_TOKEN")
+  export KUBE_PROXY_TOKEN
   NODE_PROBLEM_DETECTOR_TOKEN=$(get-env-val "${node_env}" "NODE_PROBLEM_DETECTOR_TOKEN")
   CA_CERT_BASE64=$(get-env-val "${node_env}" "CA_CERT")
+  export CA_CERT_BASE64
   EXTRA_DOCKER_OPTS=$(get-env-val "${node_env}" "EXTRA_DOCKER_OPTS")
+  export EXTRA_DOCKER_OPTS
   KUBELET_CERT_BASE64=$(get-env-val "${node_env}" "KUBELET_CERT")
+  export KUBELET_CERT_BASE64
   KUBELET_KEY_BASE64=$(get-env-val "${node_env}" "KUBELET_KEY")
+  export KUBELET_KEY_BASE64
   upgrade-node-env
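
Two fixes above are worth unpacking. The echo-to-sed pipeline becomes a bash pattern substitution, ${KUBE_VERSION//[\.\+]/-}, which needs no subshell; and the new export lines are presumably there because these variables are consumed outside this function (exporting also satisfies shellcheck's unused-variable check, SC2034). A quick sketch of the substitution, with a hypothetical version string:

    KUBE_VERSION='v1.19.0-alpha.2+abc123'  # hypothetical
    # ${var//pattern/replacement} replaces every '.' and '+' with '-'
    echo "${KUBE_VERSION//[\.\+]/-}"       # v1-19-0-alpha-2-abc123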
@@ -268,7 +275,8 @@ function prepare-node-upgrade() {
   # TODO(zmerlynn): Get configure-vm script from ${version}. (Must plumb this
   # through all create-linux-node-instance-template implementations).
-  local template_name=$(get-template-name-from-version ${SANITIZED_VERSION} ${NODE_INSTANCE_PREFIX})
+  local template_name
+  template_name=$(get-template-name-from-version "${SANITIZED_VERSION}" "${NODE_INSTANCE_PREFIX}")
   create-linux-node-instance-template "${template_name}"
   # The following is echo'd so that callers can get the template name.
   echo "Instance template name: ${template_name}"
@@ -281,7 +289,8 @@ function upgrade-node-env() {
   # the original node.
   if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" && "${NODE_PROBLEM_DETECTOR_TOKEN:-}" == "" ]]; then
     detect-master
-    local master_env=$(get-master-env)
+    local master_env
+    master_env=$(get-master-env)
     NODE_PROBLEM_DETECTOR_TOKEN=$(get-env-val "${master_env}" "NODE_PROBLEM_DETECTOR_TOKEN")
   fi
 }
@@ -293,7 +302,8 @@ function upgrade-node-env() {
 function do-single-node-upgrade() {
   local -r instance="$1"
   local kubectl_rc
-  local boot_id=$("${KUBE_ROOT}/cluster/kubectl.sh" get node "${instance}" --output=jsonpath='{.status.nodeInfo.bootID}' 2>&1) && kubectl_rc=$? || kubectl_rc=$?
+  local boot_id
+  boot_id=$("${KUBE_ROOT}/cluster/kubectl.sh" get node "${instance}" --output=jsonpath='{.status.nodeInfo.bootID}' 2>&1) && kubectl_rc=$? || kubectl_rc=$?
   if [[ "${kubectl_rc}" != 0 ]]; then
     echo "== FAILED to get bootID ${instance} =="
     echo "${boot_id}"
@@ -313,7 +323,8 @@ function do-single-node-upgrade() {
   # Recreate instance
   echo "== Recreating instance ${instance}. ==" >&2
   local recreate_rc
-  local recreate=$(gcloud compute instance-groups managed recreate-instances "${group}" \
+  local recreate
+  recreate=$(gcloud compute instance-groups managed recreate-instances "${group}" \
     --project="${PROJECT}" \
     --zone="${ZONE}" \
     --instances="${instance}" 2>&1) && recreate_rc=$? || recreate_rc=$?
@@ -329,7 +340,8 @@ function do-single-node-upgrade() {
   # it is a best effort approximation.
   echo "== Waiting for new node to be added to k8s. ==" >&2
   while true; do
-    local new_boot_id=$("${KUBE_ROOT}/cluster/kubectl.sh" get node "${instance}" --output=jsonpath='{.status.nodeInfo.bootID}' 2>&1) && kubectl_rc=$? || kubectl_rc=$?
+    local new_boot_id
+    new_boot_id=$("${KUBE_ROOT}/cluster/kubectl.sh" get node "${instance}" --output=jsonpath='{.status.nodeInfo.bootID}' 2>&1) && kubectl_rc=$? || kubectl_rc=$?
     if [[ "${kubectl_rc}" != 0 ]]; then
       echo "== FAILED to get node ${instance} =="
       echo "${boot_id}"
@@ -346,7 +358,8 @@ function do-single-node-upgrade() {
   # Wait for the node to have Ready=True.
   echo "== Waiting for ${instance} to become ready. ==" >&2
   while true; do
-    local ready=$("${KUBE_ROOT}/cluster/kubectl.sh" get node "${instance}" --output='jsonpath={.status.conditions[?(@.type == "Ready")].status}')
+    local ready
+    ready=$("${KUBE_ROOT}/cluster/kubectl.sh" get node "${instance}" --output='jsonpath={.status.conditions[?(@.type == "Ready")].status}')
     if [[ "${ready}" != 'True' ]]; then
       echo "Node ${instance} is still not ready: Ready=${ready}"
     else
@@ -374,14 +387,14 @@ function do-node-upgrade() {
   # Do the actual upgrade.
   # NOTE(zmerlynn): If you are changing this gcloud command, update
   # test/e2e/cluster_upgrade.go to match this EXACTLY.
-  local template_name=$(get-template-name-from-version ${SANITIZED_VERSION} ${NODE_INSTANCE_PREFIX})
+  local template_name
+  template_name=$(get-template-name-from-version "${SANITIZED_VERSION}" "${NODE_INSTANCE_PREFIX}")
   local old_templates=()
-  local updates=()
-  for group in ${INSTANCE_GROUPS[@]}; do
-    old_templates+=($(gcloud compute instance-groups managed list \
+  for group in "${INSTANCE_GROUPS[@]}"; do
+    while IFS='' read -r line; do old_templates+=("$line"); done < <(gcloud compute instance-groups managed list \
      --project="${PROJECT}" \
      --filter="name ~ '${group}' AND zone:(${ZONE})" \
-     --format='value(instanceTemplate)' || true))
+     --format='value(instanceTemplate)' || true)
    set_instance_template_out=$(gcloud compute instance-groups managed set-instance-template "${group}" \
      --template="${template_name}" \
      --project="${PROJECT}" \
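
The while-read rewrite is the usual fix for shellcheck SC2207: arr+=( $(cmd) ) word-splits and glob-expands the command output, breaking any element that contains whitespace. Reading line by line keeps one array element per output line (mapfile -t is the bash 4+ shorthand):

    unsafe=( $(printf 'a b\nc\n') )   # 3 elements: 'a', 'b', 'c' (SC2207)
    safe=()
    while IFS='' read -r line; do safe+=("$line"); done < <(printf 'a b\nc\n')
    echo "${#unsafe[@]} vs ${#safe[@]}"  # prints: 3 vs 2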
@@ -392,20 +405,20 @@ function do-node-upgrade() {
      return ${set_instance_template_rc}
    fi
    instances=()
-   instances+=($(gcloud compute instance-groups managed list-instances "${group}" \
+   while IFS='' read -r line; do instances+=("$line"); done < <(gcloud compute instance-groups managed list-instances "${group}" \
      --format='value(instance)' \
      --project="${PROJECT}" \
-     --zone="${ZONE}" 2>&1)) && list_instances_rc=$? || list_instances_rc=$?
+     --zone="${ZONE}" 2>&1) && list_instances_rc=$? || list_instances_rc=$?
    if [[ "${list_instances_rc}" != 0 ]]; then
      echo "== FAILED to list instances in group ${group} =="
-     echo "${instances}"
+     echo "${instances[@]}"
      return ${list_instances_rc}
    fi
    process_count_left=${node_upgrade_parallelism}
    pids=()
    ret_code_sum=0  # Should stay 0 in the loop iff all parallel node upgrades succeed.
-   for instance in ${instances[@]}; do
+   for instance in "${instances[@]}"; do
      do-single-node-upgrade "${instance}" & pids+=("$!")
      # We don't want to run more than ${node_upgrade_parallelism} upgrades at a time,
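
A related array fix above: echo "${instances}" prints only element 0 (shellcheck SC2128), which is why the error path now uses "${instances[@]}":

    arr=('one' 'two three')
    echo "${arr}"     # one            - element 0 only (SC2128)
    echo "${arr[@]}"  # one two three  - every element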
@@ -415,7 +428,7 @@ function do-node-upgrade() {
      if [[ process_count_left -eq 0 || "${instance}" == "${instances[-1]}" ]]; then
        # Wait for each of the parallel node upgrades to finish.
        for pid in "${pids[@]}"; do
-         wait $pid
+         wait "$pid"
          ret_code_sum=$(( ret_code_sum + $? ))
        done
        # Return even if at least one of the node upgrades failed.
@@ -430,7 +443,7 @@ function do-node-upgrade() {
   # Remove the old templates.
   echo "== Deleting old templates in ${PROJECT}. ==" >&2
-  for tmpl in ${old_templates[@]}; do
+  for tmpl in "${old_templates[@]}"; do
    gcloud compute instance-templates delete \
      --quiet \
      --project="${PROJECT}" \
@@ -455,11 +468,11 @@ function update-coredns-config() {
   # Get the new installed CoreDNS version
   echo "Waiting for CoreDNS to update"
-  until [[ $(${KUBE_ROOT}/cluster/kubectl.sh -n kube-system get deployment coredns -o=jsonpath='{$.metadata.resourceVersion}') -ne ${COREDNS_DEPLOY_RESOURCE_VERSION} ]]; do
+  until [[ $("${KUBE_ROOT}"/cluster/kubectl.sh -n kube-system get deployment coredns -o=jsonpath='{$.metadata.resourceVersion}') -ne ${COREDNS_DEPLOY_RESOURCE_VERSION} ]]; do
    sleep 1
   done
   echo "Fetching the latest installed CoreDNS version"
-  NEW_COREDNS_VERSION=$(${KUBE_ROOT}/cluster/kubectl.sh -n kube-system get deployment coredns -o=jsonpath='{$.spec.template.spec.containers[:1].image}' | cut -d ":" -f 2)
+  NEW_COREDNS_VERSION=$("${KUBE_ROOT}"/cluster/kubectl.sh -n kube-system get deployment coredns -o=jsonpath='{$.spec.template.spec.containers[:1].image}' | cut -d ":" -f 2)
   case "$(uname -m)" in
    x86_64*)
@@ -502,37 +515,37 @@ function update-coredns-config() {
    # Download the CoreDNS migration tool
    echo "== Downloading the CoreDNS migration tool =="
-   wget -P ${download_dir} "https://github.com/coredns/corefile-migration/releases/download/v1.0.6/corefile-tool-${host_arch}" >/dev/null 2>&1
-   local -r checkSHA=$(sha256sum ${download_dir}/corefile-tool-${host_arch} | cut -d " " -f 1)
+   wget -P "${download_dir}" "https://github.com/coredns/corefile-migration/releases/download/v1.0.6/corefile-tool-${host_arch}" >/dev/null 2>&1
+   local -r checkSHA=$(sha256sum "${download_dir}/corefile-tool-${host_arch}" | cut -d " " -f 1)
    if [[ "${checkSHA}" != "${corefile_tool_SHA}" ]]; then
      echo "!!! CheckSum for the CoreDNS migration tool did not match !!!" >&2
      exit 1
    fi
-   chmod +x ${download_dir}/corefile-tool-${host_arch}
+   chmod +x "${download_dir}/corefile-tool-${host_arch}"
    # Migrate the CoreDNS ConfigMap depending on whether it is being downgraded or upgraded.
-   ${KUBE_ROOT}/cluster/kubectl.sh -n kube-system get cm coredns -o jsonpath='{.data.Corefile}' > ${download_dir}/Corefile-old
-   if test "$(printf '%s\n' ${CURRENT_COREDNS_VERSION} ${NEW_COREDNS_VERSION} | sort -V | head -n 1)" != ${NEW_COREDNS_VERSION}; then
+   "${KUBE_ROOT}/cluster/kubectl.sh" -n kube-system get cm coredns -o jsonpath='{.data.Corefile}' > "${download_dir}/Corefile-old"
+   if test "$(printf '%s\n' "${CURRENT_COREDNS_VERSION}" "${NEW_COREDNS_VERSION}" | sort -V | head -n 1)" != "${NEW_COREDNS_VERSION}"; then
      echo "== Upgrading the CoreDNS ConfigMap =="
-     ${download_dir}/corefile-tool-${host_arch} migrate --from ${CURRENT_COREDNS_VERSION} --to ${NEW_COREDNS_VERSION} --corefile ${download_dir}/Corefile-old > ${download_dir}/Corefile
-     ${KUBE_ROOT}/cluster/kubectl.sh -n kube-system create configmap coredns --from-file ${download_dir}/Corefile -o yaml --dry-run=client | ${KUBE_ROOT}/cluster/kubectl.sh apply -f -
+     "${download_dir}/corefile-tool-${host_arch}" migrate --from "${CURRENT_COREDNS_VERSION}" --to "${NEW_COREDNS_VERSION}" --corefile "${download_dir}/Corefile-old" > "${download_dir}/Corefile"
+     "${KUBE_ROOT}/cluster/kubectl.sh" -n kube-system create configmap coredns --from-file "${download_dir}/Corefile" -o yaml --dry-run=client | "${KUBE_ROOT}/cluster/kubectl.sh" apply -f -
    else
      # In case of a downgrade, a custom CoreDNS Corefile will be overwritten by a default Corefile. In that case,
      # the user will need to manually modify the resulting (default) Corefile after the downgrade is complete.
      echo "== Applying the latest default CoreDNS configuration =="
-     gcloud compute --project ${PROJECT} scp --zone ${ZONE} ${MASTER_NAME}:${coredns_addon_path}/coredns.yaml ${download_dir}/coredns-manifest.yaml > /dev/null
-     ${KUBE_ROOT}/cluster/kubectl.sh apply -f ${download_dir}/coredns-manifest.yaml
+     gcloud compute --project "${PROJECT}" scp --zone "${ZONE}" "${MASTER_NAME}:${coredns_addon_path}/coredns.yaml" "${download_dir}/coredns-manifest.yaml" > /dev/null
+     "${KUBE_ROOT}/cluster/kubectl.sh" apply -f "${download_dir}/coredns-manifest.yaml"
    fi
    echo "== The CoreDNS Config has been updated =="
 }
 echo "Fetching the previously installed CoreDNS version"
-CURRENT_COREDNS_VERSION=$(${KUBE_ROOT}/cluster/kubectl.sh -n kube-system get deployment coredns -o=jsonpath='{$.spec.template.spec.containers[:1].image}' | cut -d ":" -f 2)
-COREDNS_DEPLOY_RESOURCE_VERSION=$(${KUBE_ROOT}/cluster/kubectl.sh -n kube-system get deployment coredns -o=jsonpath='{$.metadata.resourceVersion}')
+CURRENT_COREDNS_VERSION=$("${KUBE_ROOT}/cluster/kubectl.sh" -n kube-system get deployment coredns -o=jsonpath='{$.spec.template.spec.containers[:1].image}' | cut -d ":" -f 2)
+COREDNS_DEPLOY_RESOURCE_VERSION=$("${KUBE_ROOT}/cluster/kubectl.sh" -n kube-system get deployment coredns -o=jsonpath='{$.metadata.resourceVersion}')
 master_upgrade=true
 node_upgrade=true
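
A side note on the version check above: printf '%s\n' A B | sort -V | head -n 1 selects the lower of two version strings, so the migrate branch runs only when the new CoreDNS version sorts strictly higher than the current one, i.e. on upgrade:

    printf '%s\n' '1.6.9' '1.6.10' | sort -V | head -n 1  # 1.6.9 (a plain lexical sort would pick 1.6.10)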
@@ -542,7 +555,7 @@ env_os_distro=false
 node_upgrade_parallelism=1
 while getopts ":MNPlcho" opt; do
-  case ${opt} in
+  case "${opt}" in
    M)
      node_upgrade=false
      ;;
@@ -603,7 +616,7 @@ if [[ -z "${STORAGE_MEDIA_TYPE:-}" ]] && [[ "${STORAGE_BACKEND:-}" != "etcd2" ]]
   echo "export STORAGE_MEDIA_TYPE=application/json"
   echo ""
   if [ -t 0 ] && [ -t 1 ]; then
-    read -p "Would you like to continue with the new default, and lose the ability to downgrade to etcd2? [y/N] " confirm
+    read -r -p "Would you like to continue with the new default, and lose the ability to downgrade to etcd2? [y/N] " confirm
    if [[ "${confirm}" != "y" ]]; then
      exit 1
    fi
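
The added -r is shellcheck SC2162: without it, read treats backslashes in the input as escape characters and drops them. Harmless for a y/N prompt, but the habit prevents real mangling:

    printf 'C:\\node\n' | { read    line; echo "${line}"; }  # C:node  (backslash eaten)
    printf 'C:\\node\n' | { read -r line; echo "${line}"; }  # C:\node (preserved)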
@@ -638,7 +651,7 @@ if [[ "${master_upgrade}" == "true" ]]; then
   echo "In all cases, it is strongly recommended to have an etcd backup before upgrading."
   echo
   if [ -t 0 ] && [ -t 1 ]; then
-    read -p "Continue with default etcd version, which might upgrade etcd? [y/N] " confirm
+    read -r -p "Continue with default etcd version, which might upgrade etcd? [y/N] " confirm
    if [[ "${confirm}" != "y" ]]; then
      exit 1
    fi
@@ -652,7 +665,7 @@ fi
 print-node-version-info "Pre-Upgrade"
 if [[ "${local_binaries}" == "false" ]]; then
-  set_binary_version ${1}
+  set_binary_version "${1}"
 fi
 prepare-upgrade


@@ -1,6 +1,5 @@
 ./cluster/gce/gci/configure-helper.sh
 ./cluster/gce/gci/configure.sh
 ./cluster/gce/gci/master-helper.sh
-./cluster/gce/upgrade.sh
 ./cluster/gce/util.sh
 ./cluster/log-dump/log-dump.sh