Fix some shellcheck warnings/errors in cluster/gce/util.sh
This patch splits declaration and assignment for local variables where shellcheck warns about the risk of masked return values. Some missing exports were also added, and an unused variable was removed.

Signed-off-by: Joakim Roubert <joakim.roubert@axis.com>
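For context: the warning behind most of these changes is shellcheck SC2155, "Declare and assign separately to avoid masking return values". Writing local var=$(cmd) always yields the exit status of local itself (which is 0), so a failure of cmd is invisible to $? and to set -e. A minimal before/after sketch; the get-zone helper is hypothetical, purely for illustration:

  # Before: if get-zone fails, its non-zero exit status is
  # swallowed by `local`, which itself returns 0.
  local zone=$(get-zone)

  # After: declaration and assignment are split, so $? on the
  # assignment line reflects get-zone's exit status.
  local zone
  zone=$(get-zone)

The added export statements (e.g. export NODE_NAMES) serve a different purpose: they make the variables visible to child processes, and shellcheck stops flagging them as unused (SC2034) when they are consumed outside the file that sets them.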
parent efb41ea0fb
commit 251c4111d5
--- a/cluster/gce/util.sh
+++ b/cluster/gce/util.sh
@@ -51,10 +51,10 @@ if [[ "${MASTER_OS_DISTRIBUTION}" == "gci" ]]; then
   if [[ "${GCI_VERSION}" == "cos"* ]]; then
     DEFAULT_GCI_PROJECT=cos-cloud
   fi
-  MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-${DEFAULT_GCI_PROJECT}}
+  export MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-${DEFAULT_GCI_PROJECT}}
   # If the master image is not set, we use the latest GCI image.
   # Otherwise, we respect whatever is set by the user.
-  MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-${GCI_VERSION}}
+  export MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-${GCI_VERSION}}
 fi
 
 # Sets node image based on the specified os distro. Currently this function only
@@ -332,7 +332,6 @@ function upload-tars() {
 
   local server_binary_tar_urls=()
-  local node_binary_tar_urls=()
   local kube_manifest_tar_urls=()
 
   for region in "${PREFERRED_REGION[@]}"; do
     suffix="-${region}"
@@ -415,6 +414,7 @@ function detect-node-names() {
   if [[ -n "${HEAPSTER_MACHINE_TYPE:-}" ]]; then
     NODE_NAMES+=("${NODE_INSTANCE_PREFIX}-heapster")
   fi
+  export NODE_NAMES
   WINDOWS_NODE_NAMES=()
   if [[ -n "${WINDOWS_INSTANCE_GROUPS[@]:-}" ]]; then
     for group in "${WINDOWS_INSTANCE_GROUPS[@]}"; do
@@ -423,6 +423,7 @@ function detect-node-names() {
         --format='value(instance)'))
     done
   fi
+  export WINDOWS_NODE_NAMES
 
   echo "INSTANCE_GROUPS=${INSTANCE_GROUPS[*]:-}" >&2
   echo "NODE_NAMES=${NODE_NAMES[*]:-}" >&2
@@ -440,7 +441,8 @@ function detect-nodes() {
   detect-node-names
   KUBE_NODE_IP_ADDRESSES=()
   for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
-    local node_ip=$(gcloud compute instances describe --project "${PROJECT}" --zone "${ZONE}" \
+    local node_ip
+    node_ip=$(gcloud compute instances describe --project "${PROJECT}" --zone "${ZONE}" \
       "${NODE_NAMES[$i]}" --format='value(networkInterfaces[0].accessConfigs[0].natIP)')
     if [[ -z "${node_ip-}" ]] ; then
       echo "Did not find ${NODE_NAMES[$i]}" >&2
@@ -752,8 +754,9 @@ function construct-common-kubelet-flags {
 # Sets KUBELET_ARGS with the kubelet flags for Linux nodes.
 # $1: if 'true', we're rendering flags for a master, else a node
 function construct-linux-kubelet-flags {
-  local node_type="$1"
-  local flags="$(construct-common-kubelet-flags)"
+  local node_type=$1
+  local flags
+  flags=$(construct-common-kubelet-flags)
   # Keep in sync with CONTAINERIZED_MOUNTER_HOME in configure-helper.sh
   flags+=" --experimental-mounter-path=/home/kubernetes/containerized_mounter/mounter"
   flags+=" --experimental-check-node-capabilities-before-mount=true"
@@ -812,7 +815,8 @@ function construct-linux-kubelet-flags {
     flags+=" --non-masquerade-cidr=${NON_MASQUERADE_CIDR}"
   fi
   flags+=" --volume-plugin-dir=${VOLUME_PLUGIN_DIR}"
-  local node_labels="$(build-linux-node-labels ${node_type})"
+  local node_labels
+  node_labels=$(build-linux-node-labels "${node_type}")
   if [[ -n "${node_labels:-}" ]]; then
     flags+=" --node-labels=${node_labels}"
   fi
@@ -840,12 +844,14 @@ function construct-linux-kubelet-flags {
 # binPath parameter, and single-quotes get parsed as characters instead of
 # string delimiters.
 function construct-windows-kubelet-flags {
-  local flags="$(construct-common-kubelet-flags)"
+  local flags
+  flags=$(construct-common-kubelet-flags)
 
   # Note: NODE_KUBELET_TEST_ARGS is empty in typical kube-up runs.
   flags+=" ${NODE_KUBELET_TEST_ARGS:-}"
 
-  local node_labels="$(build-windows-node-labels)"
+  local node_labels
+  node_labels=$(build-windows-node-labels)
   if [[ -n "${node_labels:-}" ]]; then
     flags+=" --node-labels=${node_labels}"
   fi
@@ -2271,7 +2277,8 @@ function check-existing() {
 }
 
 function check-network-mode() {
-  local mode="$(gcloud compute networks list --filter="name=('${NETWORK}')" --project ${NETWORK_PROJECT} --format='value(x_gcloud_subnet_mode)' || true)"
+  local mode
+  mode=$(gcloud compute networks list --filter="name=('${NETWORK}')" --project "${NETWORK_PROJECT}" --format='value(x_gcloud_subnet_mode)' || true)
   # The deprecated field uses lower case. Convert to upper case for consistency.
   echo "$mode" | tr '[:lower:]' '[:upper:]'
 }
@@ -2363,7 +2370,8 @@ function create-subnetworks() {
 
   # Look for the alias subnet, it must exist and have a secondary
   # range configured.
-  local subnet=$(gcloud compute networks subnets describe \
+  local subnet
+  subnet=$(gcloud compute networks subnets describe \
     --project "${NETWORK_PROJECT}" \
     --region ${REGION} \
     ${IP_ALIAS_SUBNETWORK} 2>/dev/null)
@@ -2716,7 +2724,8 @@ function add-replica-to-etcd() {
 #
 # NOTE: Must be in sync with get-replica-name-regexp
 function set-existing-master() {
-  local existing_master=$(gcloud compute instances list \
+  local existing_master
+  existing_master=$(gcloud compute instances list \
     --project "${PROJECT}" \
     --filter "name ~ '$(get-replica-name-regexp)'" \
     --format "value(name,zone)" | head -n1)
@@ -2748,7 +2757,8 @@ function replicate-master() {
     --type "${MASTER_DISK_TYPE}" \
     --size "${MASTER_DISK_SIZE}"
 
-  local existing_master_replicas="$(get-all-replica-names)"
+  local existing_master_replicas
+  existing_master_replicas=$(get-all-replica-names)
   replicate-master-instance "${EXISTING_MASTER_ZONE}" "${EXISTING_MASTER_NAME}" "${existing_master_replicas}"
 
   # Add new replica to the load balancer.
@@ -2772,7 +2782,8 @@ function attach-external-ip() {
   local NAME=${1}
   local ZONE=${2}
   local IP_ADDR=${3:-}
-  local ACCESS_CONFIG_NAME=$(gcloud compute instances describe "${NAME}" \
+  local ACCESS_CONFIG_NAME
+  ACCESS_CONFIG_NAME=$(gcloud compute instances describe "${NAME}" \
    --project "${PROJECT}" --zone "${ZONE}" \
    --format="value(networkInterfaces[0].accessConfigs[0].name)")
   gcloud compute instances delete-access-config "${NAME}" \
@@ -2806,9 +2817,11 @@ function create-loadbalancer() {
     return
   fi
 
-  local EXISTING_MASTER_NAME="$(get-all-replica-names)"
-  local EXISTING_MASTER_ZONE=$(gcloud compute instances list "${EXISTING_MASTER_NAME}" \
-    --project "${PROJECT}" --format="value(zone)")
+  local EXISTING_MASTER_NAME
+  local EXISTING_MASTER_ZONE
+  EXISTING_MASTER_NAME=$(get-all-replica-names)
+  EXISTING_MASTER_ZONE=$(gcloud compute instances list "${EXISTING_MASTER_NAME}" \
+    --project "${PROJECT}" --format='value(zone)')
 
   echo "Creating load balancer in front of an already existing master in ${EXISTING_MASTER_ZONE}"
 
@@ -2850,7 +2863,8 @@ function attach-internal-master-ip() {
   local zone="${2}"
   local ip="${3}"
 
-  local aliases=$(gcloud compute instances describe "${name}" --project "${PROJECT}" --zone "${zone}" --flatten='networkInterfaces[0].aliasIpRanges[]' --format='value[separator=':'](networkInterfaces[0].aliasIpRanges.subnetworkRangeName,networkInterfaces[0].aliasIpRanges.ipCidrRange)' | sed 's/^://' | paste -s -d';' -)
+  local aliases
+  aliases=$(gcloud compute instances describe "${name}" --project "${PROJECT}" --zone "${zone}" --flatten='networkInterfaces[0].aliasIpRanges[]' --format='value[separator=':'](networkInterfaces[0].aliasIpRanges.subnetworkRangeName,networkInterfaces[0].aliasIpRanges.ipCidrRange)' | sed 's/^://' | paste -s -d';' -)
   aliases="${aliases:+${aliases};}${ip}/32"
   echo "Setting ${name}'s aliases to '${aliases}' (added ${ip})"
   # Attach ${ip} to ${name}
@@ -2870,7 +2884,8 @@ function detach-internal-master-ip() {
   local zone="${2}"
   local ip="${3}"
 
-  local aliases=$(gcloud compute instances describe "${name}" --project "${PROJECT}" --zone "${zone}" --flatten='networkInterfaces[0].aliasIpRanges[]' --format='value[separator=':'](networkInterfaces[0].aliasIpRanges.subnetworkRangeName,networkInterfaces[0].aliasIpRanges.ipCidrRange)' | sed 's/^://' | grep -v "${ip}" | paste -s -d';' -)
+  local aliases
+  aliases=$(gcloud compute instances describe "${name}" --project "${PROJECT}" --zone "${zone}" --flatten='networkInterfaces[0].aliasIpRanges[]' --format='value[separator=':'](networkInterfaces[0].aliasIpRanges.subnetworkRangeName,networkInterfaces[0].aliasIpRanges.ipCidrRange)' | sed 's/^://' | grep -v "${ip}" | paste -s -d';' -)
   echo "Setting ${name}'s aliases to '${aliases}' (removed ${ip})"
   # Detach ${MASTER_NAME}-internal-ip from ${name}
   gcloud compute instances network-interfaces update "${name}" --project "${PROJECT}" --zone "${zone}" --aliases="${aliases}"
@@ -2893,9 +2908,11 @@ function create-internal-loadbalancer() {
     return
   fi
 
-  local EXISTING_MASTER_NAME="$(get-all-replica-names)"
-  local EXISTING_MASTER_ZONE=$(gcloud compute instances list "${EXISTING_MASTER_NAME}" \
-    --project "${PROJECT}" --format="value(zone)")
+  local EXISTING_MASTER_NAME
+  local EXISTING_MASTER_ZONE
+  EXISTING_MASTER_NAME=$(get-all-replica-names)
+  EXISTING_MASTER_ZONE=$(gcloud compute instances list "${EXISTING_MASTER_NAME}" \
+    --project "${PROJECT}" --format='value(zone)')
 
   echo "Detaching ${KUBE_MASTER_INTERNAL_IP} from ${EXISTING_MASTER_NAME}/${EXISTING_MASTER_ZONE}"
   detach-internal-master-ip "${EXISTING_MASTER_NAME}" "${EXISTING_MASTER_ZONE}" "${KUBE_MASTER_INTERNAL_IP}"
@@ -3005,7 +3022,8 @@ function get-scope-flags() {
 function create-nodes-template() {
   echo "Creating nodes."
 
-  local scope_flags=$(get-scope-flags)
+  local scope_flags
+  scope_flags=$(get-scope-flags)
 
   write-linux-node-env
   write-windows-node-env
@@ -3290,15 +3308,18 @@ function check-cluster() {
     fi
   fi
 
-  local start_time=$(date +%s)
-  local curl_out=$(mktemp)
+  local start_time
+  local curl_out
+  start_time=$(date +%s)
+  curl_out=$(mktemp)
   kube::util::trap_add "rm -f ${curl_out}" EXIT
   until curl -vsS --cacert "${CERT_DIR}/pki/ca.crt" \
     -H "Authorization: Bearer ${KUBE_BEARER_TOKEN}" \
    ${secure} \
     --max-time 5 --fail \
     "https://${KUBE_MASTER_IP}/api/v1/pods?limit=100" > "${curl_out}" 2>&1; do
-    local elapsed=$(($(date +%s) - ${start_time}))
+    local elapsed
+    elapsed=$(($(date +%s) - start_time))
     if [[ ${elapsed} -gt ${KUBE_CLUSTER_INITIALIZATION_TIMEOUT} ]]; then
       echo -e "${color_red}Cluster failed to initialize within ${KUBE_CLUSTER_INITIALIZATION_TIMEOUT} seconds.${color_norm}" >&2
       echo "Last output from querying API server follows:" >&2
@@ -3393,7 +3414,8 @@ function kube-down() {
   # Get the name of the managed instance group template before we delete the
   # managed instance group. (The name of the managed instance group template may
   # change during a cluster upgrade.)
-  local templates=$(get-template "${PROJECT}")
+  local templates
+  templates=$(get-template "${PROJECT}")
 
   # Deliberately allow globbing, do not change unless a bug is found
   # shellcheck disable=SC2206
@@ -3492,7 +3514,8 @@ function kube-down() {
   fi
 
   # Check if this are any remaining master replicas.
-  local REMAINING_MASTER_COUNT=$(gcloud compute instances list \
+  local REMAINING_MASTER_COUNT
+  REMAINING_MASTER_COUNT=$(gcloud compute instances list \
     --project "${PROJECT}" \
     --filter="name ~ '$(get-replica-name-regexp)'" \
     --format "value(zone)" | wc -l)
@@ -3500,9 +3523,11 @@ function kube-down() {
   # In the replicated scenario, if there's only a single master left, we should also delete load balancer in front of it.
   if [[ "${REMAINING_MASTER_COUNT}" -eq 1 ]]; then
     detect-master
-    local REMAINING_REPLICA_NAME="$(get-all-replica-names)"
-    local REMAINING_REPLICA_ZONE=$(gcloud compute instances list "${REMAINING_REPLICA_NAME}" \
-      --project "${PROJECT}" --format="value(zone)")
+    local REMAINING_REPLICA_NAME
+    local REMAINING_REPLICA_ZONE
+    REMAINING_REPLICA_NAME=$(get-all-replica-names)
+    REMAINING_REPLICA_ZONE=$(gcloud compute instances list "${REMAINING_REPLICA_NAME}" \
+      --project "${PROJECT}" --format='value(zone)')
     if gcloud compute forwarding-rules describe "${MASTER_NAME}" --region "${REGION}" --project "${PROJECT}" &>/dev/null; then
       gcloud compute forwarding-rules delete \
         --project "${PROJECT}" \
@@ -3675,7 +3700,8 @@ function get-all-replica-names() {
 # MASTER_NAME
 function get-master-replicas-count() {
   detect-project
-  local num_masters=$(gcloud compute instances list \
+  local num_masters
+  num_masters=$(gcloud compute instances list \
     --project "${PROJECT}" \
     --filter="name ~ '$(get-replica-name-regexp)'" \
     --format "value(zone)" | wc -l)
@@ -3699,7 +3725,8 @@ function get-replica-name-regexp() {
 # Sets:
 # REPLICA_NAME
 function set-replica-name() {
-  local instances=$(gcloud compute instances list \
+  local instances
+  instances=$(gcloud compute instances list \
     --project "${PROJECT}" \
     --filter="name ~ '$(get-replica-name-regexp)'" \
     --format "value(name)")
@@ -3841,7 +3868,8 @@ function test-setup() {
 
   # Open up port 80 & 8080 so common containers on minions can be reached
   # TODO(roberthbailey): Remove this once we are no longer relying on hostPorts.
-  local start=`date +%s`
+  local start
+  start=$(date +%s)
   gcloud compute firewall-rules create \
     --project "${NETWORK_PROJECT}" \
     --target-tags "${NODE_TAG}" \
@@ -3886,7 +3914,8 @@ function test-teardown() {
       "${NODE_TAG}-http-alt" \
       "${NODE_TAG}-nodeports"
   if [[ ${MULTIZONE:-} == "true" && -n ${E2E_ZONES:-} ]]; then
-    local zones=( ${E2E_ZONES} )
+    local zones
+    while IFS= read -r line; do zones+=("$line"); done <<< "${E2E_ZONES}"
     # tear them down in reverse order, finally tearing down the master too.
     for ((zone_num=${#zones[@]}-1; zone_num>0; zone_num--)); do
       KUBE_GCE_ZONE="${zones[zone_num]}" KUBE_USE_EXISTING_MASTER="true" "${KUBE_ROOT}/cluster/kube-down.sh"
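A note on the final hunk: the old local zones=( ${E2E_ZONES} ) relies on unquoted word splitting, which shellcheck flags as SC2206 because the unquoted expansion is also subject to globbing; the replacement builds the array with a read loop instead. A standalone sketch of that pattern with a hypothetical zone list (note that, unlike the unquoted form, the read loop splits on newlines only, not on arbitrary whitespace):

  # Populate an array safely from a newline-separated string.
  E2E_ZONES=$'us-central1-a\nus-central1-b'    # hypothetical example value
  zones=()
  while IFS= read -r line; do zones+=("$line"); done <<< "${E2E_ZONES}"
  printf '%s\n' "${zones[@]}"                  # one zone per line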