Fix shellcheck w/e in cluster/gce/gci/configure-helper.sh

Change-Id: Ic8fca2509a7cb07f4170eaf25a878036d18ba51c
Signed-off-by: Joakim Roubert <joakimr@axis.com>
Joakim Roubert 2020-04-24 07:58:39 +02:00
parent 7942dca975
commit 80a8566a8c
2 changed files with 111 additions and 90 deletions


@ -41,7 +41,7 @@ function convert-manifest-params {
for flag in "${FLAGS[@]}"; do
params+="\n\"$flag\","
done
if [ ! -z $params ]; then
if [ -n "${params[*]}" ]; then
echo "${params::-1}" # drop trailing comma
fi
}
@ -200,7 +200,7 @@ function get-local-disk-num() {
local format="${2}"
localdisknum=0
if [[ ! -z "${NODE_LOCAL_SSDS_EXT:-}" ]]; then
if [[ -n "${NODE_LOCAL_SSDS_EXT:-}" ]]; then
IFS=";" read -r -a ssdgroups <<< "${NODE_LOCAL_SSDS_EXT:-}"
for ssdgroup in "${ssdgroups[@]}"; do
IFS="," read -r -a ssdopts <<< "${ssdgroup}"
@ -246,13 +246,16 @@ function get-or-generate-uuid(){
fi
# each line of the ssdmap looks like "${device} persistent-uuid"
if [[ ! -z $(grep ${device} ${ssdmap}) ]]; then
if grep -q "${device}" "${ssdmap}"; then
#create symlink based on saved uuid
local myuuid=$(grep ${device} ${ssdmap} | cut -d ' ' -f 2)
local myuuid
myuuid=$(grep "${device}" "${ssdmap}" | cut -d ' ' -f 2)
else
# generate new uuid and add it to the map
local myuuid=$(uuidgen)
if [[ ! ${?} -eq 0 ]]; then
local myuuid
myuuid=$(uuidgen)
# shellcheck disable=SC2181
if [[ $? -ne 0 ]]; then
echo "Failed to generate valid UUID with uuidgen" >&2
exit 2
fi
@ -270,8 +273,10 @@ function get-or-generate-uuid(){
#Formats the given device ($1) if needed and mounts it at given mount point
# ($2).
function safe-format-and-mount() {
local device="${1}"
local mountpoint="${2}"
local device
local mountpoint
device=$1
mountpoint=$2
# Format only if the disk is not already formatted.
if ! tune2fs -l "${device}" ; then
@ -288,8 +293,10 @@ function safe-format-and-mount() {
# Gets a devices UUID and bind mounts the device to mount location in
# /mnt/disks/by-id/
function unique-uuid-bind-mount(){
local mountpoint="${1}"
local actual_device="${2}"
local mountpoint
local actual_device
mountpoint=$1
actual_device=$2
# Trigger udev refresh so that newly formatted devices are propagated in by-uuid
udevadm control --reload-rules
@ -297,7 +304,8 @@ function unique-uuid-bind-mount(){
udevadm settle
# grep the exact match of actual device, prevents substring matching
local myuuid=$(ls -l /dev/disk/by-uuid/ | grep "/${actual_device}$" | tr -s ' ' | cut -d ' ' -f 9)
local myuuid
myuuid=$(find -L /dev/disk/by-uuid -maxdepth 1 -samefile /dev/"${actual_device}" -printf "%P\n")
# myuuid should be the uuid of the device as found in /dev/disk/by-uuid/
if [[ -z "${myuuid}" ]]; then
echo "Failed to get a uuid for device ${actual_device} when mounting." >&2
@ -340,7 +348,8 @@ function mount-ext(){
# TODO: Handle partitioned disks. Right now this code just ignores partitions
if [[ "${format}" == "fs" ]]; then
if [[ "${interface}" == "scsi" ]]; then
local actual_device=$(readlink -f "${ssd}" | cut -d '/' -f 3)
local actual_device
actual_device=$(readlink -f "${ssd}" | cut -d '/' -f 3)
# Error checking
if [[ "${actual_device}" != sd* ]]; then
echo "'actual_device' is not of the correct format. It must be the kernel name of the device, got ${actual_device} instead" >&2
@ -350,7 +359,8 @@ function mount-ext(){
else
# This path is required because the existing Google images do not
# expose NVMe devices in /dev/disk/by-id so we are using the /dev/nvme instead
local actual_device=$(echo ${ssd} | cut -d '/' -f 3)
local actual_device
actual_device=$(echo "${ssd}" | cut -d '/' -f 3)
# Error checking
if [[ "${actual_device}" != nvme* ]]; then
echo "'actual_device' is not of the correct format. It must be the kernel name of the device, got ${actual_device} instead" >&2
@ -362,7 +372,7 @@ function mount-ext(){
safe-format-and-mount "${ssd}" "${mountpoint}"
# We only do the bindmount if users are using the new local ssd request method
# see https://github.com/kubernetes/kubernetes/pull/53466#discussion_r146431894
if [[ ! -z "${NODE_LOCAL_SSDS_EXT:-}" ]]; then
if [[ -n "${NODE_LOCAL_SSDS_EXT:-}" ]]; then
unique-uuid-bind-mount "${mountpoint}" "${actual_device}"
fi
elif [[ "${format}" == "block" ]]; then
@ -381,7 +391,8 @@ function ensure-local-ssds() {
local i=0
for ssd in /dev/disk/by-id/google-local-ssd-*; do
if [ -e "${ssd}" ]; then
local devicenum=`echo ${ssd} | sed -e 's/\/dev\/disk\/by-id\/google-local-ssd-\([0-9]*\)/\1/'`
local devicenum
devicenum=$(echo "${ssd}" | sed -e 's/\/dev\/disk\/by-id\/google-local-ssd-\([0-9]*\)/\1/')
if [[ "${i}" -lt "${scsiblocknum}" ]]; then
mount-ext "${ssd}" "${devicenum}" "scsi" "block"
else
@ -410,8 +421,9 @@ function ensure-local-ssds() {
if [ -e "${ssd}" ]; then
# This workaround to find if the NVMe device is a disk is required because
# the existing Google images does not expose NVMe devices in /dev/disk/by-id
if [[ `udevadm info --query=property --name=${ssd} | grep DEVTYPE | sed "s/DEVTYPE=//"` == "disk" ]]; then
local devicenum=`echo ${ssd} | sed -e 's/\/dev\/nvme0n\([0-9]*\)/\1/'`
if [[ $(udevadm info --query=property --name="${ssd}" | grep DEVTYPE | sed "s/DEVTYPE=//") == "disk" ]]; then
local devicenum
devicenum=$(echo "${ssd}" | sed -e 's/\/dev\/nvme0n\([0-9]*\)/\1/')
if [[ "${i}" -lt "${nvmeblocknum}" ]]; then
mount-ext "${ssd}" "${devicenum}" "nvme" "block"
else
@ -506,7 +518,7 @@ function mount-master-pd {
# locations.
# Contains all the data stored in etcd.
mkdir -m 700 -p "${mount_point}/var/etcd"
install -d -m 700 "${mount_point}/var/etcd"
ln -s -f "${mount_point}/var/etcd" /var/etcd
mkdir -p /etc/srv
# Contains the dynamically generated apiserver auth certs and keys.
@ -528,8 +540,8 @@ function append_or_replace_prefixed_line {
local -r file="${1:-}"
local -r prefix="${2:-}"
local -r suffix="${3:-}"
local -r dirname="$(dirname ${file})"
local -r tmpfile="$(mktemp -t filtered.XXXX --tmpdir=${dirname})"
local -r dirname=$(dirname "${file}")
local -r tmpfile=$(mktemp -t filtered.XXXX --tmpdir="${dirname}")
touch "${file}"
awk -v pfx="${prefix}" 'substr($0,1,length(pfx)) != pfx { print }' "${file}" > "${tmpfile}"
@ -556,7 +568,7 @@ function create-node-pki {
CA_CERT_BUNDLE_PATH="${pki_dir}/ca-certificates.crt"
write-pki-data "${CA_CERT_BUNDLE}" "${CA_CERT_BUNDLE_PATH}"
if [[ ! -z "${KUBELET_CERT:-}" && ! -z "${KUBELET_KEY:-}" ]]; then
if [[ -n "${KUBELET_CERT:-}" && -n "${KUBELET_KEY:-}" ]]; then
KUBELET_CERT_PATH="${pki_dir}/kubelet.crt"
write-pki-data "${KUBELET_CERT}" "${KUBELET_CERT_PATH}"
@ -575,7 +587,7 @@ function create-master-pki {
write-pki-data "${CA_CERT}" "${CA_CERT_PATH}"
# this is not true on GKE
if [[ ! -z "${CA_KEY:-}" ]]; then
if [[ -n "${CA_KEY:-}" ]]; then
CA_KEY_PATH="${pki_dir}/ca.key"
write-pki-data "${CA_KEY}" "${CA_KEY_PATH}"
fi
@ -620,7 +632,7 @@ function create-master-pki {
SERVICEACCOUNT_KEY_PATH="${pki_dir}/serviceaccount.key"
write-pki-data "${SERVICEACCOUNT_KEY}" "${SERVICEACCOUNT_KEY_PATH}"
if [[ ! -z "${REQUESTHEADER_CA_CERT:-}" ]]; then
if [[ -n "${REQUESTHEADER_CA_CERT:-}" ]]; then
REQUESTHEADER_CA_CERT_PATH="${pki_dir}/aggr_ca.crt"
write-pki-data "${REQUESTHEADER_CA_CERT}" "${REQUESTHEADER_CA_CERT_PATH}"
@ -685,14 +697,15 @@ function create-master-auth {
fi
if [[ -n "${KONNECTIVITY_SERVER_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${KONNECTIVITY_SERVER_TOKEN}," "system:konnectivity-server,uid:system:konnectivity-server"
create-kubeconfig "konnectivity-server" ${KONNECTIVITY_SERVER_TOKEN}
create-kubeconfig "konnectivity-server" "${KONNECTIVITY_SERVER_TOKEN}"
fi
if [[ -n "${EXTRA_STATIC_AUTH_COMPONENTS:-}" ]]; then
# Create a static Bearer token and kubeconfig for extra, comma-separated components.
IFS="," read -r -a extra_components <<< "${EXTRA_STATIC_AUTH_COMPONENTS:-}"
for extra_component in "${extra_components[@]}"; do
local token="$(secure_random 32)"
local token
token="$(secure_random 32)"
append_or_replace_prefixed_line "${known_tokens_csv}" "${token}," "system:${extra_component},uid:system:${extra_component}"
create-kubeconfig "${extra_component}" "${token}"
done
@ -747,14 +760,15 @@ EOF
use_cloud_config="true"
if [[ -n "${NODE_TAGS:-}" ]]; then
# split NODE_TAGS into an array by comma.
IFS=',' read -r -a node_tags <<< ${NODE_TAGS}
IFS=',' read -r -a node_tags <<< "${NODE_TAGS}"
else
local -r node_tags="${NODE_INSTANCE_PREFIX}"
local -r node_tags=("${NODE_INSTANCE_PREFIX}")
fi
cat <<EOF >>/etc/gce.conf
node-instance-prefix = ${NODE_INSTANCE_PREFIX}
EOF
for tag in ${node_tags[@]}; do
for tag in "${node_tags[@]}"; do
cat <<EOF >>/etc/gce.conf
node-tags = ${tag}
EOF
@ -778,8 +792,8 @@ EOF
if [[ -n "${GCE_ALPHA_FEATURES:-}" ]]; then
use_cloud_config="true"
# split GCE_ALPHA_FEATURES into an array by comma.
IFS=',' read -r -a alpha_features <<< ${GCE_ALPHA_FEATURES}
for feature in ${alpha_features[@]}; do
IFS=',' read -r -a alpha_features <<< "${GCE_ALPHA_FEATURES}"
for feature in "${alpha_features[@]}"; do
cat <<EOF >>/etc/gce.conf
alpha-features = ${feature}
EOF
@ -1168,8 +1182,8 @@ function create-kubeconfig {
local component=$1
local token=$2
echo "Creating kubeconfig file for component ${component}"
mkdir -p /etc/srv/kubernetes/${component}
cat <<EOF >/etc/srv/kubernetes/${component}/kubeconfig
mkdir -p "/etc/srv/kubernetes/${component}"
cat <<EOF >"/etc/srv/kubernetes/${component}/kubeconfig"
apiVersion: v1
kind: Config
users:
@ -1237,7 +1251,7 @@ function create-master-kubelet-auth {
# set in the environment.
if [[ -n "${KUBELET_APISERVER:-}" && -n "${KUBELET_CERT:-}" && -n "${KUBELET_KEY:-}" ]]; then
REGISTER_MASTER_KUBELET="true"
create-kubelet-kubeconfig ${KUBELET_APISERVER}
create-kubelet-kubeconfig "${KUBELET_APISERVER}"
fi
}
@ -1359,12 +1373,10 @@ function assemble-docker-flags {
else
docker_opts+=" --log-level=warn"
fi
local use_net_plugin="true"
if [[ "${NETWORK_PROVIDER:-}" == "kubenet" || "${NETWORK_PROVIDER:-}" == "cni" ]]; then
# set docker0 cidr to private ip address range to avoid conflict with cbr0 cidr range
docker_opts+=" --bip=169.254.123.1/24"
else
use_net_plugin="false"
docker_opts+=" --bridge=cbr0"
fi
@ -1509,9 +1521,9 @@ EOF
# $2: the log owner uid to set for the log file.
# $3: the log owner gid to set for the log file.
function prepare-log-file {
touch $1
chmod 644 $1
chown "${2:-${LOG_OWNER_USER:-root}}":"${3:-${LOG_OWNER_GROUP:-root}}" $1
touch "$1"
chmod 644 "$1"
chown "${2:-${LOG_OWNER_USER:-root}}":"${3:-${LOG_OWNER_GROUP:-root}}" "$1"
}
# Prepares parameters for kube-proxy manifest.
@ -1535,13 +1547,11 @@ function prepare-kube-proxy-manifest-variables {
# https://github.com/kubernetes/kubernetes/pull/70398
local -r kernel_version=$(uname -r | cut -d\. -f1,2)
local conntrack_module="nf_conntrack"
if [[ $(printf "${kernel_version}\n4.18\n" | sort -V | tail -1) == "4.18" ]]; then
if [[ $(printf "%s\n4.18\n" "${kernel_version}" | sort -V | tail -1) == "4.18" ]]; then
conntrack_module="nf_conntrack_ipv4"
fi
sudo modprobe -a ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh ${conntrack_module}
if [[ $? -eq 0 ]];
then
if sudo modprobe -a ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh ${conntrack_module}; then
params+=" --proxy-mode=ipvs"
else
# If IPVS modules are not present, make sure the node does not come up as
@ -1561,18 +1571,18 @@ function prepare-kube-proxy-manifest-variables {
kube_cache_mutation_detector_env_name="- name: KUBE_CACHE_MUTATION_DETECTOR"
kube_cache_mutation_detector_env_value="value: \"${ENABLE_CACHE_MUTATION_DETECTOR}\""
fi
sed -i -e "s@{{kubeconfig}}@${kubeconfig}@g" ${src_file}
sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${kube_docker_registry}@g" ${src_file}
sed -i -e "s@{{pillar\['kube-proxy_docker_tag'\]}}@${kube_proxy_docker_tag}@g" ${src_file}
sed -i -e "s@{{params}}@${params}@g" ${src_file}
sed -i -e "s@{{container_env}}@${container_env}@g" ${src_file}
sed -i -e "s@{{kube_cache_mutation_detector_env_name}}@${kube_cache_mutation_detector_env_name}@g" ${src_file}
sed -i -e "s@{{kube_cache_mutation_detector_env_value}}@${kube_cache_mutation_detector_env_value}@g" ${src_file}
sed -i -e "s@{{ cpurequest }}@100m@g" ${src_file}
sed -i -e "s@{{api_servers_with_port}}@${api_servers}@g" ${src_file}
sed -i -e "s@{{kubernetes_service_host_env_value}}@${KUBERNETES_MASTER_NAME}@g" ${src_file}
sed -i -e "s@{{kubeconfig}}@${kubeconfig}@g" "${src_file}"
sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${kube_docker_registry}@g" "${src_file}"
sed -i -e "s@{{pillar\['kube-proxy_docker_tag'\]}}@${kube_proxy_docker_tag}@g" "${src_file}"
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
sed -i -e "s@{{container_env}}@${container_env}@g" "${src_file}"
sed -i -e "s@{{kube_cache_mutation_detector_env_name}}@${kube_cache_mutation_detector_env_name}@g" "${src_file}"
sed -i -e "s@{{kube_cache_mutation_detector_env_value}}@${kube_cache_mutation_detector_env_value}@g" "${src_file}"
sed -i -e "s@{{ cpurequest }}@100m@g" "${src_file}"
sed -i -e "s@{{api_servers_with_port}}@${api_servers}@g" "${src_file}"
sed -i -e "s@{{kubernetes_service_host_env_value}}@${KUBERNETES_MASTER_NAME}@g" "${src_file}"
if [[ -n "${CLUSTER_IP_RANGE:-}" ]]; then
sed -i -e "s@{{cluster_cidr}}@--cluster-cidr=${CLUSTER_IP_RANGE}@g" ${src_file}
sed -i -e "s@{{cluster_cidr}}@--cluster-cidr=${CLUSTER_IP_RANGE}@g" "${src_file}"
fi
}
@ -1595,7 +1605,8 @@ function start-kube-proxy {
# $5: pod name, which should be either etcd or etcd-events
function prepare-etcd-manifest {
local host_name=${ETCD_HOSTNAME:-$(hostname -s)}
local host_ip=$(${PYTHON} -c "import socket;print(socket.gethostbyname(\"${host_name}\"))")
local host_ip
host_ip=$(${PYTHON} -c "import socket;print(socket.gethostbyname(\"${host_name}\"))")
local etcd_cluster=""
local cluster_state="new"
local etcd_protocol="http"
@ -1642,8 +1653,8 @@ function prepare-etcd-manifest {
sed -i -e "s@{{ *liveness_probe_initial_delay *}}@${ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC:-15}@g" "${temp_file}"
sed -i -e "s@{{ *listen_client_ip *}}@${ETCD_LISTEN_CLIENT_IP:-127.0.0.1}@g" "${temp_file}"
# Get default storage backend from manifest file.
local -r default_storage_backend=$(cat "${temp_file}" | \
grep -o "{{ *pillar\.get('storage_backend', '\(.*\)') *}}" | \
local -r default_storage_backend=$( \
grep -o "{{ *pillar\.get('storage_backend', '\(.*\)') *}}" "${temp_file}" | \
sed -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@\1@g")
if [[ -n "${STORAGE_BACKEND:-}" ]]; then
sed -i -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@${STORAGE_BACKEND}@g" "${temp_file}"
@ -1789,10 +1800,14 @@ function compute-master-manifest-variables {
FLEXVOLUME_HOSTPATH_VOLUME="{ \"name\": \"flexvolumedir\", \"hostPath\": {\"path\": \"${VOLUME_PLUGIN_DIR}\"}},"
fi
INSECURE_PORT_MAPPING=""
INSECURE_PORT_MAPPING=''
if [[ "${ENABLE_APISERVER_INSECURE_PORT:-false}" == "true" ]]; then
INSECURE_PORT_MAPPING="{ \"name\": \"local\", \"containerPort\": 8080, \"hostPort\": 8080},"
# INSECURE_PORT_MAPPING is used by sed
# shellcheck disable=SC2089
INSECURE_PORT_MAPPING='{ "name": "local", "containerPort": 8080, "hostPort": 8080},'
fi
# shellcheck disable=SC2090
export INSECURE_PORT_MAPPING
}
# A helper function that bind mounts kubelet dirs for running mount in a chroot
@ -1859,10 +1874,11 @@ function run-kube-controller-manager-as-non-root {
# DOCKER_REGISTRY
function start-kube-controller-manager {
echo "Start kubernetes controller-manager"
create-kubeconfig "kube-controller-manager" ${KUBE_CONTROLLER_MANAGER_TOKEN}
create-kubeconfig "kube-controller-manager" "${KUBE_CONTROLLER_MANAGER_TOKEN}"
prepare-log-file /var/log/kube-controller-manager.log
# Calculate variables and assemble the command line.
local params="${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-"--v=2"} ${CONTROLLER_MANAGER_TEST_ARGS:-} ${CLOUD_CONFIG_OPT}"
local params
params="${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-"--v=2"} ${CONTROLLER_MANAGER_TEST_ARGS:-} ${CLOUD_CONFIG_OPT}"
params+=" --use-service-account-credentials"
params+=" --cloud-provider=gce"
params+=" --kubeconfig=/etc/srv/kubernetes/kube-controller-manager/kubeconfig"
@ -1934,7 +1950,7 @@ function start-kube-controller-manager {
sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
sed -i -e "s@{{pillar\['kube-controller-manager_docker_tag'\]}}@${kube_rc_docker_tag}@g" "${src_file}"
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
sed -i -e "s@{{container_env}}@${container_env}@g" ${src_file}
sed -i -e "s@{{container_env}}@${container_env}@g" "${src_file}"
sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}"
sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}"
sed -i -e "s@{{additional_cloud_config_mount}}@@g" "${src_file}"
@ -1965,10 +1981,11 @@ function start-kube-controller-manager {
# DOCKER_REGISTRY
function start-kube-scheduler {
echo "Start kubernetes scheduler"
create-kubeconfig "kube-scheduler" ${KUBE_SCHEDULER_TOKEN}
prepare-log-file /var/log/kube-scheduler.log ${KUBE_SCHEDULER_RUNASUSER:-2001} ${KUBE_SCHEDULER_RUNASGROUP:-2001}
create-kubeconfig "kube-scheduler" "${KUBE_SCHEDULER_TOKEN}"
prepare-log-file /var/log/kube-scheduler.log "${KUBE_SCHEDULER_RUNASUSER:-2001}" "${KUBE_SCHEDULER_RUNASGROUP:-2001}"
# Calculate variables and set them in the manifest.
local params
params="${SCHEDULER_TEST_LOG_LEVEL:-"--v=2"} ${SCHEDULER_TEST_ARGS:-}"
if [[ -n "${FEATURE_GATES:-}" ]]; then
params+=" --feature-gates=${FEATURE_GATES}"
@ -2014,13 +2031,14 @@ function start-cluster-autoscaler {
if [[ "${ENABLE_CLUSTER_AUTOSCALER:-}" == "true" ]]; then
echo "Start kubernetes cluster autoscaler"
setup-addon-manifests "addons" "rbac/cluster-autoscaler"
create-kubeconfig "cluster-autoscaler" ${KUBE_CLUSTER_AUTOSCALER_TOKEN}
create-kubeconfig "cluster-autoscaler" "${KUBE_CLUSTER_AUTOSCALER_TOKEN}"
prepare-log-file /var/log/cluster-autoscaler.log
# Remove salt comments and replace variables with values
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/cluster-autoscaler.manifest"
local params="${AUTOSCALER_MIG_CONFIG} ${CLOUD_CONFIG_OPT} ${AUTOSCALER_EXPANDER_CONFIG:---expander=price}"
local params
params="${AUTOSCALER_MIG_CONFIG}" "${CLOUD_CONFIG_OPT}" "${AUTOSCALER_EXPANDER_CONFIG:---expander=price}"
params+=" --kubeconfig=/etc/srv/kubernetes/cluster-autoscaler/kubeconfig"
# split the params into separate arguments passed to binary
@ -2098,7 +2116,7 @@ function get-metadata-value {
curl \
--retry 5 \
--retry-delay 3 \
${CURL_RETRY_CONNREFUSED} \
"${CURL_RETRY_CONNREFUSED}" \
--fail \
--silent \
-H 'Metadata-Flavor: Google' \
@ -2116,7 +2134,8 @@ function copy-manifests {
if [[ ! -d "${dst_dir}" ]]; then
mkdir -p "${dst_dir}"
fi
local files=$(find "${src_dir}" -maxdepth 1 -name "*.yaml")
local files
files=$(find "${src_dir}" -maxdepth 1 -name "*.yaml")
if [[ -n "${files}" ]]; then
cp "${src_dir}/"*.yaml "${dst_dir}"
fi
@ -2265,7 +2284,7 @@ function update-daemon-set-prometheus-to-sd-parameters {
# Removes all lines between two patterns (throws away prometheus-to-sd)
sed -i -e "/# BEGIN_PROMETHEUS_TO_SD/,/# END_PROMETHEUS_TO_SD/d" "$1"
else
update-prometheus-to-sd-parameters $1
update-prometheus-to-sd-parameters "$1"
fi
}
@ -2322,10 +2341,10 @@ function setup-fluentd {
sed -i -e "s@{{ fluentd_gcp_yaml_version }}@${fluentd_gcp_yaml_version}@g" "${fluentd_gcp_scaler_yaml}"
fluentd_gcp_version="${FLUENTD_GCP_VERSION:-1.6.17}"
sed -i -e "s@{{ fluentd_gcp_version }}@${fluentd_gcp_version}@g" "${fluentd_gcp_yaml}"
update-daemon-set-prometheus-to-sd-parameters ${fluentd_gcp_yaml}
start-fluentd-resource-update ${fluentd_gcp_yaml}
update-container-runtime ${fluentd_gcp_configmap_yaml}
update-node-journal ${fluentd_gcp_configmap_yaml}
update-daemon-set-prometheus-to-sd-parameters "${fluentd_gcp_yaml}"
start-fluentd-resource-update "${fluentd_gcp_yaml}"
update-container-runtime "${fluentd_gcp_configmap_yaml}"
update-node-journal "${fluentd_gcp_configmap_yaml}"
}
# Sets up the manifests of kube-dns for k8s addons.
@ -2338,7 +2357,7 @@ function setup-kube-dns-manifest {
cat > "${kubedns_file}" <<EOF
$CUSTOM_KUBE_DNS_YAML
EOF
update-prometheus-to-sd-parameters ${kubedns_file}
update-prometheus-to-sd-parameters "${kubedns_file}"
fi
# Replace the salt configurations with variable values.
sed -i -e "s@{{ *pillar\['dns_domain'\] *}}@${DNS_DOMAIN}@g" "${kubedns_file}"
@ -2401,8 +2420,8 @@ function start-kube-addons {
local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty"
local -r dst_dir="/etc/kubernetes/addons"
create-kubeconfig "addon-manager" ${ADDON_MANAGER_TOKEN}
prepare-log-file /var/log/kube-addon-manager.log ${KUBE_ADDON_MANAGER_RUNASUSER:-2002} ${KUBE_ADDON_MANAGER_RUNASGROUP:-2002}
create-kubeconfig "addon-manager" "${ADDON_MANAGER_TOKEN}"
prepare-log-file /var/log/kube-addon-manager.log "${KUBE_ADDON_MANAGER_RUNASUSER:-2002}" "${KUBE_ADDON_MANAGER_RUNASGROUP:-2002}"
# prep addition kube-up specific rbac objects
setup-addon-manifests "addons" "rbac/kubelet-api-auth"
@ -2429,8 +2448,8 @@ EOF
prepare-kube-proxy-manifest-variables "$src_dir/kube-proxy/kube-proxy-ds.yaml"
setup-addon-manifests "addons" "kube-proxy"
fi
if ([[ "${ENABLE_CLUSTER_LOGGING:-}" == "true" ]] &&
[[ "${LOGGING_DESTINATION:-}" == "gcp" ]]); then
if [[ "${ENABLE_CLUSTER_LOGGING:-}" == "true" ]] &&
[[ "${LOGGING_DESTINATION:-}" == "gcp" ]]; then
if [[ "${ENABLE_METADATA_AGENT:-}" == "stackdriver" ]]; then
metadata_agent_cpu_request="${METADATA_AGENT_CPU_REQUEST:-40m}"
metadata_agent_memory_request="${METADATA_AGENT_MEMORY_REQUEST:-50Mi}"
@ -2467,7 +2486,7 @@ EOF
fi
if [[ "${ENABLE_NODE_TERMINATION_HANDLER:-}" == "true" ]]; then
setup-addon-manifests "addons" "node-termination-handler"
setup-node-termination-handler-manifest
setup-node-termination-handler-manifest "$@"
fi
# Setting up the konnectivity-agent daemonset
if [[ "${ENABLE_EGRESS_VIA_KONNECTIVITY_SERVICE:-false}" == "true" ]]; then
@ -2482,7 +2501,7 @@ EOF
BASE_ADDON_DIR=${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty
BASE_DNS_DIR=${BASE_ADDON_DIR}/dns
NEW_DNS_DIR=${BASE_ADDON_DIR}/0-dns
mkdir ${NEW_DNS_DIR} && mv ${BASE_DNS_DIR}/* ${NEW_DNS_DIR} && rm -r ${BASE_DNS_DIR}
mkdir "${NEW_DNS_DIR}" && mv "${BASE_DNS_DIR}"/* "${NEW_DNS_DIR}" && rm -r "${BASE_DNS_DIR}"
if [[ "${CLUSTER_DNS_CORE_DNS:-}" == "true" ]]; then
setup-coredns-manifest
else
@ -2598,7 +2617,7 @@ function start-lb-controller {
prepare-log-file /var/log/glbc.log
setup-addon-manifests "addons" "cluster-loadbalancing/glbc"
setup-addon-manifests "addons" "rbac/cluster-loadbalancing/glbc"
create-kubeconfig "l7-lb-controller" ${GCE_GLBC_TOKEN}
create-kubeconfig "l7-lb-controller" "${GCE_GLBC_TOKEN}"
local -r src_manifest="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/glbc.manifest"
local -r dest_manifest="/etc/kubernetes/manifests/glbc.manifest"
@ -2641,8 +2660,9 @@ function reset-motd {
local -r version="$("${KUBE_HOME}"/bin/kubelet --version=true | cut -f2 -d " ")"
# This logic grabs either a release tag (v1.2.1 or v1.2.1-alpha.1),
# or the git hash that's in the build info.
local gitref="$(echo "${version}" | sed -r "s/(v[0-9]+\.[0-9]+\.[0-9]+)(-[a-z]+\.[0-9]+)?.*/\1\2/g")"
local devel=""
local gitref
gitref="$(echo "${version}" | sed -r "s/(v[0-9]+\.[0-9]+\.[0-9]+)(-[a-z]+\.[0-9]+)?.*/\1\2/g")"
local devel=''
if [[ "${gitref}" != "${version}" ]]; then
devel="
Note: This looks like a development version, which might not be present on GitHub.
@ -2678,6 +2698,7 @@ function override-kubectl {
# source the file explicitly otherwise we have
# issues on a ubuntu OS image finding the kubectl
# shellcheck disable=SC1091
source /etc/profile.d/kube_env.sh
# Add ${KUBE_HOME}/bin into sudoer secure path.
@ -2701,7 +2722,7 @@ function override-pv-recycler {
PV_RECYCLER_VOLUME="{\"name\": \"pv-recycler-mount\",\"hostPath\": {\"path\": \"${PV_RECYCLER_OVERRIDE_TEMPLATE}\", \"type\": \"FileOrCreate\"}},"
PV_RECYCLER_MOUNT="{\"name\": \"pv-recycler-mount\",\"mountPath\": \"${PV_RECYCLER_OVERRIDE_TEMPLATE}\", \"readOnly\": true},"
cat > ${PV_RECYCLER_OVERRIDE_TEMPLATE} <<EOF
cat > "${PV_RECYCLER_OVERRIDE_TEMPLATE}" <<EOF
version: v1
kind: Pod
metadata:
@ -2809,8 +2830,9 @@ EOF
fi
# Reuse docker group for containerd.
local containerd_gid="$(cat /etc/group | grep ^docker: | cut -d: -f 3)"
if [[ ! -z "${containerd_gid:-}" ]]; then
local containerd_gid
containerd_gid="$(grep ^docker: /etc/group | cut -d: -f 3)"
if [[ -n "${containerd_gid:-}" ]]; then
cat >> "${config_path}" <<EOF
# reuse id of the docker group
[grpc]
@ -2861,7 +2883,7 @@ function main() {
exit 2
fi
fi
echo "Version : " $(${PYTHON} -V 2>&1)
echo "Version : $(${PYTHON} -V 2>&1)"
if [[ ! -e "${KUBE_HOME}/kube-env" ]]; then
echo "The ${KUBE_HOME}/kube-env file does not exist!! Terminate cluster initialization."
@ -2920,13 +2942,13 @@ function main() {
gke-master-start
else
create-node-pki
create-kubelet-kubeconfig ${KUBERNETES_MASTER_NAME}
create-kubelet-kubeconfig "${KUBERNETES_MASTER_NAME}"
if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]]; then
create-kubeproxy-user-kubeconfig
fi
if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then
if [[ -n "${NODE_PROBLEM_DETECTOR_TOKEN:-}" ]]; then
create-node-problem-detector-kubeconfig ${KUBERNETES_MASTER_NAME}
create-node-problem-detector-kubeconfig "${KUBERNETES_MASTER_NAME}"
elif [[ -f "/var/lib/kubelet/kubeconfig" ]]; then
create-node-problem-detector-kubeconfig-from-kubelet
else
@ -2961,7 +2983,7 @@ function main() {
start-kube-controller-manager
start-kube-scheduler
wait-till-apiserver-ready
start-kube-addons
start-kube-addons "$@"
start-cluster-autoscaler
start-lb-controller
update-legacy-addon-node-labels &


@ -1,4 +1,3 @@
./cluster/gce/gci/configure-helper.sh
./cluster/gce/gci/configure.sh
./cluster/gce/gci/master-helper.sh
./cluster/gce/util.sh