Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-24 12:15:52 +00:00)
Merge pull request #90433 from joakimr-axis/joakimr-axis_configure-helper.sh
Fix shellcheck w/e in cluster/gce/gci/configure-helper.sh
This commit is contained in: commit fd20de89d9
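The diff below applies a few recurring ShellCheck remediations across configure-helper.sh: replacing `[ ! -z ... ]` tests with `[ -n ... ]` (SC2236), double-quoting expansions so they are not word-split or globbed (SC2086), splitting `local var=$(cmd)` into a declaration plus an assignment so the command's exit status is not masked (SC2155), replacing legacy backticks with `$(...)` (SC2006), and building flag lists as bash arrays instead of space-joined strings. A minimal before/after sketch of these patterns (illustrative only; the file, variable, and function names below are made up, not taken from the script):

    #!/usr/bin/env bash
    # Illustrative ShellCheck fixes; "local_demo" and "flag_list" are hypothetical names.

    # SC2236 + SC2086: prefer -n over ! -z, and quote the expansion.
    if [[ -n "${KUBE_HOME:-}" ]]; then
      echo "KUBE_HOME is set to ${KUBE_HOME}"
    fi

    # SC2155: declare first, assign second, so the exit status of the command
    # is visible instead of being masked by 'local'.
    local_demo() {
      local checksum
      if ! checksum=$(sha256sum "$1" | cut -d ' ' -f 1); then
        echo "failed to hash $1" >&2
        return 1
      fi
      echo "${checksum}"
    }

    # Build command-line flags as an array instead of a space-joined string,
    # so a value containing spaces survives expansion via "${flag_list[@]}".
    flag_list=("--v=2" "--log-dir=/var/log")
    flag_list+=("--cluster-name=my cluster")
    printf '%s\n' "${flag_list[@]}"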
@@ -41,7 +41,7 @@ function convert-manifest-params {
for flag in "${FLAGS[@]}"; do
params+="\n\"$flag\","
done
if [ ! -z $params ]; then
if [ -n "$params" ]; then
echo "${params::-1}" # drop trailing comma
fi
}
@@ -118,6 +118,8 @@ function gce-metadata-fw-helper {
;;
esac

# Deliberately allow word split here
# shellcheck disable=SC2086
iptables ${command} OUTPUT -p tcp --dport 80 -d ${METADATA_SERVER_IP} -m owner ${invert:-} --uid-owner=${METADATA_SERVER_ALLOWED_UID_RANGE:-0-2999} -j ${action}
}

@@ -171,7 +173,7 @@ function config-ip-firewall {
# node because we don't expect the daemonset to run on this node.
if [[ "${ENABLE_METADATA_CONCEALMENT:-}" == "true" ]] && [[ ! "${METADATA_CONCEALMENT_NO_FIREWALL:-}" == "true" ]]; then
echo "Add rule for metadata concealment"
iptables -w -t nat -I PREROUTING -p tcp -d ${METADATA_SERVER_IP} --dport 80 -m comment --comment "metadata-concealment: bridge traffic to metadata server goes to metadata proxy" -j DNAT --to-destination 127.0.0.1:988
iptables -w -t nat -I PREROUTING -p tcp -d "${METADATA_SERVER_IP}" --dport 80 -m comment --comment "metadata-concealment: bridge traffic to metadata server goes to metadata proxy" -j DNAT --to-destination 127.0.0.1:988
fi

# Log all metadata access not from approved processes.
@@ -200,7 +202,7 @@ function get-local-disk-num() {
local format="${2}"

localdisknum=0
if [[ ! -z "${NODE_LOCAL_SSDS_EXT:-}" ]]; then
if [[ -n "${NODE_LOCAL_SSDS_EXT:-}" ]]; then
IFS=";" read -r -a ssdgroups <<< "${NODE_LOCAL_SSDS_EXT:-}"
for ssdgroup in "${ssdgroups[@]}"; do
IFS="," read -r -a ssdopts <<< "${ssdgroup}"
@@ -246,13 +248,13 @@ function get-or-generate-uuid(){
fi

# each line of the ssdmap looks like "${device} persistent-uuid"
if [[ ! -z $(grep ${device} ${ssdmap}) ]]; then
local myuuid
if grep -q "${device}" "${ssdmap}"; then
#create symlink based on saved uuid
local myuuid=$(grep ${device} ${ssdmap} | cut -d ' ' -f 2)
myuuid=$(grep "${device}" "${ssdmap}" | cut -d ' ' -f 2)
else
# generate new uuid and add it to the map
local myuuid=$(uuidgen)
if [[ ! ${?} -eq 0 ]]; then
if ! myuuid=$(uuidgen); then
echo "Failed to generate valid UUID with uuidgen" >&2
exit 2
fi
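The hunk above is the SC2155 pattern in context: with `local myuuid=$(uuidgen)`, the `local` builtin's own (successful) exit status overwrites `$?`, so the following `[[ ! ${?} -eq 0 ]]` check could never see a uuidgen failure. Declaring `myuuid` first and testing the assignment directly makes the error path reachable; likewise, `grep -q` uses grep's exit status instead of capturing and testing its output. A hedged standalone sketch of the new shape:

    #!/usr/bin/env bash
    # Sketch only; mirrors the declare-then-assign idiom used in the diff.
    get_uuid() {
      local myuuid
      # The assignment's exit status is the status of uuidgen itself.
      if ! myuuid=$(uuidgen); then
        echo "Failed to generate valid UUID with uuidgen" >&2
        return 2
      fi
      echo "${myuuid}"
    }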
@@ -270,8 +272,10 @@ function get-or-generate-uuid(){
#Formats the given device ($1) if needed and mounts it at given mount point
# ($2).
function safe-format-and-mount() {
local device="${1}"
local mountpoint="${2}"
local device
local mountpoint
device="$1"
mountpoint="$2"

# Format only if the disk is not already formatted.
if ! tune2fs -l "${device}" ; then
@@ -288,16 +292,19 @@ function safe-format-and-mount() {
# Gets a devices UUID and bind mounts the device to mount location in
# /mnt/disks/by-id/
function unique-uuid-bind-mount(){
local mountpoint="${1}"
local actual_device="${2}"
local mountpoint
local actual_device
mountpoint="$1"
actual_device="$2"

# Trigger udev refresh so that newly formatted devices are propagated in by-uuid
udevadm control --reload-rules
udevadm trigger
udevadm settle

# grep the exact match of actual device, prevents substring matching
local myuuid=$(ls -l /dev/disk/by-uuid/ | grep "/${actual_device}$" | tr -s ' ' | cut -d ' ' -f 9)
# find uuid for actual_device
local myuuid
myuuid=$(find -L /dev/disk/by-uuid -maxdepth 1 -samefile /dev/"${actual_device}" -printf '%P')
# myuuid should be the uuid of the device as found in /dev/disk/by-uuid/
if [[ -z "${myuuid}" ]]; then
echo "Failed to get a uuid for device ${actual_device} when mounting." >&2
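Parsing `ls -l` output is fragile (ShellCheck flags it as SC2012), since the column layout can shift. The replacement resolves the by-uuid symlinks directly: `find -L` follows the links, `-samefile /dev/<device>` matches the entry that points at the actual device, and `-printf '%P'` prints just the entry name, which is the UUID. A rough standalone equivalent of the lookup, assuming such a device node exists ("sdb" is a placeholder):

    #!/usr/bin/env bash
    actual_device="sdb"   # placeholder kernel device name
    # Entry name under /dev/disk/by-uuid whose target is /dev/sdb, i.e. its filesystem UUID.
    myuuid=$(find -L /dev/disk/by-uuid -maxdepth 1 -samefile "/dev/${actual_device}" -printf '%P')
    if [[ -z "${myuuid}" ]]; then
      echo "no filesystem UUID found for /dev/${actual_device}" >&2
    fi
    echo "${myuuid}"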
@@ -340,7 +347,8 @@ function mount-ext(){
# TODO: Handle partitioned disks. Right now this code just ignores partitions
if [[ "${format}" == "fs" ]]; then
if [[ "${interface}" == "scsi" ]]; then
local actual_device=$(readlink -f "${ssd}" | cut -d '/' -f 3)
local actual_device
actual_device=$(readlink -f "${ssd}" | cut -d '/' -f 3)
# Error checking
if [[ "${actual_device}" != sd* ]]; then
echo "'actual_device' is not of the correct format. It must be the kernel name of the device, got ${actual_device} instead" >&2
@@ -350,7 +358,8 @@ function mount-ext(){
else
# This path is required because the existing Google images do not
# expose NVMe devices in /dev/disk/by-id so we are using the /dev/nvme instead
local actual_device=$(echo ${ssd} | cut -d '/' -f 3)
local actual_device
actual_device=$(echo "${ssd}" | cut -d '/' -f 3)
# Error checking
if [[ "${actual_device}" != nvme* ]]; then
echo "'actual_device' is not of the correct format. It must be the kernel name of the device, got ${actual_device} instead" >&2
@@ -362,7 +371,7 @@ function mount-ext(){
safe-format-and-mount "${ssd}" "${mountpoint}"
# We only do the bindmount if users are using the new local ssd request method
# see https://github.com/kubernetes/kubernetes/pull/53466#discussion_r146431894
if [[ ! -z "${NODE_LOCAL_SSDS_EXT:-}" ]]; then
if [[ -n "${NODE_LOCAL_SSDS_EXT:-}" ]]; then
unique-uuid-bind-mount "${mountpoint}" "${actual_device}"
fi
elif [[ "${format}" == "block" ]]; then
@@ -381,7 +390,8 @@ function ensure-local-ssds() {
local i=0
for ssd in /dev/disk/by-id/google-local-ssd-*; do
if [ -e "${ssd}" ]; then
local devicenum=`echo ${ssd} | sed -e 's/\/dev\/disk\/by-id\/google-local-ssd-\([0-9]*\)/\1/'`
local devicenum
devicenum=$(echo "${ssd}" | sed -e 's/\/dev\/disk\/by-id\/google-local-ssd-\([0-9]*\)/\1/')
if [[ "${i}" -lt "${scsiblocknum}" ]]; then
mount-ext "${ssd}" "${devicenum}" "scsi" "block"
else
@@ -410,8 +420,9 @@ function ensure-local-ssds() {
if [ -e "${ssd}" ]; then
# This workaround to find if the NVMe device is a disk is required because
# the existing Google images does not expose NVMe devices in /dev/disk/by-id
if [[ `udevadm info --query=property --name=${ssd} | grep DEVTYPE | sed "s/DEVTYPE=//"` == "disk" ]]; then
local devicenum=`echo ${ssd} | sed -e 's/\/dev\/nvme0n\([0-9]*\)/\1/'`
if [[ $(udevadm info --query=property --name="${ssd}" | grep DEVTYPE | sed "s/DEVTYPE=//") == "disk" ]]; then
# shellcheck disable=SC2155
local devicenum=$(echo "${ssd}" | sed -e 's/\/dev\/nvme0n\([0-9]*\)/\1/')
if [[ "${i}" -lt "${nvmeblocknum}" ]]; then
mount-ext "${ssd}" "${devicenum}" "nvme" "block"
else
@@ -506,7 +517,8 @@ function mount-master-pd {
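Two smaller cleanups show up in the ensure-local-ssds hunks: legacy backtick command substitution is replaced with `$(...)` (SC2006), which nests and quotes more predictably, and the remaining `local devicenum=$(...)` carries an explicit `# shellcheck disable=SC2155`, presumably because the exit status of that sed pipeline is deliberately ignored. A minimal illustration of the substitution style (the path is an example, not taken from a live node):

    #!/usr/bin/env bash
    ssd="/dev/disk/by-id/google-local-ssd-3"   # example path
    # Old style: devicenum=`echo ${ssd} | sed ...`
    # New style: quote the variable and use $(...).
    devicenum=$(echo "${ssd}" | sed -e 's/\/dev\/disk\/by-id\/google-local-ssd-\([0-9]*\)/\1/')
    echo "${devicenum}"   # prints 3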
# locations.

# Contains all the data stored in etcd.
mkdir -m 700 -p "${mount_point}/var/etcd"
mkdir -p "${mount_point}/var/etcd"
chmod 700 "${mount_point}/var/etcd"
ln -s -f "${mount_point}/var/etcd" /var/etcd
mkdir -p /etc/srv
# Contains the dynamically generated apiserver auth certs and keys.
@@ -528,8 +540,8 @@ function append_or_replace_prefixed_line {
local -r file="${1:-}"
local -r prefix="${2:-}"
local -r suffix="${3:-}"
local -r dirname="$(dirname ${file})"
local -r tmpfile="$(mktemp -t filtered.XXXX --tmpdir=${dirname})"
local -r dirname=$(dirname "${file}")
local -r tmpfile=$(mktemp -t filtered.XXXX --tmpdir="${dirname}")

touch "${file}"
awk -v pfx="${prefix}" 'substr($0,1,length(pfx)) != pfx { print }' "${file}" > "${tmpfile}"
@@ -556,7 +568,7 @@ function create-node-pki {
CA_CERT_BUNDLE_PATH="${pki_dir}/ca-certificates.crt"
write-pki-data "${CA_CERT_BUNDLE}" "${CA_CERT_BUNDLE_PATH}"

if [[ ! -z "${KUBELET_CERT:-}" && ! -z "${KUBELET_KEY:-}" ]]; then
if [[ -n "${KUBELET_CERT:-}" && -n "${KUBELET_KEY:-}" ]]; then
KUBELET_CERT_PATH="${pki_dir}/kubelet.crt"
write-pki-data "${KUBELET_CERT}" "${KUBELET_CERT_PATH}"

@@ -575,7 +587,7 @@ function create-master-pki {
write-pki-data "${CA_CERT}" "${CA_CERT_PATH}"

# this is not true on GKE
if [[ ! -z "${CA_KEY:-}" ]]; then
if [[ -n "${CA_KEY:-}" ]]; then
CA_KEY_PATH="${pki_dir}/ca.key"
write-pki-data "${CA_KEY}" "${CA_KEY_PATH}"
fi
@@ -620,7 +632,7 @@ function create-master-pki {
SERVICEACCOUNT_KEY_PATH="${pki_dir}/serviceaccount.key"
write-pki-data "${SERVICEACCOUNT_KEY}" "${SERVICEACCOUNT_KEY_PATH}"

if [[ ! -z "${REQUESTHEADER_CA_CERT:-}" ]]; then
if [[ -n "${REQUESTHEADER_CA_CERT:-}" ]]; then
REQUESTHEADER_CA_CERT_PATH="${pki_dir}/aggr_ca.crt"
write-pki-data "${REQUESTHEADER_CA_CERT}" "${REQUESTHEADER_CA_CERT_PATH}"

@@ -685,14 +697,15 @@ function create-master-auth {
fi
if [[ -n "${KONNECTIVITY_SERVER_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${KONNECTIVITY_SERVER_TOKEN}," "system:konnectivity-server,uid:system:konnectivity-server"
create-kubeconfig "konnectivity-server" ${KONNECTIVITY_SERVER_TOKEN}
create-kubeconfig "konnectivity-server" "${KONNECTIVITY_SERVER_TOKEN}"
fi

if [[ -n "${EXTRA_STATIC_AUTH_COMPONENTS:-}" ]]; then
# Create a static Bearer token and kubeconfig for extra, comma-separated components.
IFS="," read -r -a extra_components <<< "${EXTRA_STATIC_AUTH_COMPONENTS:-}"
for extra_component in "${extra_components[@]}"; do
local token="$(secure_random 32)"
local token
token="$(secure_random 32)"
append_or_replace_prefixed_line "${known_tokens_csv}" "${token}," "system:${extra_component},uid:system:${extra_component}"
create-kubeconfig "${extra_component}" "${token}"
done
@@ -747,14 +760,14 @@ EOF
use_cloud_config="true"
if [[ -n "${NODE_TAGS:-}" ]]; then
# split NODE_TAGS into an array by comma.
IFS=',' read -r -a node_tags <<< ${NODE_TAGS}
IFS=',' read -r -a node_tags <<< "${NODE_TAGS}"
else
local -r node_tags="${NODE_INSTANCE_PREFIX}"
local -r node_tags=("${NODE_INSTANCE_PREFIX}")
fi
cat <<EOF >>/etc/gce.conf
node-instance-prefix = ${NODE_INSTANCE_PREFIX}
EOF
for tag in ${node_tags[@]}; do
for tag in "${node_tags[@]}"; do
cat <<EOF >>/etc/gce.conf
node-tags = ${tag}
EOF
@@ -778,8 +791,8 @@ EOF
if [[ -n "${GCE_ALPHA_FEATURES:-}" ]]; then
use_cloud_config="true"
# split GCE_ALPHA_FEATURES into an array by comma.
IFS=',' read -r -a alpha_features <<< ${GCE_ALPHA_FEATURES}
for feature in ${alpha_features[@]}; do
IFS=',' read -r -a alpha_features <<< "${GCE_ALPHA_FEATURES}"
for feature in "${alpha_features[@]}"; do
cat <<EOF >>/etc/gce.conf
alpha-features = ${feature}
EOF
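The gce.conf hunks follow the same quoting rule for arrays: `read -r -a arr <<< "${VAR}"` keeps the input intact, and iterating with `"${arr[@]}"` (the SC2068 fix) expands each element as its own word without re-splitting or globbing. A small sketch of the difference, using a made-up tag list:

    #!/usr/bin/env bash
    NODE_TAGS="kube-node,gke-node,my tag"   # hypothetical value containing a space
    IFS=',' read -r -a node_tags <<< "${NODE_TAGS}"
    # Quoted expansion: three iterations, and the third keeps its embedded space.
    for tag in "${node_tags[@]}"; do
      printf 'tag=[%s]\n' "${tag}"
    done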
@@ -1182,8 +1195,8 @@ function create-kubeconfig {
local component=$1
local token=$2
echo "Creating kubeconfig file for component ${component}"
mkdir -p /etc/srv/kubernetes/${component}
cat <<EOF >/etc/srv/kubernetes/${component}/kubeconfig
mkdir -p "/etc/srv/kubernetes/${component}"
cat <<EOF >"/etc/srv/kubernetes/${component}/kubeconfig"
apiVersion: v1
kind: Config
users:
@@ -1251,7 +1264,7 @@ function create-master-kubelet-auth {
# set in the environment.
if [[ -n "${KUBELET_APISERVER:-}" && -n "${KUBELET_CERT:-}" && -n "${KUBELET_KEY:-}" ]]; then
REGISTER_MASTER_KUBELET="true"
create-kubelet-kubeconfig ${KUBELET_APISERVER}
create-kubelet-kubeconfig "${KUBELET_APISERVER}"
fi
}

@@ -1373,12 +1386,10 @@ function assemble-docker-flags {
else
docker_opts+=" --log-level=warn"
fi
local use_net_plugin="true"
if [[ "${NETWORK_PROVIDER:-}" == "kubenet" || "${NETWORK_PROVIDER:-}" == "cni" ]]; then
# set docker0 cidr to private ip address range to avoid conflict with cbr0 cidr range
docker_opts+=" --bip=169.254.123.1/24"
else
use_net_plugin="false"
docker_opts+=" --bridge=cbr0"
fi

@@ -1523,9 +1534,9 @@ EOF
# $2: the log owner uid to set for the log file.
# $3: the log owner gid to set for the log file.
function prepare-log-file {
touch $1
chmod 644 $1
chown "${2:-${LOG_OWNER_USER:-root}}":"${3:-${LOG_OWNER_GROUP:-root}}" $1
touch "$1"
chmod 644 "$1"
chown "${2:-${LOG_OWNER_USER:-root}}":"${3:-${LOG_OWNER_GROUP:-root}}" "$1"
}

# Prepares parameters for kube-proxy manifest.
@@ -1549,13 +1560,11 @@ function prepare-kube-proxy-manifest-variables {
# https://github.com/kubernetes/kubernetes/pull/70398
local -r kernel_version=$(uname -r | cut -d\. -f1,2)
local conntrack_module="nf_conntrack"
if [[ $(printf "${kernel_version}\n4.18\n" | sort -V | tail -1) == "4.18" ]]; then
if [[ $(printf '%s\n4.18\n' "${kernel_version}" | sort -V | tail -1) == "4.18" ]]; then
conntrack_module="nf_conntrack_ipv4"
fi

sudo modprobe -a ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh ${conntrack_module}
if [[ $? -eq 0 ]];
then
if sudo modprobe -a ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh ${conntrack_module}; then
params+=" --proxy-mode=ipvs"
else
# If IPVS modules are not present, make sure the node does not come up as
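Two idioms change in the kube-proxy hunk above: the kernel-version comparison moves the variable out of the printf format string (`printf '%s\n4.18\n' "${kernel_version}"`, the SC2059 fix), and the modprobe result is tested directly with `if sudo modprobe ...; then` instead of inspecting `$?` afterwards (SC2181), which also removes the awkward line break before `then`. A small self-contained version of the comparison, with a hard-coded version string standing in for `uname -r`:

    #!/usr/bin/env bash
    kernel_version="5.4"   # placeholder instead of $(uname -r | cut -d. -f1,2)
    conntrack_module="nf_conntrack"
    # sort -V picks the larger of the two versions; if 4.18 is the maximum,
    # the running kernel is 4.18 or older and needs the older module name.
    if [[ $(printf '%s\n4.18\n' "${kernel_version}" | sort -V | tail -1) == "4.18" ]]; then
      conntrack_module="nf_conntrack_ipv4"
    fi
    echo "${conntrack_module}"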
@@ -1575,18 +1584,18 @@ function prepare-kube-proxy-manifest-variables {
kube_cache_mutation_detector_env_name="- name: KUBE_CACHE_MUTATION_DETECTOR"
kube_cache_mutation_detector_env_value="value: \"${ENABLE_CACHE_MUTATION_DETECTOR}\""
fi
sed -i -e "s@{{kubeconfig}}@${kubeconfig}@g" ${src_file}
sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${kube_docker_registry}@g" ${src_file}
sed -i -e "s@{{pillar\['kube-proxy_docker_tag'\]}}@${kube_proxy_docker_tag}@g" ${src_file}
sed -i -e "s@{{params}}@${params}@g" ${src_file}
sed -i -e "s@{{container_env}}@${container_env}@g" ${src_file}
sed -i -e "s@{{kube_cache_mutation_detector_env_name}}@${kube_cache_mutation_detector_env_name}@g" ${src_file}
sed -i -e "s@{{kube_cache_mutation_detector_env_value}}@${kube_cache_mutation_detector_env_value}@g" ${src_file}
sed -i -e "s@{{ cpurequest }}@100m@g" ${src_file}
sed -i -e "s@{{api_servers_with_port}}@${api_servers}@g" ${src_file}
sed -i -e "s@{{kubernetes_service_host_env_value}}@${KUBERNETES_MASTER_NAME}@g" ${src_file}
sed -i -e "s@{{kubeconfig}}@${kubeconfig}@g" "${src_file}"
sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${kube_docker_registry}@g" "${src_file}"
sed -i -e "s@{{pillar\['kube-proxy_docker_tag'\]}}@${kube_proxy_docker_tag}@g" "${src_file}"
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
sed -i -e "s@{{container_env}}@${container_env}@g" "${src_file}"
sed -i -e "s@{{kube_cache_mutation_detector_env_name}}@${kube_cache_mutation_detector_env_name}@g" "${src_file}"
sed -i -e "s@{{kube_cache_mutation_detector_env_value}}@${kube_cache_mutation_detector_env_value}@g" "${src_file}"
sed -i -e "s@{{ cpurequest }}@100m@g" "${src_file}"
sed -i -e "s@{{api_servers_with_port}}@${api_servers}@g" "${src_file}"
sed -i -e "s@{{kubernetes_service_host_env_value}}@${KUBERNETES_MASTER_NAME}@g" "${src_file}"
if [[ -n "${CLUSTER_IP_RANGE:-}" ]]; then
sed -i -e "s@{{cluster_cidr}}@--cluster-cidr=${CLUSTER_IP_RANGE}@g" ${src_file}
sed -i -e "s@{{cluster_cidr}}@--cluster-cidr=${CLUSTER_IP_RANGE}@g" "${src_file}"
fi
}

@@ -1609,7 +1618,7 @@ function start-kube-proxy {
# $5: pod name, which should be either etcd or etcd-events
function prepare-etcd-manifest {
local host_name=${ETCD_HOSTNAME:-$(hostname -s)}
local host_ip=$(${PYTHON} -c "import socket;print(socket.gethostbyname(\"${host_name}\"))")
local -r host_ip=$(${PYTHON} -c "import socket;print(socket.gethostbyname(\"${host_name}\"))")
local etcd_cluster=""
local cluster_state="new"
local etcd_protocol="http"
@@ -1656,8 +1665,8 @@ function prepare-etcd-manifest {
sed -i -e "s@{{ *liveness_probe_initial_delay *}}@${ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC:-15}@g" "${temp_file}"
sed -i -e "s@{{ *listen_client_ip *}}@${ETCD_LISTEN_CLIENT_IP:-127.0.0.1}@g" "${temp_file}"
# Get default storage backend from manifest file.
local -r default_storage_backend=$(cat "${temp_file}" | \
grep -o "{{ *pillar\.get('storage_backend', '\(.*\)') *}}" | \
local -r default_storage_backend=$( \
grep -o "{{ *pillar\.get('storage_backend', '\(.*\)') *}}" "${temp_file}" | \
sed -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@\1@g")
if [[ -n "${STORAGE_BACKEND:-}" ]]; then
sed -i -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@${STORAGE_BACKEND}@g" "${temp_file}"
@@ -1805,8 +1814,12 @@ function compute-master-manifest-variables {

INSECURE_PORT_MAPPING=""
if [[ "${ENABLE_APISERVER_INSECURE_PORT:-false}" == "true" ]]; then
INSECURE_PORT_MAPPING="{ \"name\": \"local\", \"containerPort\": 8080, \"hostPort\": 8080},"
# INSECURE_PORT_MAPPING is used by sed
# shellcheck disable=SC2089
INSECURE_PORT_MAPPING='{ "name": "local", "containerPort": 8080, "hostPort": 8080},'
fi
# shellcheck disable=SC2090
export INSECURE_PORT_MAPPING
}

# A helper function that bind mounts kubelet dirs for running mount in a chroot
@@ -1854,6 +1867,8 @@ function update-node-label() {

# A helper function that sets file permissions for kube-controller-manager to
# run as non root.
# User and group should never contain characters that need to be quoted
# shellcheck disable=SC2086
function run-kube-controller-manager-as-non-root {
prepare-log-file /var/log/kube-controller-manager.log ${KUBE_CONTROLLER_MANAGER_RUNASUSER} ${KUBE_CONTROLLER_MANAGER_RUNASGROUP}
setfacl -m u:${KUBE_CONTROLLER_MANAGER_RUNASUSER}:r "${CA_CERT_BUNDLE_PATH}"
@@ -1873,67 +1888,67 @@ function run-kube-controller-manager-as-non-root {
# DOCKER_REGISTRY
function start-kube-controller-manager {
echo "Start kubernetes controller-manager"
create-kubeconfig "kube-controller-manager" ${KUBE_CONTROLLER_MANAGER_TOKEN}
create-kubeconfig "kube-controller-manager" "${KUBE_CONTROLLER_MANAGER_TOKEN}"
prepare-log-file /var/log/kube-controller-manager.log
# Calculate variables and assemble the command line.
local params="${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-"--v=2"} ${CONTROLLER_MANAGER_TEST_ARGS:-} ${CLOUD_CONFIG_OPT}"
params+=" --use-service-account-credentials"
params+=" --cloud-provider=gce"
params+=" --kubeconfig=/etc/srv/kubernetes/kube-controller-manager/kubeconfig"
params+=" --root-ca-file=${CA_CERT_BUNDLE_PATH}"
params+=" --service-account-private-key-file=${SERVICEACCOUNT_KEY_PATH}"
local params=("${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-"--v=2"}" "${CONTROLLER_MANAGER_TEST_ARGS:-}" "${CLOUD_CONFIG_OPT}")
params+=("--use-service-account-credentials")
params+=("--cloud-provider=gce")
params+=("--kubeconfig=/etc/srv/kubernetes/kube-controller-manager/kubeconfig")
params+=("--root-ca-file=${CA_CERT_BUNDLE_PATH}")
params+=("--service-account-private-key-file=${SERVICEACCOUNT_KEY_PATH}")
if [[ -n "${ENABLE_GARBAGE_COLLECTOR:-}" ]]; then
params+=" --enable-garbage-collector=${ENABLE_GARBAGE_COLLECTOR}"
params+=("--enable-garbage-collector=${ENABLE_GARBAGE_COLLECTOR}")
fi
if [[ -n "${INSTANCE_PREFIX:-}" ]]; then
params+=" --cluster-name=${INSTANCE_PREFIX}"
params+=("--cluster-name=${INSTANCE_PREFIX}")
fi
if [[ -n "${CLUSTER_IP_RANGE:-}" ]]; then
params+=" --cluster-cidr=${CLUSTER_IP_RANGE}"
params+=("--cluster-cidr=${CLUSTER_IP_RANGE}")
fi
if [[ -n "${CA_KEY:-}" ]]; then
params+=" --cluster-signing-cert-file=${CA_CERT_PATH}"
params+=" --cluster-signing-key-file=${CA_KEY_PATH}"
params+=("--cluster-signing-cert-file=${CA_CERT_PATH}")
params+=("--cluster-signing-key-file=${CA_KEY_PATH}")
fi
if [[ -n "${SERVICE_CLUSTER_IP_RANGE:-}" ]]; then
params+=" --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}"
params+=("--service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}")
fi
if [[ -n "${CONCURRENT_SERVICE_SYNCS:-}" ]]; then
params+=" --concurrent-service-syncs=${CONCURRENT_SERVICE_SYNCS}"
params+=("--concurrent-service-syncs=${CONCURRENT_SERVICE_SYNCS}")
fi
if [[ "${NETWORK_PROVIDER:-}" == "kubenet" ]]; then
params+=" --allocate-node-cidrs=true"
params+=("--allocate-node-cidrs=true")
elif [[ -n "${ALLOCATE_NODE_CIDRS:-}" ]]; then
params+=" --allocate-node-cidrs=${ALLOCATE_NODE_CIDRS}"
params+=("--allocate-node-cidrs=${ALLOCATE_NODE_CIDRS}")
fi
if [[ -n "${TERMINATED_POD_GC_THRESHOLD:-}" ]]; then
params+=" --terminated-pod-gc-threshold=${TERMINATED_POD_GC_THRESHOLD}"
params+=("--terminated-pod-gc-threshold=${TERMINATED_POD_GC_THRESHOLD}")
fi
if [[ "${ENABLE_IP_ALIASES:-}" == 'true' ]]; then
params+=" --cidr-allocator-type=${NODE_IPAM_MODE}"
params+=" --configure-cloud-routes=false"
params+=("--cidr-allocator-type=${NODE_IPAM_MODE}")
params+=("--configure-cloud-routes=false")
fi
if [[ -n "${FEATURE_GATES:-}" ]]; then
params+=" --feature-gates=${FEATURE_GATES}"
params+=("--feature-gates=${FEATURE_GATES}")
fi
if [[ -n "${VOLUME_PLUGIN_DIR:-}" ]]; then
params+=" --flex-volume-plugin-dir=${VOLUME_PLUGIN_DIR}"
params+=("--flex-volume-plugin-dir=${VOLUME_PLUGIN_DIR}")
fi
if [[ -n "${CLUSTER_SIGNING_DURATION:-}" ]]; then
params+=" --cluster-signing-duration=$CLUSTER_SIGNING_DURATION"
params+=("--cluster-signing-duration=$CLUSTER_SIGNING_DURATION")
fi
# Disable using HPA metrics REST clients if metrics-server isn't enabled,
# or if we want to explicitly disable it by setting HPA_USE_REST_CLIENT.
if [[ "${ENABLE_METRICS_SERVER:-}" != "true" ]] ||
[[ "${HPA_USE_REST_CLIENTS:-}" == "false" ]]; then
params+=" --horizontal-pod-autoscaler-use-rest-clients=false"
params+=("--horizontal-pod-autoscaler-use-rest-clients=false")
fi
if [[ -n "${PV_RECYCLER_OVERRIDE_TEMPLATE:-}" ]]; then
params+=" --pv-recycler-pod-template-filepath-nfs=$PV_RECYCLER_OVERRIDE_TEMPLATE"
params+=" --pv-recycler-pod-template-filepath-hostpath=$PV_RECYCLER_OVERRIDE_TEMPLATE"
params+=("--pv-recycler-pod-template-filepath-nfs=$PV_RECYCLER_OVERRIDE_TEMPLATE")
params+=("--pv-recycler-pod-template-filepath-hostpath=$PV_RECYCLER_OVERRIDE_TEMPLATE")
fi
if [[ -n "${RUN_CONTROLLERS:-}" ]]; then
params+=" --controllers=${RUN_CONTROLLERS}"
params+=("--controllers=${RUN_CONTROLLERS}")
fi

local -r kube_rc_docker_tag=$(cat /home/kubernetes/kube-docker-files/kube-controller-manager.docker_tag)
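The largest mechanical change converts `params` in start-kube-controller-manager from one space-joined string into a bash array, so each `--flag=value` stays a single element even if a value ever contains whitespace; the array is flattened with `"${params[*]}"` when handed to convert-manifest-params in the next hunk. A reduced sketch of the pattern, with hypothetical flag values:

    #!/usr/bin/env bash
    # Array form: append one element per flag.
    params=("--v=2" "--cloud-provider=gce")
    if [[ -n "${CLUSTER_IP_RANGE:-}" ]]; then
      params+=("--cluster-cidr=${CLUSTER_IP_RANGE}")
    fi
    # "${params[*]}" joins the elements with single spaces for consumers
    # that still expect one string.
    paramstring="${params[*]}"
    echo "${paramstring}"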
@@ -1942,13 +1957,14 @@ function start-kube-controller-manager {
container_env="\"env\":[{\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\"}],"
fi

params="$(convert-manifest-params "${params}")"
local paramstring
paramstring="$(convert-manifest-params "${params[*]}")"
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-controller-manager.manifest"
# Evaluate variables.
sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
sed -i -e "s@{{pillar\['kube-controller-manager_docker_tag'\]}}@${kube_rc_docker_tag}@g" "${src_file}"
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
sed -i -e "s@{{container_env}}@${container_env}@g" ${src_file}
sed -i -e "s@{{params}}@${paramstring}@g" "${src_file}"
sed -i -e "s@{{container_env}}@${container_env}@g" "${src_file}"
sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}"
sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}"
sed -i -e "s@{{additional_cloud_config_mount}}@@g" "${src_file}"
@@ -1979,38 +1995,41 @@ function start-kube-controller-manager {
# DOCKER_REGISTRY
function start-kube-scheduler {
echo "Start kubernetes scheduler"
create-kubeconfig "kube-scheduler" ${KUBE_SCHEDULER_TOKEN}
create-kubeconfig "kube-scheduler" "${KUBE_SCHEDULER_TOKEN}"
# User and group should never contain characters that need to be quoted
# shellcheck disable=SC2086
prepare-log-file /var/log/kube-scheduler.log ${KUBE_SCHEDULER_RUNASUSER:-2001} ${KUBE_SCHEDULER_RUNASGROUP:-2001}

# Calculate variables and set them in the manifest.
params="${SCHEDULER_TEST_LOG_LEVEL:-"--v=2"} ${SCHEDULER_TEST_ARGS:-}"
params=("${SCHEDULER_TEST_LOG_LEVEL:-"--v=2"}" "${SCHEDULER_TEST_ARGS:-}")
if [[ -n "${FEATURE_GATES:-}" ]]; then
params+=" --feature-gates=${FEATURE_GATES}"
params+=("--feature-gates=${FEATURE_GATES}")
fi

# Scheduler Component Config takes precedence over some flags.
if [[ -n "${KUBE_SCHEDULER_CONFIG:-}" ]]; then
create-kube-scheduler-config
params+=" --config=/etc/srv/kubernetes/kube-scheduler/config"
params+=("--config=/etc/srv/kubernetes/kube-scheduler/config")
else
params+=" --kubeconfig=/etc/srv/kubernetes/kube-scheduler/kubeconfig"
params+=("--kubeconfig=/etc/srv/kubernetes/kube-scheduler/kubeconfig")
if [[ -n "${SCHEDULING_ALGORITHM_PROVIDER:-}" ]]; then
params+=" --algorithm-provider=${SCHEDULING_ALGORITHM_PROVIDER}"
params+=("--algorithm-provider=${SCHEDULING_ALGORITHM_PROVIDER}")
fi
if [[ -n "${SCHEDULER_POLICY_CONFIG:-}" ]]; then
create-kubescheduler-policy-config
params+=" --use-legacy-policy-config"
params+=" --policy-config-file=/etc/srv/kubernetes/kube-scheduler/policy-config"
params+=("--use-legacy-policy-config")
params+=("--policy-config-file=/etc/srv/kubernetes/kube-scheduler/policy-config")
fi
fi

params="$(convert-manifest-params "${params}")"
local paramstring
paramstring="$(convert-manifest-params "${params[*]}")"
local -r kube_scheduler_docker_tag=$(cat "${KUBE_HOME}/kube-docker-files/kube-scheduler.docker_tag")

# Remove salt comments and replace variables with values.
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-scheduler.manifest"

sed -i -e "s@{{params}}@${params}@g" "${src_file}"
sed -i -e "s@{{params}}@${paramstring}@g" "${src_file}"
sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
sed -i -e "s@{{pillar\['kube-scheduler_docker_tag'\]}}@${kube_scheduler_docker_tag}@g" "${src_file}"
sed -i -e "s@{{cpurequest}}@${KUBE_SCHEDULER_CPU_REQUEST}@g" "${src_file}"
@@ -2028,18 +2047,18 @@ function start-cluster-autoscaler {
if [[ "${ENABLE_CLUSTER_AUTOSCALER:-}" == "true" ]]; then
echo "Start kubernetes cluster autoscaler"
setup-addon-manifests "addons" "rbac/cluster-autoscaler"
create-kubeconfig "cluster-autoscaler" ${KUBE_CLUSTER_AUTOSCALER_TOKEN}
create-kubeconfig "cluster-autoscaler" "${KUBE_CLUSTER_AUTOSCALER_TOKEN}"
prepare-log-file /var/log/cluster-autoscaler.log

# Remove salt comments and replace variables with values
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/cluster-autoscaler.manifest"

local params="${AUTOSCALER_MIG_CONFIG} ${CLOUD_CONFIG_OPT} ${AUTOSCALER_EXPANDER_CONFIG:---expander=price}"
params+=" --kubeconfig=/etc/srv/kubernetes/cluster-autoscaler/kubeconfig"
local params=("${AUTOSCALER_MIG_CONFIG}" "${CLOUD_CONFIG_OPT}" "${AUTOSCALER_EXPANDER_CONFIG:---expander=price}")
params+=("--kubeconfig=/etc/srv/kubernetes/cluster-autoscaler/kubeconfig")

# split the params into separate arguments passed to binary
local params_split
params_split=$(eval "for param in $params; do echo -n \\\"\$param\\\",; done")
params_split=$(eval 'for param in "${params[@]}"; do echo -n "$param",; done')
params_split=${params_split%?}

sed -i -e "s@{{params}}@${params_split}@g" "${src_file}"
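In start-cluster-autoscaler the same array conversion feeds the existing `eval` loop, which now iterates `"${params[@]}"` and emits a comma-separated list, with the trailing comma trimmed by `${params_split%?}`. A standalone sketch of that joining step, using placeholder flags and dropping the `eval` wrapper (the diff keeps it; the loop itself works the same way run directly):

    #!/usr/bin/env bash
    params=("--expander=price" "--kubeconfig=/etc/srv/kubernetes/cluster-autoscaler/kubeconfig")
    # Join the array elements with commas.
    params_split=$(for param in "${params[@]}"; do echo -n "${param}",; done)
    params_split=${params_split%?}   # drop the trailing comma
    echo "${params_split}"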
@@ -2109,6 +2128,8 @@ function download-extra-addons {
#
# $1: URL path after /computeMetadata/v1/ (without heading slash).
function get-metadata-value {
# We do not want quotes for CURL_RETRY_CONNREFUSED
# shellcheck disable=SC2086
curl \
--retry 5 \
--retry-delay 3 \
@@ -2130,16 +2151,13 @@ function copy-manifests {
if [[ ! -d "${dst_dir}" ]]; then
mkdir -p "${dst_dir}"
fi
local files=$(find "${src_dir}" -maxdepth 1 -name "*.yaml")
if [[ -n "${files}" ]]; then
if [[ -n "$(ls "${src_dir}"/*.yaml 2>/dev/null)" ]]; then
cp "${src_dir}/"*.yaml "${dst_dir}"
fi
files=$(find "${src_dir}" -maxdepth 1 -name "*.json")
if [[ -n "${files}" ]]; then
if [[ -n "$(ls "${src_dir}"/*.json 2>/dev/null)" ]]; then
cp "${src_dir}/"*.json "${dst_dir}"
fi
files=$(find "${src_dir}" -maxdepth 1 -name "*.yaml.in")
if [[ -n "${files}" ]]; then
if [[ -n "$(ls "${src_dir}"/*.yaml.in 2>/dev/null)" ]]; then
cp "${src_dir}/"*.yaml.in "${dst_dir}"
fi
chown -R root:root "${dst_dir}"
@@ -2279,7 +2297,7 @@ function update-daemon-set-prometheus-to-sd-parameters {
# Removes all lines between two patterns (throws away prometheus-to-sd)
sed -i -e "/# BEGIN_PROMETHEUS_TO_SD/,/# END_PROMETHEUS_TO_SD/d" "$1"
else
update-prometheus-to-sd-parameters $1
update-prometheus-to-sd-parameters "$1"
fi
}

@@ -2336,10 +2354,10 @@ function setup-fluentd {
sed -i -e "s@{{ fluentd_gcp_yaml_version }}@${fluentd_gcp_yaml_version}@g" "${fluentd_gcp_scaler_yaml}"
fluentd_gcp_version="${FLUENTD_GCP_VERSION:-1.6.17}"
sed -i -e "s@{{ fluentd_gcp_version }}@${fluentd_gcp_version}@g" "${fluentd_gcp_yaml}"
update-daemon-set-prometheus-to-sd-parameters ${fluentd_gcp_yaml}
start-fluentd-resource-update ${fluentd_gcp_yaml}
update-container-runtime ${fluentd_gcp_configmap_yaml}
update-node-journal ${fluentd_gcp_configmap_yaml}
update-daemon-set-prometheus-to-sd-parameters "${fluentd_gcp_yaml}"
start-fluentd-resource-update "${fluentd_gcp_yaml}"
update-container-runtime "${fluentd_gcp_configmap_yaml}"
update-node-journal "${fluentd_gcp_configmap_yaml}"
}

# Sets up the manifests of kube-dns for k8s addons.
@@ -2352,7 +2370,7 @@ function setup-kube-dns-manifest {
cat > "${kubedns_file}" <<EOF
$CUSTOM_KUBE_DNS_YAML
EOF
update-prometheus-to-sd-parameters ${kubedns_file}
update-prometheus-to-sd-parameters "${kubedns_file}"
fi
# Replace the salt configurations with variable values.
sed -i -e "s@{{ *pillar\['dns_domain'\] *}}@${DNS_DOMAIN}@g" "${kubedns_file}"
@@ -2415,7 +2433,9 @@ function start-kube-addons {
local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty"
local -r dst_dir="/etc/kubernetes/addons"

create-kubeconfig "addon-manager" ${ADDON_MANAGER_TOKEN}
create-kubeconfig "addon-manager" "${ADDON_MANAGER_TOKEN}"
# User and group should never contain characters that need to be quoted
# shellcheck disable=SC2086
prepare-log-file /var/log/kube-addon-manager.log ${KUBE_ADDON_MANAGER_RUNASUSER:-2002} ${KUBE_ADDON_MANAGER_RUNASGROUP:-2002}

# prep addition kube-up specific rbac objects
@@ -2443,8 +2463,8 @@ EOF
prepare-kube-proxy-manifest-variables "$src_dir/kube-proxy/kube-proxy-ds.yaml"
setup-addon-manifests "addons" "kube-proxy"
fi
if ([[ "${ENABLE_CLUSTER_LOGGING:-}" == "true" ]] &&
[[ "${LOGGING_DESTINATION:-}" == "gcp" ]]); then
if [[ "${ENABLE_CLUSTER_LOGGING:-}" == "true" ]] &&
[[ "${LOGGING_DESTINATION:-}" == "gcp" ]]; then
if [[ "${ENABLE_METADATA_AGENT:-}" == "stackdriver" ]]; then
metadata_agent_cpu_request="${METADATA_AGENT_CPU_REQUEST:-40m}"
metadata_agent_memory_request="${METADATA_AGENT_MEMORY_REQUEST:-50Mi}"
@@ -2481,7 +2501,7 @@ EOF
fi
if [[ "${ENABLE_NODE_TERMINATION_HANDLER:-}" == "true" ]]; then
setup-addon-manifests "addons" "node-termination-handler"
setup-node-termination-handler-manifest
setup-node-termination-handler-manifest ''
fi
# Setting up the konnectivity-agent daemonset
if [[ "${ENABLE_EGRESS_VIA_KONNECTIVITY_SERVICE:-false}" == "true" ]]; then
@@ -2496,7 +2516,7 @@ EOF
BASE_ADDON_DIR=${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty
BASE_DNS_DIR=${BASE_ADDON_DIR}/dns
NEW_DNS_DIR=${BASE_ADDON_DIR}/0-dns
mkdir ${NEW_DNS_DIR} && mv ${BASE_DNS_DIR}/* ${NEW_DNS_DIR} && rm -r ${BASE_DNS_DIR}
mkdir "${NEW_DNS_DIR}" && mv "${BASE_DNS_DIR}"/* "${NEW_DNS_DIR}" && rm -r "${BASE_DNS_DIR}"
if [[ "${CLUSTER_DNS_CORE_DNS:-}" == "true" ]]; then
setup-coredns-manifest
else
@@ -2613,7 +2633,7 @@ function start-lb-controller {
prepare-log-file /var/log/glbc.log
setup-addon-manifests "addons" "cluster-loadbalancing/glbc"
setup-addon-manifests "addons" "rbac/cluster-loadbalancing/glbc"
create-kubeconfig "l7-lb-controller" ${GCE_GLBC_TOKEN}
create-kubeconfig "l7-lb-controller" "${GCE_GLBC_TOKEN}"

local -r src_manifest="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/glbc.manifest"
local -r dest_manifest="/etc/kubernetes/manifests/glbc.manifest"
@@ -2656,7 +2676,8 @@ function reset-motd {
local -r version="$("${KUBE_HOME}"/bin/kubelet --version=true | cut -f2 -d " ")"
# This logic grabs either a release tag (v1.2.1 or v1.2.1-alpha.1),
# or the git hash that's in the build info.
local gitref="$(echo "${version}" | sed -r "s/(v[0-9]+\.[0-9]+\.[0-9]+)(-[a-z]+\.[0-9]+)?.*/\1\2/g")"
local gitref
gitref="$(echo "${version}" | sed -r "s/(v[0-9]+\.[0-9]+\.[0-9]+)(-[a-z]+\.[0-9]+)?.*/\1\2/g")"
local devel=""
if [[ "${gitref}" != "${version}" ]]; then
devel="
@@ -2693,6 +2714,7 @@ function override-kubectl {

# source the file explicitly otherwise we have
# issues on a ubuntu OS image finding the kubectl
# shellcheck disable=SC1091
source /etc/profile.d/kube_env.sh

# Add ${KUBE_HOME}/bin into sudoer secure path.
@@ -2716,7 +2738,7 @@ function override-pv-recycler {
PV_RECYCLER_VOLUME="{\"name\": \"pv-recycler-mount\",\"hostPath\": {\"path\": \"${PV_RECYCLER_OVERRIDE_TEMPLATE}\", \"type\": \"FileOrCreate\"}},"
PV_RECYCLER_MOUNT="{\"name\": \"pv-recycler-mount\",\"mountPath\": \"${PV_RECYCLER_OVERRIDE_TEMPLATE}\", \"readOnly\": true},"

cat > ${PV_RECYCLER_OVERRIDE_TEMPLATE} <<EOF
cat > "${PV_RECYCLER_OVERRIDE_TEMPLATE}" <<EOF
version: v1
kind: Pod
metadata:
@@ -2824,8 +2846,8 @@ EOF
fi

# Reuse docker group for containerd.
local containerd_gid="$(cat /etc/group | grep ^docker: | cut -d: -f 3)"
if [[ ! -z "${containerd_gid:-}" ]]; then
local -r containerd_gid="$(grep ^docker: /etc/group | cut -d: -f 3)"
if [[ -n "${containerd_gid:-}" ]]; then
cat >> "${config_path}" <<EOF
# reuse id of the docker group
[grpc]
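The containerd group lookup above drops the `cat /etc/group | grep` pipeline in favour of letting grep read the file itself (the SC2002 "useless use of cat" cleanup) and switches the `! -z` test to `-n`, as elsewhere in the commit. The equivalent lookup in isolation, as a small sketch:

    #!/usr/bin/env bash
    # Read the docker group id straight from /etc/group; empty if the group is absent.
    containerd_gid="$(grep ^docker: /etc/group | cut -d: -f 3)"
    if [[ -n "${containerd_gid:-}" ]]; then
      echo "docker group id: ${containerd_gid}"
    fi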
@@ -2876,7 +2898,7 @@ function main() {
exit 2
fi
fi
echo "Version : " $(${PYTHON} -V 2>&1)
echo "Version : $(${PYTHON} -V 2>&1)"

if [[ ! -e "${KUBE_HOME}/kube-env" ]]; then
echo "The ${KUBE_HOME}/kube-env file does not exist!! Terminate cluster initialization."
@@ -2935,13 +2957,13 @@ function main() {
gke-master-start
else
create-node-pki
create-kubelet-kubeconfig ${KUBERNETES_MASTER_NAME}
create-kubelet-kubeconfig "${KUBERNETES_MASTER_NAME}"
if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]]; then
create-kubeproxy-user-kubeconfig
fi
if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then
if [[ -n "${NODE_PROBLEM_DETECTOR_TOKEN:-}" ]]; then
create-node-problem-detector-kubeconfig ${KUBERNETES_MASTER_NAME}
create-node-problem-detector-kubeconfig "${KUBERNETES_MASTER_NAME}"
elif [[ -f "/var/lib/kubelet/kubeconfig" ]]; then
create-node-problem-detector-kubeconfig-from-kubelet
else
@@ -1,4 +1,3 @@
./cluster/gce/gci/configure-helper.sh
./cluster/gce/gci/configure.sh
./cluster/gce/gci/master-helper.sh
./cluster/gce/util.sh