mirror of https://github.com/k3s-io/kubernetes.git

Merge pull request #20681 from justinsb/aws_sync_kubeup_gce_step3

Auto commit by PR queue bot
@@ -574,11 +574,14 @@ function upload-server-tars() {
   SERVER_BINARY_TAR_HASH=
   SALT_TAR_URL=
   SALT_TAR_HASH=
+  BOOTSTRAP_SCRIPT_URL=
+  BOOTSTRAP_SCRIPT_HASH=
 
   ensure-temp-dir
 
   SERVER_BINARY_TAR_HASH=$(sha1sum-file "${SERVER_BINARY_TAR}")
   SALT_TAR_HASH=$(sha1sum-file "${SALT_TAR}")
+  BOOTSTRAP_SCRIPT_HASH=$(sha1sum-file "${BOOTSTRAP_SCRIPT}")
 
   if [[ -z ${AWS_S3_BUCKET-} ]]; then
     local project_hash=
@@ -637,12 +640,13 @@ function upload-server-tars() {
   mkdir ${local_dir}
 
   echo "+++ Staging server tars to S3 Storage: ${AWS_S3_BUCKET}/${staging_path}"
-  local server_binary_path="${staging_path}/${SERVER_BINARY_TAR##*/}"
   cp -a "${SERVER_BINARY_TAR}" ${local_dir}
   cp -a "${SALT_TAR}" ${local_dir}
+  cp -a "${BOOTSTRAP_SCRIPT}" ${local_dir}
 
   aws s3 sync --region ${s3_bucket_location} --exact-timestamps ${local_dir} "s3://${AWS_S3_BUCKET}/${staging_path}/"
 
+  local server_binary_path="${staging_path}/${SERVER_BINARY_TAR##*/}"
   aws s3api put-object-acl --region ${s3_bucket_location} --bucket ${AWS_S3_BUCKET} --key "${server_binary_path}" --grant-read 'uri="http://acs.amazonaws.com/groups/global/AllUsers"'
   SERVER_BINARY_TAR_URL="${s3_url_base}/${AWS_S3_BUCKET}/${server_binary_path}"
 
@@ -650,9 +654,14 @@ function upload-server-tars() {
   aws s3api put-object-acl --region ${s3_bucket_location} --bucket ${AWS_S3_BUCKET} --key "${salt_tar_path}" --grant-read 'uri="http://acs.amazonaws.com/groups/global/AllUsers"'
   SALT_TAR_URL="${s3_url_base}/${AWS_S3_BUCKET}/${salt_tar_path}"
 
+  local bootstrap_script_path="${staging_path}/${BOOTSTRAP_SCRIPT##*/}"
+  aws s3api put-object-acl --region ${s3_bucket_location} --bucket ${AWS_S3_BUCKET} --key "${bootstrap_script_path}" --grant-read 'uri="http://acs.amazonaws.com/groups/global/AllUsers"'
+  BOOTSTRAP_SCRIPT_URL="${s3_url_base}/${AWS_S3_BUCKET}/${bootstrap_script_path}"
+
   echo "Uploaded server tars:"
   echo "  SERVER_BINARY_TAR_URL: ${SERVER_BINARY_TAR_URL}"
   echo "  SALT_TAR_URL: ${SALT_TAR_URL}"
+  echo "  BOOTSTRAP_SCRIPT_URL: ${BOOTSTRAP_SCRIPT_URL}"
 }
 
 # Adds a tag to an AWS resource
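Taken together, the three hunks above fold the bootstrap script into the existing stage-and-publish flow: hash each artifact, copy it into a local staging dir, sync the dir to S3, then mark each object world-readable and derive its public URL. A minimal standalone sketch of that pattern, where the bucket, region, and paths are illustrative placeholders rather than values from the PR:

    #!/bin/bash
    # Sketch of the stage-and-publish pattern used by upload-server-tars.
    # BUCKET, REGION, ARTIFACT and STAGING_PATH are hypothetical placeholders.
    BUCKET=my-kube-staging
    REGION=us-east-1
    ARTIFACT=./server.tar.gz
    STAGING_PATH=devel

    # Hash the artifact (the script's sha1sum-file helper wraps sha1sum like this)
    HASH=$(sha1sum "${ARTIFACT}" | awk '{print $1}')

    # Sync the staging dir, then mark the single object world-readable
    aws s3 sync --region "${REGION}" --exact-timestamps "$(dirname "${ARTIFACT}")" "s3://${BUCKET}/${STAGING_PATH}/"
    aws s3api put-object-acl --region "${REGION}" --bucket "${BUCKET}" \
      --key "${STAGING_PATH}/$(basename "${ARTIFACT}")" \
      --grant-read 'uri="http://acs.amazonaws.com/groups/global/AllUsers"'

    echo "URL: https://s3.amazonaws.com/${BUCKET}/${STAGING_PATH}/$(basename "${ARTIFACT}") (sha1: ${HASH})"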
@@ -813,11 +822,14 @@ function kube-up {
 
+  ensure-temp-dir
+
+  create-bootstrap-script
 
   upload-server-tars
 
   ensure-iam-profiles
 
   load-or-gen-kube-basicauth
   load-or-gen-kube-bearertoken
 
   ssh-key-setup
 
@@ -916,6 +928,24 @@ function kube-up {
   check-cluster
 }
 
+# Builds the bootstrap script and saves it to a local temp file
+# Sets BOOTSTRAP_SCRIPT to the path of the script
+function create-bootstrap-script() {
+  ensure-temp-dir
+
+  BOOTSTRAP_SCRIPT="${KUBE_TEMP}/bootstrap-script"
+
+  (
+    # Include the default functions from the GCE configure-vm script
+    sed '/^#+AWS_OVERRIDES_HERE/,$d' "${KUBE_ROOT}/cluster/gce/configure-vm.sh"
+    # Include the AWS override functions
+    cat "${KUBE_ROOT}/cluster/aws/templates/configure-vm-aws.sh"
+    cat "${KUBE_ROOT}/cluster/aws/templates/format-disks.sh"
+    # Include the GCE configure-vm directly-executed code
+    sed -e '1,/^#+AWS_OVERRIDES_HERE/d' "${KUBE_ROOT}/cluster/gce/configure-vm.sh"
+  ) > "${BOOTSTRAP_SCRIPT}"
+}
+
 # Starts the master node
 function start-master() {
   # Ensure RUNTIME_CONFIG is populated
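The two sed invocations split gce/configure-vm.sh at the #+AWS_OVERRIDES_HERE marker: everything before the marker (the shared function definitions), then the AWS overrides, then everything after the marker (the directly-executed code). Because bash uses the last definition of a function, the overrides win. A self-contained toy demonstrating the same splice, with hypothetical file names:

    #!/bin/bash
    # Toy demo of the marker-based splicing done by create-bootstrap-script.
    cat > base.sh <<'EOF'
    say() { echo "base say"; }
    #+AWS_OVERRIDES_HERE
    say   # directly-executed code runs whichever definition came last
    EOF
    cat > overrides.sh <<'EOF'
    say() { echo "override say"; }
    EOF

    (
      sed '/^#+AWS_OVERRIDES_HERE/,$d' base.sh      # definitions before the marker
      cat overrides.sh                              # overrides win: defined later
      sed -e '1,/^#+AWS_OVERRIDES_HERE/d' base.sh   # code after the marker
    ) > spliced.sh

    bash spliced.sh   # prints: override say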
@@ -927,7 +957,13 @@ function start-master() {
   # Get or create master elastic IP
   ensure-master-ip
 
-  create-certs "" # TODO: Should we pass ELB name / elastic IP ?
+  # We have to make sure that the cert is valid for API_SERVERS
+  # i.e. we likely have to pass ELB name / elastic IP in future
+  create-certs "${KUBE_MASTER_IP}" "${MASTER_INTERNAL_IP}"
+
+  # This key is no longer needed, and this enables us to get under the 16KB size limit
+  KUBECFG_CERT_BASE64=""
+  KUBECFG_KEY_BASE64=""
 
   write-master-env
 
@@ -937,28 +973,20 @@ function start-master() {
     echo "mkdir -p /var/cache/kubernetes-install"
     echo "cd /var/cache/kubernetes-install"
 
-    echo "cat > kube-env.yaml << __EOF_MASTER_KUBE_ENV_YAML"
+    echo "cat > kube_env.yaml << __EOF_MASTER_KUBE_ENV_YAML"
     cat ${KUBE_TEMP}/master-kube-env.yaml
     # TODO: get rid of these exceptions / harmonize with common or GCE
-    echo "SALT_MASTER: $(yaml-quote ${MASTER_INTERNAL_IP:-})"
     echo "DOCKER_STORAGE: $(yaml-quote ${DOCKER_STORAGE:-})"
-    echo "MASTER_EXTRA_SANS: $(yaml-quote ${MASTER_EXTRA_SANS:-})"
+    echo "API_SERVERS: $(yaml-quote ${MASTER_INTERNAL_IP:-})"
     echo "__EOF_MASTER_KUBE_ENV_YAML"
-
-    grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/common.sh"
-    grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/extract-kube-env.sh"
-    grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/format-disks.sh"
-    grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/setup-master-pd.sh"
-    grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/create-dynamic-salt-files.sh"
-    grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/download-release.sh"
-    grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/salt-master.sh"
+    echo ""
+    echo "wget -O bootstrap ${BOOTSTRAP_SCRIPT_URL}"
+    echo "chmod +x bootstrap"
+    echo "./bootstrap"
   ) > "${KUBE_TEMP}/master-user-data"
 
-  # We're running right up against the 16KB limit
-  # Remove all comment lines and then put back the bin/bash shebang
-  cat "${KUBE_TEMP}/master-user-data" | sed -e 's/^[[:blank:]]*#.*$//' | sed -e '/^[[:blank:]]*$/d' > "${KUBE_TEMP}/master-user-data.tmp"
-  echo '#! /bin/bash' | cat - "${KUBE_TEMP}/master-user-data.tmp" > "${KUBE_TEMP}/master-user-data"
-  rm "${KUBE_TEMP}/master-user-data.tmp"
+  # Compress the data to fit under the 16KB limit (cloud-init accepts compressed data)
+  gzip "${KUBE_TEMP}/master-user-data"
 
   echo "Starting Master"
   master_id=$($AWS_CMD run-instances \
@@ -971,7 +999,7 @@ function start-master() {
     --security-group-ids ${MASTER_SG_ID} \
     --associate-public-ip-address \
     --block-device-mappings "${MASTER_BLOCK_DEVICE_MAPPINGS}" \
-    --user-data file://${KUBE_TEMP}/master-user-data \
+    --user-data fileb://${KUBE_TEMP}/master-user-data.gz \
    --query Instances[].InstanceId)
   add-tag $master_id Name $MASTER_NAME
   add-tag $master_id Role $MASTER_TAG
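EC2 caps user-data at 16KB, which is why the comment-stripping hack above gives way to gzip (cloud-init transparently decompresses gzipped user-data), and why the CLI argument changes from file:// to fileb://: fileb:// hands the AWS CLI the bytes verbatim, whereas file:// would try to treat the gzip output as text. A hedged sketch of the size check one might bolt on, with an illustrative path (the guard itself is not part of the PR):

    #!/bin/bash
    # Sketch: compress user-data and verify it fits EC2's 16KB limit before launch.
    USER_DATA=/tmp/master-user-data   # illustrative path
    gzip -f "${USER_DATA}"

    size=$(wc -c < "${USER_DATA}.gz")
    if (( size > 16384 )); then
      echo "user-data is ${size} bytes; exceeds the 16KB EC2 limit" >&2
      exit 1
    fi

    # fileb:// (not file://) so the AWS CLI sends the gzip bytes untouched:
    # aws ec2 run-instances ... --user-data "fileb://${USER_DATA}.gz" ...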
@@ -1014,60 +1042,6 @@ function start-master() {
     attempt=$(($attempt+1))
     sleep 10
   done
 
-  # Check for SSH connectivity
-  attempt=0
-  while true; do
-    echo -n Attempt "$(($attempt+1))" to check for SSH to master
-    local output
-    local ok=1
-    output=$(ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" ${SSH_USER}@${KUBE_MASTER_IP} uptime 2> $LOG) || ok=0
-    if [[ ${ok} == 0 ]]; then
-      if (( attempt > 30 )); then
-        echo
-        echo "(Failed) output was: ${output}"
-        echo
-        echo -e "${color_red}Unable to ssh to master on ${KUBE_MASTER_IP}. Your cluster is unlikely" >&2
-        echo "to work correctly. Please run ./cluster/kube-down.sh and re-create the" >&2
-        echo -e "cluster. (sorry!)${color_norm}" >&2
-        exit 1
-      fi
-    else
-      echo -e " ${color_green}[ssh to master working]${color_norm}"
-      break
-    fi
-    echo -e " ${color_yellow}[ssh to master not working yet]${color_norm}"
-    attempt=$(($attempt+1))
-    sleep 10
-  done
-
-  # We need the salt-master to be up for the minions to work
-  attempt=0
-  while true; do
-    echo -n Attempt "$(($attempt+1))" to check for salt-master
-    local output
-    local ok=1
-    output=$(ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" ${SSH_USER}@${KUBE_MASTER_IP} pgrep salt-master 2> $LOG) || ok=0
-    if [[ ${ok} == 0 ]]; then
-      if (( attempt > 30 )); then
-        echo
-        echo "(Failed) output was: ${output}"
-        echo
-        echo -e "${color_red}salt-master failed to start on ${KUBE_MASTER_IP}. Your cluster is unlikely" >&2
-        echo "to work correctly. Please run ./cluster/kube-down.sh and re-create the" >&2
-        echo -e "cluster. (sorry!)${color_norm}" >&2
-        exit 1
-      fi
-    else
-      echo -e " ${color_green}[salt-master running]${color_norm}"
-      break
-    fi
-    echo -e " ${color_yellow}[salt-master not working yet]${color_norm}"
-    attempt=$(($attempt+1))
-    sleep 10
-  done
-
   reboot-on-failure ${master_id}
 }
 
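The two deleted blocks are instances of the same bounded-poll idiom: try, sleep 10, give up after roughly 30 attempts. If that behavior ever needs to come back, it factors naturally into a helper; a sketch of that shape (the function name and interface are hypothetical, not part of the PR):

    # Hypothetical helper capturing the shape of the deleted poll loops.
    # Usage: poll-until "description" max_attempts command args...
    poll-until() {
      local what=$1 max=$2; shift 2
      local attempt=0
      until "$@" > /dev/null 2>&1; do
        attempt=$((attempt + 1))
        if (( attempt > max )); then
          echo "Gave up waiting for ${what}" >&2
          return 1
        fi
        echo " [${what} not ready yet]"
        sleep 10
      done
      echo " [${what} ready]"
    }

    # poll-until "ssh to master" 30 ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" "${SSH_USER}@${KUBE_MASTER_IP}" uptime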
# Creates an ASG for the minion nodes
@@ -1076,7 +1050,29 @@ function start-minions() {
   build-runtime-config
 
   echo "Creating minion configuration"
-  generate-minion-user-data > "${KUBE_TEMP}/minion-user-data"
+
+  write-node-env
+
+  (
+    # We pipe this to the ami as a startup script in the user-data field. Requires a compatible ami
+    echo "#! /bin/bash"
+    echo "mkdir -p /var/cache/kubernetes-install"
+    echo "cd /var/cache/kubernetes-install"
+    echo "cat > kube_env.yaml << __EOF_KUBE_ENV_YAML"
+    cat ${KUBE_TEMP}/node-kube-env.yaml
+    # TODO: get rid of these exceptions / harmonize with common or GCE
+    echo "DOCKER_STORAGE: $(yaml-quote ${DOCKER_STORAGE:-})"
+    echo "API_SERVERS: $(yaml-quote ${MASTER_INTERNAL_IP:-})"
+    echo "__EOF_KUBE_ENV_YAML"
+    echo ""
+    echo "wget -O bootstrap ${BOOTSTRAP_SCRIPT_URL}"
+    echo "chmod +x bootstrap"
+    echo "./bootstrap"
+  ) > "${KUBE_TEMP}/node-user-data"
+
+  # Compress the data to fit under the 16KB limit (cloud-init accepts compressed data)
+  gzip "${KUBE_TEMP}/node-user-data"
+
   local public_ip_option
   if [[ "${ENABLE_NODE_PUBLIC_IP}" == "true" ]]; then
     public_ip_option="--associate-public-ip-address"
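Note the quoting dance in that subshell: it echoes a "cat > kube_env.yaml << __EOF_KUBE_ENV_YAML" line into the generated script, so the heredoc is consumed on the instance at boot, while the $(yaml-quote ...) substitutions are expanded now, at generation time. A tiny self-contained illustration of generating a script that itself contains a heredoc, with hypothetical file names:

    #!/bin/bash
    # Generate a script that contains a heredoc, as the user-data builder does.
    (
      echo "#! /bin/bash"
      echo "cat > kube_env.yaml << __EOF_KUBE_ENV_YAML"
      echo "API_SERVERS: '10.0.0.1'"   # expanded NOW, at generation time
      echo "__EOF_KUBE_ENV_YAML"
      echo "cat kube_env.yaml"
    ) > generated.sh

    bash generated.sh   # writes kube_env.yaml on the "instance", then prints it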
@@ -1092,7 +1088,7 @@ function start-minions() {
       --security-groups ${NODE_SG_ID} \
       ${public_ip_option} \
       --block-device-mappings "${NODE_BLOCK_DEVICE_MAPPINGS}" \
-      --user-data "file://${KUBE_TEMP}/minion-user-data"
+      --user-data "fileb://${KUBE_TEMP}/node-user-data.gz"
 
   echo "Creating autoscaling group"
   ${AWS_ASG_CMD} create-auto-scaling-group \
@@ -1137,19 +1133,6 @@ function wait-minions {
 function wait-master() {
   detect-master > $LOG
 
-  # TODO(justinsb): This is really not necessary any more
-  # Wait 3 minutes for cluster to come up.  We hit it with a "highstate" after that to
-  # make sure that everything is well configured.
-  # TODO: Can we poll here?
-  echo "Waiting 3 minutes for cluster to settle"
-  local i
-  for (( i=0; i < 6*3; i++)); do
-    printf "."
-    sleep 10
-  done
-  echo "Re-running salt highstate"
-  ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" ${SSH_USER}@${KUBE_MASTER_IP} sudo salt '*' state.highstate > $LOG
-
   echo "Waiting for cluster initialization."
   echo
   echo "  This will continually check to see if the API for kubernetes is reachable."
@@ -1167,25 +1150,15 @@ function wait-master() {
 }
 
 # Creates the ~/.kube/config file, getting the information from the master
-# The master much be running and set in KUBE_MASTER_IP
+# The master must be running and set in KUBE_MASTER_IP
 function build-config() {
-  # TODO use token instead of kube_auth
-  export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt"
-  export KUBE_KEY="/tmp/$RANDOM-kubecfg.key"
-  export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt"
-  export CONTEXT="aws_${INSTANCE_PREFIX}"
-
-  local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
-
-  # TODO: generate ADMIN (and KUBELET) tokens and put those in the master's
-  # config file.  Distribute the same way the htpasswd is done.
+  export KUBE_CERT="${CERT_DIR}/pki/issued/kubecfg.crt"
+  export KUBE_KEY="${CERT_DIR}/pki/private/kubecfg.key"
+  export CA_CERT="${CERT_DIR}/pki/ca.crt"
+  export CONTEXT="${PROJECT}_${INSTANCE_PREFIX}"
   (
-    umask 077
-    ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" "${SSH_USER}@${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/kubecfg.crt >"${KUBE_CERT}" 2>"$LOG"
-    ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" "${SSH_USER}@${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/kubecfg.key >"${KUBE_KEY}" 2>"$LOG"
-    ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" "${SSH_USER}@${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/ca.crt >"${CA_CERT}" 2>"$LOG"
-
-    create-kubeconfig
+    umask 077
+    create-kubeconfig
   )
 }
 
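With the certs now read from the local CERT_DIR produced by create-certs, rather than scp'd off the master, build-config only needs umask plus create-kubeconfig (defined in cluster/common.sh). Under the hood that amounts to the usual kubectl trio; a rough equivalent, assuming the variables exported above, is sketched here (the real helper also wires in tokens and basic auth):

    # Rough equivalent of what create-kubeconfig assembles from the exported vars.
    kubectl config set-cluster "${CONTEXT}" \
      --certificate-authority="${CA_CERT}" \
      --server="https://${KUBE_MASTER_IP}"
    kubectl config set-credentials "${CONTEXT}" \
      --client-certificate="${KUBE_CERT}" \
      --client-key="${KUBE_KEY}"
    kubectl config set-context "${CONTEXT}" \
      --cluster="${CONTEXT}" --user="${CONTEXT}"
    kubectl config use-context "${CONTEXT}"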
@@ -1392,6 +1365,7 @@ function kube-push {
 
   # Make sure we have the tar files staged on Google Storage
   find-release-tars
+  create-bootstrap-script
   upload-server-tars
 
   (