@@ -22,32 +22,12 @@ source $(dirname ${BASH_SOURCE})/${KUBE_CONFIG_FILE-"config-default.sh"}

AWS_CMD="aws --output json ec2"
# Find the release to use. If passed in, go with that and validate. If not use
# the release/config.sh version assuming a dev workflow.
function find-release() {
  if [ -n "$1" ]; then
    RELEASE_NORMALIZED=$1
  else
    local RELEASE_CONFIG_SCRIPT=$(dirname $0)/../../release/aws/config.sh
    if [ -f $(dirname $0)/../../release/aws/config.sh ]; then
      . $RELEASE_CONFIG_SCRIPT
      normalize_release
    fi
  fi

  # Do one final check that we have a good release
  if ! aws s3 ls $RELEASE_NORMALIZED/$RELEASE_TAR_FILE | grep $RELEASE_TAR_FILE > /dev/null; then
    echo "Could not find release tar. If developing, make sure you have run src/release/release.sh to create a release."
    exit 1
  fi
  echo "Release: ${RELEASE_NORMALIZED}"
}

function json_val {
  python -c 'import json,sys;obj=json.load(sys.stdin);print obj'$1''
}
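# Illustrative usage of json_val (not part of the original script; the JSON
# path mirrors a call made later in this file): the argument is appended
# verbatim to the Python expression, so it must be a valid chain of subscripts
# into the JSON that the aws CLI prints, e.g.
#   VPC_ID=$($AWS_CMD create-vpc --cidr-block 172.20.0.0/16 | json_val '["Vpc"]["VpcId"]')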
# TODO (ayurchuk) Refactor the get_* functions to use filters
# TODO (bburns) Parameterize this for multiple cluster per project
function get_instance_ids {
  python -c 'import json,sys; lst = [str(instance["InstanceId"]) for reservation in json.load(sys.stdin)["Reservations"] for instance in reservation["Instances"] for tag in instance["Tags"] if tag["Value"].startswith("kubernetes-minion") or tag["Value"].startswith("kubernetes-master")]; print " ".join(lst)'
}
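# Illustrative usage (assumed, mirroring how the other get_* helpers are piped
# in this file): feed it the describe-instances JSON and it prints the ids of
# every kubernetes-master / kubernetes-minion instance on one line, e.g.
#   instance_ids=$($AWS_CMD describe-instances | get_instance_ids)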
@@ -82,10 +62,10 @@ function get_instance_public_ip {

function detect-master () {
  KUBE_MASTER=${MASTER_NAME}
  if [ -z "$KUBE_MASTER_IP" ]; then
  if [[ -z "${KUBE_MASTER_IP-}" ]]; then
    KUBE_MASTER_IP=$($AWS_CMD describe-instances | get_instance_public_ip $MASTER_NAME)
  fi
  if [ -z "$KUBE_MASTER_IP" ]; then
  if [[ -z "${KUBE_MASTER_IP-}" ]]; then
    echo "Could not detect Kubernetes master node. Make sure you've launched a cluster with 'kube-up.sh'"
    exit 1
  fi
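# Illustrative call sequence (assumed, not part of this diff): detect-master
# takes no arguments and fills in KUBE_MASTER / KUBE_MASTER_IP for the steps
# that follow, e.g.
#   detect-master
#   echo "Using master: ${KUBE_MASTER} (ip: ${KUBE_MASTER_IP})"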
@@ -105,26 +85,6 @@ function detect-minions () {
  fi
}
function get-password {
  file=${HOME}/.kubernetes_auth
  if [ -e ${file} ]; then
    user=$(cat $file | python -c 'import json,sys;print json.load(sys.stdin)["User"]')
    passwd=$(cat $file | python -c 'import json,sys;print json.load(sys.stdin)["Password"]')
    return
  fi
  user=admin
  passwd=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')

  # Store password for reuse.
  cat << EOF > ~/.kubernetes_auth
{
  "User": "$user",
  "Password": "$passwd"
}
EOF
  chmod 0600 ~/.kubernetes_auth
}
# Verify prereqs
function verify-prereqs {
  if [ "$(which aws)" == "" ]; then
@@ -133,51 +93,163 @@ function verify-prereqs {
  fi
}
function kube-up {

  # Find the release to use. Generally it will be passed when doing a 'prod'
  # install and will default to the release/config.sh version when doing a
  # developer up.
  find-release $1

  # Build up start up script for master
# Create a temp dir that'll be deleted at the end of this bash session.
#
# Vars set:
#   KUBE_TEMP
function ensure-temp-dir {
  if [[ -z ${KUBE_TEMP-} ]]; then
    KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX)
    trap "rm -rf ${KUBE_TEMP}" EXIT
    trap 'rm -rf "${KUBE_TEMP}"' EXIT
  fi
}
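# Note on the quoting change in the trap above: with double quotes the temp
# path is expanded when the trap is installed and ends up unquoted when it
# runs at EXIT; with single quotes the expansion is deferred until the trap
# fires and "${KUBE_TEMP}" stays quoted, which is safer if the path ever
# contains whitespace.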
  get-password
  echo "Using password: $user:$passwd"
  python $(dirname $0)/../../third_party/htpasswd/htpasswd.py -b -c ${KUBE_TEMP}/htpasswd $user $passwd
  HTPASSWD=$(cat ${KUBE_TEMP}/htpasswd)

# Verify and find the various tar files that we are going to use on the server.
#
# Vars set:
#   SERVER_BINARY_TAR
#   SALT_TAR
function find-release-tars {
  SERVER_BINARY_TAR="${KUBE_ROOT}/server/kubernetes-server-linux-amd64.tar.gz"
  if [[ ! -f "$SERVER_BINARY_TAR" ]]; then
    SERVER_BINARY_TAR="${KUBE_ROOT}/_output/release-tars/kubernetes-server-linux-amd64.tar.gz"
  fi
  if [[ ! -f "$SERVER_BINARY_TAR" ]]; then
    echo "!!! Cannot find kubernetes-server-linux-amd64.tar.gz"
    exit 1
  fi
  if [ ! -f $AWS_SSH_KEY ]; then
    ssh-keygen -f $AWS_SSH_KEY -N ''
  fi
  SALT_TAR="${KUBE_ROOT}/server/kubernetes-salt.tar.gz"
  if [[ ! -f "$SALT_TAR" ]]; then
    SALT_TAR="${KUBE_ROOT}/_output/release-tars/kubernetes-salt.tar.gz"
  fi
  if [[ ! -f "$SALT_TAR" ]]; then
    echo "!!! Cannot find kubernetes-salt.tar.gz"
    exit 1
  fi
}
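# The two locations probed above correspond to the two supported layouts
# (illustrative paths, derived from the checks in find-release-tars): a
# packaged release ships the tarballs under server/, while a development build
# leaves them under _output/release-tars/:
#   ${KUBE_ROOT}/server/kubernetes-server-linux-amd64.tar.gz
#   ${KUBE_ROOT}/_output/release-tars/kubernetes-server-linux-amd64.tar.gz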
# Take the local tar files and upload them to S3. They will then be
# downloaded by the master as part of the start up script for the master.
#
# Assumed vars:
#   SERVER_BINARY_TAR
#   SALT_TAR
# Vars set:
#   SERVER_BINARY_TAR_URL
#   SALT_TAR_URL
function upload-server-tars() {
  SERVER_BINARY_TAR_URL=
  SALT_TAR_URL=

  local project_hash
  local key=$(aws configure get aws_access_key_id)
  if which md5 > /dev/null 2>&1; then
    project_hash=$(md5 -q -s "${USER} ${key}")
  else
    project_hash=$(echo -n "${USER} ${key}" | md5sum)
  fi
  local -r staging_bucket="kubernetes-staging-${project_hash}"

  echo "Uploading to Amazon S3"
  if ! aws s3 ls "s3://${staging_bucket}" > /dev/null 2>&1 ; then
    echo "Creating ${staging_bucket}"
    aws s3 mb "s3://${staging_bucket}"
  fi

  aws s3api put-bucket-acl --bucket $staging_bucket --acl public-read

  local -r staging_path="${staging_bucket}/devel"

  echo "+++ Staging server tars to S3 Storage: ${staging_path}"
  SERVER_BINARY_TAR_URL="${staging_path}/${SERVER_BINARY_TAR##*/}"
  aws s3 cp "${SERVER_BINARY_TAR}" "s3://${SERVER_BINARY_TAR_URL}"
  aws s3api put-object-acl --bucket ${staging_bucket} --key "devel/${SERVER_BINARY_TAR##*/}" --grant-read 'uri="http://acs.amazonaws.com/groups/global/AllUsers"'
  SALT_TAR_URL="${staging_path}/${SALT_TAR##*/}"
  aws s3 cp "${SALT_TAR}" "s3://${SALT_TAR_URL}"
  aws s3api put-object-acl --bucket ${staging_bucket} --key "devel/${SALT_TAR##*/}" --grant-read 'uri="http://acs.amazonaws.com/groups/global/AllUsers"'
}
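# Putting the pieces together (bucket hash shown is illustrative only): the
# server tar ends up at
#   s3://kubernetes-staging-<md5 of "$USER $key">/devel/kubernetes-server-linux-amd64.tar.gz
# and the master later pulls it over plain HTTPS as
#   https://s3-${ZONE}.amazonaws.com/${SERVER_BINARY_TAR_URL}
# which is why the objects are given a public-read ACL above.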
# Ensure that we have a password created for validating to the master. Will
# read from $HOME/.kubernetes_auth if available.
#
# Vars set:
#   KUBE_USER
#   KUBE_PASSWORD
function get-password {
  local file="$HOME/.kubernetes_auth"
  if [[ -r "$file" ]]; then
    KUBE_USER=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["User"]')
    KUBE_PASSWORD=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["Password"]')
    return
  fi
  KUBE_USER=admin
  KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')

  # Remove this code, since in all use cases I can see, we are overwriting this
  # at cluster creation time.
  cat << EOF > "$file"
{
  "User": "$KUBE_USER",
  "Password": "$KUBE_PASSWORD"
}
EOF
  chmod 0600 "$file"
}

function kube-up {
  find-release-tars
  upload-server-tars

  ensure-temp-dir

  get-password
  python "${KUBE_ROOT}/third_party/htpasswd/htpasswd.py" \
    -b -c "${KUBE_TEMP}/htpasswd" "$KUBE_USER" "$KUBE_PASSWORD"
  local htpasswd
  htpasswd=$(cat "${KUBE_TEMP}/htpasswd")
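  # For reference (assumed, based on htpasswd's usual output): the generated
  # ${KUBE_TEMP}/htpasswd file holds a single "user:<hash>" line, and that
  # literal line is what gets inlined into the master's user-data below as
  # MASTER_HTPASSWD.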
  if [ ! -f $AWS_SSH_KEY ]; then
    ssh-keygen -f $AWS_SSH_KEY -N ''
  fi

  $AWS_CMD import-key-pair --key-name kubernetes --public-key-material file://$AWS_SSH_KEY.pub > /dev/null 2>&1 || true
  VPC_ID=$($AWS_CMD create-vpc --cidr-block 172.20.0.0/16 | json_val '["Vpc"]["VpcId"]')
  $AWS_CMD modify-vpc-attribute --vpc-id $VPC_ID --enable-dns-support '{"Value": true}' > /dev/null
  $AWS_CMD modify-vpc-attribute --vpc-id $VPC_ID --enable-dns-hostnames '{"Value": true}' > /dev/null
  $AWS_CMD create-tags --resources $VPC_ID --tags Key=Name,Value=kubernetes-vpc > /dev/null
  SUBNET_ID=$($AWS_CMD create-subnet --cidr-block 172.20.0.0/24 --vpc-id $VPC_ID | json_val '["Subnet"]["SubnetId"]')
  IGW_ID=$($AWS_CMD create-internet-gateway | json_val '["InternetGateway"]["InternetGatewayId"]')
  $AWS_CMD attach-internet-gateway --internet-gateway-id $IGW_ID --vpc-id $VPC_ID > /dev/null
  ROUTE_TABLE_ID=$($AWS_CMD describe-route-tables --filters Name=vpc-id,Values=$VPC_ID | json_val '["RouteTables"][0]["RouteTableId"]')
  $AWS_CMD associate-route-table --route-table-id $ROUTE_TABLE_ID --subnet-id $SUBNET_ID > /dev/null
  $AWS_CMD describe-route-tables --filters Name=vpc-id,Values=$VPC_ID > /dev/null
  $AWS_CMD create-route --route-table-id $ROUTE_TABLE_ID --destination-cidr-block 0.0.0.0/0 --gateway-id $IGW_ID > /dev/null
  SEC_GROUP_ID=$($AWS_CMD create-security-group --group-name kubernetes-sec-group --description kubernetes-sec-group --vpc-id $VPC_ID | json_val '["GroupId"]')
  $AWS_CMD authorize-security-group-ingress --group-id $SEC_GROUP_ID --protocol -1 --port all --cidr 0.0.0.0/0 > /dev/null
  $AWS_CMD import-key-pair --key-name kubernetes --public-key-material file://$AWS_SSH_KEY.pub > /dev/null 2>&1 || true
  VPC_ID=$($AWS_CMD create-vpc --cidr-block 172.20.0.0/16 | json_val '["Vpc"]["VpcId"]')
  $AWS_CMD modify-vpc-attribute --vpc-id $VPC_ID --enable-dns-support '{"Value": true}' > /dev/null
  $AWS_CMD modify-vpc-attribute --vpc-id $VPC_ID --enable-dns-hostnames '{"Value": true}' > /dev/null
  $AWS_CMD create-tags --resources $VPC_ID --tags Key=Name,Value=kubernetes-vpc > /dev/null
  SUBNET_ID=$($AWS_CMD create-subnet --cidr-block 172.20.0.0/24 --vpc-id $VPC_ID | json_val '["Subnet"]["SubnetId"]')
  IGW_ID=$($AWS_CMD create-internet-gateway | json_val '["InternetGateway"]["InternetGatewayId"]')
  $AWS_CMD attach-internet-gateway --internet-gateway-id $IGW_ID --vpc-id $VPC_ID > /dev/null
  ROUTE_TABLE_ID=$($AWS_CMD describe-route-tables --filters Name=vpc-id,Values=$VPC_ID | json_val '["RouteTables"][0]["RouteTableId"]')
  $AWS_CMD associate-route-table --route-table-id $ROUTE_TABLE_ID --subnet-id $SUBNET_ID > /dev/null
  $AWS_CMD describe-route-tables --filters Name=vpc-id,Values=$VPC_ID > /dev/null
  $AWS_CMD create-route --route-table-id $ROUTE_TABLE_ID --destination-cidr-block 0.0.0.0/0 --gateway-id $IGW_ID > /dev/null
  SEC_GROUP_ID=$($AWS_CMD create-security-group --group-name kubernetes-sec-group --description kubernetes-sec-group --vpc-id $VPC_ID | json_val '["GroupId"]')
  $AWS_CMD authorize-security-group-ingress --group-id $SEC_GROUP_ID --protocol -1 --port all --cidr 0.0.0.0/0 > /dev/null
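  # Network layout produced by the calls above: one dedicated VPC
  # (172.20.0.0/16) with a single subnet (172.20.0.0/24) attached to an
  # internet gateway and a default route; minions get fixed private addresses
  # (172.20.0.1${i} below), every instance gets a public IP via
  # --associate-public-ip-address, and a single security group open to
  # 0.0.0.0/0 fronts the whole cluster.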
  (
    echo "#!/bin/bash"
    echo "MASTER_NAME=${MASTER_NAME}"
    echo "MASTER_RELEASE_TAR=${RELEASE_FULL_HTTP_PATH}/master-release.tgz"
    echo "MASTER_HTPASSWD='${HTPASSWD}'"
    grep -v "^#" $(dirname $0)/templates/download-release.sh
    grep -v "^#" $(dirname $0)/templates/salt-master.sh
  ) > ${KUBE_TEMP}/master-start.sh
  (
    # We pipe this to the ami as a startup script in the user-data field. Requires a compatible ami
    echo "#! /bin/bash"
    echo "mkdir -p /var/cache/kubernetes-install"
    echo "cd /var/cache/kubernetes-install"
    echo "readonly MASTER_NAME='${MASTER_NAME}'"
    echo "readonly NODE_INSTANCE_PREFIX='${INSTANCE_PREFIX}-minion'"
    echo "readonly SERVER_BINARY_TAR_URL='https://s3-${ZONE}.amazonaws.com/${SERVER_BINARY_TAR_URL}'"
    echo "readonly SALT_TAR_URL='https://s3-${ZONE}.amazonaws.com/${SALT_TAR_URL}'"
    echo "readonly AWS_ZONE='${ZONE}'"
    echo "readonly MASTER_HTPASSWD='${htpasswd}'"
    echo "readonly PORTAL_NET='${PORTAL_NET}'"
    echo "readonly FLUENTD_ELASTICSEARCH='${FLUENTD_ELASTICSEARCH:-false}'"
    echo "readonly FLUENTD_GCP='false'"
    grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/create-dynamic-salt-files.sh"
    grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/download-release.sh"
    grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/salt-master.sh"
  ) > "${KUBE_TEMP}/master-start.sh"
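  # The generated file is passed to EC2 below via --user-data; on an AMI with
  # cloud-init (the "compatible ami" the comment above asks for) it runs as a
  # boot script on first start, which is how the master bootstraps itself.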
  master_id=$($AWS_CMD run-instances \
  master_id=$($AWS_CMD run-instances \
    --image-id $IMAGE \
    --instance-type $MASTER_SIZE \
    --subnet-id $SUBNET_ID \
@@ -186,102 +258,151 @@ function kube-up {
    --security-group-ids $SEC_GROUP_ID \
    --associate-public-ip-address \
    --user-data file://${KUBE_TEMP}/master-start.sh | json_val '["Instances"][0]["InstanceId"]')
  sleep 3
  $AWS_CMD create-tags --resources $master_id --tags Key=Name,Value=$MASTER_NAME > /dev/null
  sleep 3
  $AWS_CMD create-tags --resources $master_id --tags Key=Role,Value=$MASTER_TAG > /dev/null

  $AWS_CMD create-tags --resources $master_id --tags Key=Name,Value=$MASTER_NAME > /dev/null
  $AWS_CMD create-tags --resources $master_id --tags Key=Role,Value=$MASTER_TAG > /dev/null

  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
    (
      # We pipe this to the ami as a startup script in the user-data field. Requires a compatible ami
      echo "#! /bin/bash"
      echo "MASTER_NAME=${MASTER_NAME}"
      echo "MINION_IP_RANGE=${MINION_IP_RANGES[$i]}"
      grep -v "^#" $(dirname $0)/templates/salt-minion.sh
    ) > ${KUBE_TEMP}/minion-start-${i}.sh
    minion_id=$($AWS_CMD run-instances \
      --image-id $IMAGE \
      --instance-type $MINION_SIZE \
      --subnet-id $SUBNET_ID \
      --private-ip-address 172.20.0.1${i} \
      --key-name kubernetes \
      --security-group-ids $SEC_GROUP_ID \
      --associate-public-ip-address \
      --user-data file://${KUBE_TEMP}/minion-start-${i}.sh | json_val '["Instances"][0]["InstanceId"]')
      echo "MASTER_NAME='${MASTER_NAME}'"
      echo "MINION_IP_RANGE='${MINION_IP_RANGES[$i]}'"
      grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/salt-minion.sh"
    ) > "${KUBE_TEMP}/minion-start-${i}.sh"
    minion_id=$($AWS_CMD run-instances \
      --image-id $IMAGE \
      --instance-type $MINION_SIZE \
      --subnet-id $SUBNET_ID \
      --private-ip-address 172.20.0.1${i} \
      --key-name kubernetes \
      --security-group-ids $SEC_GROUP_ID \
      --associate-public-ip-address \
      --user-data file://${KUBE_TEMP}/minion-start-${i}.sh | json_val '["Instances"][0]["InstanceId"]')
    sleep 3
    n=0
    until [ $n -ge 5 ]; do
      $AWS_CMD create-tags --resources $minion_id --tags Key=Name,Value=${MINION_NAMES[$i]} > /dev/null && break
      n=$[$n+1]
      sleep 15
    done
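    # The retry above presumably guards against the eventually consistent EC2
    # API: a freshly launched instance id may not be taggable immediately, so
    # the tag call is attempted up to five times, 15 seconds apart.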
    $AWS_CMD create-tags --resources $minion_id --tags Key=Name,Value=${MINION_NAMES[$i]} > /dev/null
    $AWS_CMD create-tags --resources $minion_id --tags Key=Role,Value=$MINION_TAG > /dev/null
    $AWS_CMD modify-instance-attribute --instance-id $minion_id --source-dest-check '{"Value": false}' > /dev/null

    # We are not able to add a route to the instance until that instance is in "running" state.
    # This is quite an ugly solution to this problem. In Bash 4 we could use assoc. arrays to do this for
    # all instances at once but we can't be sure we are running Bash 4.
    while true; do
      instance_state=$($AWS_CMD describe-instances --instance-ids $minion_id | expect_instance_states running)
      if [[ "$instance_state" == "" ]]; then
        echo "Minion ${MINION_NAMES[$i]} running"
        $AWS_CMD create-route --route-table-id $ROUTE_TABLE_ID --destination-cidr-block "10.244.$i.0/24" --instance-id $minion_id > /dev/null
        break
      else
        echo "Waiting for minion ${MINION_NAMES[$i]} to spawn"
        echo "Sleeping for 3 seconds..."
        sleep 3
      fi
    done
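    # expect_instance_states is defined elsewhere in this file; judging from
    # the usage above, it prints something only for instances that are NOT yet
    # in the requested state, so empty output means "all running". A minimal
    # sketch of such a filter (an assumption, not the actual helper):
    #   function expect_instance_states {
    #     python -c "import json,sys; print ' '.join(i['State']['Name'] for r in json.load(sys.stdin)['Reservations'] for i in r['Instances'] if i['State']['Name'] != '$1')"
    #   }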
    sleep 3
    n=0
    until [ $n -ge 5 ]; do
      $AWS_CMD create-tags --resources $minion_id --tags Key=Role,Value=$MINION_TAG > /dev/null && break
      n=$[$n+1]
      sleep 15
    done

    sleep 3
    $AWS_CMD modify-instance-attribute --instance-id $minion_id --source-dest-check '{"Value": false}' > /dev/null

  FAIL=0
  for job in `jobs -p`
  do
    wait $job || let "FAIL+=1"
    # We are not able to add a route to the instance until that instance is in "running" state.
    # This is quite an ugly solution to this problem. In Bash 4 we could use assoc. arrays to do this for
    # all instances at once but we can't be sure we are running Bash 4.
    while true; do
      instance_state=$($AWS_CMD describe-instances --instance-ids $minion_id | expect_instance_states running)
      if [[ "$instance_state" == "" ]]; then
        echo "Minion ${MINION_NAMES[$i]} running"
        sleep 10
        $AWS_CMD create-route --route-table-id $ROUTE_TABLE_ID --destination-cidr-block ${MINION_IP_RANGES[$i]} --instance-id $minion_id > /dev/null
        break
      else
        echo "Waiting for minion ${MINION_NAMES[$i]} to spawn"
        echo "Sleeping for 3 seconds..."
        sleep 3
      fi
    done
  if (( $FAIL != 0 )); then
    echo "${FAIL} commands failed. Exiting."
    exit 2
  done

  FAIL=0
  for job in `jobs -p`; do
    wait $job || let "FAIL+=1"
  done
  if (( $FAIL != 0 )); then
    echo "${FAIL} commands failed. Exiting."
    exit 2
  fi
  detect-master > /dev/null
  detect-minions > /dev/null

  # Wait 3 minutes for cluster to come up. We hit it with a "highstate" after that to
  # make sure that everything is well configured.
  echo "Waiting for cluster to settle"
  local i
  for (( i=0; i < 6*3; i++)); do
    printf "."
    sleep 10
  done
  echo "Re-running salt highstate"
  ssh -oStrictHostKeyChecking=no -i ~/.ssh/kube_aws_rsa ubuntu@${KUBE_MASTER_IP} sudo salt '*' state.highstate > /dev/null

  echo "Waiting for cluster initialization."
  echo
  echo " This will continually check to see if the API for kubernetes is reachable."
  echo " This might loop forever if there was some uncaught error during start"
  echo " up."
  echo

  until $(curl --insecure --user ${KUBE_USER}:${KUBE_PASSWORD} --max-time 5 \
    --fail --output /dev/null --silent https://${KUBE_MASTER_IP}/api/v1beta1/pods); do
    printf "."
    sleep 2
  done

  echo "Kubernetes cluster created."
  echo "Sanity checking cluster..."

  sleep 5
  # Don't bail on errors, we want to be able to print some info.
  set +e

  # Basic sanity checking
  for i in ${KUBE_MINION_IP_ADDRESSES[@]}; do
    # Make sure docker is installed
    ssh -oStrictHostKeyChecking=no ubuntu@$i -i ~/.ssh/kube_aws_rsa which docker > /dev/null 2>&1
    if [ "$?" != "0" ]; then
      echo "Docker failed to install on $i. Your cluster is unlikely to work correctly."
      echo "Please run ./cluster/aws/kube-down.sh and re-create the cluster. (sorry!)"
      exit 1
    fi
  done

  echo
  echo "Kubernetes cluster is running. Access the master at:"
  echo
  echo " https://${KUBE_USER}:${KUBE_PASSWORD}@${KUBE_MASTER_IP}"
  echo
  detect-master > /dev/null
  detect-minions > /dev/null
  local kube_cert=".kubecfg.crt"
  local kube_key=".kubecfg.key"
  local ca_cert=".kubernetes.ca.crt"

  echo "Waiting for cluster initialization."
  echo
  echo " This will continually check to see if the API for kubernetes is reachable."
  echo " This might loop forever if there was some uncaught error during start"
  echo " up."
  echo
  # TODO: generate ADMIN (and KUBELET) tokens and put those in the master's
  # config file. Distribute the same way the htpasswd is done.
  (
    umask 077
    ssh -oStrictHostKeyChecking=no -i ~/.ssh/kube_aws_rsa ubuntu@${KUBE_MASTER_IP} sudo cat /usr/share/nginx/kubecfg.crt >"${HOME}/${kube_cert}" 2>/dev/null
    ssh -oStrictHostKeyChecking=no -i ~/.ssh/kube_aws_rsa ubuntu@${KUBE_MASTER_IP} sudo cat /usr/share/nginx/kubecfg.key >"${HOME}/${kube_key}" 2>/dev/null
    ssh -oStrictHostKeyChecking=no -i ~/.ssh/kube_aws_rsa ubuntu@${KUBE_MASTER_IP} sudo cat /usr/share/nginx/ca.crt >"${HOME}/${ca_cert}" 2>/dev/null

    until $(curl --insecure --user ${user}:${passwd} --max-time 5 \
      --fail --output /dev/null --silent https://${KUBE_MASTER_IP}/api/v1beta1/pods); do
      printf "."
      sleep 2
    done
    cat << EOF > ~/.kubernetes_auth
{
  "User": "$KUBE_USER",
  "Password": "$KUBE_PASSWORD",
  "CAFile": "$HOME/$ca_cert",
  "CertFile": "$HOME/$kube_cert",
  "KeyFile": "$HOME/$kube_key"
}
EOF

    echo "Kubernetes cluster created."
    echo "Sanity checking cluster..."

    sleep 5
    # Don't bail on errors, we want to be able to print some info.
    set +e

    # Basic sanity checking
    for i in ${KUBE_MINION_IP_ADDRESSES[@]}; do
      # Make sure docker is installed
      ssh -oStrictHostKeyChecking=no ubuntu@$i -i ~/.ssh/kube_aws_rsa which docker > /dev/null 2>&1
      if [ "$?" != "0" ]; then
        echo "Docker failed to install on $i. Your cluster is unlikely to work correctly."
        echo "Please run ./cluster/aws/kube-down.sh and re-create the cluster. (sorry!)"
        exit 1
      fi
    done

    echo
    echo "Kubernetes cluster is running. Access the master at:"
    echo
    echo " https://${user}:${passwd}@${KUBE_MASTER_IP}"
    echo
    echo "Security note: The server above uses a self signed certificate. This is"
    echo " subject to \"Man in the middle\" type attacks."
    chmod 0600 ~/.kubernetes_auth "${HOME}/${kube_cert}" \
      "${HOME}/${kube_key}" "${HOME}/${ca_cert}"
  )
}

function kube-down {