Merge pull request #2260 from brendandburns/Ch00k-kube-up-aws

Ch00k kube up aws - adapted by brendandburns
This commit is contained in:
Brendan Burns 2014-11-10 20:57:32 -08:00
commit 8928df79cc
13 changed files with 898 additions and 197 deletions

View File

@ -18,6 +18,8 @@ While the concepts and architecture in Kubernetes represent years of experience
* [Google Compute Engine](docs/getting-started-guides/gce.md)
* [Locally](docs/getting-started-guides/locally.md)
* [Vagrant](docs/getting-started-guides/vagrant.md)
* [AWS with CoreOS and Cloud Formation](docs/getting-started-guides/aws-coreos.md)
* [AWS](docs/getting-started-guides/aws.md)
* Fedora (w/ [Ansible](docs/getting-started-guides/fedora/fedora_ansible_config.md) or [manual](docs/getting-started-guides/fedora/fedora_manual_config.md))
* [Circle CI](https://circleci.com/docs/docker#google-compute-engine-and-kubernetes)
* [Digital Ocean](https://github.com/bketelsen/coreos-kubernetes-digitalocean)
@ -29,6 +31,7 @@ While the concepts and architecture in Kubernetes represent years of experience
* The following clouds are currently broken at Kubernetes head. Please sync your client to `v0.3` (`git checkout v0.3`) to use these:
* [Microsoft Azure](docs/getting-started-guides/azure.md)
* [Kubernetes 101](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/examples/walkthrough)
* [kubecfg command line tool](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/cli.md)
* [Kubernetes API Documentation](http://cdn.rawgit.com/GoogleCloudPlatform/kubernetes/31a0daae3627c91bc96e1f02a6344cd76e294791/api/kubernetes.html)

View File

@ -0,0 +1,37 @@
#!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: this isn't quite piped into all the right places...
ZONE=us-west-2
MASTER_SIZE=t2.micro
MINION_SIZE=t2.micro
NUM_MINIONS=4
# This is the Ubuntu 14.04 image for us-west-2 + EBS
# See here: http://cloud-images.ubuntu.com/locator/ec2/ for other images
# This will need to be updated from time to time as AMIs are deprecated
IMAGE=ami-55d69e65
INSTANCE_PREFIX=kubernetes
AWS_SSH_KEY=$HOME/.ssh/kube_aws_rsa
MASTER_NAME="ip-172-20-0-9.$ZONE.compute.internal"
MASTER_TAG="${INSTANCE_PREFIX}-master"
MINION_TAG="${INSTANCE_PREFIX}-minion"
MINION_NAMES=($(eval echo ip-172-20-0-1{0..$(($NUM_MINIONS-1))}.$ZONE.compute.internal))
MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24"))
MINION_SCOPES=""
POLL_SLEEP_INTERVAL=3
PORTAL_NET="10.0.0.0/16"
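#
# Usage sketch (file name hypothetical): util.sh sources this file via
# ${KUBE_CONFIG_FILE-config-default.sh}, resolved relative to cluster/aws/, so
# an alternate cluster shape can be tried without editing these defaults:
#   cp cluster/aws/config-default.sh cluster/aws/config-test.sh
#   # in config-test.sh: e.g. NUM_MINIONS=2, MINION_SIZE=t2.small
#   KUBE_CONFIG_FILE=config-test.sh cluster/kube-up.sh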

View File

@ -0,0 +1,30 @@
#!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Create the overlay files for the salt tree. We create these in a separate
# place so that we can blow away the rest of the salt configs on a kube-push and
# re-apply these.
mkdir -p /srv/salt-overlay/pillar
cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
node_instance_prefix: $NODE_INSTANCE_PREFIX
portal_net: $PORTAL_NET
use-fluentd-es: $FLUENTD_ELASTICSEARCH
use-fluentd-gcp: $FLUENTD_GCP
EOF
mkdir -p /srv/salt-overlay/salt/nginx
echo $MASTER_HTPASSWD > /srv/salt-overlay/salt/nginx/htpasswd
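#
# For illustration, with NODE_INSTANCE_PREFIX=kubernetes-minion,
# PORTAL_NET=10.0.0.0/16 and both fluentd flags false (the defaults kube-up
# passes on AWS), the generated cluster-params.sls would read:
#   node_instance_prefix: kubernetes-minion
#   portal_net: 10.0.0.0/16
#   use-fluentd-es: false
#   use-fluentd-gcp: false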

View File

@ -0,0 +1,34 @@
#!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Download and install release
# This script assumes that the environment variable MASTER_RELEASE_TAR contains
# the release tar to download and unpack. It is meant to be pushed to the
# master and run.
echo "Downloading binary release tar ($SERVER_BINARY_TAR_URL)"
wget "$SERVER_BINARY_TAR_URL" .
echo "Downloading binary release tar ($SALT_TAR_URL)"
wget "$SALT_TAR_URL" .
echo "Unpacking Salt tree"
rm -rf kubernetes
tar xzf "${SALT_TAR_URL##*/}"
echo "Running release install script"
sudo kubernetes/saltbase/install.sh "${SERVER_BINARY_TAR_URL##*/}"
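#
# The ${VAR##*/} expansion above strips everything up to and including the
# last '/', leaving just the file name. A quick sketch with an assumed URL:
#   url="https://s3-us-west-2.amazonaws.com/bucket/devel/kubernetes-salt.tar.gz"
#   echo "${url##*/}"   # prints: kubernetes-salt.tar.gz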

View File

@ -0,0 +1,57 @@
#!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Prepopulate the name of the Master
mkdir -p /etc/salt/minion.d
echo "master: $MASTER_NAME" > /etc/salt/minion.d/master.conf
cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
roles:
- kubernetes-master
cloud: aws
EOF
cat <<EOF > /etc/aws.conf
{
"Global": {
"Region": "${AWS_ZONE}"
}
}
EOF
# Auto accept all keys from minions that try to join
mkdir -p /etc/salt/master.d
cat <<EOF >/etc/salt/master.d/auto-accept.conf
auto_accept: True
EOF
cat <<EOF >/etc/salt/master.d/reactor.conf
# React to new minions starting by running highstate on them.
reactor:
- 'salt/minion/*/start':
- /srv/reactor/highstate-new.sls
EOF
# Install Salt
#
# We specify -X to avoid a race condition that can cause the minion install
# to fail. See https://github.com/saltstack/salt-bootstrap/issues/270
#
# -M installs the master
set +x
curl -L --connect-timeout 20 --retry 6 --retry-delay 10 https://bootstrap.saltstack.com | sh -s -- -M -X
set -x
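#
# The reactor config above expects /srv/reactor/highstate-new.sls from the
# unpacked Salt tree. A minimal sketch of such a reactor file (one plausible
# shape, not necessarily the shipped one):
#   highstate_run:
#     local.state.highstate:
#       - tgt: {{ data['id'] }}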

View File

@ -0,0 +1,37 @@
#!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Prepopulate the name of the Master
mkdir -p /etc/salt/minion.d
echo "master: $MASTER_NAME" > /etc/salt/minion.d/master.conf
# Turn on debugging for salt-minion
# echo "DAEMON_ARGS=\"\$DAEMON_ARGS --log-file-level=debug\"" > /etc/default/salt-minion
# Our minions will have a pool role to distinguish them from the master.
cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
roles:
- kubernetes-pool
cbr-cidr: $MINION_IP_RANGE
cloud: aws
EOF
# Install Salt
#
# We specify -X to avoid a race condition that can cause the minion install
# to fail. See https://github.com/saltstack/salt-bootstrap/issues/270
curl -L --connect-timeout 20 --retry 6 --retry-delay 10 https://bootstrap.saltstack.com | sh -s -- -X
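#
# With auto_accept enabled on the master, a freshly booted minion should show
# up on its own. A quick check from the master (illustrative):
#   sudo salt-key -L          # list accepted and pending minion keys
#   sudo salt '*' test.ping   # verify every minion responds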

cluster/aws/util.sh (new file, 452 lines)
View File

@ -0,0 +1,452 @@
#!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions and constants for the local config.
# Use the config file specified in $KUBE_CONFIG_FILE, or default to
# config-default.sh.
source $(dirname ${BASH_SOURCE})/${KUBE_CONFIG_FILE-"config-default.sh"}
AWS_CMD="aws --output json ec2"
function json_val {
python -c 'import json,sys;obj=json.load(sys.stdin);print obj'$1''
}
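# Example (illustrative): pull a single field out of an aws CLI JSON response
# by appending a Python-style subscript, as kube-up does below:
#   $AWS_CMD describe-vpcs | json_val '["Vpcs"][0]["VpcId"]'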
# TODO (ayurchuk) Refactor the get_* functions to use filters
# TODO (bburns) Parameterize this for multiple cluster per project
function get_instance_ids {
python -c 'import json,sys; lst = [str(instance["InstanceId"]) for reservation in json.load(sys.stdin)["Reservations"] for instance in reservation["Instances"] for tag in instance["Tags"] if tag["Value"].startswith("kubernetes-minion") or tag["Value"].startswith("kubernetes-master")]; print " ".join(lst)'
}
function get_vpc_id {
python -c 'import json,sys; lst = [str(vpc["VpcId"]) for vpc in json.load(sys.stdin)["Vpcs"] for tag in vpc["Tags"] if tag["Value"] == "kubernetes-vpc"]; print "".join(lst)'
}
function get_subnet_id {
python -c "import json,sys; lst = [str(subnet['SubnetId']) for subnet in json.load(sys.stdin)['Subnets'] if subnet['VpcId'] == '$1']; print ''.join(lst)"
}
function get_igw_id {
python -c "import json,sys; lst = [str(igw['InternetGatewayId']) for igw in json.load(sys.stdin)['InternetGateways'] for attachment in igw['Attachments'] if attachment['VpcId'] == '$1']; print ''.join(lst)"
}
function get_route_table_id {
python -c "import json,sys; lst = [str(route_table['RouteTableId']) for route_table in json.load(sys.stdin)['RouteTables'] if route_table['VpcId'] == '$1']; print ''.join(lst)"
}
function get_sec_group_id {
python -c 'import json,sys; lst = [str(group["GroupId"]) for group in json.load(sys.stdin)["SecurityGroups"] if group["GroupName"] == "kubernetes-sec-group"]; print "".join(lst)'
}
function expect_instance_states {
python -c "import json,sys; lst = [str(instance['InstanceId']) for reservation in json.load(sys.stdin)['Reservations'] for instance in reservation['Instances'] if instance['State']['Name'] != '$1']; print ' '.join(lst)"
}
function get_instance_public_ip {
python -c "import json,sys; lst = [str(instance['NetworkInterfaces'][0]['Association']['PublicIp']) for reservation in json.load(sys.stdin)['Reservations'] for instance in reservation['Instances'] for tag in instance['Tags'] if tag['Value'] == '$1' and instance['State']['Name'] == 'running']; print ' '.join(lst)"
}
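# For reference, the filter/JMESPath style the TODO above alludes to would
# look something like this (a sketch, not used by this script):
#   aws ec2 describe-instances \
#     --filters Name=tag:Role,Values=kubernetes-master \
#     --query 'Reservations[].Instances[].InstanceId' --output text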
function detect-master () {
KUBE_MASTER=${MASTER_NAME}
if [[ -z "${KUBE_MASTER_IP-}" ]]; then
KUBE_MASTER_IP=$($AWS_CMD describe-instances | get_instance_public_ip $MASTER_NAME)
fi
if [[ -z "${KUBE_MASTER_IP-}" ]]; then
echo "Could not detect Kubernetes master node. Make sure you've launched a cluster with 'kube-up.sh'"
exit 1
fi
echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)"
}
function detect-minions () {
KUBE_MINION_IP_ADDRESSES=()
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
local minion_ip=$($AWS_CMD describe-instances --filters Name=tag-value,Values=${MINION_NAMES[$i]} Name=instance-state-name,Values=running | get_instance_public_ip ${MINION_NAMES[$i]})
echo "Found ${MINION_NAMES[$i]} at ${minion_ip}"
KUBE_MINION_IP_ADDRESSES+=("${minion_ip}")
done
if [ -z "$KUBE_MINION_IP_ADDRESSES" ]; then
echo "Could not detect Kubernetes minion nodes. Make sure you've launched a cluster with 'kube-up.sh'"
exit 1
fi
}
# Verify prereqs
function verify-prereqs {
if [ "$(which aws)" == "" ]; then
echo "Can't find aws in PATH, please fix and retry."
exit 1
fi
}
# Create a temp dir that'll be deleted at the end of this bash session.
#
# Vars set:
# KUBE_TEMP
function ensure-temp-dir {
if [[ -z ${KUBE_TEMP-} ]]; then
KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX)
trap 'rm -rf "${KUBE_TEMP}"' EXIT
fi
}
function setup-monitoring {
if [ ${MONITORING-} ]; then
# TODO: Implement this.
echo "Monitoring not currently supported on AWS"
fi
}
function teardown-monitoring {
if [ ${MONITORING-} ]; then
# TODO: Implement this.
echo "Monitoring not currently supported on AWS"
fi
}
# Verify and find the various tar files that we are going to use on the server.
#
# Vars set:
# SERVER_BINARY_TAR
# SALT_TAR
function find-release-tars {
SERVER_BINARY_TAR="${KUBE_ROOT}/server/kubernetes-server-linux-amd64.tar.gz"
if [[ ! -f "$SERVER_BINARY_TAR" ]]; then
SERVER_BINARY_TAR="${KUBE_ROOT}/_output/release-tars/kubernetes-server-linux-amd64.tar.gz"
fi
if [[ ! -f "$SERVER_BINARY_TAR" ]]; then
echo "!!! Cannot find kubernetes-server-linux-amd64.tar.gz"
exit 1
fi
SALT_TAR="${KUBE_ROOT}/server/kubernetes-salt.tar.gz"
if [[ ! -f "$SALT_TAR" ]]; then
SALT_TAR="${KUBE_ROOT}/_output/release-tars/kubernetes-salt.tar.gz"
fi
if [[ ! -f "$SALT_TAR" ]]; then
echo "!!! Cannot find kubernetes-salt.tar.gz"
exit 1
fi
}
# Take the local tar files and upload them to S3. They will then be
# downloaded by the master as part of its startup script.
#
# Assumed vars:
# SERVER_BINARY_TAR
# SALT_TAR
# Vars set:
# SERVER_BINARY_TAR_URL
# SALT_TAR_URL
function upload-server-tars() {
SERVER_BINARY_TAR_URL=
SALT_TAR_URL=
local project_hash=
local key=$(aws configure get aws_access_key_id)
if which md5 > /dev/null 2>&1; then
project_hash=$(md5 -q -s "${USER} ${key}")
else
project_hash=$(echo -n "${USER} ${key}" | md5sum | awk '{ print $1 }')
fi
local -r staging_bucket="kubernetes-staging-${project_hash}"
echo "Uploading to Amazon S3"
if ! aws s3 ls "s3://${staging_bucket}" > /dev/null 2>&1 ; then
echo "Creating ${staging_bucket}"
aws s3 mb "s3://${staging_bucket}"
fi
aws s3api put-bucket-acl --bucket $staging_bucket --acl public-read
local -r staging_path="${staging_bucket}/devel"
echo "+++ Staging server tars to S3 Storage: ${staging_path}"
SERVER_BINARY_TAR_URL="${staging_path}/${SERVER_BINARY_TAR##*/}"
aws s3 cp "${SERVER_BINARY_TAR}" "s3://${SERVER_BINARY_TAR_URL}"
aws s3api put-object-acl --bucket ${staging_bucket} --key "devel/${SERVER_BINARY_TAR##*/}" --grant-read 'uri="http://acs.amazonaws.com/groups/global/AllUsers"'
SALT_TAR_URL="${staging_path}/${SALT_TAR##*/}"
aws s3 cp "${SALT_TAR}" "s3://${SALT_TAR_URL}"
aws s3api put-object-acl --bucket ${staging_bucket} --key "devel/${SALT_TAR##*/}" --grant-read 'uri="http://acs.amazonaws.com/groups/global/AllUsers"'
}
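# For illustration, with a project_hash of 1234abcd the server tar lands at:
#   SERVER_BINARY_TAR_URL=kubernetes-staging-1234abcd/devel/kubernetes-server-linux-amd64.tar.gz
# which kube-up below rewrites into a public URL of the form:
#   https://s3-us-west-2.amazonaws.com/kubernetes-staging-1234abcd/devel/kubernetes-server-linux-amd64.tar.gz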
# Ensure that we have a password created for authenticating to the master.
# Will read from $HOME/.kubernetes_auth if available.
#
# Vars set:
# KUBE_USER
# KUBE_PASSWORD
function get-password {
local file="$HOME/.kubernetes_auth"
if [[ -r "$file" ]]; then
KUBE_USER=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["User"]')
KUBE_PASSWORD=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["Password"]')
return
fi
KUBE_USER=admin
KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
# Remove this code, since in all use cases I can see, we are overwriting this
# at cluster creation time.
cat << EOF > "$file"
{
"User": "$KUBE_USER",
"Password": "$KUBE_PASSWORD"
}
EOF
chmod 0600 "$file"
}
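# Example (illustrative): fixed credentials can be pre-seeded before kube-up
# by writing the same JSON shape this function reads back:
#   echo '{"User": "admin", "Password": "s3cr3t"}' > ~/.kubernetes_auth
#   chmod 0600 ~/.kubernetes_auth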
function kube-up {
find-release-tars
upload-server-tars
ensure-temp-dir
get-password
python "${KUBE_ROOT}/third_party/htpasswd/htpasswd.py" \
-b -c "${KUBE_TEMP}/htpasswd" "$KUBE_USER" "$KUBE_PASSWORD"
local htpasswd
htpasswd=$(cat "${KUBE_TEMP}/htpasswd")
if [ ! -f $AWS_SSH_KEY ]; then
ssh-keygen -f $AWS_SSH_KEY -N ''
fi
$AWS_CMD import-key-pair --key-name kubernetes --public-key-material file://$AWS_SSH_KEY.pub > /dev/null 2>&1 || true
VPC_ID=$($AWS_CMD create-vpc --cidr-block 172.20.0.0/16 | json_val '["Vpc"]["VpcId"]')
$AWS_CMD modify-vpc-attribute --vpc-id $VPC_ID --enable-dns-support '{"Value": true}' > /dev/null
$AWS_CMD modify-vpc-attribute --vpc-id $VPC_ID --enable-dns-hostnames '{"Value": true}' > /dev/null
$AWS_CMD create-tags --resources $VPC_ID --tags Key=Name,Value=kubernetes-vpc > /dev/null
SUBNET_ID=$($AWS_CMD create-subnet --cidr-block 172.20.0.0/24 --vpc-id $VPC_ID | json_val '["Subnet"]["SubnetId"]')
IGW_ID=$($AWS_CMD create-internet-gateway | json_val '["InternetGateway"]["InternetGatewayId"]')
$AWS_CMD attach-internet-gateway --internet-gateway-id $IGW_ID --vpc-id $VPC_ID > /dev/null
ROUTE_TABLE_ID=$($AWS_CMD describe-route-tables --filters Name=vpc-id,Values=$VPC_ID | json_val '["RouteTables"][0]["RouteTableId"]')
$AWS_CMD associate-route-table --route-table-id $ROUTE_TABLE_ID --subnet-id $SUBNET_ID > /dev/null
$AWS_CMD describe-route-tables --filters Name=vpc-id,Values=$VPC_ID > /dev/null
$AWS_CMD create-route --route-table-id $ROUTE_TABLE_ID --destination-cidr-block 0.0.0.0/0 --gateway-id $IGW_ID > /dev/null
SEC_GROUP_ID=$($AWS_CMD create-security-group --group-name kubernetes-sec-group --description kubernetes-sec-group --vpc-id $VPC_ID | json_val '["GroupId"]')
$AWS_CMD authorize-security-group-ingress --group-id $SEC_GROUP_ID --protocol -1 --port all --cidr 0.0.0.0/0 > /dev/null
(
# We pipe this to the AMI as a startup script in the user-data field. Requires a compatible AMI.
echo "#! /bin/bash"
echo "mkdir -p /var/cache/kubernetes-install"
echo "cd /var/cache/kubernetes-install"
echo "readonly MASTER_NAME='${MASTER_NAME}'"
echo "readonly NODE_INSTANCE_PREFIX='${INSTANCE_PREFIX}-minion'"
echo "readonly SERVER_BINARY_TAR_URL='https://s3-${ZONE}.amazonaws.com/${SERVER_BINARY_TAR_URL}'"
echo "readonly SALT_TAR_URL='https://s3-${ZONE}.amazonaws.com/${SALT_TAR_URL}'"
echo "readonly AWS_ZONE='${ZONE}'"
echo "readonly MASTER_HTPASSWD='${htpasswd}'"
echo "readonly PORTAL_NET='${PORTAL_NET}'"
echo "readonly FLUENTD_ELASTICSEARCH='${FLUENTD_ELASTICSEARCH:-false}'"
echo "readonly FLUENTD_GCP='false'"
grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/create-dynamic-salt-files.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/download-release.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/salt-master.sh"
) > "${KUBE_TEMP}/master-start.sh"
master_id=$($AWS_CMD run-instances \
--image-id $IMAGE \
--instance-type $MASTER_SIZE \
--subnet-id $SUBNET_ID \
--private-ip-address 172.20.0.9 \
--key-name kubernetes \
--security-group-ids $SEC_GROUP_ID \
--associate-public-ip-address \
--user-data file://${KUBE_TEMP}/master-start.sh | json_val '["Instances"][0]["InstanceId"]')
sleep 3
$AWS_CMD create-tags --resources $master_id --tags Key=Name,Value=$MASTER_NAME > /dev/null
sleep 3
$AWS_CMD create-tags --resources $master_id --tags Key=Role,Value=$MASTER_TAG > /dev/null
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
(
# We pipe this to the AMI as a startup script in the user-data field. Requires a compatible AMI.
echo "#! /bin/bash"
echo "MASTER_NAME='${MASTER_NAME}'"
echo "MINION_IP_RANGE='${MINION_IP_RANGES[$i]}'"
grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/salt-minion.sh"
) > "${KUBE_TEMP}/minion-start-${i}.sh"
minion_id=$($AWS_CMD run-instances \
--image-id $IMAGE \
--instance-type $MINION_SIZE \
--subnet-id $SUBNET_ID \
--private-ip-address 172.20.0.1${i} \
--key-name kubernetes \
--security-group-ids $SEC_GROUP_ID \
--associate-public-ip-address \
--user-data file://${KUBE_TEMP}/minion-start-${i}.sh | json_val '["Instances"][0]["InstanceId"]')
sleep 3
n=0
until [ $n -ge 5 ]; do
$AWS_CMD create-tags --resources $minion_id --tags Key=Name,Value=${MINION_NAMES[$i]} > /dev/null && break
n=$((n+1))
sleep 15
done
sleep 3
n=0
until [ $n -ge 5 ]; do
$AWS_CMD create-tags --resources $minion_id --tags Key=Role,Value=$MINION_TAG > /dev/null && break
n=$((n+1))
sleep 15
done
sleep 3
$AWS_CMD modify-instance-attribute --instance-id $minion_id --source-dest-check '{"Value": false}' > /dev/null
# We are not able to add a route to the instance until that instance is in "running" state.
# This is quite an ugly solution to this problem. In Bash 4 we could use
# associative arrays to do this for all instances at once, but we can't be
# sure we are running Bash 4.
while true; do
instance_state=$($AWS_CMD describe-instances --instance-ids $minion_id | expect_instance_states running)
if [[ "$instance_state" == "" ]]; then
echo "Minion ${MINION_NAMES[$i]} running"
sleep 10
$AWS_CMD create-route --route-table-id $ROUTE_TABLE_ID --destination-cidr-block ${MINION_IP_RANGES[$i]} --instance-id $minion_id > /dev/null
break
else
echo "Waiting for minion ${MINION_NAMES[$i]} to spawn"
echo "Sleeping for 3 seconds..."
sleep 3
fi
done
done
FAIL=0
for job in `jobs -p`; do
wait $job || let "FAIL+=1"
done
if (( $FAIL != 0 )); then
echo "${FAIL} commands failed. Exiting."
exit 2
fi
detect-master > /dev/null
detect-minions > /dev/null
# Wait 3 minutes for cluster to come up. We hit it with a "highstate" after that to
# make sure that everything is well configured.
echo "Waiting for cluster to settle"
local i
for (( i=0; i < 6*3; i++)); do
printf "."
sleep 10
done
echo "Re-running salt highstate"
ssh -oStrictHostKeyChecking=no -i ~/.ssh/kube_aws_rsa ubuntu@${KUBE_MASTER_IP} "sudo salt '*' state.highstate" > /dev/null
echo "Waiting for cluster initialization."
echo
echo " This will continually check to see if the API for kubernetes is reachable."
echo " This might loop forever if there was some uncaught error during start"
echo " up."
echo
until curl --insecure --user ${KUBE_USER}:${KUBE_PASSWORD} --max-time 5 \
--fail --output /dev/null --silent https://${KUBE_MASTER_IP}/api/v1beta1/pods; do
printf "."
sleep 2
done
echo "Kubernetes cluster created."
echo "Sanity checking cluster..."
sleep 5
# Don't bail on errors, we want to be able to print some info.
set +e
# Basic sanity checking
for i in ${KUBE_MINION_IP_ADDRESSES[@]}; do
# Make sure docker is installed
ssh -oStrictHostKeyChecking=no ubuntu@$i -i ~/.ssh/kube_aws_rsa which docker > /dev/null 2>&1
if [ "$?" != "0" ]; then
echo "Docker failed to install on $i. Your cluster is unlikely to work correctly."
echo "Please run ./cluster/aws/kube-down.sh and re-create the cluster. (sorry!)"
exit 1
fi
done
echo
echo "Kubernetes cluster is running. Access the master at:"
echo
echo " https://${KUBE_USER}:${KUBE_PASSWORD}@${KUBE_MASTER_IP}"
echo
local kube_cert=".kubecfg.crt"
local kube_key=".kubecfg.key"
local ca_cert=".kubernetes.ca.crt"
# TODO: generate ADMIN (and KUBELET) tokens and put those in the master's
# config file. Distribute the same way the htpasswd is done.
(
umask 077
ssh -oStrictHostKeyChecking=no -i ~/.ssh/kube_aws_rsa ubuntu@${KUBE_MASTER_IP} sudo cat /usr/share/nginx/kubecfg.crt >"${HOME}/${kube_cert}" 2>/dev/null
ssh -oStrictHostKeyChecking=no -i ~/.ssh/kube_aws_rsa ubuntu@${KUBE_MASTER_IP} sudo cat /usr/share/nginx/kubecfg.key >"${HOME}/${kube_key}" 2>/dev/null
ssh -oStrictHostKeyChecking=no -i ~/.ssh/kube_aws_rsa ubuntu@${KUBE_MASTER_IP} sudo cat /usr/share/nginx/ca.crt >"${HOME}/${ca_cert}" 2>/dev/null
cat << EOF > ~/.kubernetes_auth
{
"User": "$KUBE_USER",
"Password": "$KUBE_PASSWORD",
"CAFile": "$HOME/$ca_cert",
"CertFile": "$HOME/$kube_cert",
"KeyFile": "$HOME/$kube_key"
}
EOF
chmod 0600 ~/.kubernetes_auth "${HOME}/${kube_cert}" \
"${HOME}/${kube_key}" "${HOME}/${ca_cert}"
)
}
function kube-down {
AWS_CMD="aws --output json ec2"
instance_ids=$($AWS_CMD describe-instances | get_instance_ids)
$AWS_CMD terminate-instances --instance-ids $instance_ids > /dev/null
echo "Waiting for instances deleted"
while true; do
instance_states=$($AWS_CMD describe-instances --instance-ids $instance_ids | expect_instance_states terminated)
if [[ "$instance_states" == "" ]]; then
echo "All instances terminated"
break
else
echo "Instances not yet terminated: $instance_states"
echo "Sleeping for 3 seconds..."
sleep 3
fi
done
echo "Deleting VPC"
vpc_id=$($AWS_CMD describe-vpcs | get_vpc_id)
subnet_id=$($AWS_CMD describe-subnets | get_subnet_id $vpc_id)
igw_id=$($AWS_CMD describe-internet-gateways | get_igw_id $vpc_id)
route_table_id=$($AWS_CMD describe-route-tables | get_route_table_id $vpc_id)
sec_group_id=$($AWS_CMD describe-security-groups | get_sec_group_id)
$AWS_CMD delete-subnet --subnet-id $subnet_id > /dev/null
$AWS_CMD detach-internet-gateway --internet-gateway-id $igw_id --vpc-id $vpc_id > /dev/null
$AWS_CMD delete-internet-gateway --internet-gateway-id $igw_id > /dev/null
$AWS_CMD delete-security-group --group-id $sec_group_id > /dev/null
$AWS_CMD delete-route --route-table-id $route_table_id --destination-cidr-block 0.0.0.0/0 > /dev/null
$AWS_CMD delete-vpc --vpc-id $vpc_id > /dev/null
}
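#
# Typical invocation (matching the docs below): both cluster entry points key
# off KUBERNETES_PROVIDER to pick this AWS implementation:
#   export KUBERNETES_PROVIDER=aws
#   cluster/kube-up.sh     # create the cluster
#   cluster/kube-down.sh   # tear it down again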

View File

@ -18,6 +18,6 @@
# You can override the default provider by exporting the KUBERNETES_PROVIDER
# variable in your bashrc
#
# The valid values: 'gce', 'aws', 'azure', 'vagrant', 'local', 'vsphere'
KUBERNETES_PROVIDER=${KUBERNETES_PROVIDER:-gce}

View File

@ -21,6 +21,12 @@
{% set cloud_provider = "-cloud_provider=gce" %}
{% set machines = "-machines=" + ','.join(salt['mine.get']('roles:kubernetes-pool', 'network.ip_addrs', expr_form='grain').keys()) %}
{% endif %}
{% if grains.cloud == 'aws' %}
# TODO: turn on AWS as a cloud provider. Need to figure out AWS role-based auth.
{% set cloud_provider = "" %}
{% set minion_regexp = "" %}
{% set machines = "-machines " + ','.join(salt['mine.get']('roles:kubernetes-pool', 'network.ip_addrs', expr_form='grain').keys()) %}
{% endif %}
{% if grains.cloud == 'azure' %}
MACHINES="{{ salt['mine.get']('roles:kubernetes-pool', 'grains.items', expr_form='grain').values()|join(',', attribute='hostnamef') }}"
{% set machines = "-machines=$MACHINES" %}

View File

@ -14,6 +14,9 @@ nginx:
{% if grains.cloud == 'gce' %}
{% set cert_ip='_use_gce_external_ip_' %}
{% endif %}
{% if grains.cloud == 'aws' %}
{% set cert_ip='_use_aws_external_ip_' %}
{% endif %}
{% if grains.cloud == 'vagrant' %}
{% set cert_ip=grains.fqdn_ip4 %}
{% endif %}

View File

@ -25,6 +25,10 @@ if [ "$cert_ip" == "_use_gce_external_ip_" ]; then
cert_ip=$(curl -s -H Metadata-Flavor:Google http://metadata.google.internal./computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip)
fi
if [ "$cert_ip" == "_use_aws_external_ip_" ]; then
cert_ip=$(curl -s http://169.254.169.254/latest/meta-data/public-ipv4)
fi
tmpdir=$(mktemp -d --tmpdir kubernetes_cacert.XXXXXX)
trap 'rm -rf "${tmpdir}"' EXIT
cd "${tmpdir}"

View File

@ -0,0 +1,203 @@
# Getting started on Amazon EC2
The example below creates an elastic Kubernetes cluster with 3 worker nodes and a master.
## Highlights
* Cluster bootstrapping using [cloud-config](https://coreos.com/docs/cluster-management/setup/cloudinit-cloud-config)
* Cross container networking with [flannel](https://github.com/coreos/flannel#flannel)
* Auto worker registration with [kube-register](https://github.com/kelseyhightower/kube-register#kube-register)
* Kubernetes v0.4.2 [official binaries](https://github.com/GoogleCloudPlatform/kubernetes/releases/tag/v0.4.2)
## Prerequisites
* [kubecfg CLI](aws/kubecfg.md)
* [aws CLI](http://aws.amazon.com/cli)
* CoreOS 490.0.0+
## Starting a Cluster
### Cloud Formation
The [cloudformation-template.json](aws/cloudformation-template.json) can be used to bootstrap a Kubernetes cluster with a single command.
```
aws cloudformation create-stack --stack-name kubernetes --region us-west-2 \
--template-body file://aws/cloudformation-template.json \
--parameters ParameterKey=KeyPair,ParameterValue=<keypair>
```
It will take a few minutes for the entire stack to come up. You can monitor the stack progress with the following command:
```
aws cloudformation describe-stack-events --stack-name kubernetes
```
> Record the Kubernetes Master IP address
```
aws cloudformation describe-stacks --stack-name kubernetes
```
[Skip to kubecfg client configuration](#configure-the-kubecfg-ssh-tunnel)
### Manually
The following commands use the CoreOS 490.0.0 alpha AMI `ami-e18dc5d1` from the `us-west-2` region. For a list of different regions and corresponding AMI IDs see the [CoreOS EC2 cloud provider documentation](https://coreos.com/docs/running-coreos/cloud-providers/ec2/#choosing-a-channel).
#### Create the Kubernetes Security Group
```
aws ec2 create-security-group --group-name kubernetes --description "Kubernetes Security Group"
aws ec2 authorize-security-group-ingress --group-name kubernetes --protocol tcp --port 22 --cidr 0.0.0.0/0
aws ec2 authorize-security-group-ingress --group-name kubernetes --protocol tcp --port 80 --cidr 0.0.0.0/0
aws ec2 authorize-security-group-ingress --group-name kubernetes --source-security-group-name kubernetes
```
#### Save the master and node cloud-configs
* [master.yaml](aws/cloud-configs/master.yaml)
* [node.yaml](aws/cloud-configs/node.yaml)
#### Launch the master
```
aws ec2 run-instances --image-id ami-e18dc5d1 --key-name <keypair> \
--region us-west-2 --security-groups kubernetes --instance-type m3.medium \
--user-data file://master.yaml
```
> Record the `InstanceId` for the master.
Gather the public and private IPs for the master node:
```
aws ec2 describe-instances --instance-id <instance-id>
```
```
{
"Reservations": [
{
"Instances": [
{
"PublicDnsName": "ec2-54-68-97-117.us-west-2.compute.amazonaws.com",
"RootDeviceType": "ebs",
"State": {
"Code": 16,
"Name": "running"
},
"PublicIpAddress": "54.68.97.117",
"PrivateIpAddress": "172.31.9.9",
...
```
#### Update the node.yaml cloud-config
Edit `node.yaml` and replace all instances of `<master-private-ip>` with the **private** IP address of the master node.
### Launch 3 worker nodes
```
aws ec2 run-instances --count 3 --image-id ami-e18dc5d1 --key-name <keypair> \
--region us-west-2 --security-groups kubernetes --instance-type m3.medium \
--user-data file://node.yaml
```
### Add additional worker nodes
```
aws ec2 run-instances --count 1 --image-id ami-e18dc5d1 --key-name <keypair> \
--region us-west-2 --security-groups kubernetes --instance-type m3.medium \
--user-data file://node.yaml
```
### Configure the kubecfg SSH tunnel
This command enables secure communication between the kubecfg client and the Kubernetes API.
```
ssh -f -nNT -L 8080:127.0.0.1:8080 core@<master-public-ip>
```
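With the tunnel up, the API should answer on the forwarded port. A quick smoke test (the v1beta1 path is the one this guide's tooling uses; adjust if your build differs):
```
curl http://127.0.0.1:8080/api/v1beta1/pods
```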
### Listing worker nodes
Once the worker instances have fully booted, they will be automatically registered with the Kubernetes API server by the kube-register service running on the master node. It may take a few minutes.
```
kubecfg list minions
```
## Starting a simple pod
Create a pod manifest: `pod.json`
```
{
"id": "hello",
"kind": "Pod",
"apiVersion": "v1beta1",
"desiredState": {
"manifest": {
"version": "v1beta1",
"id": "hello",
"containers": [{
"name": "hello",
"image": "quay.io/kelseyhightower/hello",
"ports": [{
"containerPort": 80,
"hostPort": 80
}]
}]
}
},
"labels": {
"name": "hello",
"environment": "testing"
}
}
```
### Create the pod using the kubecfg command line tool
```
kubecfg -c pod.json create pods
```
### Testing
```
kubecfg list pods
```
> Record the **Host** of the pod, which should be the private IP address.
Gather the public IP address for the worker node.
```
aws ec2 describe-instances --filters 'Name=private-ip-address,Values=<host>'
```
```
{
"Reservations": [
{
"Instances": [
{
"PublicDnsName": "ec2-54-68-97-117.us-west-2.compute.amazonaws.com",
"RootDeviceType": "ebs",
"State": {
"Code": 16,
"Name": "running"
},
"PublicIpAddress": "54.68.97.117",
...
```
Visit the public IP address in your browser to view the running pod.
### Delete the pod
```
kubecfg delete pods/hello
```

View File

@ -1,203 +1,38 @@
## Getting started on AWS
### Prerequisites
1. You need an AWS account. Visit [http://aws.amazon.com](http://aws.amazon.com) to get started
2. Install and configure [AWS Command Line Interface](http://aws.amazon.com/cli)
### Cluster turnup
#### Option 1: Install a binary release
1. Download the [binary release](getting-started-guides/binary_release.md)
2. Unpack the archive and ```cd kubernetes```
#### Option 2: Install from source
1. ```git clone https://github.com/GoogleCloudPlatform/kubernetes.git```
2. ```cd kubernetes; make release```
#### Turn up the cluster
```
export KUBERNETES_PROVIDER=aws
cluster/kube-up.sh
```
The script above relies on AWS S3 to deploy the software to instances running in EC2.
### Running examples
Take a look at [next steps](https://github.com/GoogleCloudPlatform/kubernetes#where-to-go-next)
### Tearing down the cluster
```
cd kubernetes
cluster/kube-down.sh
```
There is a contributed [example](aws-coreos.md) from [CoreOS](http://www.coreos.com) using Cloud Formation.