diff --git a/cluster/aws/common/common.sh b/cluster/aws/common/common.sh
deleted file mode 100644
index 62b466fd3e7..00000000000
--- a/cluster/aws/common/common.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/bash
-
-# Copyright 2015 The Kubernetes Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# A library of common helper functions for Ubuntu and Debian.
-
-function detect-minion-image() {
-  if [[ -z "${KUBE_NODE_IMAGE:-}" ]]; then
-    detect-image
-    KUBE_NODE_IMAGE=$AWS_IMAGE
-  fi
-}
-
-function generate-minion-user-data {
-  # We pipe this to the AMI as a startup script in the user-data field. Requires a compatible AMI.
-  echo "#! /bin/bash"
-  echo "SALT_MASTER='${MASTER_INTERNAL_IP}'"
-  echo "DOCKER_OPTS='${EXTRA_DOCKER_OPTS:-}'"
-  echo "readonly NON_MASQUERADE_CIDR='${NON_MASQUERADE_CIDR:-}'"
-  echo "readonly DOCKER_STORAGE='${DOCKER_STORAGE:-}'"
-  grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/common.sh"
-  grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/format-disks.sh"
-  grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/salt-minion.sh"
-}
-
-function check-minion() {
-  local minion_ip=$1
-
-  local output=$(ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" ${SSH_USER}@$minion_ip sudo docker ps -a 2>/dev/null)
-  if [[ -z "${output}" ]]; then
-    ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" ${SSH_USER}@$minion_ip sudo service docker start > $LOG 2>&1
-    echo "not working yet"
-  else
-    echo "working"
-  fi
-}
diff --git a/cluster/aws/config-default.sh b/cluster/aws/config-default.sh
deleted file mode 100644
index 138a920d759..00000000000
--- a/cluster/aws/config-default.sh
+++ /dev/null
@@ -1,167 +0,0 @@
-#!/bin/bash
-
-# Copyright 2014 The Kubernetes Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
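An aside on the `generate-minion-user-data` helper removed above: it emits a complete boot script on stdout (a shebang, a few variable assignments, then the comment-stripped template scripts), and the caller hands that text to EC2 as instance user-data, which a compatible AMI executes on first boot. The real kube-up code fed this into an auto-scaling launch configuration (see `ASG_NAME` in util.sh further down in this diff); the sketch below uses a plain `run-instances` call for brevity, with an abbreviated, illustrative flag list:

```bash
# Minimal sketch of how the generated user-data would be consumed.
# KUBE_TEMP, KUBE_NODE_IMAGE and NODE_SIZE are assumed to be set by the
# surrounding kube-up machinery; this is not the exact call the script made.
generate-minion-user-data > "${KUBE_TEMP}/node-user-data"

aws ec2 run-instances \
  --image-id "${KUBE_NODE_IMAGE}" \
  --instance-type "${NODE_SIZE}" \
  --user-data "file://${KUBE_TEMP}/node-user-data"
```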
-
-ZONE=${KUBE_AWS_ZONE:-us-west-2a}
-MASTER_SIZE=${MASTER_SIZE:-}
-NODE_SIZE=${NODE_SIZE:-}
-NUM_NODES=${NUM_NODES:-4}
-
-# Dynamically set node sizes so that Heapster has enough space to run
-if [[ -z ${NODE_SIZE} ]]; then
-  if (( ${NUM_NODES} < 50 )); then
-    NODE_SIZE="t2.micro"
-  elif (( ${NUM_NODES} < 150 )); then
-    NODE_SIZE="t2.small"
-  else
-    NODE_SIZE="t2.medium"
-  fi
-fi
-
-# Dynamically set the master size by the number of nodes; these are rough guesses
-if [[ -z ${MASTER_SIZE} ]]; then
-  MASTER_SIZE="m3.medium"
-  if [[ "${NUM_NODES}" -gt "5" ]]; then
-    MASTER_SIZE="m3.large"
-  fi
-  if [[ "${NUM_NODES}" -gt "10" ]]; then
-    MASTER_SIZE="m3.xlarge"
-  fi
-  if [[ "${NUM_NODES}" -gt "100" ]]; then
-    MASTER_SIZE="m3.2xlarge"
-  fi
-  if [[ "${NUM_NODES}" -gt "250" ]]; then
-    MASTER_SIZE="c4.4xlarge"
-  fi
-  if [[ "${NUM_NODES}" -gt "500" ]]; then
-    MASTER_SIZE="c4.8xlarge"
-  fi
-fi
-
-# Optional: Set AWS_S3_BUCKET to the name of an S3 bucket to use for uploading binaries
-# (otherwise a unique bucket name will be generated for you)
-# AWS_S3_BUCKET=kubernetes-artifacts
-
-# Because S3 bucket names are globally unique, we want to create in a single region; default to us-east-1
-AWS_S3_REGION=${AWS_S3_REGION:-us-east-1}
-
-# Which docker storage mechanism to use.
-DOCKER_STORAGE=${DOCKER_STORAGE:-aufs}
-
-# Extra docker options for nodes.
-EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS:-}"
-
-INSTANCE_PREFIX="${KUBE_AWS_INSTANCE_PREFIX:-kubernetes}"
-CLUSTER_ID=${INSTANCE_PREFIX}
-VPC_NAME=${VPC_NAME:-kubernetes-vpc}
-AWS_SSH_KEY=${AWS_SSH_KEY:-$HOME/.ssh/kube_aws_rsa}
-CONFIG_CONTEXT="${KUBE_CONFIG_CONTEXT:-aws_${INSTANCE_PREFIX}}"
-
-LOG="/dev/null"
-
-MASTER_DISK_TYPE="${MASTER_DISK_TYPE:-gp2}"
-MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20}
-# The master root EBS volume size (typically does not need to be very large)
-MASTER_ROOT_DISK_TYPE="${MASTER_ROOT_DISK_TYPE:-gp2}"
-MASTER_ROOT_DISK_SIZE=${MASTER_ROOT_DISK_SIZE:-8}
-# The minions' root EBS volume size (used to house Docker images)
-NODE_ROOT_DISK_TYPE="${NODE_ROOT_DISK_TYPE:-gp2}"
-NODE_ROOT_DISK_SIZE=${NODE_ROOT_DISK_SIZE:-32}
-
-MASTER_NAME="${INSTANCE_PREFIX}-master"
-MASTER_TAG="${INSTANCE_PREFIX}-master"
-NODE_TAG="${INSTANCE_PREFIX}-minion"
-NODE_SCOPES=""
-NON_MASQUERADE_CIDR="${NON_MASQUERADE_CIDR:-10.0.0.0/8}" # Traffic to IPs outside this range will use IP masquerade
-SERVICE_CLUSTER_IP_RANGE="${SERVICE_CLUSTER_IP_RANGE:-10.0.0.0/16}" # formerly PORTAL_NET
-CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.244.0.0/16}"
-MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
-SSH_CIDR="${SSH_CIDR:-0.0.0.0/0}" # CIDR to restrict ssh access to nodes/master
-HTTP_API_CIDR="${HTTP_API_CIDR:-0.0.0.0/0}" # CIDR to restrict HTTP API access
-# If set to an Elastic IP address, the master instance will be associated with this IP.
-# Otherwise a new Elastic IP will be acquired.
-# (We used to accept 'auto' to mean 'allocate elastic ip', but that is now the default.)
-MASTER_RESERVED_IP="${MASTER_RESERVED_IP:-}"
-
-# Runtime config
-RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}"
-
-# Optional: Cluster monitoring to setup as part of the cluster bring up:
-#   none     - No cluster monitoring setup
-#   influxdb - Heapster, InfluxDB, and Grafana
-ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}"
-
-# Optional: Enable node logging.
-ENABLE_NODE_LOGGING="${KUBE_ENABLE_NODE_LOGGING:-true}" -LOGGING_DESTINATION="${KUBE_LOGGING_DESTINATION:-elasticsearch}" # options: elasticsearch, gcp - -# Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up. -ENABLE_CLUSTER_LOGGING="${KUBE_ENABLE_CLUSTER_LOGGING:-true}" -ELASTICSEARCH_LOGGING_REPLICAS=1 - -# Optional: Don't require https for registries in our local RFC1918 network -if [[ ${KUBE_ENABLE_INSECURE_REGISTRY:-false} == "true" ]]; then - EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS} --insecure-registry ${NON_MASQUERADE_CIDR}" -fi - -# Optional: Install cluster DNS. -ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}" -DNS_SERVER_IP="${DNS_SERVER_IP:-10.0.0.10}" -DNS_DOMAIN="cluster.local" - -# Optional: Enable DNS horizontal autoscaler -ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-false}" - -# Optional: Install Kubernetes UI -ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}" - -# Optional: Create autoscaler for cluster's nodes. -ENABLE_CLUSTER_AUTOSCALER="${KUBE_ENABLE_CLUSTER_AUTOSCALER:-false}" -if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then - # TODO: actually configure ASG or similar - AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-1}" - AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_NODES}}" - TARGET_NODE_UTILIZATION="${KUBE_TARGET_NODE_UTILIZATION:-0.7}" -fi - -# Admission Controllers to invoke prior to persisting objects in cluster -# If we included ResourceQuota, we should keep it at the end of the list to prevent incrementing quota usage prematurely. -ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds - -# Optional: Enable/disable public IP assignment for minions. -# Important Note: disable only if you have setup a NAT instance for internet access and configured appropriate routes! -ENABLE_NODE_PUBLIC_IP=${KUBE_ENABLE_NODE_PUBLIC_IP:-true} - -# OS options for minions -KUBE_OS_DISTRIBUTION="${KUBE_OS_DISTRIBUTION:-jessie}" -MASTER_OS_DISTRIBUTION="${KUBE_OS_DISTRIBUTION}" -NODE_OS_DISTRIBUTION="${KUBE_OS_DISTRIBUTION}" -KUBE_NODE_IMAGE="${KUBE_NODE_IMAGE:-}" -COREOS_CHANNEL="${COREOS_CHANNEL:-alpha}" -CONTAINER_RUNTIME="${KUBE_CONTAINER_RUNTIME:-docker}" -RKT_VERSION="${KUBE_RKT_VERSION:-1.23.0}" - -NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # kubenet, opencontrail, flannel - -# OpenContrail networking plugin specific settings -OPENCONTRAIL_TAG="${OPENCONTRAIL_TAG:-R2.20}" -OPENCONTRAIL_KUBERNETES_TAG="${OPENCONTRAIL_KUBERNETES_TAG:-master}" -OPENCONTRAIL_PUBLIC_SUBNET="${OPENCONTRAIL_PUBLIC_SUBNET:-10.1.0.0/16}" - -# Optional: if set to true, kube-up will configure the cluster to run e2e tests. -E2E_STORAGE_TEST_ENVIRONMENT=${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false} - -# Optional: install a default StorageClass -ENABLE_DEFAULT_STORAGE_CLASS="${ENABLE_DEFAULT_STORAGE_CLASS:-true}" diff --git a/cluster/aws/config-test.sh b/cluster/aws/config-test.sh deleted file mode 100755 index 97381617d3d..00000000000 --- a/cluster/aws/config-test.sh +++ /dev/null @@ -1,153 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -ZONE=${KUBE_AWS_ZONE:-us-west-2a} - -MASTER_SIZE=${MASTER_SIZE:-} -NODE_SIZE=${NODE_SIZE:-} -NUM_NODES=${NUM_NODES:-2} - -# Dynamically set node sizes so that Heapster has enough space to run -if [[ -z ${NODE_SIZE} ]]; then - if (( ${NUM_NODES} < 50 )); then - NODE_SIZE="t2.micro" - elif (( ${NUM_NODES} < 150 )); then - NODE_SIZE="t2.small" - else - NODE_SIZE="t2.medium" - fi -fi - -# Dynamically set the master size by the number of nodes, these are guesses -# TODO: gather some data -if [[ -z ${MASTER_SIZE} ]]; then - if (( ${NUM_NODES} < 150 )); then - MASTER_SIZE="m3.medium" - else - MASTER_SIZE="m3.large" - fi -fi - - -# Because regions are globally named, we want to create in a single region; default to us-east-1 -AWS_S3_REGION=${AWS_S3_REGION:-us-east-1} - -# Which docker storage mechanism to use. -DOCKER_STORAGE=${DOCKER_STORAGE:-aufs} - -# Extra docker options for nodes. -EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS:-}" - -INSTANCE_PREFIX="${KUBE_AWS_INSTANCE_PREFIX:-e2e-test-${USER}}" -CONFIG_CONTEXT="${KUBE_CONFIG_CONTEXT:-aws_${INSTANCE_PREFIX}}" -CLUSTER_ID=${INSTANCE_PREFIX} -VPC_NAME=${VPC_NAME:-kubernetes-vpc} -AWS_SSH_KEY=${AWS_SSH_KEY:-$HOME/.ssh/kube_aws_rsa} - -LOG="/dev/null" - -MASTER_DISK_TYPE="${MASTER_DISK_TYPE:-gp2}" -MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20} -# The master root EBS volume size (typically does not need to be very large) -MASTER_ROOT_DISK_TYPE="${MASTER_ROOT_DISK_TYPE:-gp2}" -MASTER_ROOT_DISK_SIZE=${MASTER_ROOT_DISK_SIZE:-8} -# The minions root EBS volume size (used to house Docker images) -NODE_ROOT_DISK_TYPE="${NODE_ROOT_DISK_TYPE:-gp2}" -NODE_ROOT_DISK_SIZE=${NODE_ROOT_DISK_SIZE:-32} - -MASTER_NAME="${INSTANCE_PREFIX}-master" -MASTER_TAG="${INSTANCE_PREFIX}-master" -NODE_TAG="${INSTANCE_PREFIX}-minion" -NODE_SCOPES="" -NON_MASQUERADE_CIDR="${NON_MASQUERADE_CIDR:-10.0.0.0/8}" # Traffic to IPs outside this range will use IP masquerade -SERVICE_CLUSTER_IP_RANGE="${SERVICE_CLUSTER_IP_RANGE:-10.0.0.0/16}" # formerly PORTAL_NET -CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.245.0.0/16}" -MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}" -SSH_CIDR="${SSH_CIDR:-0.0.0.0/0}" # IP to restrict ssh access to nodes/master -HTTP_API_CIDR="${HTTP_API_CIDR:-0.0.0.0/0}" # IP to restrict HTTP API access -# If set to an Elastic IP address, the master instance will be associated with this IP. -# Otherwise a new Elastic IP will be acquired -# (We used to accept 'auto' to mean 'allocate elastic ip', but that is now the default) -MASTER_RESERVED_IP="${MASTER_RESERVED_IP:-}" -RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}" - -# Optional: Cluster monitoring to setup as part of the cluster bring up: -# none - No cluster monitoring setup -# influxdb - Heapster, InfluxDB, and Grafana -ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-none}" - -# Optional: Enable node logging. -ENABLE_NODE_LOGGING="${KUBE_ENABLE_NODE_LOGGING:-true}" -LOGGING_DESTINATION="${KUBE_LOGGING_DESTINATION:-elasticsearch}" # options: elasticsearch, gcp - -# Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up. 
-ENABLE_CLUSTER_LOGGING="${KUBE_ENABLE_CLUSTER_LOGGING:-false}" -ELASTICSEARCH_LOGGING_REPLICAS=1 - -# Optional: Don't require https for registries in our local RFC1918 network -if [[ ${KUBE_ENABLE_INSECURE_REGISTRY:-false} == "true" ]]; then - EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS} --insecure-registry ${NON_MASQUERADE_CIDR}" -fi - -# Optional: Install cluster DNS. -ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}" -DNS_SERVER_IP="${DNS_SERVER_IP:-10.0.0.10}" -DNS_DOMAIN="cluster.local" - -# Optional: Enable DNS horizontal autoscaler -ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-false}" - -# Optional: Install Kubernetes UI -ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}" - -# Optional: Create autoscaler for cluster's nodes. -ENABLE_CLUSTER_AUTOSCALER="${KUBE_ENABLE_CLUSTER_AUTOSCALER:-false}" -if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then - # TODO: actually configure ASG or similar - AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-1}" - AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_NODES}}" - TARGET_NODE_UTILIZATION="${KUBE_TARGET_NODE_UTILIZATION:-0.7}" -fi - -# Admission Controllers to invoke prior to persisting objects in cluster -# If we included ResourceQuota, we should keep it at the end of the list to prevent incrementing quota usage prematurely. -ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds - -# Optional: Enable/disable public IP assignment for minions. -# Important Note: disable only if you have setup a NAT instance for internet access and configured appropriate routes! -ENABLE_NODE_PUBLIC_IP=${KUBE_ENABLE_NODE_PUBLIC_IP:-true} - -# OS options for minions -KUBE_OS_DISTRIBUTION="${KUBE_OS_DISTRIBUTION:-jessie}" -MASTER_OS_DISTRIBUTION="${KUBE_OS_DISTRIBUTION}" -NODE_OS_DISTRIBUTION="${KUBE_OS_DISTRIBUTION}" -KUBE_NODE_IMAGE="${KUBE_NODE_IMAGE:-}" -COREOS_CHANNEL="${COREOS_CHANNEL:-alpha}" -CONTAINER_RUNTIME="${KUBE_CONTAINER_RUNTIME:-docker}" -RKT_VERSION="${KUBE_RKT_VERSION:-1.23.0}" - -NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # kubenet, opencontrail, flannel - -# OpenContrail networking plugin specific settings -OPENCONTRAIL_TAG="${OPENCONTRAIL_TAG:-R2.20}" -OPENCONTRAIL_KUBERNETES_TAG="${OPENCONTRAIL_KUBERNETES_TAG:-master}" -OPENCONTRAIL_PUBLIC_SUBNET="${OPENCONTRAIL_PUBLIC_SUBNET:-10.1.0.0/16}" - -# Optional: if set to true, kube-up will configure the cluster to run e2e tests. -E2E_STORAGE_TEST_ENVIRONMENT=${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false} - -# Optional: install a default StorageClass -ENABLE_DEFAULT_STORAGE_CLASS="${ENABLE_DEFAULT_STORAGE_CLASS:-true}" diff --git a/cluster/aws/jessie/util.sh b/cluster/aws/jessie/util.sh deleted file mode 100644 index 73f750f0602..00000000000 --- a/cluster/aws/jessie/util.sh +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/bash - -# Copyright 2015 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# A library of helper functions for Jessie. 
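A note on the configuration pattern used by config-default.sh and config-test.sh above: nearly every setting is read from an environment variable with a fallback (`VAR="${KUBE_VAR:-default}"`), so a cluster could be customized without editing either file. A hypothetical invocation, with example values only:

```bash
# Sketch: overriding config-default.sh defaults from the environment.
# Every value here is illustrative, not a recommendation.
export KUBERNETES_PROVIDER=aws         # route kube-up to the AWS scripts
export KUBE_AWS_ZONE=eu-west-1a        # overrides ZONE
export NUM_NODES=10                    # also drives the NODE_SIZE auto-sizing
export MASTER_SIZE=m3.xlarge           # skips the dynamic master sizing
export KUBE_ENABLE_CLUSTER_MONITORING=none

cluster/kube-up.sh
```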
- -source "${KUBE_ROOT}/cluster/aws/common/common.sh" - -SSH_USER=admin - -# Detects the AMI to use for jessie (considering the region) -# -# Vars set: -# AWS_IMAGE -function detect-jessie-image () { - if [[ -z "${AWS_IMAGE-}" ]]; then - # These images are built using the imagebuilder tool, in the kube-deploy github repo - # https://github.com/kubernetes/kube-deploy/tree/master/imagebuilder - - # 282335181503: images published by kope.io - aws_account="282335181503" - # TODO: we could use a tag for the latest image, instead of bumping it every time - # e.g. family = k8s-1.3-debian-jessie-amd64-hvm-ebs latest/1.3=true - if [[ -z "${AWS_IMAGE_NAME:-}" ]]; then - AWS_IMAGE_NAME="k8s-1.3-debian-jessie-amd64-hvm-ebs-2016-06-18" - fi - AWS_IMAGE=`aws ec2 describe-images --owner ${aws_account} --filters Name=name,Values=${AWS_IMAGE_NAME} --query Images[].ImageId --output text` - if [[ -z "${AWS_IMAGE-}" ]]; then - echo "Please specify AWS_IMAGE directly (image ${AWS_IMAGE_NAME} not found in region ${AWS_REGION})" - exit 1 - fi - fi -} diff --git a/cluster/aws/options.md b/cluster/aws/options.md deleted file mode 100644 index 88d107188bc..00000000000 --- a/cluster/aws/options.md +++ /dev/null @@ -1,157 +0,0 @@ -# AWS specific configuration options - -These options can be set as environment variables to customize how your cluster is created. Only options -specific to AWS are documented here, for cross-provider options see [this document](../options.md). - -This is a work-in-progress; not all options are documented yet! - -**KUBE_AWS_ZONE** - -The AWS availability zone to deploy to. Defaults to us-west-2a. - -**AWS_IMAGE** - -The AMI to use. If not specified, the image will be selected based on the AWS region. - -**AWS_S3_BUCKET**, **AWS_S3_REGION** - -The bucket name to use, and the region where the bucket should be created, or where the bucket is located if it exists already. - -If not specified, defaults to AWS_S3_REGION us-east-1, because buckets are globally named and you probably -want to share a bucket across all regions; us-east-1 is a sensible (relatively arbitrary) default. - -AWS_S3_BUCKET will default to a uniquely generated name, so you won't collide with other kubernetes users. -(Currently this uses the hash of your AWS Access key to produce a per-user unique value). - -It is not a bad idea to set AWS_S3_BUCKET to something more human friendly. - -AWS_S3_REGION is useful for people that want to control their data location, because of regulatory restrictions for example. - -**MASTER_SIZE**, **NODE_SIZE** - -The instance type to use for creating the master/minion. Defaults to auto-sizing based on the number of nodes (see below). - -For production usage, we recommend bigger instances, for example: - -``` -export MASTER_SIZE=c4.large -export NODE_SIZE=r3.large -``` - -If you don't specify master and minion sizes, the scripts will attempt to guess the correct size of the master and worker -nodes based on `${NUM_NODES}`. See [Getting started on AWS EC2](../../docs/getting-started-guides/aws.md) for details. - -Please note: `kube-up` utilizes ephemeral storage available on instances for docker storage. EBS-only instance types do not -support ephemeral storage and will default to docker storage on the root disk which is usually only 8GB. -EBS-only instance types include `t2`, `c4`, and `m4`. - -**KUBE_ENABLE_NODE_PUBLIC_IP** - -Should a public IP automatically assigned to the minions? "true" or "false" -Defaults to: "true" - -Please note: Do not set this to "false" unless you... - -- ... 
already configured a NAT instance in the kubernetes VPC that will enable internet access for the new minions -- ... already configured a route for "0.0.0.0/0" to this NAT instance -- ... already configured a route for "YOUR_IP/32" to an AWS internet gateway (for the master instance to reach your - client directly during setup) - -**DOCKER_STORAGE** - -Choose the docker storage driver to use. This is an advanced option; most people should leave it as the default aufs -for parity with GCE. - -Supported values: btrfs, aufs, devicemapper, aufs-nolvm - -This will also configure your ephemeral storage in a compatible way, and your Docker containers -will run on this storage if available, as typically the root disk is comparatively small. - -* `btrfs` will combine your ephemeral disks into a btrfs volume. This is a good option if you have a recent kernel - with a reliable btrfs. -* `aufs` uses the aufs driver, but also installs LVM to combine your disks. `aufs-nolvm` will not use LVM, - meaning that only your first ephemeral disk will be used. -* `devicemapper` sets up LVM across all your ephemeral disks and sets Docker to drive it directly. This is a - similar option to btrfs, but without relying on the btrfs filesystem. Sadly, it does not work with most - configurations - see [this docker bug](https://github.com/docker/docker/issues/4036) - -If your machines don't have any ephemeral disks, this will default to the aufs driver on your root disk (with no LVM). - -**KUBE_OS_DISTRIBUTION** - -The distribution to use. Defaults to `jessie` - -Supported options: - -* `jessie`: Debian Jessie, running a custom kubernetes-optimized image. Should - be supported until 2018 by the debian-security team, and until 2020 by the - debian-LTS team. -* `wily`: Ubuntu Wily. Wily is not an LTS release, and OS support is due to - end in July 2016. - -No longer supported as of 1.3: - -* `vivid`: Ubuntu Vivid. Vivid OS support ended in early February 2016. - Docker no longer provides packages for vivid. - -Given the support situation, we recommend using Debian Jessie. In Kubernetes -1.3 Ubuntu should have their next LTS release out, so we should be able to -recommend Ubuntu again at that time. - -Using kube-up with other operating systems is neither supported nor -recommended. But we would welcome increased OS support for kube-up, so please -contribute! - -**NON_MASQUERADE_CIDR** - -The 'internal' IP range which Kubernetes will use, which will therefore not -use IP masquerade. By default kubernetes runs an internal network for traffic -between pods (and between pods and services), and by default this uses the -`10.0.0.0/8` range. However, this sometimes overlaps with a range that you may -want to use; in particular the range cannot be used with EC2 ClassicLink. You -may also want to run kubernetes in an existing VPC where you have chosen a CIDR -in the `10.0.0.0/8` range. - -Setting this flag allows you to change this internal network CIDR. Note that -you must set other values consistently within the CIDR that you choose. 
- -For example, you might choose `172.16.0.0/14`; and you could then choose to -configure like this: - -``` -export NON_MASQUERADE_CIDR="172.16.0.0/14" -export SERVICE_CLUSTER_IP_RANGE="172.16.0.0/16" -export DNS_SERVER_IP="172.16.0.10" -export MASTER_IP_RANGE="172.17.0.0/24" -export CLUSTER_IP_RANGE="172.18.0.0/16" -``` - -When choosing a CIDR in the 172.20/12 reserved range you should be careful not -to choose a CIDR that overlaps your VPC CIDR (the kube-up script sets the VPC -CIDR to 172.20.0.0/16 by default, so you should not overlap that). If you want -to allow inter-VPC traffic you should be careful to avoid your other VPCs as -well. - -There is also a 100.64/10 address block which is reserved for "Carrier Grade -NAT", and which some users have reported success using. While we haven't seen -any problems, or conflicts with any AWS networks, we can't guarantee it. If you -decide you are comfortable using 100.64, you might use: - -``` -export NON_MASQUERADE_CIDR="100.64.0.0/10" -export SERVICE_CLUSTER_IP_RANGE="100.64.0.0/16" -export DNS_SERVER_IP="100.64.0.10" -export MASTER_IP_RANGE="100.65.0.0/24" -export CLUSTER_IP_RANGE="100.66.0.0/16" -``` - -**KUBE_VPC_CIDR_BASE** - -By default `kube-up.sh` will create a VPC with CIDR 172.20.0.0/16. `KUBE_VPC_CIDR_BASE` allows to configure -this CIDR. For example you may choose to use `172.21.0.0/16`: - -``` -export KUBE_VPC_CIDR_BASE=172.21 -``` - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/aws/options.md?pixel)]() diff --git a/cluster/aws/templates/configure-vm-aws.sh b/cluster/aws/templates/configure-vm-aws.sh deleted file mode 100755 index 0be5203add1..00000000000 --- a/cluster/aws/templates/configure-vm-aws.sh +++ /dev/null @@ -1,132 +0,0 @@ -#!/bin/bash - -# Copyright 2015 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Note: these functions override functions in the GCE configure-vm script -# We include the GCE script first, and this one second. - -ensure-basic-networking() { - : -} - -ensure-packages() { - apt-get-install curl - # For reading kube_env.yaml - apt-get-install python-yaml - - # TODO: Where to get safe_format_and_mount? 
-  mkdir -p /usr/share/google
-  cd /usr/share/google
-  download-or-bust "dc96f40fdc9a0815f099a51738587ef5a976f1da" https://raw.githubusercontent.com/GoogleCloudPlatform/compute-image-packages/82b75f314528b90485d5239ab5d5495cc22d775f/google-startup-scripts/usr/share/google/safe_format_and_mount
-  chmod +x safe_format_and_mount
-}
-
-set-kube-env() {
-  local kube_env_yaml="/etc/kubernetes/kube_env.yaml"
-
-  # kube-env has all the environment variables we care about, in a flat yaml format
-  eval "$(python -c '
-import pipes,sys,yaml
-
-for k,v in yaml.load(sys.stdin).iteritems():
-  print("""readonly {var}={value}""".format(var = k, value = pipes.quote(str(v))))
-  print("""export {var}""".format(var = k))
-' < "${kube_env_yaml}")"
-}
-
-remove-docker-artifacts() {
-  :
-}
-
-# Finds the master PD device
-find-master-pd() {
-  if ( grep "/mnt/master-pd" /proc/mounts ); then
-    echo "Master PD already mounted; won't remount"
-    MASTER_PD_DEVICE=""
-    return
-  fi
-  echo "Waiting for master pd to be attached"
-  attempt=0
-  while true; do
-    echo Attempt "$(($attempt+1))" to check for /dev/xvdb
-    if [[ -e /dev/xvdb ]]; then
-      echo "Found /dev/xvdb"
-      MASTER_PD_DEVICE="/dev/xvdb"
-      break
-    fi
-    attempt=$(($attempt+1))
-    sleep 1
-  done
-
-  # Mount the master PD as early as possible
-  echo "/dev/xvdb /mnt/master-pd ext4 noatime 0 0" >> /etc/fstab
-}
-
-fix-apt-sources() {
-  :
-}
-
-salt-master-role() {
-  cat <<EOF >/etc/salt/minion.d/grains.conf
-grains:
-  roles:
-    - kubernetes-master
-  cloud: aws
-EOF
-
-  # If the kubelet on the master is enabled, give it the same CIDR range
-  # as a generic node.
-  if [[ ! -z "${KUBELET_APISERVER:-}" ]] && [[ ! -z "${KUBELET_CERT:-}" ]] && [[ ! -z "${KUBELET_KEY:-}" ]]; then
-    cat <<EOF >>/etc/salt/minion.d/grains.conf
-  kubelet_api_servers: '${KUBELET_APISERVER}'
-EOF
-  else
-    # If the kubelet is running disconnected from a master, give it a fixed
-    # CIDR range.
-    cat <<EOF >>/etc/salt/minion.d/grains.conf
-  cbr-cidr: ${MASTER_IP_RANGE}
-EOF
-  fi
-
-  env-to-grains "runtime_config"
-  env-to-grains "kube_user"
-}
-
-salt-node-role() {
-  cat <<EOF >/etc/salt/minion.d/grains.conf
-grains:
-  roles:
-    - kubernetes-pool
-  cloud: aws
-  api_servers: '${API_SERVERS}'
-EOF
-
-  # We set the hostname_override to the full EC2 private dns name.
-  # We'd like to use the EC2 instance-id, but currently the kubelet health-check assumes the name
-  # is resolvable, although that check should be going away entirely (#7092)
-  if [[ -z "${HOSTNAME_OVERRIDE:-}" ]]; then
-    HOSTNAME_OVERRIDE=`curl --silent http://169.254.169.254/2007-01-19/meta-data/local-hostname`
-  fi
-
-  env-to-grains "hostname_override"
-}
-
-function run-user-script() {
-  # TODO(justinsb): Support user scripts on AWS
-  # AWS doesn't have as rich a metadata service as GCE does
-  # Maybe specify an env var that is the path to a script?
-  :
-}
-
diff --git a/cluster/aws/templates/format-disks.sh b/cluster/aws/templates/format-disks.sh
deleted file mode 100644
index 0ac29d2f9f2..00000000000
--- a/cluster/aws/templates/format-disks.sh
+++ /dev/null
@@ -1,226 +0,0 @@
-#!/bin/bash
-
-# Copyright 2015 The Kubernetes Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Discover all the ephemeral disks - -function ensure-local-disks() { - -# Skip if already mounted (a reboot) -if ( grep "/mnt/ephemeral" /proc/mounts ); then - echo "Found /mnt/ephemeral in /proc/mounts; skipping local disk initialization" - return -fi - -block_devices=() - -ephemeral_devices=$( (curl --silent http://169.254.169.254/2014-11-05/meta-data/block-device-mapping/ | grep ephemeral) || true ) -for ephemeral_device in $ephemeral_devices; do - echo "Checking ephemeral device: ${ephemeral_device}" - aws_device=$(curl --silent http://169.254.169.254/2014-11-05/meta-data/block-device-mapping/${ephemeral_device}) - - device_path="" - if [ -b /dev/$aws_device ]; then - device_path="/dev/$aws_device" - else - # Check for the xvd-style name - xvd_style=$(echo $aws_device | sed "s/sd/xvd/") - if [ -b /dev/$xvd_style ]; then - device_path="/dev/$xvd_style" - fi - fi - - if [[ -z ${device_path} ]]; then - echo " Could not find disk: ${ephemeral_device}@${aws_device}" - else - echo " Detected ephemeral disk: ${ephemeral_device}@${device_path}" - block_devices+=(${device_path}) - fi -done - -# These are set if we should move where docker/kubelet store data -# Note this gets set to the parent directory -move_docker="" -move_kubelet="" - -docker_storage=${DOCKER_STORAGE:-aufs} - -# Format the ephemeral disks -if [[ ${#block_devices[@]} == 0 ]]; then - echo "No ephemeral block devices found; will use aufs on root" - docker_storage="aufs" -else - echo "Block devices: ${block_devices[@]}" - - # Remove any existing mounts - for block_device in ${block_devices}; do - echo "Unmounting ${block_device}" - /bin/umount ${block_device} || echo "Ignoring failure umounting ${block_device}" - sed -i -e "\|^${block_device}|d" /etc/fstab - done - - # Remove any existing /mnt/ephemeral entry in /etc/fstab - sed -i -e "\|/mnt/ephemeral|d" /etc/fstab - - # Mount the storage - if [[ ${docker_storage} == "btrfs" ]]; then - apt-get-install btrfs-tools - - if [[ ${#block_devices[@]} == 1 ]]; then - echo "One ephemeral block device found; formatting with btrfs" - mkfs.btrfs -f ${block_devices[0]} - else - echo "Found multiple ephemeral block devices, formatting with btrfs as RAID-0" - mkfs.btrfs -f --data raid0 ${block_devices[@]} - fi - echo "${block_devices[0]} /mnt/ephemeral btrfs noatime,nofail 0 0" >> /etc/fstab - mkdir -p /mnt/ephemeral - mount /mnt/ephemeral - - mkdir -p /mnt/ephemeral/kubernetes - - move_docker="/mnt/ephemeral" - move_kubelet="/mnt/ephemeral/kubernetes" - elif [[ ${docker_storage} == "aufs-nolvm" ]]; then - if [[ ${#block_devices[@]} != 1 ]]; then - echo "aufs-nolvm selected, but multiple ephemeral devices were found; only the first will be available" - fi - - mkfs -t ext4 ${block_devices[0]} - echo "${block_devices[0]} /mnt/ephemeral ext4 noatime,nofail 0 0" >> /etc/fstab - mkdir -p /mnt/ephemeral - mount /mnt/ephemeral - - mkdir -p /mnt/ephemeral/kubernetes - - move_docker="/mnt/ephemeral" - move_kubelet="/mnt/ephemeral/kubernetes" - elif [[ ${docker_storage} == "devicemapper" || ${docker_storage} == "aufs" ]]; then - # We always use LVM, even with one device - # In devicemapper 
mode, Docker can use LVM directly - # Also, fewer code paths are good - echo "Using LVM2 and ext4" - apt-get-install lvm2 - - # Don't output spurious "File descriptor X leaked on vgcreate invocation." - # Known bug: e.g. Ubuntu #591823 - export LVM_SUPPRESS_FD_WARNINGS=1 - - for block_device in ${block_devices}; do - pvcreate ${block_device} - done - vgcreate vg-ephemeral ${block_devices[@]} - - if [[ ${docker_storage} == "devicemapper" ]]; then - # devicemapper thin provisioning, managed by docker - # This is the best option, but it is sadly broken on most distros - # Bug: https://github.com/docker/docker/issues/4036 - - # 80% goes to the docker thin-pool; we want to leave some space for host-volumes - lvcreate -l 80%VG --thinpool docker-thinpool vg-ephemeral - - DOCKER_OPTS="${DOCKER_OPTS:-} --storage-opt dm.thinpooldev=/dev/mapper/vg--ephemeral-docker--thinpool" - # Note that we don't move docker; docker goes direct to the thinpool - - # Remaining space (20%) is for kubernetes data - # TODO: Should this be a thin pool? e.g. would we ever want to snapshot this data? - lvcreate -l 100%FREE -n kubernetes vg-ephemeral - mkfs -t ext4 /dev/vg-ephemeral/kubernetes - mkdir -p /mnt/ephemeral/kubernetes - echo "/dev/vg-ephemeral/kubernetes /mnt/ephemeral/kubernetes ext4 noatime,nofail 0 0" >> /etc/fstab - mount /mnt/ephemeral/kubernetes - - move_kubelet="/mnt/ephemeral/kubernetes" - else - # aufs - # We used to split docker & kubernetes, but we no longer do that, because - # host volumes go into the kubernetes area, and it is otherwise very easy - # to fill up small volumes. - # - # No need for thin pool since we are not over-provisioning or doing snapshots - # (probably shouldn't be doing snapshots on ephemeral disk? Should be stateless-ish.) - # Tried to do it, but it cause problems (#16188) - - lvcreate -l 100%VG -n ephemeral vg-ephemeral - mkfs -t ext4 /dev/vg-ephemeral/ephemeral - mkdir -p /mnt/ephemeral - echo "/dev/vg-ephemeral/ephemeral /mnt/ephemeral ext4 noatime,nofail 0 0" >> /etc/fstab - mount /mnt/ephemeral - - mkdir -p /mnt/ephemeral/kubernetes - - move_docker="/mnt/ephemeral" - move_kubelet="/mnt/ephemeral/kubernetes" - fi - else - echo "Ignoring unknown DOCKER_STORAGE: ${docker_storage}" - fi -fi - - -if [[ ${docker_storage} == "btrfs" ]]; then - DOCKER_OPTS="${DOCKER_OPTS:-} -s btrfs" -elif [[ ${docker_storage} == "aufs-nolvm" || ${docker_storage} == "aufs" ]]; then - # Install aufs kernel module - # Fix issue #14162 with extra-virtual - if [[ `lsb_release -i -s` == 'Ubuntu' ]]; then - apt-get-install linux-image-extra-$(uname -r) linux-image-extra-virtual - fi - - # Install aufs tools - apt-get-install aufs-tools - - DOCKER_OPTS="${DOCKER_OPTS:-} -s aufs" -elif [[ ${docker_storage} == "devicemapper" ]]; then - DOCKER_OPTS="${DOCKER_OPTS:-} -s devicemapper" -else - echo "Ignoring unknown DOCKER_STORAGE: ${docker_storage}" -fi - -if [[ -n "${move_docker}" ]]; then - # Stop docker if it is running, so we can move its files - systemctl stop docker || true - - # Move docker to e.g. /mnt - # but only if it is a directory, not a symlink left over from a previous run - if [[ -d /var/lib/docker ]]; then - mv /var/lib/docker ${move_docker}/ - fi - mkdir -p ${move_docker}/docker - # If /var/lib/docker doesn't exist (it will exist if it is already a symlink), - # then symlink it to the ephemeral docker area - if [[ ! 
-e /var/lib/docker ]]; then - ln -s ${move_docker}/docker /var/lib/docker - fi - DOCKER_ROOT="${move_docker}/docker" - DOCKER_OPTS="${DOCKER_OPTS:-} -g ${DOCKER_ROOT}" -fi - -if [[ -n "${move_kubelet}" ]]; then - # Move /var/lib/kubelet to e.g. /mnt - # (the backing for empty-dir volumes can use a lot of space!) - # (As with /var/lib/docker, only if it is a directory; skip if symlink) - if [[ -d /var/lib/kubelet ]]; then - mv /var/lib/kubelet ${move_kubelet}/ - fi - mkdir -p ${move_kubelet}/kubelet - # Create symlink for /var/lib/kubelet, unless it is already a symlink - if [[ ! -e /var/lib/kubelet ]]; then - ln -s ${move_kubelet}/kubelet /var/lib/kubelet - fi - KUBELET_ROOT="${move_kubelet}/kubelet" -fi - -} diff --git a/cluster/aws/templates/iam/kubernetes-master-policy.json b/cluster/aws/templates/iam/kubernetes-master-policy.json deleted file mode 100644 index e5cbaea8039..00000000000 --- a/cluster/aws/templates/iam/kubernetes-master-policy.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": ["ec2:*"], - "Resource": ["*"] - }, - { - "Effect": "Allow", - "Action": ["elasticloadbalancing:*"], - "Resource": ["*"] - }, - { - "Effect": "Allow", - "Action": ["route53:*"], - "Resource": ["*"] - }, - { - "Effect": "Allow", - "Action": "s3:*", - "Resource": [ - "arn:aws:s3:::kubernetes-*" - ] - } - ] -} diff --git a/cluster/aws/templates/iam/kubernetes-master-role.json b/cluster/aws/templates/iam/kubernetes-master-role.json deleted file mode 100644 index 66d5de1d5ae..00000000000 --- a/cluster/aws/templates/iam/kubernetes-master-role.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { "Service": "ec2.amazonaws.com"}, - "Action": "sts:AssumeRole" - } - ] -} diff --git a/cluster/aws/templates/iam/kubernetes-minion-policy.json b/cluster/aws/templates/iam/kubernetes-minion-policy.json deleted file mode 100644 index af81e98c824..00000000000 --- a/cluster/aws/templates/iam/kubernetes-minion-policy.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": "s3:*", - "Resource": [ - "arn:aws:s3:::kubernetes-*" - ] - }, - { - "Effect": "Allow", - "Action": "ec2:Describe*", - "Resource": "*" - }, - { - "Effect": "Allow", - "Action": "ec2:AttachVolume", - "Resource": "*" - }, - { - "Effect": "Allow", - "Action": "ec2:DetachVolume", - "Resource": "*" - }, - { - "Effect": "Allow", - "Action": ["route53:*"], - "Resource": ["*"] - }, - { - "Effect": "Allow", - "Action": [ - "ecr:GetAuthorizationToken", - "ecr:BatchCheckLayerAvailability", - "ecr:GetDownloadUrlForLayer", - "ecr:GetRepositoryPolicy", - "ecr:DescribeRepositories", - "ecr:ListImages", - "ecr:BatchGetImage" - ], - "Resource": "*" - } - ] -} diff --git a/cluster/aws/templates/iam/kubernetes-minion-role.json b/cluster/aws/templates/iam/kubernetes-minion-role.json deleted file mode 100644 index 66d5de1d5ae..00000000000 --- a/cluster/aws/templates/iam/kubernetes-minion-role.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { "Service": "ec2.amazonaws.com"}, - "Action": "sts:AssumeRole" - } - ] -} diff --git a/cluster/aws/util.sh b/cluster/aws/util.sh index e646ce934c9..483bda04cb4 100755 --- a/cluster/aws/util.sh +++ b/cluster/aws/util.sh @@ -14,1618 +14,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
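Before the util.sh body, a note on how the four IAM JSON documents above fit together: the master and minion each get a pair, a trust policy (`kubernetes-*-role.json`) that lets EC2 instances assume the role, and a permissions policy (`kubernetes-*-policy.json`) that grants the AWS API rights that component needs. They are wired together by the `create-iam-profile` helper that appears later in this diff; for the master pair the sequence is essentially:

```bash
# Mirrors create-iam-profile (further down in this diff) for the master pair;
# ${key} would be IAM_PROFILE_MASTER, i.e. "kubernetes-master-${CLUSTER_ID}-${VPC_NAME}".
conf_dir="file://${KUBE_ROOT}/cluster/aws/templates/iam"

aws iam create-role --role-name "${key}" \
    --assume-role-policy-document "${conf_dir}/kubernetes-master-role.json"
aws iam put-role-policy --role-name "${key}" --policy-name "${key}" \
    --policy-document "${conf_dir}/kubernetes-master-policy.json"
aws iam create-instance-profile --instance-profile-name "${key}"
aws iam add-role-to-instance-profile --instance-profile-name "${key}" \
    --role-name "${key}"
```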
-# A library of helper functions and constant for the local config. - -# Experimental flags can be removed/renamed at any time. -# The intent is to allow experimentation/advanced functionality before we -# are ready to commit to supporting it. -# Experimental functionality: -# KUBE_USE_EXISTING_MASTER=true -# Detect and reuse an existing master; useful if you want to -# create more nodes, perhaps with a different instance type or in -# a different subnet/AZ -# KUBE_SUBNET_CIDR=172.20.1.0/24 -# Override the default subnet CIDR; useful if you want to create -# a second subnet. The default subnet is 172.20.0.0/24. The VPC -# is created with 172.20.0.0/16; you must pick a sub-CIDR of that. - -# Use the config file specified in $KUBE_CONFIG_FILE, or default to -# config-default.sh. KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. -source "${KUBE_ROOT}/cluster/aws/${KUBE_CONFIG_FILE-"config-default.sh"}" -source "${KUBE_ROOT}/cluster/common.sh" source "${KUBE_ROOT}/cluster/lib/util.sh" -if [[ -z "${KUBE_AWS_DEPRECATION_WARNED:-}" ]]; then - echo -e "${color_red}WARNING${color_norm}: The bash deployment for AWS is deprecated and will be removed in v1.7." >&2 - echo "For a list of viable alternatives, see:" >&2 - echo >&2 - echo " http://kubernetes.io/docs/getting-started-guides/aws/" >&2 - echo >&2 - export KUBE_AWS_DEPRECATION_WARNED=yes -fi - -ALLOCATE_NODE_CIDRS=true - -NODE_INSTANCE_PREFIX="${INSTANCE_PREFIX}-minion" - -# The Auto Scaling Group (ASG) name must be unique, so we include the zone -ASG_NAME="${NODE_INSTANCE_PREFIX}-group-${ZONE}" - -# We could allow the master disk volume id to be specified in future -MASTER_DISK_ID= - -# Well known tags -TAG_KEY_MASTER_IP="kubernetes.io/master-ip" - -OS_DISTRIBUTION=${KUBE_OS_DISTRIBUTION} - -# Defaults: ubuntu -> wily -if [[ "${OS_DISTRIBUTION}" == "ubuntu" ]]; then - OS_DISTRIBUTION=wily -fi - -# Loads the distro-specific utils script. -# If the distro is not recommended, prints warnings or exits. 
-function load_distro_utils () { -case "${OS_DISTRIBUTION}" in - jessie) - ;; - wily) - ;; - vivid) - echo "vivid is no longer supported by kube-up; please use jessie instead" >&2 - exit 2 - ;; - coreos) - echo "coreos is no longer supported by kube-up; please use jessie instead" >&2 - exit 2 - ;; - trusty) - echo "trusty is no longer supported by kube-up; please use jessie or wily instead" >&2 - exit 2 - ;; - wheezy) - echo "wheezy is no longer supported by kube-up; please use jessie instead" >&2 - exit 2 - ;; - *) - echo "Cannot start cluster using os distro: ${OS_DISTRIBUTION}" >&2 - echo "The current recommended distro is jessie" >&2 - exit 2 - ;; -esac - -source "${KUBE_ROOT}/cluster/aws/${OS_DISTRIBUTION}/util.sh" -} - -load_distro_utils - -# This removes the final character in bash (somehow) -re='[a-zA-Z]' -if [[ ${ZONE: -1} =~ $re ]]; then - AWS_REGION=${ZONE%?} -else - AWS_REGION=$ZONE -fi - -export AWS_DEFAULT_REGION=${AWS_REGION} -export AWS_DEFAULT_OUTPUT=text -AWS_CMD="aws ec2" -AWS_ASG_CMD="aws autoscaling" - -VPC_CIDR_BASE=${KUBE_VPC_CIDR_BASE:-172.20} -MASTER_IP_SUFFIX=.9 -VPC_CIDR=${VPC_CIDR_BASE}.0.0/16 -SUBNET_CIDR=${VPC_CIDR_BASE}.0.0/24 -if [[ -n "${KUBE_SUBNET_CIDR:-}" ]]; then - echo "Using subnet CIDR override: ${KUBE_SUBNET_CIDR}" - SUBNET_CIDR=${KUBE_SUBNET_CIDR} -fi -if [[ -z "${MASTER_INTERNAL_IP-}" ]]; then - MASTER_INTERNAL_IP="${SUBNET_CIDR%.*}${MASTER_IP_SUFFIX}" -fi - -MASTER_SG_NAME="kubernetes-master-${CLUSTER_ID}" -NODE_SG_NAME="kubernetes-minion-${CLUSTER_ID}" - -IAM_PROFILE_MASTER="kubernetes-master-${CLUSTER_ID}-${VPC_NAME}" -IAM_PROFILE_NODE="kubernetes-minion-${CLUSTER_ID}-${VPC_NAME}" - -# Be sure to map all the ephemeral drives. We can specify more than we actually have. -# TODO: Actually mount the correct number (especially if we have more), though this is non-trivial, and -# only affects the big storage instance types, which aren't a typical use case right now. -EPHEMERAL_BLOCK_DEVICE_MAPPINGS=",{\"DeviceName\": \"/dev/sdc\",\"VirtualName\":\"ephemeral0\"},{\"DeviceName\": \"/dev/sdd\",\"VirtualName\":\"ephemeral1\"},{\"DeviceName\": \"/dev/sde\",\"VirtualName\":\"ephemeral2\"},{\"DeviceName\": \"/dev/sdf\",\"VirtualName\":\"ephemeral3\"}" - -# Experimental: If the user sets KUBE_AWS_STORAGE to ebs, use ebs storage -# in preference to local instance storage We do this by not mounting any -# instance storage. We could do this better in future (e.g. 
making instance -# storage available for other purposes) -if [[ "${KUBE_AWS_STORAGE:-}" == "ebs" ]]; then - EPHEMERAL_BLOCK_DEVICE_MAPPINGS="" -fi - -# TODO (bburns) Parameterize this for multiple cluster per project -function get_vpc_id { - $AWS_CMD describe-vpcs \ - --filters Name=tag:Name,Values=${VPC_NAME} \ - Name=tag:KubernetesCluster,Values=${CLUSTER_ID} \ - --query Vpcs[].VpcId -} - -function get_subnet_id { - local vpc_id=$1 - local az=$2 - $AWS_CMD describe-subnets \ - --filters Name=tag:KubernetesCluster,Values=${CLUSTER_ID} \ - Name=availabilityZone,Values=${az} \ - Name=vpc-id,Values=${vpc_id} \ - --query Subnets[].SubnetId -} - -function get_igw_id { - local vpc_id=$1 - $AWS_CMD describe-internet-gateways \ - --filters Name=attachment.vpc-id,Values=${vpc_id} \ - --query InternetGateways[].InternetGatewayId -} - -function get_elbs_in_vpc { - # ELB doesn't seem to be on the same platform as the rest of AWS; doesn't support filtering - aws elb --output json describe-load-balancers | \ - python -c "import json,sys; lst = [str(lb['LoadBalancerName']) for lb in json.load(sys.stdin)['LoadBalancerDescriptions'] if 'VPCId' in lb and lb['VPCId'] == '$1']; print('\n'.join(lst))" -} - -function get_instanceid_from_name { - local tagName=$1 - $AWS_CMD describe-instances \ - --filters Name=tag:Name,Values=${tagName} \ - Name=instance-state-name,Values=running \ - Name=tag:KubernetesCluster,Values=${CLUSTER_ID} \ - --query Reservations[].Instances[].InstanceId -} - -function get_instance_public_ip { - local instance_id=$1 - $AWS_CMD describe-instances \ - --instance-ids ${instance_id} \ - --query Reservations[].Instances[].NetworkInterfaces[0].Association.PublicIp -} - -function get_instance_private_ip { - local instance_id=$1 - $AWS_CMD describe-instances \ - --instance-ids ${instance_id} \ - --query Reservations[].Instances[].NetworkInterfaces[0].PrivateIpAddress -} - -# Gets a security group id, by name ($1) -function get_security_group_id { - local name=$1 - $AWS_CMD describe-security-groups \ - --filters Name=vpc-id,Values=${VPC_ID} \ - Name=group-name,Values=${name} \ - Name=tag:KubernetesCluster,Values=${CLUSTER_ID} \ - --query SecurityGroups[].GroupId \ - | tr "\t" "\n" -} - -# Finds the master ip, if it is saved (tagged on the master disk) -# Sets KUBE_MASTER_IP -function find-tagged-master-ip { - find-master-pd - if [[ -n "${MASTER_DISK_ID:-}" ]]; then - KUBE_MASTER_IP=$(get-tag ${MASTER_DISK_ID} ${TAG_KEY_MASTER_IP}) - fi -} - -# Gets a tag value from an AWS resource -# usage: get-tag -# outputs: the tag value, or "" if no tag -function get-tag { - $AWS_CMD describe-tags --filters Name=resource-id,Values=${1} \ - Name=key,Values=${2} \ - --query Tags[].Value -} - -# Gets an existing master, exiting if not found -# Note that this is called directly by the e2e tests -function detect-master() { - find-tagged-master-ip - KUBE_MASTER=${MASTER_NAME} - if [[ -z "${KUBE_MASTER_IP:-}" ]]; then - echo "Could not detect Kubernetes master node IP. 
Make sure you've launched a cluster with 'kube-up.sh'" - exit 1 - fi - echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)" -} - -# Reads kube-env metadata from master -# -# Assumed vars: -# KUBE_MASTER_IP -# AWS_SSH_KEY -# SSH_USER -function get-master-env() { - ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" ${SSH_USER}@${KUBE_MASTER_IP} sudo cat /etc/kubernetes/kube_env.yaml -} - - -function query-running-minions () { - local query=$1 - $AWS_CMD describe-instances \ - --filters Name=instance-state-name,Values=running \ - Name=vpc-id,Values=${VPC_ID} \ - Name=tag:KubernetesCluster,Values=${CLUSTER_ID} \ - Name=tag:aws:autoscaling:groupName,Values=${ASG_NAME} \ - Name=tag:Role,Values=${NODE_TAG} \ - --query ${query} -} - -function detect-node-names () { - # If this is called directly, VPC_ID might not be set - # (this is case from cluster/log-dump.sh) - if [[ -z "${VPC_ID:-}" ]]; then - VPC_ID=$(get_vpc_id) - fi - - NODE_IDS=() - NODE_NAMES=() - for id in $(query-running-minions "Reservations[].Instances[].InstanceId"); do - NODE_IDS+=("${id}") - - # We use the minion ids as the name - NODE_NAMES+=("${id}") - done -} - -# Called to detect the project on GCE -# Not needed on AWS -function detect-project() { - : -} - -function detect-nodes () { - detect-node-names - - # This is inefficient, but we want NODE_NAMES / NODE_IDS to be ordered the same as KUBE_NODE_IP_ADDRESSES - KUBE_NODE_IP_ADDRESSES=() - for (( i=0; i<${#NODE_NAMES[@]}; i++)); do - local minion_ip - if [[ "${ENABLE_NODE_PUBLIC_IP}" == "true" ]]; then - minion_ip=$(get_instance_public_ip ${NODE_NAMES[$i]}) - else - minion_ip=$(get_instance_private_ip ${NODE_NAMES[$i]}) - fi - echo "Found minion ${i}: ${NODE_NAMES[$i]} @ ${minion_ip}" - KUBE_NODE_IP_ADDRESSES+=("${minion_ip}") - done - - if [[ -z "$KUBE_NODE_IP_ADDRESSES" ]]; then - echo "Could not detect Kubernetes minion nodes. Make sure you've launched a cluster with 'kube-up.sh'" - exit 1 - fi -} - -function detect-security-groups { - if [[ -z "${MASTER_SG_ID-}" ]]; then - MASTER_SG_ID=$(get_security_group_id "${MASTER_SG_NAME}") - if [[ -z "${MASTER_SG_ID}" ]]; then - echo "Could not detect Kubernetes master security group. Make sure you've launched a cluster with 'kube-up.sh'" - exit 1 - else - echo "Using master security group: ${MASTER_SG_NAME} ${MASTER_SG_ID}" - fi - fi - if [[ -z "${NODE_SG_ID-}" ]]; then - NODE_SG_ID=$(get_security_group_id "${NODE_SG_NAME}") - if [[ -z "${NODE_SG_ID}" ]]; then - echo "Could not detect Kubernetes minion security group. Make sure you've launched a cluster with 'kube-up.sh'" - exit 1 - else - echo "Using minion security group: ${NODE_SG_NAME} ${NODE_SG_ID}" - fi - fi -} - -# Detects the AMI to use (considering the region) -# This really should be in the various distro-specific util functions, -# but CoreOS uses this for the master, so for now it is here. 
-# -# TODO: Remove this and just have each distro implement detect-image -# -# Vars set: -# AWS_IMAGE -function detect-image () { -case "${OS_DISTRIBUTION}" in - wily) - detect-wily-image - ;; - jessie) - detect-jessie-image - ;; - *) - echo "Please specify AWS_IMAGE directly (distro ${OS_DISTRIBUTION} not recognized)" - exit 2 - ;; -esac -} - -# Detects the RootDevice to use in the Block Device Mapping (considering the AMI) -# -# Vars set: -# MASTER_BLOCK_DEVICE_MAPPINGS -# NODE_BLOCK_DEVICE_MAPPINGS -# -function detect-root-device { - local master_image=${AWS_IMAGE} - local node_image=${KUBE_NODE_IMAGE} - - ROOT_DEVICE_MASTER=$($AWS_CMD describe-images --image-ids ${master_image} --query 'Images[].RootDeviceName') - if [[ "${master_image}" == "${node_image}" ]]; then - ROOT_DEVICE_NODE=${ROOT_DEVICE_MASTER} - else - ROOT_DEVICE_NODE=$($AWS_CMD describe-images --image-ids ${node_image} --query 'Images[].RootDeviceName') - fi - - MASTER_BLOCK_DEVICE_MAPPINGS="[{\"DeviceName\":\"${ROOT_DEVICE_MASTER}\",\"Ebs\":{\"DeleteOnTermination\":true,\"VolumeSize\":${MASTER_ROOT_DISK_SIZE},\"VolumeType\":\"${MASTER_ROOT_DISK_TYPE}\"}} ${EPHEMERAL_BLOCK_DEVICE_MAPPINGS}]" - NODE_BLOCK_DEVICE_MAPPINGS="[{\"DeviceName\":\"${ROOT_DEVICE_NODE}\",\"Ebs\":{\"DeleteOnTermination\":true,\"VolumeSize\":${NODE_ROOT_DISK_SIZE},\"VolumeType\":\"${NODE_ROOT_DISK_TYPE}\"}} ${EPHEMERAL_BLOCK_DEVICE_MAPPINGS}]" -} - -# Computes the AWS fingerprint for a public key file ($1) -# $1: path to public key file -# Note that this is a different hash from the OpenSSH hash. -# But AWS gives us this public key hash in the describe keys output, so we should stick with this format. -# Hopefully this will be done by the aws cli tool one day: https://github.com/aws/aws-cli/issues/191 -# NOTE: This does not work on Mavericks, due to an odd ssh-keygen version, so we use get-ssh-fingerprint instead -function get-aws-fingerprint { - local -r pubkey_path=$1 - ssh-keygen -f ${pubkey_path} -e -m PKCS8 | openssl rsa -pubin -outform DER | openssl md5 -c | sed -e 's/(stdin)= //g' -} - -# Computes the SSH fingerprint for a public key file ($1) -# #1: path to public key file -# Note this is different from the AWS fingerprint; see notes on get-aws-fingerprint -function get-ssh-fingerprint { - local -r pubkey_path=$1 - ssh-keygen -lf ${pubkey_path} | cut -f2 -d' ' -} - -# Import an SSH public key to AWS. -# Ignores duplicate names; recommended to use a name that includes the public key hash. -# $1 name -# $2 public key path -function import-public-key { - local -r name=$1 - local -r path=$2 - - local ok=1 - local output="" - output=$($AWS_CMD import-key-pair --key-name ${name} --public-key-material "file://${path}" 2>&1) || ok=0 - if [[ ${ok} == 0 ]]; then - # Idempotency: ignore if duplicate name - if [[ "${output}" != *"InvalidKeyPair.Duplicate"* ]]; then - echo "Error importing public key" - echo "Output: ${output}" - exit 1 - fi - fi -} - -# Robustly try to create a security group, if it does not exist. -# $1: The name of security group; will be created if not exists -# $2: Description for security group (used if created) -# -# Note that this doesn't actually return the sgid; we need to re-query -function create-security-group { - local -r name=$1 - local -r description=$2 - - local sgid=$(get_security_group_id "${name}") - if [[ -z "$sgid" ]]; then - echo "Creating security group ${name}." 
- sgid=$($AWS_CMD create-security-group --group-name "${name}" --description "${description}" --vpc-id "${VPC_ID}" --query GroupId) - add-tag $sgid KubernetesCluster ${CLUSTER_ID} - fi -} - -# Authorize ingress to a security group. -# Attempts to be idempotent, though we end up checking the output looking for error-strings. -# $1 group-id -# $2.. arguments to pass to authorize-security-group-ingress -function authorize-security-group-ingress { - local -r sgid=$1 - shift - local ok=1 - local output="" - output=$($AWS_CMD authorize-security-group-ingress --group-id "${sgid}" $@ 2>&1) || ok=0 - if [[ ${ok} == 0 ]]; then - # Idempotency: ignore if duplicate rule - if [[ "${output}" != *"InvalidPermission.Duplicate"* ]]; then - echo "Error creating security group ingress rule" - echo "Output: ${output}" - exit 1 - fi - fi -} - -# Gets master persistent volume, if exists -# Sets MASTER_DISK_ID -function find-master-pd { - local name=${MASTER_NAME}-pd - if [[ -z "${MASTER_DISK_ID}" ]]; then - local zone_filter="Name=availability-zone,Values=${ZONE}" - if [[ "${KUBE_USE_EXISTING_MASTER:-}" == "true" ]]; then - # If we're reusing an existing master, it is likely to be in another zone - # If running multizone, your cluster must be uniquely named across zones - zone_filter="" - fi - MASTER_DISK_ID=`$AWS_CMD describe-volumes \ - --filters ${zone_filter} \ - Name=tag:Name,Values=${name} \ - Name=tag:KubernetesCluster,Values=${CLUSTER_ID} \ - --query Volumes[].VolumeId` - fi -} - -# Gets or creates master persistent volume -# Sets MASTER_DISK_ID -function ensure-master-pd { - local name=${MASTER_NAME}-pd - - find-master-pd - - if [[ -z "${MASTER_DISK_ID}" ]]; then - echo "Creating master disk: size ${MASTER_DISK_SIZE}GB, type ${MASTER_DISK_TYPE}" - MASTER_DISK_ID=`$AWS_CMD create-volume --availability-zone ${ZONE} --volume-type ${MASTER_DISK_TYPE} --size ${MASTER_DISK_SIZE} --query VolumeId` - add-tag ${MASTER_DISK_ID} Name ${name} - add-tag ${MASTER_DISK_ID} KubernetesCluster ${CLUSTER_ID} - fi -} - -# Configures a CloudWatch alarm to reboot the instance on failure -function reboot-on-failure { - local instance_id=$1 - - echo "Creating Cloudwatch alarm to reboot instance ${instance_id} on failure" - - local aws_owner_id=`aws ec2 describe-instances --instance-ids ${instance_id} --query Reservations[0].OwnerId` - if [[ -z "${aws_owner_id}" ]]; then - echo "Unable to determinate AWS account id for ${instance_id}" - exit 1 - fi - - aws cloudwatch put-metric-alarm \ - --alarm-name k8s-${instance_id}-statuscheckfailure-reboot \ - --alarm-description "Reboot ${instance_id} on status check failure" \ - --namespace "AWS/EC2" \ - --dimensions Name=InstanceId,Value=${instance_id} \ - --statistic Minimum \ - --metric-name StatusCheckFailed \ - --comparison-operator GreaterThanThreshold \ - --threshold 0 \ - --period 60 \ - --evaluation-periods 3 \ - --alarm-actions arn:aws:swf:${AWS_REGION}:${aws_owner_id}:action/actions/AWS_EC2.InstanceId.Reboot/1.0 > $LOG - - # TODO: The IAM role EC2ActionsAccess must have been created - # See e.g. 
http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/UsingIAM.html -} - -function delete-instance-alarms { - local instance_id=$1 - - alarm_names=`aws cloudwatch describe-alarms --alarm-name-prefix k8s-${instance_id}- --query MetricAlarms[].AlarmName` - for alarm_name in ${alarm_names}; do - aws cloudwatch delete-alarms --alarm-names ${alarm_name} > $LOG - done -} - -# Finds the existing master IP, or creates/reuses an Elastic IP -# If MASTER_RESERVED_IP looks like an IP address, we will use it; -# otherwise we will create a new elastic IP -# Sets KUBE_MASTER_IP -function ensure-master-ip { - find-tagged-master-ip - - if [[ -z "${KUBE_MASTER_IP:-}" ]]; then - # Check if MASTER_RESERVED_IP looks like an IPv4 address - # Note that we used to only allocate an elastic IP when MASTER_RESERVED_IP=auto - # So be careful changing the IPV4 test, to be sure that 'auto' => 'allocate' - if [[ "${MASTER_RESERVED_IP}" =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then - KUBE_MASTER_IP="${MASTER_RESERVED_IP}" - else - KUBE_MASTER_IP=`$AWS_CMD allocate-address --domain vpc --query PublicIp` - echo "Allocated Elastic IP for master: ${KUBE_MASTER_IP}" - fi - - # We can't tag elastic ips. Instead we put the tag on the persistent disk. - # It is a little weird, perhaps, but it sort of makes sense... - # The master mounts the master PD, and whoever mounts the master PD should also - # have the master IP - add-tag ${MASTER_DISK_ID} ${TAG_KEY_MASTER_IP} ${KUBE_MASTER_IP} - fi -} - -# Creates a new DHCP option set configured correctly for Kubernetes when DHCP_OPTION_SET_ID is not specified -# Sets DHCP_OPTION_SET_ID -function create-dhcp-option-set () { - if [[ -z ${DHCP_OPTION_SET_ID-} ]]; then - case "${AWS_REGION}" in - us-east-1) - OPTION_SET_DOMAIN=ec2.internal - ;; - - *) - OPTION_SET_DOMAIN="${AWS_REGION}.compute.internal" - esac - - DHCP_OPTION_SET_ID=$($AWS_CMD create-dhcp-options --dhcp-configuration Key=domain-name,Values=${OPTION_SET_DOMAIN} Key=domain-name-servers,Values=AmazonProvidedDNS --query DhcpOptions.DhcpOptionsId) - - add-tag ${DHCP_OPTION_SET_ID} Name kubernetes-dhcp-option-set - add-tag ${DHCP_OPTION_SET_ID} KubernetesCluster ${CLUSTER_ID} - fi - - $AWS_CMD associate-dhcp-options --dhcp-options-id ${DHCP_OPTION_SET_ID} --vpc-id ${VPC_ID} > $LOG - - echo "Using DHCP option set ${DHCP_OPTION_SET_ID}" -} - -# Verify prereqs -function verify-prereqs { - if [[ "$(which aws)" == "" ]]; then - echo "Can't find aws in PATH, please fix and retry." - exit 1 - fi -} - -# Take the local tar files and upload them to S3. They will then be -# downloaded by the master as part of the start up script for the master. -# -# Assumed vars: -# SERVER_BINARY_TAR -# SALT_TAR -# Vars set: -# SERVER_BINARY_TAR_URL -# SALT_TAR_URL -function upload-server-tars() { - SERVER_BINARY_TAR_URL= - SERVER_BINARY_TAR_HASH= - SALT_TAR_URL= - SALT_TAR_HASH= - BOOTSTRAP_SCRIPT_URL= - BOOTSTRAP_SCRIPT_HASH= - - ensure-temp-dir - - SERVER_BINARY_TAR_HASH=$(sha1sum-file "${SERVER_BINARY_TAR}") - SALT_TAR_HASH=$(sha1sum-file "${SALT_TAR}") - BOOTSTRAP_SCRIPT_HASH=$(sha1sum-file "${BOOTSTRAP_SCRIPT}") - - if [[ -z ${AWS_S3_BUCKET-} ]]; then - local project_hash= - local key=$(aws configure get aws_access_key_id) - if which md5 > /dev/null 2>&1; then - project_hash=$(md5 -q -s "${USER} ${key} ${INSTANCE_PREFIX}") - else - project_hash=$(echo -n "${USER} ${key} ${INSTANCE_PREFIX}" | md5sum | awk '{ print $1 }') - fi - AWS_S3_BUCKET="kubernetes-staging-${project_hash}" - fi - - echo "Uploading to Amazon S3" - - if ! 
aws s3api get-bucket-location --bucket ${AWS_S3_BUCKET} > /dev/null 2>&1 ; then - echo "Creating ${AWS_S3_BUCKET}" - - # Buckets must be globally uniquely named, so always create in a known region - # We default to us-east-1 because that's the canonical region for S3, - # and then the bucket is most-simply named (s3.amazonaws.com) - aws s3 mb "s3://${AWS_S3_BUCKET}" --region ${AWS_S3_REGION} - - echo "Confirming bucket was created..." - - local attempt=0 - while true; do - if ! aws s3 ls --region ${AWS_S3_REGION} "s3://${AWS_S3_BUCKET}" > /dev/null 2>&1; then - if (( attempt > 120 )); then - echo - echo -e "${color_red}Unable to confirm bucket creation." >&2 - echo "Please ensure that s3://${AWS_S3_BUCKET} exists" >&2 - echo -e "and run the script again. (sorry!)${color_norm}" >&2 - exit 1 - fi - else - break - fi - attempt=$(($attempt+1)) - sleep 1 - done - fi - - local s3_bucket_location=$(aws s3api get-bucket-location --bucket ${AWS_S3_BUCKET}) - local s3_url_base=https://s3-${s3_bucket_location}.amazonaws.com - if [[ "${s3_bucket_location}" == "None" ]]; then - # "US Classic" does not follow the pattern - s3_url_base=https://s3.amazonaws.com - s3_bucket_location=us-east-1 - elif [[ "${s3_bucket_location}" == "cn-north-1" ]]; then - s3_url_base=https://s3.cn-north-1.amazonaws.com.cn - fi - - local -r staging_path="devel" - - local -r local_dir="${KUBE_TEMP}/s3/" - mkdir ${local_dir} - - echo "+++ Staging server tars to S3 Storage: ${AWS_S3_BUCKET}/${staging_path}" - cp -a "${SERVER_BINARY_TAR}" ${local_dir} - cp -a "${SALT_TAR}" ${local_dir} - cp -a "${BOOTSTRAP_SCRIPT}" ${local_dir} - - aws s3 sync --region ${s3_bucket_location} --exact-timestamps ${local_dir} "s3://${AWS_S3_BUCKET}/${staging_path}/" - - local server_binary_path="${staging_path}/${SERVER_BINARY_TAR##*/}" - aws s3api put-object-acl --region ${s3_bucket_location} --bucket ${AWS_S3_BUCKET} --key "${server_binary_path}" --grant-read 'uri="http://acs.amazonaws.com/groups/global/AllUsers"' - SERVER_BINARY_TAR_URL="${s3_url_base}/${AWS_S3_BUCKET}/${server_binary_path}" - - local salt_tar_path="${staging_path}/${SALT_TAR##*/}" - aws s3api put-object-acl --region ${s3_bucket_location} --bucket ${AWS_S3_BUCKET} --key "${salt_tar_path}" --grant-read 'uri="http://acs.amazonaws.com/groups/global/AllUsers"' - SALT_TAR_URL="${s3_url_base}/${AWS_S3_BUCKET}/${salt_tar_path}" - - local bootstrap_script_path="${staging_path}/${BOOTSTRAP_SCRIPT##*/}" - aws s3api put-object-acl --region ${s3_bucket_location} --bucket ${AWS_S3_BUCKET} --key "${bootstrap_script_path}" --grant-read 'uri="http://acs.amazonaws.com/groups/global/AllUsers"' - BOOTSTRAP_SCRIPT_URL="${s3_url_base}/${AWS_S3_BUCKET}/${bootstrap_script_path}" - - echo "Uploaded server tars:" - echo " SERVER_BINARY_TAR_URL: ${SERVER_BINARY_TAR_URL}" - echo " SALT_TAR_URL: ${SALT_TAR_URL}" - echo " BOOTSTRAP_SCRIPT_URL: ${BOOTSTRAP_SCRIPT_URL}" -} - -# Adds a tag to an AWS resource -# usage: add-tag -function add-tag { - echo "Adding tag to ${1}: ${2}=${3}" - - # We need to retry in case the resource isn't yet fully created - n=0 - until [ $n -ge 25 ]; do - $AWS_CMD create-tags --resources ${1} --tags Key=${2},Value=${3} > $LOG && return - n=$[$n+1] - sleep 3 - done - - echo "Unable to add tag to AWS resource" - exit 1 -} - -# Creates the IAM profile, based on configuration files in templates/iam -# usage: create-iam-profile kubernetes-master-us-west-1a-chom kubernetes-master -function create-iam-profile { - local key=$1 - local role=$2 - - local 
conf_dir=file://${KUBE_ROOT}/cluster/aws/templates/iam - - echo "Creating IAM role: ${key}" - aws iam create-role --role-name ${key} --assume-role-policy-document ${conf_dir}/${role}-role.json > $LOG - - echo "Creating IAM role-policy: ${key}" - aws iam put-role-policy --role-name ${key} --policy-name ${key} --policy-document ${conf_dir}/${role}-policy.json > $LOG - - echo "Creating IAM instance-policy: ${key}" - aws iam create-instance-profile --instance-profile-name ${key} > $LOG - - echo "Adding IAM role to instance-policy: ${key}" - aws iam add-role-to-instance-profile --instance-profile-name ${key} --role-name ${key} > $LOG -} - -# Creates the IAM roles (if they do not already exist) -function ensure-iam-profiles { - echo "Creating master IAM profile: ${IAM_PROFILE_MASTER}" - create-iam-profile ${IAM_PROFILE_MASTER} kubernetes-master - - echo "Creating minion IAM profile: ${IAM_PROFILE_NODE}" - create-iam-profile ${IAM_PROFILE_NODE} kubernetes-minion -} - -# Wait for instance to be in specified state -function wait-for-instance-state { - instance_id=$1 - state=$2 - - while true; do - instance_state=$($AWS_CMD describe-instances --instance-ids ${instance_id} --query Reservations[].Instances[].State.Name) - if [[ "$instance_state" == "${state}" ]]; then - break - else - echo "Waiting for instance ${instance_id} to be ${state} (currently ${instance_state})" - echo "Sleeping for 3 seconds..." - sleep 3 - fi - done -} - -# Allocates new Elastic IP from Amazon -# Output: allocated IP address -function allocate-elastic-ip { - $AWS_CMD allocate-address --domain vpc --query PublicIp -} - -# Attaches an elastic IP to the specified instance -function attach-ip-to-instance { - local ip_address=$1 - local instance_id=$2 - - local elastic_ip_allocation_id=$($AWS_CMD describe-addresses --public-ips $ip_address --query Addresses[].AllocationId) - echo "Attaching IP ${ip_address} to instance ${instance_id}" - $AWS_CMD associate-address --instance-id ${instance_id} --allocation-id ${elastic_ip_allocation_id} > $LOG -} - -# Releases an elastic IP -function release-elastic-ip { - local ip_address=$1 - - echo "Releasing Elastic IP: ${ip_address}" - elastic_ip_allocation_id=$($AWS_CMD describe-addresses --public-ips $ip_address --query Addresses[].AllocationId 2> $LOG) || true - if [[ -z "${elastic_ip_allocation_id}" ]]; then - echo "Elastic IP already released" - else - $AWS_CMD release-address --allocation-id ${elastic_ip_allocation_id} > $LOG - fi -} - -# Deletes a security group -# usage: delete_security_group -function delete_security_group { - local -r sg_id=${1} - - echo "Deleting security group: ${sg_id}" - - # We retry in case there's a dependent resource - typically an ELB - local n=0 - until [ $n -ge 20 ]; do - $AWS_CMD delete-security-group --group-id ${sg_id} > $LOG && return - n=$[$n+1] - sleep 3 - done - echo "Unable to delete security group: ${sg_id}" - exit 1 -} - - - -# Deletes master and minion IAM roles and instance profiles -# usage: delete-iam-instance-profiles -function delete-iam-profiles { - for iam_profile_name in ${IAM_PROFILE_MASTER} ${IAM_PROFILE_NODE};do - echo "Removing role from instance profile: ${iam_profile_name}" - conceal-no-such-entity-response aws iam remove-role-from-instance-profile --instance-profile-name "${iam_profile_name}" --role-name "${iam_profile_name}" - - echo "Deleting IAM Instance-Profile: ${iam_profile_name}" - conceal-no-such-entity-response aws iam delete-instance-profile --instance-profile-name "${iam_profile_name}" - - echo "Delete IAM role policy: 
${iam_profile_name}"
-    conceal-no-such-entity-response aws iam delete-role-policy --role-name "${iam_profile_name}" --policy-name "${iam_profile_name}"
-
-    echo "Deleting IAM Role: ${iam_profile_name}"
-    conceal-no-such-entity-response aws iam delete-role --role-name "${iam_profile_name}"
-  done
-}
-
-# Detects a NoSuchEntity response in the AWS CLI's stderr output and conceals the error
-# Any other error is treated as fatal
-# usage: conceal-no-such-entity-response ...args
-function conceal-no-such-entity-response {
-  # In plain English: redirect stderr to stdout, and stdout to the log file
-  local -r errMsg=$($@ 2>&1 > $LOG)
-  if [[ "$errMsg" == "" ]]; then
-    return
-  fi
-
-  echo $errMsg
-  if [[ "$errMsg" =~ " (NoSuchEntity) " ]]; then
-    echo " -> NoSuchEntity response detected; assuming the operation is unnecessary due to a prior incomplete teardown"
-    return
-  fi
-
-  echo "Error message is fatal. Will exit"
-  exit 1
-}
-
-function ssh-key-setup {
-  if [[ ! -f "$AWS_SSH_KEY" ]]; then
-    ssh-keygen -f "$AWS_SSH_KEY" -N ''
-  fi
-
-  # Note that we use get-ssh-fingerprint, so this works on OS X Mavericks
-  # get-aws-fingerprint gives the same fingerprint that AWS computes,
-  # but OS X Mavericks ssh-keygen can't compute it
-  AWS_SSH_KEY_FINGERPRINT=$(get-ssh-fingerprint ${AWS_SSH_KEY}.pub)
-  echo "Using SSH key with (AWS) fingerprint: ${AWS_SSH_KEY_FINGERPRINT}"
-  AWS_SSH_KEY_NAME="kubernetes-${AWS_SSH_KEY_FINGERPRINT//:/}"
-
-  import-public-key ${AWS_SSH_KEY_NAME} ${AWS_SSH_KEY}.pub
-}
-
-function vpc-setup {
-  if [[ -z "${VPC_ID:-}" ]]; then
-    VPC_ID=$(get_vpc_id)
-  fi
-  if [[ -z "$VPC_ID" ]]; then
-    echo "Creating vpc."
-    VPC_ID=$($AWS_CMD create-vpc --cidr-block ${VPC_CIDR} --query Vpc.VpcId)
-    $AWS_CMD modify-vpc-attribute --vpc-id $VPC_ID --enable-dns-support '{"Value": true}' > $LOG
-    $AWS_CMD modify-vpc-attribute --vpc-id $VPC_ID --enable-dns-hostnames '{"Value": true}' > $LOG
-    add-tag $VPC_ID Name ${VPC_NAME}
-    add-tag $VPC_ID KubernetesCluster ${CLUSTER_ID}
-  fi
-
-  echo "Using VPC $VPC_ID"
-}
-
-function subnet-setup {
-  if [[ -z "${SUBNET_ID:-}" ]]; then
-    SUBNET_ID=$(get_subnet_id $VPC_ID $ZONE)
-  fi
-
-  if [[ -z "$SUBNET_ID" ]]; then
-    echo "Creating subnet."
-    SUBNET_ID=$($AWS_CMD create-subnet --cidr-block ${SUBNET_CIDR} --vpc-id $VPC_ID --availability-zone ${ZONE} --query Subnet.SubnetId)
-    add-tag $SUBNET_ID KubernetesCluster ${CLUSTER_ID}
-  else
-    EXISTING_CIDR=$($AWS_CMD describe-subnets --subnet-ids ${SUBNET_ID} --query Subnets[].CidrBlock)
-    echo "Using existing subnet with CIDR $EXISTING_CIDR"
-    if [[ "${SUBNET_CIDR}" != "${EXISTING_CIDR}" ]]; then
-      MASTER_INTERNAL_IP="${EXISTING_CIDR%.*}${MASTER_IP_SUFFIX}"
-      echo "Assuming MASTER_INTERNAL_IP=${MASTER_INTERNAL_IP}"
-    fi
-  fi
-
-  echo "Using subnet $SUBNET_ID"
-}
-
-function kube-up {
-  echo "Starting cluster using os distro: ${OS_DISTRIBUTION}" >&2
-
-  get-tokens
-
-  detect-image
-  detect-minion-image
-
-  detect-root-device
-
-  find-release-tars
-
-  ensure-temp-dir
-
-  create-bootstrap-script
-
-  upload-server-tars
-
-  ensure-iam-profiles
-
-  load-or-gen-kube-basicauth
-  load-or-gen-kube-bearertoken
-
-  ssh-key-setup
-
-  vpc-setup
-
-  create-dhcp-option-set
-
-  subnet-setup
-
-  IGW_ID=$(get_igw_id $VPC_ID)
-  if [[ -z "$IGW_ID" ]]; then
-    echo "Creating Internet Gateway."
-    IGW_ID=$($AWS_CMD create-internet-gateway --query InternetGateway.InternetGatewayId)
-    $AWS_CMD attach-internet-gateway --internet-gateway-id $IGW_ID --vpc-id $VPC_ID > $LOG
-  fi
-
-  echo "Using Internet Gateway $IGW_ID"
-
-  echo "Associating route table."
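-  # Like the VPC and subnet above, the route table is resolved with a
-  # find-or-create idiom: look the resource up by its KubernetesCluster tag,
-  # create it only when the lookup comes back empty, and tag it immediately,
-  # so that re-running kube-up converges instead of creating duplicates.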
- ROUTE_TABLE_ID=$($AWS_CMD describe-route-tables \ - --filters Name=vpc-id,Values=${VPC_ID} \ - Name=tag:KubernetesCluster,Values=${CLUSTER_ID} \ - --query RouteTables[].RouteTableId) - if [[ -z "${ROUTE_TABLE_ID}" ]]; then - echo "Creating route table" - ROUTE_TABLE_ID=$($AWS_CMD create-route-table \ - --vpc-id=${VPC_ID} \ - --query RouteTable.RouteTableId) - add-tag ${ROUTE_TABLE_ID} KubernetesCluster ${CLUSTER_ID} - fi - - echo "Associating route table ${ROUTE_TABLE_ID} to subnet ${SUBNET_ID}" - $AWS_CMD associate-route-table --route-table-id $ROUTE_TABLE_ID --subnet-id $SUBNET_ID > $LOG || true - echo "Adding route to route table ${ROUTE_TABLE_ID}" - $AWS_CMD create-route --route-table-id $ROUTE_TABLE_ID --destination-cidr-block 0.0.0.0/0 --gateway-id $IGW_ID > $LOG || true - - echo "Using Route Table $ROUTE_TABLE_ID" - - # Create security groups - MASTER_SG_ID=$(get_security_group_id "${MASTER_SG_NAME}") - if [[ -z "${MASTER_SG_ID}" ]]; then - echo "Creating master security group." - create-security-group "${MASTER_SG_NAME}" "Kubernetes security group applied to master nodes" - fi - NODE_SG_ID=$(get_security_group_id "${NODE_SG_NAME}") - if [[ -z "${NODE_SG_ID}" ]]; then - echo "Creating minion security group." - create-security-group "${NODE_SG_NAME}" "Kubernetes security group applied to minion nodes" - fi - - detect-security-groups - - # Masters can talk to master - authorize-security-group-ingress "${MASTER_SG_ID}" "--source-group ${MASTER_SG_ID} --protocol all" - - # Minions can talk to minions - authorize-security-group-ingress "${NODE_SG_ID}" "--source-group ${NODE_SG_ID} --protocol all" - - # Masters and minions can talk to each other - authorize-security-group-ingress "${MASTER_SG_ID}" "--source-group ${NODE_SG_ID} --protocol all" - authorize-security-group-ingress "${NODE_SG_ID}" "--source-group ${MASTER_SG_ID} --protocol all" - - # SSH is open to the world - authorize-security-group-ingress "${MASTER_SG_ID}" "--protocol tcp --port 22 --cidr ${SSH_CIDR}" - authorize-security-group-ingress "${NODE_SG_ID}" "--protocol tcp --port 22 --cidr ${SSH_CIDR}" - - # HTTPS to the master is allowed (for API access) - authorize-security-group-ingress "${MASTER_SG_ID}" "--protocol tcp --port 443 --cidr ${HTTP_API_CIDR}" - - # KUBE_USE_EXISTING_MASTER is used to add minions to an existing master - if [[ "${KUBE_USE_EXISTING_MASTER:-}" == "true" ]]; then - detect-master - parse-master-env - - # Start minions - start-minions - wait-minions - else - # Create the master - start-master - - # Build ~/.kube/config - build-config - - # Start minions - start-minions - wait-minions - - # Wait for the master to be ready - wait-master - fi - - # Check the cluster is OK - check-cluster -} - -# Builds the bootstrap script and saves it to a local temp file -# Sets BOOTSTRAP_SCRIPT to the path of the script -function create-bootstrap-script() { - ensure-temp-dir - - BOOTSTRAP_SCRIPT="${KUBE_TEMP}/bootstrap-script" - - ( - # Include the default functions from the GCE configure-vm script - sed '/^#+AWS_OVERRIDES_HERE/,$d' "${KUBE_ROOT}/cluster/gce/configure-vm.sh" - # Include the AWS override functions - cat "${KUBE_ROOT}/cluster/aws/templates/configure-vm-aws.sh" - cat "${KUBE_ROOT}/cluster/aws/templates/format-disks.sh" - # Include the GCE configure-vm directly-executed code - sed -e '1,/^#+AWS_OVERRIDES_HERE/d' "${KUBE_ROOT}/cluster/gce/configure-vm.sh" - ) > "${BOOTSTRAP_SCRIPT}" -} - -# Starts the master node -function start-master() { - # Ensure RUNTIME_CONFIG is populated - build-runtime-config - - # 
Get or create master persistent volume - ensure-master-pd - - # Get or create master elastic IP - ensure-master-ip - - # We have to make sure that the cert is valid for API_SERVERS - # i.e. we likely have to pass ELB name / elastic IP in future - create-certs "${KUBE_MASTER_IP}" "${MASTER_INTERNAL_IP}" - - # This key is no longer needed, and this enables us to get under the 16KB size limit - KUBECFG_CERT_BASE64="" - KUBECFG_KEY_BASE64="" - - write-master-env - - ( - # We pipe this to the ami as a startup script in the user-data field. Requires a compatible ami - echo "#! /bin/bash" - echo "mkdir -p /var/cache/kubernetes-install" - echo "cd /var/cache/kubernetes-install" - - echo "cat > kube_env.yaml << __EOF_MASTER_KUBE_ENV_YAML" - cat ${KUBE_TEMP}/master-kube-env.yaml - echo "AUTO_UPGRADE: 'true'" - # TODO: get rid of these exceptions / harmonize with common or GCE - echo "DOCKER_STORAGE: $(yaml-quote ${DOCKER_STORAGE:-})" - echo "API_SERVERS: $(yaml-quote ${MASTER_INTERNAL_IP:-})" - echo "__EOF_MASTER_KUBE_ENV_YAML" - echo "" - echo "wget -O bootstrap ${BOOTSTRAP_SCRIPT_URL}" - echo "chmod +x bootstrap" - echo "mkdir -p /etc/kubernetes" - echo "mv kube_env.yaml /etc/kubernetes" - echo "mv bootstrap /etc/kubernetes/" - echo "cat > /etc/rc.local << EOF_RC_LOCAL" - echo "#!/bin/sh -e" - # We want to be sure that we don't pass an argument to bootstrap - echo "/etc/kubernetes/bootstrap" - echo "exit 0" - echo "EOF_RC_LOCAL" - echo "/etc/kubernetes/bootstrap" - ) > "${KUBE_TEMP}/master-user-data" - - # Compress the data to fit under the 16KB limit (cloud-init accepts compressed data) - gzip "${KUBE_TEMP}/master-user-data" - - echo "Starting Master" - master_id=$($AWS_CMD run-instances \ - --image-id $AWS_IMAGE \ - --iam-instance-profile Name=$IAM_PROFILE_MASTER \ - --instance-type $MASTER_SIZE \ - --subnet-id $SUBNET_ID \ - --private-ip-address $MASTER_INTERNAL_IP \ - --key-name ${AWS_SSH_KEY_NAME} \ - --security-group-ids ${MASTER_SG_ID} \ - --associate-public-ip-address \ - --block-device-mappings "${MASTER_BLOCK_DEVICE_MAPPINGS}" \ - --user-data fileb://${KUBE_TEMP}/master-user-data.gz \ - --query Instances[].InstanceId) - add-tag $master_id Name $MASTER_NAME - add-tag $master_id Role $MASTER_TAG - add-tag $master_id KubernetesCluster ${CLUSTER_ID} - - echo "Waiting for master to be ready" - local attempt=0 - - while true; do - echo -n Attempt "$(($attempt+1))" to check for master node - local ip=$(get_instance_public_ip ${master_id}) - if [[ -z "${ip}" ]]; then - if (( attempt > 30 )); then - echo - echo -e "${color_red}master failed to start. Your cluster is unlikely" >&2 - echo "to work correctly. Please run ./cluster/kube-down.sh and re-create the" >&2 - echo -e "cluster. (sorry!)${color_norm}" >&2 - exit 1 - fi - else - # We are not able to add an elastic ip, a route or volume to the instance until that instance is in "running" state. - wait-for-instance-state ${master_id} "running" - - KUBE_MASTER=${MASTER_NAME} - echo -e " ${color_green}[master running]${color_norm}" - - attach-ip-to-instance ${KUBE_MASTER_IP} ${master_id} - - # This is a race between instance start and volume attachment. There appears to be no way to start an AWS instance with a volume attached. 
- # To work around this, we wait for volume to be ready in setup-master-pd.sh - echo "Attaching persistent data volume (${MASTER_DISK_ID}) to master" - $AWS_CMD attach-volume --volume-id ${MASTER_DISK_ID} --device /dev/sdb --instance-id ${master_id} - - sleep 10 - $AWS_CMD create-route --route-table-id $ROUTE_TABLE_ID --destination-cidr-block ${MASTER_IP_RANGE} --instance-id $master_id > $LOG - - break - fi - echo -e " ${color_yellow}[master not working yet]${color_norm}" - attempt=$(($attempt+1)) - sleep 10 - done -} - -# Creates an ASG for the minion nodes -function start-minions() { - # Minions don't currently use runtime config, but call it anyway for sanity - build-runtime-config - - echo "Creating minion configuration" - - write-node-env - - ( - # We pipe this to the ami as a startup script in the user-data field. Requires a compatible ami - echo "#! /bin/bash" - echo "mkdir -p /var/cache/kubernetes-install" - echo "cd /var/cache/kubernetes-install" - echo "cat > kube_env.yaml << __EOF_KUBE_ENV_YAML" - cat ${KUBE_TEMP}/node-kube-env.yaml - echo "AUTO_UPGRADE: 'true'" - # TODO: get rid of these exceptions / harmonize with common or GCE - echo "DOCKER_STORAGE: $(yaml-quote ${DOCKER_STORAGE:-})" - echo "API_SERVERS: $(yaml-quote ${MASTER_INTERNAL_IP:-})" - echo "__EOF_KUBE_ENV_YAML" - echo "" - echo "wget -O bootstrap ${BOOTSTRAP_SCRIPT_URL}" - echo "chmod +x bootstrap" - echo "mkdir -p /etc/kubernetes" - echo "mv kube_env.yaml /etc/kubernetes" - echo "mv bootstrap /etc/kubernetes/" - echo "cat > /etc/rc.local << EOF_RC_LOCAL" - echo "#!/bin/sh -e" - # We want to be sure that we don't pass an argument to bootstrap - echo "/etc/kubernetes/bootstrap" - echo "exit 0" - echo "EOF_RC_LOCAL" - echo "/etc/kubernetes/bootstrap" - ) > "${KUBE_TEMP}/node-user-data" - - # Compress the data to fit under the 16KB limit (cloud-init accepts compressed data) - gzip "${KUBE_TEMP}/node-user-data" - - local public_ip_option - if [[ "${ENABLE_NODE_PUBLIC_IP}" == "true" ]]; then - public_ip_option="--associate-public-ip-address" - else - public_ip_option="--no-associate-public-ip-address" - fi - local spot_price_option - if [[ -n "${NODE_SPOT_PRICE:-}" ]]; then - spot_price_option="--spot-price ${NODE_SPOT_PRICE}" - else - spot_price_option="" - fi - ${AWS_ASG_CMD} create-launch-configuration \ - --launch-configuration-name ${ASG_NAME} \ - --image-id $KUBE_NODE_IMAGE \ - --iam-instance-profile ${IAM_PROFILE_NODE} \ - --instance-type $NODE_SIZE \ - --key-name ${AWS_SSH_KEY_NAME} \ - --security-groups ${NODE_SG_ID} \ - ${public_ip_option} \ - ${spot_price_option} \ - --block-device-mappings "${NODE_BLOCK_DEVICE_MAPPINGS}" \ - --user-data "fileb://${KUBE_TEMP}/node-user-data.gz" - - echo "Creating autoscaling group" - ${AWS_ASG_CMD} create-auto-scaling-group \ - --auto-scaling-group-name ${ASG_NAME} \ - --launch-configuration-name ${ASG_NAME} \ - --min-size ${NUM_NODES} \ - --max-size ${NUM_NODES} \ - --vpc-zone-identifier ${SUBNET_ID} \ - --tags ResourceId=${ASG_NAME},ResourceType=auto-scaling-group,Key=Name,Value=${NODE_INSTANCE_PREFIX} \ - ResourceId=${ASG_NAME},ResourceType=auto-scaling-group,Key=Role,Value=${NODE_TAG} \ - ResourceId=${ASG_NAME},ResourceType=auto-scaling-group,Key=KubernetesCluster,Value=${CLUSTER_ID} -} - -function wait-minions { - # Wait for the minions to be running - # TODO(justinsb): This is really not needed any more - local attempt=0 - local max_attempts=30 - # Spot instances are slower to launch - if [[ -n "${NODE_SPOT_PRICE:-}" ]]; then - max_attempts=90 - fi - while true; do - 
detect-node-names > $LOG - if [[ ${#NODE_IDS[@]} == ${NUM_NODES} ]]; then - echo -e " ${color_green}${#NODE_IDS[@]} minions started; ready${color_norm}" - break - fi - - if (( attempt > max_attempts )); then - echo - echo "Expected number of minions did not start in time" - echo - echo -e "${color_red}Expected number of minions failed to start. Your cluster is unlikely" >&2 - echo "to work correctly. Please run ./cluster/kube-down.sh and re-create the" >&2 - echo -e "cluster. (sorry!)${color_norm}" >&2 - exit 1 - fi - - echo -e " ${color_yellow}${#NODE_IDS[@]} minions started; waiting${color_norm}" - attempt=$(($attempt+1)) - sleep 10 - done -} - -# Wait for the master to be started -function wait-master() { - detect-master > $LOG - - echo "Waiting for cluster initialization." - echo - echo " This will continually check to see if the API for kubernetes is reachable." - echo " This might loop forever if there was some uncaught error during start" - echo " up." - echo - - until $(curl --insecure --user ${KUBE_USER}:${KUBE_PASSWORD} --max-time 5 \ - --fail --output $LOG --silent https://${KUBE_MASTER_IP}/healthz); do - printf "." - sleep 2 - done - - echo "Kubernetes cluster created." -} - -# Creates the ~/.kube/config file, getting the information from the master -# The master must be running and set in KUBE_MASTER_IP -function build-config() { - export KUBE_CERT="${CERT_DIR}/pki/issued/kubecfg.crt" - export KUBE_KEY="${CERT_DIR}/pki/private/kubecfg.key" - export CA_CERT="${CERT_DIR}/pki/ca.crt" - export CONTEXT="${CONFIG_CONTEXT}" - ( - umask 077 - - # Update the user's kubeconfig to include credentials for this apiserver. - create-kubeconfig - - create-kubeconfig-for-federation - ) -} - -# Sanity check the cluster and print confirmation messages -function check-cluster() { - echo "Sanity checking cluster..." - - sleep 5 - - detect-nodes > $LOG - - # Don't bail on errors, we want to be able to print some info. - set +e - - # Basic sanity checking - # TODO(justinsb): This is really not needed any more - local rc # Capture return code without exiting because of errexit bash option - for (( i=0; i<${#KUBE_NODE_IP_ADDRESSES[@]}; i++)); do - # Make sure docker is installed and working. - local attempt=0 - while true; do - local minion_ip=${KUBE_NODE_IP_ADDRESSES[$i]} - echo -n "Attempt $(($attempt+1)) to check Docker on node @ ${minion_ip} ..." - local output=`check-minion ${minion_ip}` - echo $output - if [[ "${output}" != "working" ]]; then - if (( attempt > 20 )); then - echo - echo -e "${color_red}Your cluster is unlikely to work correctly." >&2 - echo "Please run ./cluster/kube-down.sh and re-create the" >&2 - echo -e "cluster. (sorry!)${color_norm}" >&2 - exit 1 - fi - else - break - fi - attempt=$(($attempt+1)) - sleep 30 - done - done - - # ensures KUBECONFIG is set - get-kubeconfig-basicauth - echo - echo -e "${color_green}Kubernetes cluster is running. 
The master is running at:" - echo - echo -e "${color_yellow} https://${KUBE_MASTER_IP}" - echo - echo -e "${color_green}The user name and password to use is located in ${KUBECONFIG}.${color_norm}" - echo -} - -function kube-down { - local vpc_id=$(get_vpc_id) - if [[ -n "${vpc_id}" ]]; then - local elb_ids=$(get_elbs_in_vpc ${vpc_id}) - if [[ -n "${elb_ids}" ]]; then - echo "Deleting ELBs in: ${vpc_id}" - for elb_id in ${elb_ids}; do - aws elb delete-load-balancer --load-balancer-name=${elb_id} >$LOG - done - - echo "Waiting for ELBs to be deleted" - while true; do - elb_ids=$(get_elbs_in_vpc ${vpc_id}) - if [[ -z "$elb_ids" ]]; then - echo "All ELBs deleted" - break - else - echo "ELBs not yet deleted: $elb_ids" - echo "Sleeping for 3 seconds..." - sleep 3 - fi - done - fi - - if [[ -z "${KUBE_MASTER_ID-}" ]]; then - KUBE_MASTER_ID=$(get_instanceid_from_name ${MASTER_NAME}) - fi - if [[ -n "${KUBE_MASTER_ID-}" ]]; then - delete-instance-alarms ${KUBE_MASTER_ID} - fi - - echo "Deleting instances in VPC: ${vpc_id}" - instance_ids=$($AWS_CMD describe-instances \ - --filters Name=vpc-id,Values=${vpc_id} \ - Name=tag:KubernetesCluster,Values=${CLUSTER_ID} \ - --query Reservations[].Instances[].InstanceId) - - if [[ -n "${instance_ids}" ]]; then - asg_groups=$($AWS_CMD describe-instances \ - --query 'Reservations[].Instances[].Tags[?Key==`aws:autoscaling:groupName`].Value[]' \ - --instance-ids ${instance_ids}) - for asg_group in ${asg_groups}; do - if [[ -n $(${AWS_ASG_CMD} describe-auto-scaling-groups --auto-scaling-group-names ${asg_group} --query AutoScalingGroups[].AutoScalingGroupName) ]]; then - echo "Deleting auto-scaling group: ${asg_group}" - ${AWS_ASG_CMD} delete-auto-scaling-group --force-delete --auto-scaling-group-name ${asg_group} - fi - if [[ -n $(${AWS_ASG_CMD} describe-launch-configurations --launch-configuration-names ${asg_group} --query LaunchConfigurations[].LaunchConfigurationName) ]]; then - echo "Deleting auto-scaling launch configuration: ${asg_group}" - ${AWS_ASG_CMD} delete-launch-configuration --launch-configuration-name ${asg_group} - fi - done - - $AWS_CMD terminate-instances --instance-ids ${instance_ids} > $LOG - echo "Waiting for instances to be deleted" - for instance_id in ${instance_ids}; do - wait-for-instance-state ${instance_id} "terminated" - done - echo "All instances deleted" - fi - if [[ -n $(${AWS_ASG_CMD} describe-launch-configurations --launch-configuration-names ${ASG_NAME} --query LaunchConfigurations[].LaunchConfigurationName) ]]; then - echo "Warning: default auto-scaling launch configuration ${ASG_NAME} still exists, attempting to delete" - echo " (This may happen if kube-up leaves just the launch configuration but no auto-scaling group.)" - ${AWS_ASG_CMD} delete-launch-configuration --launch-configuration-name ${ASG_NAME} || true - fi - - find-master-pd - find-tagged-master-ip - - if [[ -n "${KUBE_MASTER_IP:-}" ]]; then - release-elastic-ip ${KUBE_MASTER_IP} - fi - - if [[ -n "${MASTER_DISK_ID:-}" ]]; then - echo "Deleting volume ${MASTER_DISK_ID}" - $AWS_CMD delete-volume --volume-id ${MASTER_DISK_ID} > $LOG - fi - - echo "Cleaning up resources in VPC: ${vpc_id}" - default_sg_id=$($AWS_CMD describe-security-groups \ - --filters Name=vpc-id,Values=${vpc_id} \ - Name=group-name,Values=default \ - --query SecurityGroups[].GroupId \ - | tr "\t" "\n") - sg_ids=$($AWS_CMD describe-security-groups \ - --filters Name=vpc-id,Values=${vpc_id} \ - Name=tag:KubernetesCluster,Values=${CLUSTER_ID} \ - --query SecurityGroups[].GroupId \ - | tr "\t" "\n") - 
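-  # With the CLI's text output, multi-result queries come back tab-separated;
-  # piping through tr puts one ID per line so the loops below can iterate
-  # over them via word splitting.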
-    # First delete any inter-security group ingress rules
-    # (otherwise we get dependency violations)
-    for sg_id in ${sg_ids}; do
-      # EC2 doesn't let us delete the default security group
-      if [[ "${sg_id}" == "${default_sg_id}" ]]; then
-        continue
-      fi
-
-      echo "Cleaning up security group: ${sg_id}"
-      other_sgids=$(${AWS_CMD} describe-security-groups --group-id "${sg_id}" --query SecurityGroups[].IpPermissions[].UserIdGroupPairs[].GroupId)
-      for other_sgid in ${other_sgids}; do
-        $AWS_CMD revoke-security-group-ingress --group-id "${sg_id}" --source-group "${other_sgid}" --protocol all > $LOG
-      done
-    done
-
-    for sg_id in ${sg_ids}; do
-      # EC2 doesn't let us delete the default security group
-      if [[ "${sg_id}" == "${default_sg_id}" ]]; then
-        continue
-      fi
-
-      delete_security_group ${sg_id}
-    done
-
-    subnet_ids=$($AWS_CMD describe-subnets \
-      --filters Name=vpc-id,Values=${vpc_id} \
-                Name=tag:KubernetesCluster,Values=${CLUSTER_ID} \
-      --query Subnets[].SubnetId \
-      | tr "\t" "\n")
-    for subnet_id in ${subnet_ids}; do
-      $AWS_CMD delete-subnet --subnet-id ${subnet_id} > $LOG
-    done
-
-    igw_ids=$($AWS_CMD describe-internet-gateways \
-      --filters Name=attachment.vpc-id,Values=${vpc_id} \
-      --query InternetGateways[].InternetGatewayId \
-      | tr "\t" "\n")
-    for igw_id in ${igw_ids}; do
-      $AWS_CMD detach-internet-gateway --internet-gateway-id $igw_id --vpc-id $vpc_id > $LOG
-      $AWS_CMD delete-internet-gateway --internet-gateway-id $igw_id > $LOG
-    done
-
-    route_table_ids=$($AWS_CMD describe-route-tables \
-      --filters Name=vpc-id,Values=$vpc_id \
-                Name=route.destination-cidr-block,Values=0.0.0.0/0 \
-      --query RouteTables[].RouteTableId \
-      | tr "\t" "\n")
-    for route_table_id in ${route_table_ids}; do
-      $AWS_CMD delete-route --route-table-id $route_table_id --destination-cidr-block 0.0.0.0/0 > $LOG
-    done
-    route_table_ids=$($AWS_CMD describe-route-tables \
-      --filters Name=vpc-id,Values=$vpc_id \
-                Name=tag:KubernetesCluster,Values=${CLUSTER_ID} \
-      --query RouteTables[].RouteTableId \
-      | tr "\t" "\n")
-    for route_table_id in ${route_table_ids}; do
-      $AWS_CMD delete-route-table --route-table-id $route_table_id > $LOG
-    done
-
-    echo "Deleting VPC: ${vpc_id}"
-    $AWS_CMD delete-vpc --vpc-id $vpc_id > $LOG
-  else
-    echo "" >&2
-    echo -e "${color_red}Cluster NOT deleted!${color_norm}" >&2
-    echo "" >&2
-    echo "No VPC was found with tag KubernetesCluster=${CLUSTER_ID}" >&2
-    echo "" >&2
-    echo "If you are trying to delete a cluster in a shared VPC," >&2
-    echo "please consider using one of the methods in the kube-deploy repo." >&2
-    echo "See: https://github.com/kubernetes/kube-deploy/blob/master/docs/delete_cluster.md" >&2
-    echo "" >&2
-    echo "Note: You may be seeing this message because the cluster was already deleted, or" >&2
-    echo "has a name other than '${CLUSTER_ID}'." >&2
-  fi
-
-  if [[ -z "${DHCP_OPTION_SET_ID:-}" ]]; then
-    dhcp_option_ids=$($AWS_CMD describe-dhcp-options \
-      --output text \
-      --filters Name=tag:Name,Values=kubernetes-dhcp-option-set \
-                Name=tag:KubernetesCluster,Values=${CLUSTER_ID} \
-      --query DhcpOptions[].DhcpOptionsId \
-      | tr "\t" "\n")
-    for dhcp_option_id in ${dhcp_option_ids}; do
-      echo "Deleting DHCP option set: ${dhcp_option_id}"
-      $AWS_CMD delete-dhcp-options --dhcp-options-id $dhcp_option_id > $LOG
-    done
-  else
-    echo "Skipping deletion of pre-existing DHCP option set: ${DHCP_OPTION_SET_ID}"
-  fi
-
-  echo "Deleting IAM Instance profiles"
-  delete-iam-profiles
-}
-
-# Update a kubernetes cluster with latest source
-function kube-push {
-  detect-master
-
-  # Make sure we have the tar files staged on S3
-  find-release-tars
-  create-bootstrap-script
-  upload-server-tars
-
-  (
-    echo "#! /bin/bash"
-    echo "mkdir -p /var/cache/kubernetes-install"
-    echo "cd /var/cache/kubernetes-install"
-    echo "readonly SERVER_BINARY_TAR_URL='${SERVER_BINARY_TAR_URL}'"
-    echo "readonly SALT_TAR_URL='${SALT_TAR_URL}'"
-    grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/common.sh"
-    grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/download-release.sh"
-    echo "echo Executing configuration"
-    echo "sudo salt '*' mine.update"
-    echo "sudo salt --force-color '*' state.highstate"
-  ) | ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" ${SSH_USER}@${KUBE_MASTER_IP} sudo bash
-
-  get-kubeconfig-basicauth
-
-  echo
-  echo "Kubernetes cluster is running. The master is running at:"
-  echo
-  echo "  https://${KUBE_MASTER_IP}"
-  echo
-
-}
-
-# -----------------------------------------------------------------------------
-# Cluster specific test helpers used from hack/e2e.go
-
-# Execute prior to running tests to build a release if required for env.
-#
-# Assumed Vars:
-#   KUBE_ROOT
-function test-build-release {
-  # Make a release
-  "${KUBE_ROOT}/build/release.sh"
-}
-
-# Execute prior to running tests to initialize required structure. This is
-# called from hack/e2e.go only when running -up.
-#
-# Assumed vars:
-#   Variables from config.sh
-function test-setup {
-  "${KUBE_ROOT}/cluster/kube-up.sh"
-
-  VPC_ID=$(get_vpc_id)
-  detect-security-groups
-
-  # Open up port 80 & 8080 so common containers on minions can be reached
-  # TODO(roberthbailey): Remove this once we are no longer relying on hostPorts.
-  authorize-security-group-ingress "${NODE_SG_ID}" "--protocol tcp --port 80 --cidr 0.0.0.0/0"
-  authorize-security-group-ingress "${NODE_SG_ID}" "--protocol tcp --port 8080 --cidr 0.0.0.0/0"
-
-  # Open up the NodePort range
-  # TODO(justinsb): Move to main setup, if we decide whether we want to do this by default.
-  authorize-security-group-ingress "${NODE_SG_ID}" "--protocol all --port 30000-32767 --cidr 0.0.0.0/0"
-
-  echo "test-setup complete"
-}
-
-# Execute after running tests to perform any required clean-up. This is called
-# from hack/e2e.go
-function test-teardown {
-  # (ingress rules will be deleted along with the security group)
-  echo "Shutting down test cluster."
-  "${KUBE_ROOT}/cluster/kube-down.sh"
-}
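kube-push, above, never copies a script file to the master: it assembles the payload in a subshell and pipes it straight into a root shell over SSH. A minimal standalone sketch of that pattern, assuming placeholder host and key values that are not part of the original scripts:

```sh
#!/bin/bash
# Sketch of kube-push's "generate locally, stream into a remote root shell"
# pattern. REMOTE and SSH_KEY are illustrative placeholders.
REMOTE="ubuntu@203.0.113.10"
SSH_KEY="${HOME}/.ssh/kube_aws_rsa"

(
  echo "#! /bin/bash"
  echo "readonly PAYLOAD_URL='https://example.com/artifact.tar.gz'"
  # kube-push appends its template fragments here via: grep -v "^#" <template>
  echo "echo Executing configuration"
) | ssh -oStrictHostKeyChecking=no -i "${SSH_KEY}" "${REMOTE}" sudo bash
```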
-
-# Gets the hostname (or IP) that we should SSH to for the given nodename
-# For the master, we use the nodename; for the nodes we use their instance IDs
-function get_ssh_hostname {
-  local node="$1"
-
-  if [[ "${node}" == "${MASTER_NAME}" ]]; then
-    node=$(get_instanceid_from_name ${MASTER_NAME})
-    if [[ -z "${node-}" ]]; then
-      echo "Could not detect Kubernetes master node. Make sure you've launched a cluster with 'kube-up.sh'" 1>&2
-      exit 1
-    fi
-  fi
-
-  local ip=$(get_instance_public_ip ${node})
-  if [[ -z "$ip" ]]; then
-    echo "Could not detect IP for ${node}." 1>&2
-    exit 1
-  fi
-  echo ${ip}
-}
-
-# SSH to a node by name ($1) and run a command ($2).
-function ssh-to-node {
-  local node="$1"
-  local cmd="$2"
-
-  local ip=$(get_ssh_hostname ${node})
-
-  for try in {1..5}; do
-    if ssh -oLogLevel=quiet -oConnectTimeout=30 -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" ${SSH_USER}@${ip} "echo test > /dev/null"; then
-      break
-    fi
-    sleep 5
-  done
-  ssh -oLogLevel=quiet -oConnectTimeout=30 -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" ${SSH_USER}@${ip} "${cmd}"
-}
-
-# Perform preparations required to run e2e tests
-function prepare-e2e() {
-  # (AWS runs detect-project, I don't think we need to do anything)
-  # Note: we can't print anything here, or else the test tools will break with the extra output
-  return
-}
-
-function get-tokens() {
-  KUBELET_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
-  KUBE_PROXY_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
-}
+echo -e "${color_red}WARNING${color_norm}: The bash deployment for AWS is obsolete. The" >&2
+echo -e "v1.5.x releases are the last to support cluster/kube-up.sh with AWS." >&2
+echo "For a list of viable alternatives, see:" >&2
+echo >&2
+echo "  http://kubernetes.io/docs/getting-started-guides/aws/" >&2
+echo >&2
+exit 1
diff --git a/cluster/aws/wily/util.sh b/cluster/aws/wily/util.sh
deleted file mode 100644
index 377192cd317..00000000000
--- a/cluster/aws/wily/util.sh
+++ /dev/null
@@ -1,86 +0,0 @@
-#!/bin/bash
-
-# Copyright 2015 The Kubernetes Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-source "${KUBE_ROOT}/cluster/aws/common/common.sh"
-
-SSH_USER=ubuntu
-
-# Detects the AMI to use for ubuntu (considering the region)
-#
-# Vars set:
-#   AWS_IMAGE
-function detect-wily-image () {
-  # This is the ubuntu 15.10 amd64 hvm:ebs-ssd image for the region
-  # See here: http://cloud-images.ubuntu.com/locator/ec2/ for other images
-  # This will need to be updated from time to time as AMIs are deprecated
-  if [[ -z "${AWS_IMAGE-}" ]]; then
-    case "${AWS_REGION}" in
-      ap-northeast-1)
-        AWS_IMAGE=ami-3355505d
-        ;;
-
-      ap-northeast-2)
-        AWS_IMAGE=ami-e427e98a
-        ;;
-
-      ap-southeast-1)
-        AWS_IMAGE=ami-60975903
-        ;;
-
-      eu-central-1)
-        AWS_IMAGE=ami-6da2ba01
-        ;;
-
-      eu-west-1)
-        AWS_IMAGE=ami-36a71645
-        ;;
-
-      sa-east-1)
-        AWS_IMAGE=ami-fd36b691
-        ;;
-
-      us-east-1)
-        AWS_IMAGE=ami-6610390c
-        ;;
-
-      us-west-1)
-        AWS_IMAGE=ami-6e64120e
-        ;;
-
-      cn-north-1)
-        AWS_IMAGE=ami-17a76f7a
-        ;;
-
-      us-gov-west-1)
-        AWS_IMAGE=ami-b0bad893
-        ;;
-
-      ap-southeast-2)
-        AWS_IMAGE=ami-3895b15b
-        ;;
-
-      us-west-2)
-        AWS_IMAGE=ami-d95abcb9
-        ;;
-
-      *)
-        echo "Please specify AWS_IMAGE directly (region ${AWS_REGION} not recognized)"
-        exit 1
-    esac
-  fi
-}
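detect-wily-image above carries the whole region-to-AMI table in a case statement. Were this helper still maintained, the same table could live in a bash 4+ associative array, which keeps one region per row and makes AMI refreshes easier to review. A sketch under that assumption, copying only a few rows from the table above:

```sh
#!/bin/bash
# Region -> Ubuntu 15.10 (wily) amd64 hvm:ebs-ssd AMI.
# Only a subset of the rows from the case statement above is shown.
declare -A WILY_AMIS=(
  [ap-northeast-1]=ami-3355505d
  [eu-west-1]=ami-36a71645
  [us-east-1]=ami-6610390c
  [us-west-2]=ami-d95abcb9
)

if [[ -z "${AWS_IMAGE-}" ]]; then
  AWS_IMAGE="${WILY_AMIS[${AWS_REGION}]:-}"
  if [[ -z "${AWS_IMAGE}" ]]; then
    echo "Please specify AWS_IMAGE directly (region ${AWS_REGION} not recognized)"
    exit 1
  fi
fi
```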
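get-tokens, just before the obsolescence notice above, derives each 32-character bearer token by base64-encoding 128 bytes of kernel randomness, deleting the characters that are awkward in URLs and YAML ('=', '+', '/'), and truncating. The same pipeline as a parameterized helper; gen-token is an illustrative name, not part of the scripts:

```sh
#!/bin/bash
# Emit an alphanumeric token of the given length (default 32) from
# /dev/urandom, the same pipeline get-tokens inlines. Lengths beyond the
# first base64 output line (76 characters) would pick up newlines.
function gen-token() {
  local -r length="${1:-32}"
  dd if=/dev/urandom bs=128 count=1 2>/dev/null \
    | base64 \
    | tr -d "=+/" \
    | dd bs="${length}" count=1 2>/dev/null
}

KUBELET_TOKEN=$(gen-token)
```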