From 6ea71915929ac394514f9807614faf1c40da8fb4 Mon Sep 17 00:00:00 2001 From: Vladimir Shcherbakov Date: Sun, 16 Nov 2014 23:56:15 -0800 Subject: [PATCH 1/9] deploy update --- cluster/azure/config-default.sh | 37 ++ cluster/azure/templates/common.sh | 59 ++ .../templates/create-dynamic-salt-files.sh | 27 + cluster/azure/templates/download-release.sh | 35 ++ cluster/azure/templates/salt-master.sh | 69 +++ cluster/azure/templates/salt-minion.sh | 66 ++ cluster/azure/util.sh | 566 ++++++++++++++++++ 7 files changed, 859 insertions(+) create mode 100644 cluster/azure/config-default.sh create mode 100644 cluster/azure/templates/common.sh create mode 100644 cluster/azure/templates/create-dynamic-salt-files.sh create mode 100644 cluster/azure/templates/download-release.sh create mode 100644 cluster/azure/templates/salt-master.sh create mode 100644 cluster/azure/templates/salt-minion.sh create mode 100644 cluster/azure/util.sh diff --git a/cluster/azure/config-default.sh b/cluster/azure/config-default.sh new file mode 100644 index 00000000000..c1ad60daf7d --- /dev/null +++ b/cluster/azure/config-default.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +# Copyright 2014 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +INSTANCE_PREFIX=kubenertes +AZ_LOCATION='West US' +TAG=testing +AZ_CS_PREFIX=kube +AZ_VNET=shchTest +AZ_SUBNET=Subnet-1 +AZ_IMAGE=b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04-LTS-amd64-server-20140618.1-en-us-30GB +AZ_CS="" # is set in azure/util.sh verify-prereqs + +AZ_SSH_KEY=$HOME/.ssh/azure_rsa +AZ_SSH_CERT=$HOME/.ssh/azure.pem + +NUM_MINIONS=2 + +MASTER_NAME="${INSTANCE_PREFIX}-master" +MASTER_TAG="${INSTANCE_PREFIX}-master" +MINION_TAG="${INSTANCE_PREFIX}-minion" +MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}})) +MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24")) +MINION_SCOPES="" + diff --git a/cluster/azure/templates/common.sh b/cluster/azure/templates/common.sh new file mode 100644 index 00000000000..b5d3a025803 --- /dev/null +++ b/cluster/azure/templates/common.sh @@ -0,0 +1,59 @@ +#!/bin/bash + +# Copyright 2014 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Retry a download until we get it. +# +# $1 is the URL to download +download-or-bust() { + local -r url="$1" + local -r file="${url##*/}" + rm -f "$file" + until [[ -e "${file}" ]]; do + curl --ipv4 -Lo "$file" --connect-timeout 20 --retry 6 --retry-delay 10 "$url" + md5sum "$file" + done +} + +# Install salt from GCS. 
See README.md for instructions on how to update these +# debs. +# +# $1 If set to --master, also install the master +install-salt() { + apt-get update + + mkdir -p /var/cache/salt-install + cd /var/cache/salt-install + + TARS=( + libzmq3_3.2.3+dfsg-1~bpo70~dst+1_amd64.deb + python-zmq_13.1.0-1~bpo70~dst+1_amd64.deb + salt-common_2014.1.13+ds-1~bpo70+1_all.deb + salt-minion_2014.1.13+ds-1~bpo70+1_all.deb + ) + if [[ ${1-} == '--master' ]]; then + TARS+=(salt-master_2014.1.13+ds-1~bpo70+1_all.deb) + fi + URL_BASE="https://storage.googleapis.com/kubernetes-release/salt" + + for tar in "${TARS[@]}"; do + download-or-bust "${URL_BASE}/${tar}" + dpkg -i "${tar}" + done + +# This will install any of the unmet dependencies from above. + apt-get install -f -y + +} diff --git a/cluster/azure/templates/create-dynamic-salt-files.sh b/cluster/azure/templates/create-dynamic-salt-files.sh new file mode 100644 index 00000000000..9255fbf0c24 --- /dev/null +++ b/cluster/azure/templates/create-dynamic-salt-files.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +# Copyright 2014 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Create the overlay files for the salt tree. We create these in a separate +# place so that we can blow away the rest of the salt configs on a kube-push and +# re-apply these. + +mkdir -p /srv/salt-overlay/pillar +cat </srv/salt-overlay/pillar/cluster-params.sls +node_instance_prefix: $NODE_INSTANCE_PREFIX +EOF + +mkdir -p /srv/salt-overlay/salt/nginx +echo $MASTER_HTPASSWD > /srv/salt-overlay/salt/nginx/htpasswd diff --git a/cluster/azure/templates/download-release.sh b/cluster/azure/templates/download-release.sh new file mode 100644 index 00000000000..5fadefdb26c --- /dev/null +++ b/cluster/azure/templates/download-release.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +# Copyright 2014 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Download and install release + +# This script assumes that the environment variable MASTER_RELEASE_TAR contains +# the release tar to download and unpack. It is meant to be pushed to the +# master and run. 
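# Note (illustrative, not part of the original template): SERVER_BINARY_TAR_URL
# and SALT_TAR_URL are expected to already be in the environment at this point;
# cluster/azure/util.sh injects them as readonly variables when it concatenates
# this template into master-start.sh, roughly like:
#
#   readonly SERVER_BINARY_TAR_URL='https://<AZ_STG>.blob.core.windows.net/<CONTAINER>/kubernetes-server-linux-amd64.tar.gz'
#   readonly SALT_TAR_URL='https://<AZ_STG>.blob.core.windows.net/<CONTAINER>/kubernetes-salt.tar.gz'
#
# (<AZ_STG> and <CONTAINER> are placeholders for the storage account and
# container names computed in verify-prereqs.)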
+ + +echo "Downloading binary release tar ($SERVER_BINARY_TAR_URL)" +download-or-bust "$SERVER_BINARY_TAR_URL" + +echo "Downloading binary release tar ($SALT_TAR_URL)" +download-or-bust "$SALT_TAR_URL" + +echo "Unpacking Salt tree" +rm -rf kubernetes +tar xzf "${SALT_TAR_URL##*/}" + +echo "Running release install script" +sudo kubernetes/saltbase/install.sh "${SERVER_BINARY_TAR_URL##*/}" diff --git a/cluster/azure/templates/salt-master.sh b/cluster/azure/templates/salt-master.sh new file mode 100644 index 00000000000..3f80592837e --- /dev/null +++ b/cluster/azure/templates/salt-master.sh @@ -0,0 +1,69 @@ +#!/bin/bash + +# Copyright 2014 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Prepopulate the name of the Master +mkdir -p /etc/salt/minion.d +echo "master: $MASTER_NAME" > /etc/salt/minion.d/master.conf + +cat </etc/salt/minion.d/grains.conf +grains: + roles: + - kubernetes-master + cloud: azure +EOF + +# Auto accept all keys from minions that try to join +mkdir -p /etc/salt/master.d +cat </etc/salt/master.d/auto-accept.conf +auto_accept: True +EOF + +cat </etc/salt/master.d/reactor.conf +# React to new minions starting by running highstate on them. +reactor: + - 'salt/minion/*/start': + - /srv/reactor/highstate-new.sls +EOF + +mkdir -p /srv/salt/nginx +echo $MASTER_HTPASSWD > /srv/salt/nginx/htpasswd + +mkdir -p /etc/openvpn +umask=$(umask) +umask 0066 +echo "$CA_CRT" > /etc/openvpn/ca.crt +echo "$SERVER_CRT" > /etc/openvpn/server.crt +echo "$SERVER_KEY" > /etc/openvpn/server.key +umask $umask + +cat </etc/salt/minion.d/log-level-debug.conf +log_level: debug +log_level_logfile: debug +EOF + +cat </etc/salt/master.d/log-level-debug.d +log_level: debug +log_level_logfile: debug +EOF + +install-salt --master + +# Wait a few minutes and trigger another Salt run to better recover from +# any transient errors. +echo "Sleeping 180" +sleep 180 +salt-call state.highstate || true + diff --git a/cluster/azure/templates/salt-minion.sh b/cluster/azure/templates/salt-minion.sh new file mode 100644 index 00000000000..6e8c5a4d36e --- /dev/null +++ b/cluster/azure/templates/salt-minion.sh @@ -0,0 +1,66 @@ +#!/bin/bash + +# Copyright 2014 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +mkdir -p /etc/openvpn +umask=$(umask) +umask 0066 +echo "$CA_CRT" > /etc/openvpn/ca.crt +echo "$CLIENT_CRT" > /etc/openvpn/client.crt +echo "$CLIENT_KEY" > /etc/openvpn/client.key +umask $umask + +# Prepopulate the name of the Master +mkdir -p /etc/salt/minion.d +echo "master: $MASTER_NAME" > /etc/salt/minion.d/master.conf + +cat </etc/salt/minion.d/log-level-debug.conf +log_level: debug +log_level_logfile: debug +EOF + +# Our minions will have a pool role to distinguish them from the master. +#cat </etc/salt/minion.d/grains.conf +#grains: +# roles: +# - kubernetes-pool +# cbr-cidr: $MINION_IP_RANGE +# cloud: gce +#EOF + +hostnamef=$(hostname -f) +sudo apt-get install ipcalc +netmask=$(ipcalc $MINION_IP_RANGE | grep Netmask | awk '{ print $2 }') +network=$(ipcalc $MINION_IP_RANGE | grep Address | awk '{ print $2 }') +cbrstring="$network $netmask" + +# Our minions will have a pool role to distinguish them from the master. +cat </etc/salt/minion.d/grains.conf +grains: + roles: + - kubernetes-pool + cbr-cidr: $MINION_IP_RANGE + cloud: azure + hostnamef: $hostnamef + cbr-string: $cbrstring +EOF + +install-salt + +# Wait a few minutes and trigger another Salt run to better recover from +# any transient errors. +echo "Sleeping 180" +sleep 180 +salt-call state.highstate || true diff --git a/cluster/azure/util.sh b/cluster/azure/util.sh new file mode 100644 index 00000000000..4e7d4a91268 --- /dev/null +++ b/cluster/azure/util.sh @@ -0,0 +1,566 @@ +#!/bin/bash +# Copyright 2014 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# A library of helper functions and constant for the local config. + +# Use the config file specified in $KUBE_CONFIG_FILE, or default to +# config-default.sh. + +KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. +source "${KUBE_ROOT}/cluster/azure/${KUBE_CONFIG_FILE-"config-default.sh"}" + +function json_val () { + python -c 'import json,sys;obj=json.load(sys.stdin);print obj'$1''; +} + +# Verify prereqs +function verify-prereqs { + if [ -z "$(which azure)" ]; then + echo "Couldn't find azure in PATH" + echo " please install with 'npm install azure-cli'" + exit 1 + fi + + if [ -z "$(azure account list | grep true)" ]; then + echo "Default azure account not set" + echo " please set with 'azure account set'" + exit 1 + fi + + account=$(azure account list | grep true | awk '{ print $2 }') + if which md5 > /dev/null 2>&1; then + AZ_HSH=$(md5 -q -s "$AZ_SUBNET@$account") + else + AZ_HSH=$(echo -n "$AZ_SUBNET@$account" | md5sum) + fi + + AZ_HSH=${AZ_HSH:0:7} + AZ_STG=kube$AZ_HSH + echo "==> AZ_STG: $AZ_STG" + + AZ_CS="$AZ_CS_PREFIX-$AZ_HSH" + echo "==> AZ_CS: $AZ_CS" + + CONTAINER=kube-$TAG + echo "==> CONTAINER: $CONTAINER" +} + +# Create a temp dir that'll be deleted at the end of this bash session. 
+# +# Vars set: +# KUBE_TEMP +function ensure-temp-dir { + if [[ -z ${KUBE_TEMP-} ]]; then + KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX) + trap 'rm -rf "${KUBE_TEMP}"' EXIT + fi +} + +# Verify and find the various tar files that we are going to use on the server. +# +# Vars set: +# SERVER_BINARY_TAR +# SALT_TAR +function find-release-tars { + SERVER_BINARY_TAR="${KUBE_ROOT}/server/kubernetes-server-linux-amd64.tar.gz" + if [[ ! -f "$SERVER_BINARY_TAR" ]]; then + SERVER_BINARY_TAR="${KUBE_ROOT}/_output/release-tars/kubernetes-server-linux-amd64.tar.gz" + fi + if [[ ! -f "$SERVER_BINARY_TAR" ]]; then + echo "!!! Cannot find kubernetes-server-linux-amd64.tar.gz" + exit 1 + fi + + SALT_TAR="${KUBE_ROOT}/server/kubernetes-salt.tar.gz" + if [[ ! -f "$SALT_TAR" ]]; then + SALT_TAR="${KUBE_ROOT}/_output/release-tars/kubernetes-salt.tar.gz" + fi + if [[ ! -f "$SALT_TAR" ]]; then + echo "!!! Cannot find kubernetes-salt.tar.gz" + exit 1 + fi +} + + +# Take the local tar files and upload them to Azure Storage. They will then be +# downloaded by the master as part of the start up script for the master. +# +# Assumed vars: +# SERVER_BINARY_TAR +# SALT_TAR +# Vars set: +# SERVER_BINARY_TAR_URL +# SALT_TAR_URL +function upload-server-tars() { + SERVER_BINARY_TAR_URL= + SALT_TAR_URL= + + echo "==> SERVER_BINARY_TAR: $SERVER_BINARY_TAR" + echo "==> SALT_TAR: $SALT_TAR" + + echo "+++ Staging server tars to Azure Storage: $AZ_STG" + local server_binary_url="${SERVER_BINARY_TAR##*/}" + local salt_url="${SALT_TAR##*/}" + + SERVER_BINARY_TAR_URL="https://${AZ_STG}.blob.core.windows.net/$CONTAINER/$server_binary_url" + SALT_TAR_URL="https://${AZ_STG}.blob.core.windows.net/$CONTAINER/$salt_url" + + echo "==> SERVER_BINARY_TAR_URL: $SERVER_BINARY_TAR_URL" + echo "==> SALT_TAR_URL: $SALT_TAR_URL" + + + echo "--> Checking storage exsists..." + if [ -z "$(azure storage account show $AZ_STG 2>/dev/null | \ + grep data)" ]; then + echo "--> Creating storage..." + azure storage account create -l "$AZ_LOCATION" $AZ_STG + fi + + echo "--> Getting storage key..." + stg_key=$(azure storage account keys list $AZ_STG --json | \ + json_val '["primaryKey"]') + + echo "--> Checking storage container exsists..." + if [ -z "$(azure storage container show -a $AZ_STG -k "$stg_key" \ + $CONTAINER 2>/dev/null | grep data)" ]; then + echo "--> Creating storage container..." + azure storage container create \ + -a $AZ_STG \ + -k "$stg_key" \ + -p Blob \ + $CONTAINER + fi + + echo "--> Checking server binary exists in the container..." + if [ -n "$(azure storage blob show -a $AZ_STG -k "$stg_key" \ + $CONTAINER $server_binary_url 2>/dev/null | grep data)" ]; then + echo "--> Deleting server binary in the container..." + azure storage blob delete \ + -a $AZ_STG \ + -k "$stg_key" \ + $CONTAINER \ + $server_binary_url + fi + + echo "--> Uploading server binary to the container..." + azure storage blob upload \ + -a $AZ_STG \ + -k "$stg_key" \ + $SERVER_BINARY_TAR \ + $CONTAINER \ + $server_binary_url + + echo "--> Checking salt data exists in the container..." + if [ -n "$(azure storage blob show -a $AZ_STG -k "$stg_key" \ + $CONTAINER $salt_url 2>/dev/null | grep data)" ]; then + echo "--> Deleting salt data in the container..." + azure storage blob delete \ + -a $AZ_STG \ + -k "$stg_key" \ + $CONTAINER \ + $salt_url + fi + + echo "--> Uploading salt data to the container..." 
+ azure storage blob upload \ + -a $AZ_STG \ + -k "$stg_key" \ + $SALT_TAR \ + $CONTAINER \ + $salt_url +} + +# Detect the information about the minions +# +# Assumed vars: +# MINION_NAMES +# ZONE +# Vars set: +# +function detect-minions () { + if [ -z "$AZ_CS" ]; then + verify-prereqs + fi + ssh_ports=($(eval echo "2200{1..$NUM_MINIONS}")) + for (( i=0; i<${#MINION_NAMES[@]}; i++)); do + MINION_NAMES[$i]=$(ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} $AZ_CS.cloudapp.net hostname -f) + done + +} + +# Detect the IP for the master +# +# Assumed vars: +# MASTER_NAME +# ZONE +# Vars set: +# KUBE_MASTER +# KUBE_MASTER_IP +function detect-master () { + if [ -z "$AZ_CS" ]; then + verify-prereqs + fi + + KUBE_MASTER=${MASTER_NAME} + KUBE_MASTER_IP="${AZ_CS}.cloudapp.net" + echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)" + +} + +# Ensure that we have a password created for validating to the master. Will +# read from $HOME/.kubernetres_auth if available. +# +# Vars set: +# KUBE_USER +# KUBE_PASSWORD +function get-password { + local file="$HOME/.kubernetes_auth" + if [[ -r "$file" ]]; then + KUBE_USER=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["User"]') + KUBE_PASSWORD=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["Password"]') + return + fi + KUBE_USER=admin + KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))') + + # Remove this code, since in all use cases I can see, we are overwriting this + # at cluster creation time. + cat << EOF > "$file" +{ + "User": "$KUBE_USER", + "Password": "$KUBE_PASSWORD" +} +EOF + chmod 0600 "$file" +} + +# Generate authentication token for admin user. Will +# read from $HOME/.kubernetes_auth if available. 
+# +# Vars set: +# KUBE_ADMIN_TOKEN +function get-admin-token { + local file="$HOME/.kubernetes_auth" + if [[ -r "$file" ]]; then + KUBE_ADMIN_TOKEN=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["BearerToken"]') + return + fi + KUBE_ADMIN_TOKEN=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(32))') +} + +# Instantiate a kubernetes cluster +# +# Assumed vars +# KUBE_ROOT +# +function kube-up { + # Make sure we have the tar files staged on Azure Storage + find-release-tars + upload-server-tars + + ensure-temp-dir + + get-password + python "${KUBE_ROOT}/third_party/htpasswd/htpasswd.py" \ + -b -c "${KUBE_TEMP}/htpasswd" "$KUBE_USER" "$KUBE_PASSWORD" + local htpasswd + htpasswd=$(cat "${KUBE_TEMP}/htpasswd") + + + # Generate openvpn certs + echo "--> Generating openvpn certs" + echo 01 > ${KUBE_TEMP}/ca.srl + openssl genrsa -out ${KUBE_TEMP}/ca.key + openssl req -new -x509 -days 1095 \ + -key ${KUBE_TEMP}/ca.key \ + -out ${KUBE_TEMP}/ca.crt \ + -subj "/CN=openvpn-ca" + openssl genrsa -out ${KUBE_TEMP}/server.key + openssl req -new \ + -key ${KUBE_TEMP}/server.key \ + -out ${KUBE_TEMP}/server.csr \ + -subj "/CN=server" + openssl x509 -req -days 1095 \ + -in ${KUBE_TEMP}/server.csr \ + -CA ${KUBE_TEMP}/ca.crt \ + -CAkey ${KUBE_TEMP}/ca.key \ + -CAserial ${KUBE_TEMP}/ca.srl \ + -out ${KUBE_TEMP}/server.crt + for (( i=0; i<${#MINION_NAMES[@]}; i++)); do + openssl genrsa -out ${KUBE_TEMP}/${MINION_NAMES[$i]}.key + openssl req -new \ + -key ${KUBE_TEMP}/${MINION_NAMES[$i]}.key \ + -out ${KUBE_TEMP}/${MINION_NAMES[$i]}.csr \ + -subj "/CN=${MINION_NAMES[$i]}" + openssl x509 -req -days 1095 \ + -in ${KUBE_TEMP}/${MINION_NAMES[$i]}.csr \ + -CA ${KUBE_TEMP}/ca.crt \ + -CAkey ${KUBE_TEMP}/ca.key \ + -CAserial ${KUBE_TEMP}/ca.srl \ + -out ${KUBE_TEMP}/${MINION_NAMES[$i]}.crt + done + + # Build up start up script for master + echo "--> Building up start up script for master" + ( + echo "#!/bin/bash" + echo "CA_CRT=\"$(cat ${KUBE_TEMP}/ca.crt)\"" + echo "SERVER_CRT=\"$(cat ${KUBE_TEMP}/server.crt)\"" + echo "SERVER_KEY=\"$(cat ${KUBE_TEMP}/server.key)\"" + echo "mkdir -p /var/cache/kubernetes-install" + echo "cd /var/cache/kubernetes-install" + echo "readonly MASTER_NAME='${MASTER_NAME}'" + echo "readonly NODE_INSTANCE_PREFIX='${INSTANCE_PREFIX}-minion'" + echo "readonly SERVER_BINARY_TAR_URL='${SERVER_BINARY_TAR_URL}'" + echo "readonly SALT_TAR_URL='${SALT_TAR_URL}'" + echo "readonly MASTER_HTPASSWD='${htpasswd}'" + grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/common.sh" + grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/create-dynamic-salt-files.sh" + grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/download-release.sh" + grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/salt-master.sh" + ) > "${KUBE_TEMP}/master-start.sh" + + cat "${KUBE_TEMP}/master-start.sh" > ~/kubernetes/start-files/master-start.sh + + + + if [ ! -f $AZ_SSH_KEY ]; then + ssh-keygen -f $AZ_SSH_KEY -N '' + fi + + if [ ! 
-f $AZ_SSH_CERT ]; then + openssl req -new -x509 -days 1095 -key $AZ_SSH_KEY -out $AZ_SSH_CERT \ + -subj "/CN=azure-ssh-key" + fi + + if [ -z "$(azure network vnet show $AZ_VNET 2>/dev/null | grep data)" ]; then + #azure network vnet create with $AZ_SUBNET + #FIXME not working + echo error create vnet $AZ_VNET with subnet $AZ_SUBNET + exit 1 + fi + + echo "--> Starting VM" + azure vm create \ + -w $AZ_VNET \ + -n $MASTER_NAME \ + -l "$AZ_LOCATION" \ + -t $AZ_SSH_CERT \ + -e 22000 -P \ + -d ${KUBE_TEMP}/master-start.sh \ + -b $AZ_SUBNET \ + $AZ_CS $AZ_IMAGE $USER + + ssh_ports=($(eval echo "2200{1..$NUM_MINIONS}")) + + + #Build up start up script for minions + echo "--> Building up start up script for minions" + for (( i=0; i<${#MINION_NAMES[@]}; i++)); do + ( + echo "#!/bin/bash" + echo "MASTER_NAME='${MASTER_NAME}'" + echo "CA_CRT=\"$(cat ${KUBE_TEMP}/ca.crt)\"" + echo "CLIENT_CRT=\"$(cat ${KUBE_TEMP}/${MINION_NAMES[$i]}.crt)\"" + echo "CLIENT_KEY=\"$(cat ${KUBE_TEMP}/${MINION_NAMES[$i]}.key)\"" + echo "MINION_IP_RANGE='${MINION_IP_RANGES[$i]}'" + grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/common.sh" + grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/salt-minion.sh" + ) > "${KUBE_TEMP}/minion-start-${i}.sh" + + cat "${KUBE_TEMP}/minion-start-${i}.sh" > ~/kubernetes/start-files/minion-start-${i}.sh + + echo "--> Starting VM" + azure vm create \ + -c -w $AZ_VNET \ + -n ${MINION_NAMES[$i]} \ + -l "$AZ_LOCATION" \ + -t $AZ_SSH_CERT \ + -e ${ssh_ports[$i]} -P \ + -d ${KUBE_TEMP}/minion-start-${i}.sh \ + -b $AZ_SUBNET \ + $AZ_CS $AZ_IMAGE $USER + done + + echo "--> Createing endpoint" + azure vm endpoint create $MASTER_NAME 443 + + local fail=0 + local job + for job in $(jobs -p); do + wait "${job}" || fail=$((fail + 1)) + done + if (( $fail != 0 )); then + echo "${fail} commands failed. Exiting." >&2 + exit 2 + fi + + + detect-master > /dev/null + + echo "==> KUBE_USER:KUBE_PASSWORD: ${KUBE_USER}:${KUBE_PASSWORD}" + echo "==> KUBE_MASTER_IP: ${KUBE_MASTER_IP}" + + echo "Waiting for cluster initialization." + echo + echo " This will continually check to see if the API for kubernetes is reachable." + echo " This might loop forever if there was some uncaught error during start" + echo " up." + echo + + + until curl --insecure --user "${KUBE_USER}:${KUBE_PASSWORD}" --max-time 5 \ + --fail --output /dev/null --silent "https://${KUBE_MASTER_IP}/api/v1beta1/pods"; do + printf "." + sleep 2 + done + + printf "\n" + echo "Kubernetes cluster created." + echo "Sanity checking cluster..." + + sleep 5 + + # Basic sanity checking + for (( i=0; i<${#MINION_NAMES[@]}; i++)); do + # Make sure docker is installed + echo "--> Making sure docker is installed on ${MINION_NAMES[$i]}." + ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} $AZ_CS.cloudapp.net which docker > /dev/null || { + echo "Docker failed to install on ${MINION_NAMES[$i]}. Your cluster is unlikely" >&2 + echo "to work correctly. Please run ./cluster/kube-down.sh and re-create the" >&2 + echo "cluster. (sorry!)" >&2 + exit 1 + } + done + + echo + echo "Kubernetes cluster is running. The master is running at:" + echo + echo " https://${KUBE_MASTER_IP}" + echo + echo "The user name and password to use is located in ~/.kubernetes_auth." + echo + + echo "--> nginx auth" + + local kube_cert=".kubecfg.crt" + local kube_key=".kubecfg.key" + local ca_cert=".kubernetes.ca.crt" + + # TODO: generate ADMIN (and KUBELET) tokens and put those in the master's + # config file. Distribute the same way the htpasswd is done. 
+ (umask 077 + ssh -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net sudo cat /srv/kubernetes/kubecfg.crt >"${HOME}/${kube_cert}" 2>/dev/null + ssh -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net sudo cat /srv/kubernetes/kubecfg.key >"${HOME}/${kube_key}" 2>/dev/null + ssh -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net sudo cat /srv/kubernetes/ca.crt >"${HOME}/${ca_cert}" 2>/dev/null + + cat << EOF > ~/.kubernetes_auth +{ + "User": "$KUBE_USER", + "Password": "$KUBE_PASSWORD", + "CAFile": "$HOME/$ca_cert", + "CertFile": "$HOME/$kube_cert", + "KeyFile": "$HOME/$kube_key" +} +EOF + + chmod 0600 ~/.kubernetes_auth "${HOME}/${kube_cert}" \ + "${HOME}/${kube_key}" "${HOME}/${ca_cert}" + ) +} + +# Delete a kubernetes cluster +function kube-down { + echo "Bringing down cluster" + + set +e + azure vm delete $MASTER_NAME -b -q + for (( i=0; i<${#MINION_NAMES[@]}; i++)); do + azure vm delete ${MINION_NAMES[$i]} -b -q + done + + wait + +} + +# Update a kubernetes cluster with latest source +#function kube-push { +# detect-project +# detect-master + + # Make sure we have the tar files staged on Azure Storage +# find-release-tars +# upload-server-tars + +# ( +# echo "#! /bin/bash" +# echo "mkdir -p /var/cache/kubernetes-install" +# echo "cd /var/cache/kubernetes-install" +# echo "readonly SERVER_BINARY_TAR_URL='${SERVER_BINARY_TAR_URL}'" +# echo "readonly SALT_TAR_URL='${SALT_TAR_URL}'" +# grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/common.sh" +# grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/download-release.sh" +# echo "echo Executing configuration" +# echo "sudo salt '*' mine.update" +# echo "sudo salt --force-color '*' state.highstate" +# ) | gcutil ssh --project "$PROJECT" --zone "$ZONE" "$KUBE_MASTER" sudo bash + +# get-password + +# echo +# echo "Kubernetes cluster is running. The master is running at:" +# echo +# echo " https://${KUBE_MASTER_IP}" +# echo +# echo "The user name and password to use is located in ~/.kubernetes_auth." +# echo + +#} + +# ----------------------------------------------------------------------------- +# Cluster specific test helpers used from hack/e2e-test.sh + +# Execute prior to running tests to build a release if required for env. +# +# Assumed Vars: +# KUBE_ROOT +function test-build-release { + # Make a release + "${KUBE_ROOT}/build/release.sh" +} + +# SSH to a node by name ($1) and run a command ($2). +function ssh-to-node { + local node="$1" + local cmd="$2" + ssh --ssh_arg "-o LogLevel=quiet" "${node}" "${cmd}" +} + +# Restart the kube-proxy on a node ($1) +function restart-kube-proxy { + ssh-to-node "$1" "sudo /etc/init.d/kube-proxy restart" +} + +# Setup monitoring using heapster and InfluxDB +function setup-monitoring { + echo "not implemented" >/dev/null +} + +function teardown-monitoring { + echo "not implemented" >/dev/null +} + + + From 1ee5ab5e5e83aceb9ca0ccfa1c1ad83d1f9cea52 Mon Sep 17 00:00:00 2001 From: Jeff Mendoza Date: Tue, 18 Nov 2014 14:10:18 -0800 Subject: [PATCH 2/9] Updates and formatting to azure scripts. --- cluster/azure/config-default.sh | 11 +- .../templates/create-dynamic-salt-files.sh | 1 + cluster/azure/util.sh | 718 +++++++++--------- 3 files changed, 365 insertions(+), 365 deletions(-) diff --git a/cluster/azure/config-default.sh b/cluster/azure/config-default.sh index c1ad60daf7d..77838953a77 100644 --- a/cluster/azure/config-default.sh +++ b/cluster/azure/config-default.sh @@ -14,19 +14,19 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-INSTANCE_PREFIX=kubenertes +INSTANCE_PREFIX=kubernetes AZ_LOCATION='West US' TAG=testing AZ_CS_PREFIX=kube -AZ_VNET=shchTest +AZ_VNET=MyVnet AZ_SUBNET=Subnet-1 -AZ_IMAGE=b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04-LTS-amd64-server-20140618.1-en-us-30GB -AZ_CS="" # is set in azure/util.sh verify-prereqs +AZ_IMAGE=b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04_1-LTS-amd64-server-20140927-en-us-30GB +AZ_CS="" # is set in azure/util.sh verify-prereqs AZ_SSH_KEY=$HOME/.ssh/azure_rsa AZ_SSH_CERT=$HOME/.ssh/azure.pem -NUM_MINIONS=2 +NUM_MINIONS=4 MASTER_NAME="${INSTANCE_PREFIX}-master" MASTER_TAG="${INSTANCE_PREFIX}-master" @@ -35,3 +35,4 @@ MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}})) MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24")) MINION_SCOPES="" +PORTAL_NET="10.250.0.0/16" diff --git a/cluster/azure/templates/create-dynamic-salt-files.sh b/cluster/azure/templates/create-dynamic-salt-files.sh index 9255fbf0c24..afea82b88c8 100644 --- a/cluster/azure/templates/create-dynamic-salt-files.sh +++ b/cluster/azure/templates/create-dynamic-salt-files.sh @@ -21,6 +21,7 @@ mkdir -p /srv/salt-overlay/pillar cat </srv/salt-overlay/pillar/cluster-params.sls node_instance_prefix: $NODE_INSTANCE_PREFIX +portal_net: $PORTAL_NET EOF mkdir -p /srv/salt-overlay/salt/nginx diff --git a/cluster/azure/util.sh b/cluster/azure/util.sh index 4e7d4a91268..6830bda1494 100644 --- a/cluster/azure/util.sh +++ b/cluster/azure/util.sh @@ -21,40 +21,60 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. source "${KUBE_ROOT}/cluster/azure/${KUBE_CONFIG_FILE-"config-default.sh"}" +function azure_call { + params="" + for param in "$@" + do + params="${params} \"${param}\"" + done + rv=1 + stderr="getaddrinfo ENOTFOUND" + while [ $rv -ne 0 -a -n "$(echo $stderr | grep "getaddrinfo ENOTFOUND")" ]; do + set +e + { stderr=$(bash -c "azure $params" 2>&1 1>&3-) ;} 3>&1 + rv=$? 
+ set -e + done + if [ $rv -ne 0 ]; then + echo $stderr 1>&2 + exit + fi +} + function json_val () { python -c 'import json,sys;obj=json.load(sys.stdin);print obj'$1''; } # Verify prereqs function verify-prereqs { - if [ -z "$(which azure)" ]; then - echo "Couldn't find azure in PATH" - echo " please install with 'npm install azure-cli'" - exit 1 - fi + if [ -z "$(which azure)" ]; then + echo "Couldn't find azure in PATH" + echo " please install with 'npm install azure-cli'" + exit 1 + fi - if [ -z "$(azure account list | grep true)" ]; then - echo "Default azure account not set" - echo " please set with 'azure account set'" - exit 1 - fi - - account=$(azure account list | grep true | awk '{ print $2 }') - if which md5 > /dev/null 2>&1; then - AZ_HSH=$(md5 -q -s "$AZ_SUBNET@$account") - else - AZ_HSH=$(echo -n "$AZ_SUBNET@$account" | md5sum) - fi - - AZ_HSH=${AZ_HSH:0:7} - AZ_STG=kube$AZ_HSH - echo "==> AZ_STG: $AZ_STG" - - AZ_CS="$AZ_CS_PREFIX-$AZ_HSH" - echo "==> AZ_CS: $AZ_CS" - - CONTAINER=kube-$TAG - echo "==> CONTAINER: $CONTAINER" + if [ -z "$(azure_call account list | grep true)" ]; then + echo "Default azure account not set" + echo " please set with 'azure account set'" + exit 1 + fi + + account=$(azure_call account list | grep true) + if which md5 > /dev/null 2>&1; then + AZ_HSH=$(md5 -q -s "$account") + else + AZ_HSH=$(echo -n "$account" | md5sum) + fi + + AZ_HSH=${AZ_HSH:0:7} + AZ_STG=kube$AZ_HSH + echo "==> AZ_STG: $AZ_STG" + + AZ_CS="$AZ_CS_PREFIX-$AZ_HSH" + echo "==> AZ_CS: $AZ_CS" + + CONTAINER=kube-$TAG + echo "==> CONTAINER: $CONTAINER" } # Create a temp dir that'll be deleted at the end of this bash session. @@ -62,10 +82,10 @@ function verify-prereqs { # Vars set: # KUBE_TEMP function ensure-temp-dir { - if [[ -z ${KUBE_TEMP-} ]]; then - KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX) - trap 'rm -rf "${KUBE_TEMP}"' EXIT - fi + if [[ -z ${KUBE_TEMP-} ]]; then + KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX) + trap 'rm -rf "${KUBE_TEMP}"' EXIT + fi } # Verify and find the various tar files that we are going to use on the server. @@ -74,23 +94,23 @@ function ensure-temp-dir { # SERVER_BINARY_TAR # SALT_TAR function find-release-tars { - SERVER_BINARY_TAR="${KUBE_ROOT}/server/kubernetes-server-linux-amd64.tar.gz" - if [[ ! -f "$SERVER_BINARY_TAR" ]]; then - SERVER_BINARY_TAR="${KUBE_ROOT}/_output/release-tars/kubernetes-server-linux-amd64.tar.gz" - fi - if [[ ! -f "$SERVER_BINARY_TAR" ]]; then - echo "!!! Cannot find kubernetes-server-linux-amd64.tar.gz" - exit 1 - fi + SERVER_BINARY_TAR="${KUBE_ROOT}/server/kubernetes-server-linux-amd64.tar.gz" + if [[ ! -f "$SERVER_BINARY_TAR" ]]; then + SERVER_BINARY_TAR="${KUBE_ROOT}/_output/release-tars/kubernetes-server-linux-amd64.tar.gz" + fi + if [[ ! -f "$SERVER_BINARY_TAR" ]]; then + echo "!!! Cannot find kubernetes-server-linux-amd64.tar.gz" + exit 1 + fi - SALT_TAR="${KUBE_ROOT}/server/kubernetes-salt.tar.gz" - if [[ ! -f "$SALT_TAR" ]]; then - SALT_TAR="${KUBE_ROOT}/_output/release-tars/kubernetes-salt.tar.gz" - fi - if [[ ! -f "$SALT_TAR" ]]; then - echo "!!! Cannot find kubernetes-salt.tar.gz" - exit 1 - fi + SALT_TAR="${KUBE_ROOT}/server/kubernetes-salt.tar.gz" + if [[ ! -f "$SALT_TAR" ]]; then + SALT_TAR="${KUBE_ROOT}/_output/release-tars/kubernetes-salt.tar.gz" + fi + if [[ ! -f "$SALT_TAR" ]]; then + echo "!!! 
Cannot find kubernetes-salt.tar.gz" + exit 1 + fi } @@ -104,82 +124,81 @@ function find-release-tars { # SERVER_BINARY_TAR_URL # SALT_TAR_URL function upload-server-tars() { - SERVER_BINARY_TAR_URL= - SALT_TAR_URL= - - echo "==> SERVER_BINARY_TAR: $SERVER_BINARY_TAR" - echo "==> SALT_TAR: $SALT_TAR" - - echo "+++ Staging server tars to Azure Storage: $AZ_STG" - local server_binary_url="${SERVER_BINARY_TAR##*/}" - local salt_url="${SALT_TAR##*/}" - - SERVER_BINARY_TAR_URL="https://${AZ_STG}.blob.core.windows.net/$CONTAINER/$server_binary_url" - SALT_TAR_URL="https://${AZ_STG}.blob.core.windows.net/$CONTAINER/$salt_url" + SERVER_BINARY_TAR_URL= + SALT_TAR_URL= - echo "==> SERVER_BINARY_TAR_URL: $SERVER_BINARY_TAR_URL" - echo "==> SALT_TAR_URL: $SALT_TAR_URL" - - - echo "--> Checking storage exsists..." - if [ -z "$(azure storage account show $AZ_STG 2>/dev/null | \ + echo "==> SERVER_BINARY_TAR: $SERVER_BINARY_TAR" + echo "==> SALT_TAR: $SALT_TAR" + + echo "+++ Staging server tars to Azure Storage: $AZ_STG" + local server_binary_url="${SERVER_BINARY_TAR##*/}" + local salt_url="${SALT_TAR##*/}" + + SERVER_BINARY_TAR_URL="https://${AZ_STG}.blob.core.windows.net/$CONTAINER/$server_binary_url" + SALT_TAR_URL="https://${AZ_STG}.blob.core.windows.net/$CONTAINER/$salt_url" + + echo "==> SERVER_BINARY_TAR_URL: $SERVER_BINARY_TAR_URL" + echo "==> SALT_TAR_URL: $SALT_TAR_URL" + + echo "--> Checking storage exsists..." + if [ -z "$(azure_call storage account show $AZ_STG 2>/dev/null | \ grep data)" ]; then - echo "--> Creating storage..." - azure storage account create -l "$AZ_LOCATION" $AZ_STG - fi + echo "--> Creating storage..." + azure_call storage account create -l "$AZ_LOCATION" $AZ_STG + fi - echo "--> Getting storage key..." - stg_key=$(azure storage account keys list $AZ_STG --json | \ - json_val '["primaryKey"]') + echo "--> Getting storage key..." + stg_key=$(azure_call storage account keys list $AZ_STG --json | \ + json_val '["primaryKey"]') - echo "--> Checking storage container exsists..." - if [ -z "$(azure storage container show -a $AZ_STG -k "$stg_key" \ + echo "--> Checking storage container exsists..." + if [ -z "$(azure_call storage container show -a $AZ_STG -k "$stg_key" \ $CONTAINER 2>/dev/null | grep data)" ]; then - echo "--> Creating storage container..." - azure storage container create \ - -a $AZ_STG \ - -k "$stg_key" \ - -p Blob \ - $CONTAINER - fi + echo "--> Creating storage container..." + azure_call storage container create \ + -a $AZ_STG \ + -k "$stg_key" \ + -p Blob \ + $CONTAINER + fi - echo "--> Checking server binary exists in the container..." - if [ -n "$(azure storage blob show -a $AZ_STG -k "$stg_key" \ + echo "--> Checking server binary exists in the container..." + if [ -n "$(azure_call storage blob show -a $AZ_STG -k "$stg_key" \ $CONTAINER $server_binary_url 2>/dev/null | grep data)" ]; then - echo "--> Deleting server binary in the container..." - azure storage blob delete \ - -a $AZ_STG \ - -k "$stg_key" \ - $CONTAINER \ - $server_binary_url - fi - - echo "--> Uploading server binary to the container..." - azure storage blob upload \ - -a $AZ_STG \ - -k "$stg_key" \ - $SERVER_BINARY_TAR \ - $CONTAINER \ - $server_binary_url - - echo "--> Checking salt data exists in the container..." - if [ -n "$(azure storage blob show -a $AZ_STG -k "$stg_key" \ - $CONTAINER $salt_url 2>/dev/null | grep data)" ]; then - echo "--> Deleting salt data in the container..." 
- azure storage blob delete \ - -a $AZ_STG \ - -k "$stg_key" \ - $CONTAINER \ - $salt_url - fi + echo "--> Deleting server binary in the container..." + azure_call storage blob delete \ + -a $AZ_STG \ + -k "$stg_key" \ + $CONTAINER \ + $server_binary_url + fi - echo "--> Uploading salt data to the container..." - azure storage blob upload \ - -a $AZ_STG \ - -k "$stg_key" \ - $SALT_TAR \ - $CONTAINER \ - $salt_url + echo "--> Uploading server binary to the container..." + azure_call storage blob upload \ + -a $AZ_STG \ + -k "$stg_key" \ + $SERVER_BINARY_TAR \ + $CONTAINER \ + $server_binary_url + + echo "--> Checking salt data exists in the container..." + if [ -n "$(azure_call storage blob show -a $AZ_STG -k "$stg_key" \ + $CONTAINER $salt_url 2>/dev/null | grep data)" ]; then + echo "--> Deleting salt data in the container..." + azure_call storage blob delete \ + -a $AZ_STG \ + -k "$stg_key" \ + $CONTAINER \ + $salt_url + fi + + echo "--> Uploading salt data to the container..." + azure_call storage blob upload \ + -a $AZ_STG \ + -k "$stg_key" \ + $SALT_TAR \ + $CONTAINER \ + $salt_url } # Detect the information about the minions @@ -188,16 +207,15 @@ function upload-server-tars() { # MINION_NAMES # ZONE # Vars set: -# +# function detect-minions () { - if [ -z "$AZ_CS" ]; then - verify-prereqs - fi - ssh_ports=($(eval echo "2200{1..$NUM_MINIONS}")) - for (( i=0; i<${#MINION_NAMES[@]}; i++)); do - MINION_NAMES[$i]=$(ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} $AZ_CS.cloudapp.net hostname -f) - done - + if [ -z "$AZ_CS" ]; then + verify-prereqs + fi + ssh_ports=($(eval echo "2200{1..$NUM_MINIONS}")) + for (( i=0; i<${#MINION_NAMES[@]}; i++)); do + MINION_NAMES[$i]=$(ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} $AZ_CS.cloudapp.net hostname -f) + done } # Detect the IP for the master @@ -209,14 +227,13 @@ function detect-minions () { # KUBE_MASTER # KUBE_MASTER_IP function detect-master () { - if [ -z "$AZ_CS" ]; then - verify-prereqs - fi - - KUBE_MASTER=${MASTER_NAME} - KUBE_MASTER_IP="${AZ_CS}.cloudapp.net" - echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)" + if [ -z "$AZ_CS" ]; then + verify-prereqs + fi + KUBE_MASTER=${MASTER_NAME} + KUBE_MASTER_IP="${AZ_CS}.cloudapp.net" + echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)" } # Ensure that we have a password created for validating to the master. Will @@ -226,24 +243,24 @@ function detect-master () { # KUBE_USER # KUBE_PASSWORD function get-password { - local file="$HOME/.kubernetes_auth" - if [[ -r "$file" ]]; then - KUBE_USER=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["User"]') - KUBE_PASSWORD=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["Password"]') - return - fi - KUBE_USER=admin - KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))') + local file="$HOME/.kubernetes_auth" + if [[ -r "$file" ]]; then + KUBE_USER=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["User"]') + KUBE_PASSWORD=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["Password"]') + return + fi + KUBE_USER=admin + KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))') - # Remove this code, since in all use cases I can see, we are overwriting this - # at cluster creation time. 
- cat << EOF > "$file" + # Remove this code, since in all use cases I can see, we are overwriting this + # at cluster creation time. + cat << EOF > "$file" { "User": "$KUBE_USER", "Password": "$KUBE_PASSWORD" } EOF - chmod 0600 "$file" + chmod 0600 "$file" } # Generate authentication token for admin user. Will @@ -252,12 +269,12 @@ EOF # Vars set: # KUBE_ADMIN_TOKEN function get-admin-token { - local file="$HOME/.kubernetes_auth" - if [[ -r "$file" ]]; then - KUBE_ADMIN_TOKEN=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["BearerToken"]') - return - fi - KUBE_ADMIN_TOKEN=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(32))') + local file="$HOME/.kubernetes_auth" + if [[ -r "$file" ]]; then + KUBE_ADMIN_TOKEN=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["BearerToken"]') + return + fi + KUBE_ADMIN_TOKEN=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(32))') } # Instantiate a kubernetes cluster @@ -266,207 +283,192 @@ function get-admin-token { # KUBE_ROOT # function kube-up { - # Make sure we have the tar files staged on Azure Storage - find-release-tars - upload-server-tars + # Make sure we have the tar files staged on Azure Storage + find-release-tars + upload-server-tars - ensure-temp-dir + ensure-temp-dir + + get-password + python "${KUBE_ROOT}/third_party/htpasswd/htpasswd.py" \ + -b -c "${KUBE_TEMP}/htpasswd" "$KUBE_USER" "$KUBE_PASSWORD" + local htpasswd + htpasswd=$(cat "${KUBE_TEMP}/htpasswd") - get-password - python "${KUBE_ROOT}/third_party/htpasswd/htpasswd.py" \ - -b -c "${KUBE_TEMP}/htpasswd" "$KUBE_USER" "$KUBE_PASSWORD" - local htpasswd - htpasswd=$(cat "${KUBE_TEMP}/htpasswd") - - # Generate openvpn certs - echo "--> Generating openvpn certs" - echo 01 > ${KUBE_TEMP}/ca.srl - openssl genrsa -out ${KUBE_TEMP}/ca.key - openssl req -new -x509 -days 1095 \ - -key ${KUBE_TEMP}/ca.key \ - -out ${KUBE_TEMP}/ca.crt \ - -subj "/CN=openvpn-ca" - openssl genrsa -out ${KUBE_TEMP}/server.key - openssl req -new \ - -key ${KUBE_TEMP}/server.key \ - -out ${KUBE_TEMP}/server.csr \ - -subj "/CN=server" - openssl x509 -req -days 1095 \ - -in ${KUBE_TEMP}/server.csr \ - -CA ${KUBE_TEMP}/ca.crt \ - -CAkey ${KUBE_TEMP}/ca.key \ - -CAserial ${KUBE_TEMP}/ca.srl \ - -out ${KUBE_TEMP}/server.crt - for (( i=0; i<${#MINION_NAMES[@]}; i++)); do - openssl genrsa -out ${KUBE_TEMP}/${MINION_NAMES[$i]}.key - openssl req -new \ - -key ${KUBE_TEMP}/${MINION_NAMES[$i]}.key \ - -out ${KUBE_TEMP}/${MINION_NAMES[$i]}.csr \ - -subj "/CN=${MINION_NAMES[$i]}" - openssl x509 -req -days 1095 \ - -in ${KUBE_TEMP}/${MINION_NAMES[$i]}.csr \ - -CA ${KUBE_TEMP}/ca.crt \ - -CAkey ${KUBE_TEMP}/ca.key \ - -CAserial ${KUBE_TEMP}/ca.srl \ - -out ${KUBE_TEMP}/${MINION_NAMES[$i]}.crt - done + echo "--> Generating openvpn certs" + echo 01 > ${KUBE_TEMP}/ca.srl + openssl genrsa -out ${KUBE_TEMP}/ca.key + openssl req -new -x509 -days 1095 \ + -key ${KUBE_TEMP}/ca.key \ + -out ${KUBE_TEMP}/ca.crt \ + -subj "/CN=openvpn-ca" + openssl genrsa -out ${KUBE_TEMP}/server.key + openssl req -new \ + -key ${KUBE_TEMP}/server.key \ + -out ${KUBE_TEMP}/server.csr \ + -subj "/CN=server" + openssl x509 -req -days 1095 \ + -in ${KUBE_TEMP}/server.csr \ + -CA ${KUBE_TEMP}/ca.crt \ + -CAkey ${KUBE_TEMP}/ca.key \ + -CAserial ${KUBE_TEMP}/ca.srl \ + -out ${KUBE_TEMP}/server.crt + for (( i=0; i<${#MINION_NAMES[@]}; i++)); do + openssl genrsa 
-out ${KUBE_TEMP}/${MINION_NAMES[$i]}.key + openssl req -new \ + -key ${KUBE_TEMP}/${MINION_NAMES[$i]}.key \ + -out ${KUBE_TEMP}/${MINION_NAMES[$i]}.csr \ + -subj "/CN=${MINION_NAMES[$i]}" + openssl x509 -req -days 1095 \ + -in ${KUBE_TEMP}/${MINION_NAMES[$i]}.csr \ + -CA ${KUBE_TEMP}/ca.crt \ + -CAkey ${KUBE_TEMP}/ca.key \ + -CAserial ${KUBE_TEMP}/ca.srl \ + -out ${KUBE_TEMP}/${MINION_NAMES[$i]}.crt + done - # Build up start up script for master - echo "--> Building up start up script for master" - ( - echo "#!/bin/bash" - echo "CA_CRT=\"$(cat ${KUBE_TEMP}/ca.crt)\"" - echo "SERVER_CRT=\"$(cat ${KUBE_TEMP}/server.crt)\"" - echo "SERVER_KEY=\"$(cat ${KUBE_TEMP}/server.key)\"" - echo "mkdir -p /var/cache/kubernetes-install" - echo "cd /var/cache/kubernetes-install" - echo "readonly MASTER_NAME='${MASTER_NAME}'" - echo "readonly NODE_INSTANCE_PREFIX='${INSTANCE_PREFIX}-minion'" - echo "readonly SERVER_BINARY_TAR_URL='${SERVER_BINARY_TAR_URL}'" - echo "readonly SALT_TAR_URL='${SALT_TAR_URL}'" - echo "readonly MASTER_HTPASSWD='${htpasswd}'" - grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/common.sh" - grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/create-dynamic-salt-files.sh" - grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/download-release.sh" - grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/salt-master.sh" - ) > "${KUBE_TEMP}/master-start.sh" - - cat "${KUBE_TEMP}/master-start.sh" > ~/kubernetes/start-files/master-start.sh - - - - if [ ! -f $AZ_SSH_KEY ]; then - ssh-keygen -f $AZ_SSH_KEY -N '' - fi - - if [ ! -f $AZ_SSH_CERT ]; then - openssl req -new -x509 -days 1095 -key $AZ_SSH_KEY -out $AZ_SSH_CERT \ - -subj "/CN=azure-ssh-key" - fi - - if [ -z "$(azure network vnet show $AZ_VNET 2>/dev/null | grep data)" ]; then - #azure network vnet create with $AZ_SUBNET - #FIXME not working - echo error create vnet $AZ_VNET with subnet $AZ_SUBNET - exit 1 - fi - - echo "--> Starting VM" - azure vm create \ - -w $AZ_VNET \ - -n $MASTER_NAME \ - -l "$AZ_LOCATION" \ - -t $AZ_SSH_CERT \ - -e 22000 -P \ - -d ${KUBE_TEMP}/master-start.sh \ - -b $AZ_SUBNET \ - $AZ_CS $AZ_IMAGE $USER - - ssh_ports=($(eval echo "2200{1..$NUM_MINIONS}")) - - - #Build up start up script for minions - echo "--> Building up start up script for minions" - for (( i=0; i<${#MINION_NAMES[@]}; i++)); do + # Build up start up script for master + echo "--> Building up start up script for master" ( - echo "#!/bin/bash" - echo "MASTER_NAME='${MASTER_NAME}'" - echo "CA_CRT=\"$(cat ${KUBE_TEMP}/ca.crt)\"" - echo "CLIENT_CRT=\"$(cat ${KUBE_TEMP}/${MINION_NAMES[$i]}.crt)\"" - echo "CLIENT_KEY=\"$(cat ${KUBE_TEMP}/${MINION_NAMES[$i]}.key)\"" - echo "MINION_IP_RANGE='${MINION_IP_RANGES[$i]}'" - grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/common.sh" - grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/salt-minion.sh" - ) > "${KUBE_TEMP}/minion-start-${i}.sh" + echo "#!/bin/bash" + echo "CA_CRT=\"$(cat ${KUBE_TEMP}/ca.crt)\"" + echo "SERVER_CRT=\"$(cat ${KUBE_TEMP}/server.crt)\"" + echo "SERVER_KEY=\"$(cat ${KUBE_TEMP}/server.key)\"" + echo "mkdir -p /var/cache/kubernetes-install" + echo "cd /var/cache/kubernetes-install" + echo "readonly MASTER_NAME='${MASTER_NAME}'" + echo "readonly NODE_INSTANCE_PREFIX='${INSTANCE_PREFIX}-minion'" + echo "readonly SERVER_BINARY_TAR_URL='${SERVER_BINARY_TAR_URL}'" + echo "readonly SALT_TAR_URL='${SALT_TAR_URL}'" + echo "readonly MASTER_HTPASSWD='${htpasswd}'" + echo "readonly PORTAL_NET='${PORTAL_NET}'" + grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/common.sh" + grep -v "^#" 
"${KUBE_ROOT}/cluster/azure/templates/create-dynamic-salt-files.sh" + grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/download-release.sh" + grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/salt-master.sh" + ) > "${KUBE_TEMP}/master-start.sh" - cat "${KUBE_TEMP}/minion-start-${i}.sh" > ~/kubernetes/start-files/minion-start-${i}.sh + if [ ! -f $AZ_SSH_KEY ]; then + ssh-keygen -f $AZ_SSH_KEY -N '' + fi - echo "--> Starting VM" - azure vm create \ - -c -w $AZ_VNET \ - -n ${MINION_NAMES[$i]} \ - -l "$AZ_LOCATION" \ - -t $AZ_SSH_CERT \ - -e ${ssh_ports[$i]} -P \ - -d ${KUBE_TEMP}/minion-start-${i}.sh \ - -b $AZ_SUBNET \ - $AZ_CS $AZ_IMAGE $USER - done + if [ ! -f $AZ_SSH_CERT ]; then + openssl req -new -x509 -days 1095 -key $AZ_SSH_KEY -out $AZ_SSH_CERT \ + -subj "/CN=azure-ssh-key" + fi - echo "--> Createing endpoint" - azure vm endpoint create $MASTER_NAME 443 - - local fail=0 - local job - for job in $(jobs -p); do - wait "${job}" || fail=$((fail + 1)) - done - if (( $fail != 0 )); then - echo "${fail} commands failed. Exiting." >&2 - exit 2 - fi - - - detect-master > /dev/null - - echo "==> KUBE_USER:KUBE_PASSWORD: ${KUBE_USER}:${KUBE_PASSWORD}" - echo "==> KUBE_MASTER_IP: ${KUBE_MASTER_IP}" - - echo "Waiting for cluster initialization." - echo - echo " This will continually check to see if the API for kubernetes is reachable." - echo " This might loop forever if there was some uncaught error during start" - echo " up." - echo - - - until curl --insecure --user "${KUBE_USER}:${KUBE_PASSWORD}" --max-time 5 \ - --fail --output /dev/null --silent "https://${KUBE_MASTER_IP}/api/v1beta1/pods"; do - printf "." - sleep 2 - done - - printf "\n" - echo "Kubernetes cluster created." - echo "Sanity checking cluster..." - - sleep 5 - - # Basic sanity checking - for (( i=0; i<${#MINION_NAMES[@]}; i++)); do - # Make sure docker is installed - echo "--> Making sure docker is installed on ${MINION_NAMES[$i]}." - ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} $AZ_CS.cloudapp.net which docker > /dev/null || { - echo "Docker failed to install on ${MINION_NAMES[$i]}. Your cluster is unlikely" >&2 - echo "to work correctly. Please run ./cluster/kube-down.sh and re-create the" >&2 - echo "cluster. (sorry!)" >&2 + if [ -z "$(azure_call network vnet show $AZ_VNET 2>/dev/null | grep data)" ]; then + #azure network vnet create with $AZ_SUBNET + #FIXME not working + echo error create vnet $AZ_VNET with subnet $AZ_SUBNET exit 1 - } - done + fi - echo - echo "Kubernetes cluster is running. The master is running at:" - echo - echo " https://${KUBE_MASTER_IP}" - echo - echo "The user name and password to use is located in ~/.kubernetes_auth." - echo - - echo "--> nginx auth" + echo "--> Starting VM" + azure_call vm create \ + -w $AZ_VNET \ + -n $MASTER_NAME \ + -l "$AZ_LOCATION" \ + -t $AZ_SSH_CERT \ + -e 22000 -P \ + -d ${KUBE_TEMP}/master-start.sh \ + -b $AZ_SUBNET \ + $AZ_CS $AZ_IMAGE $USER - local kube_cert=".kubecfg.crt" - local kube_key=".kubecfg.key" - local ca_cert=".kubernetes.ca.crt" + ssh_ports=($(eval echo "2200{1..$NUM_MINIONS}")) - # TODO: generate ADMIN (and KUBELET) tokens and put those in the master's - # config file. Distribute the same way the htpasswd is done. 
- (umask 077 - ssh -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net sudo cat /srv/kubernetes/kubecfg.crt >"${HOME}/${kube_cert}" 2>/dev/null - ssh -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net sudo cat /srv/kubernetes/kubecfg.key >"${HOME}/${kube_key}" 2>/dev/null - ssh -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net sudo cat /srv/kubernetes/ca.crt >"${HOME}/${ca_cert}" 2>/dev/null + #Build up start up script for minions + echo "--> Building up start up script for minions" + for (( i=0; i<${#MINION_NAMES[@]}; i++)); do + ( + echo "#!/bin/bash" + echo "MASTER_NAME='${MASTER_NAME}'" + echo "CA_CRT=\"$(cat ${KUBE_TEMP}/ca.crt)\"" + echo "CLIENT_CRT=\"$(cat ${KUBE_TEMP}/${MINION_NAMES[$i]}.crt)\"" + echo "CLIENT_KEY=\"$(cat ${KUBE_TEMP}/${MINION_NAMES[$i]}.key)\"" + echo "MINION_IP_RANGE='${MINION_IP_RANGES[$i]}'" + grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/common.sh" + grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/salt-minion.sh" + ) > "${KUBE_TEMP}/minion-start-${i}.sh" - cat << EOF > ~/.kubernetes_auth + echo "--> Starting VM" + azure_call vm create \ + -c -w $AZ_VNET \ + -n ${MINION_NAMES[$i]} \ + -l "$AZ_LOCATION" \ + -t $AZ_SSH_CERT \ + -e ${ssh_ports[$i]} -P \ + -d ${KUBE_TEMP}/minion-start-${i}.sh \ + -b $AZ_SUBNET \ + $AZ_CS $AZ_IMAGE $USER + done + + echo "--> Createing endpoint" + azure_call vm endpoint create $MASTER_NAME 443 + + detect-master > /dev/null + + echo "==> KUBE_MASTER_IP: ${KUBE_MASTER_IP}" + + echo "Waiting for cluster initialization." + echo + echo " This will continually check to see if the API for kubernetes is reachable." + echo " This might loop forever if there was some uncaught error during start" + echo " up." + echo + + until curl --insecure --user "${KUBE_USER}:${KUBE_PASSWORD}" --max-time 5 \ + --fail --output /dev/null --silent "https://${KUBE_MASTER_IP}/api/v1beta1/pods"; do + printf "." + sleep 2 + done + + printf "\n" + echo "Kubernetes cluster created." + echo "Sanity checking cluster..." + + # Wait for salt on the minions + sleep 30 + + # Basic sanity checking + for (( i=0; i<${#MINION_NAMES[@]}; i++)); do + # Make sure docker is installed + echo "--> Making sure docker is installed on ${MINION_NAMES[$i]}." + ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} \ + $AZ_CS.cloudapp.net which docker > /dev/null || { + echo "Docker failed to install on ${MINION_NAMES[$i]}. Your cluster is unlikely" >&2 + echo "to work correctly. Please run ./cluster/kube-down.sh and re-create the" >&2 + echo "cluster. (sorry!)" >&2 + exit 1 + } + done + + echo + echo "Kubernetes cluster is running. The master is running at:" + echo + echo " https://${KUBE_MASTER_IP}" + echo + echo "The user name and password to use is located in ~/.kubernetes_auth." + echo + + echo "--> nginx auth" + + local kube_cert=".kubecfg.crt" + local kube_key=".kubecfg.key" + local ca_cert=".kubernetes.ca.crt" + + # TODO: generate ADMIN (and KUBELET) tokens and put those in the master's +# config file. Distribute the same way the htpasswd is done. 
+(umask 077 + ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \ + sudo cat /srv/kubernetes/kubecfg.crt >"${HOME}/${kube_cert}" 2>/dev/null + ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \ + sudo cat /srv/kubernetes/kubecfg.key >"${HOME}/${kube_key}" 2>/dev/null + ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \ + sudo cat /srv/kubernetes/ca.crt >"${HOME}/${ca_cert}" 2>/dev/null + + cat << EOF > ~/.kubernetes_auth { "User": "$KUBE_USER", "Password": "$KUBE_PASSWORD", @@ -476,23 +478,22 @@ function kube-up { } EOF - chmod 0600 ~/.kubernetes_auth "${HOME}/${kube_cert}" \ - "${HOME}/${kube_key}" "${HOME}/${ca_cert}" - ) + chmod 0600 ~/.kubernetes_auth "${HOME}/${kube_cert}" \ + "${HOME}/${kube_key}" "${HOME}/${ca_cert}" +) } # Delete a kubernetes cluster function kube-down { - echo "Bringing down cluster" - - set +e - azure vm delete $MASTER_NAME -b -q - for (( i=0; i<${#MINION_NAMES[@]}; i++)); do - azure vm delete ${MINION_NAMES[$i]} -b -q - done + echo "Bringing down cluster" - wait + set +e + azure_call vm delete $MASTER_NAME -b -q + for (( i=0; i<${#MINION_NAMES[@]}; i++)); do + azure_call vm delete ${MINION_NAMES[$i]} -b -q + done + wait } # Update a kubernetes cluster with latest source @@ -500,7 +501,7 @@ function kube-down { # detect-project # detect-master - # Make sure we have the tar files staged on Azure Storage +# Make sure we have the tar files staged on Azure Storage # find-release-tars # upload-server-tars @@ -537,30 +538,27 @@ function kube-down { # Assumed Vars: # KUBE_ROOT function test-build-release { - # Make a release - "${KUBE_ROOT}/build/release.sh" + # Make a release + "${KUBE_ROOT}/build/release.sh" } # SSH to a node by name ($1) and run a command ($2). function ssh-to-node { - local node="$1" - local cmd="$2" - ssh --ssh_arg "-o LogLevel=quiet" "${node}" "${cmd}" + local node="$1" + local cmd="$2" + ssh --ssh_arg "-o LogLevel=quiet" "${node}" "${cmd}" } # Restart the kube-proxy on a node ($1) function restart-kube-proxy { - ssh-to-node "$1" "sudo /etc/init.d/kube-proxy restart" + ssh-to-node "$1" "sudo /etc/init.d/kube-proxy restart" } # Setup monitoring using heapster and InfluxDB function setup-monitoring { - echo "not implemented" >/dev/null + echo "not implemented" >/dev/null } function teardown-monitoring { - echo "not implemented" >/dev/null + echo "not implemented" >/dev/null } - - - From 9934a0ace4a2a35463bf9c933eab9ab2b71a139a Mon Sep 17 00:00:00 2001 From: Jeff Mendoza Date: Tue, 18 Nov 2014 14:10:50 -0800 Subject: [PATCH 3/9] Update cert generation for Azure. 
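On Azure the apiserver is reached through the cloud service DNS name
(<cloud-service>.cloudapp.net) rather than a fixed external IP, so the
master certificate is issued for that DNS name (used as the CN) instead of
an IP subjectAltName. A rough sketch of the derivation this patch adds to
make-ca-cert.sh, assuming the VM's FQDN has the form
<vm-name>.<cloud-service>.cloudapp.net (example names below are illustrative):

    # e.g. kubernetes-master.kube-a1b2c3d.cloudapp.net -> kube-a1b2c3d.cloudapp.net
    cert_ip=$(hostname -f | awk -F. '{ print $2 }').cloudapp.net
    use_cn=true
    # with use_cn=true, easyrsa issues the server cert for that DNS name
    ./easyrsa build-server-full "$cert_ip" nopass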
--- cluster/saltbase/salt/generate-cert/init.sls | 3 +++ .../salt/generate-cert/make-ca-cert.sh | 19 ++++++++++++++++--- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/cluster/saltbase/salt/generate-cert/init.sls b/cluster/saltbase/salt/generate-cert/init.sls index 604685a5269..cfe53747b1e 100644 --- a/cluster/saltbase/salt/generate-cert/init.sls +++ b/cluster/saltbase/salt/generate-cert/init.sls @@ -5,6 +5,9 @@ {% if grains.cloud == 'aws' %} {% set cert_ip='_use_aws_external_ip_' %} {% endif %} + {% if grains.cloud == 'azure' %} + {% set cert_ip='_use_azure_dns_name_' %} + {% endif %} {% if grains.cloud == 'vagrant' %} {% set cert_ip=grains.ip_interfaces.eth1[0] %} {% endif %} diff --git a/cluster/saltbase/salt/generate-cert/make-ca-cert.sh b/cluster/saltbase/salt/generate-cert/make-ca-cert.sh index eb6fa3d7161..84ee7859a8a 100755 --- a/cluster/saltbase/salt/generate-cert/make-ca-cert.sh +++ b/cluster/saltbase/salt/generate-cert/make-ca-cert.sh @@ -24,6 +24,8 @@ cert_group=kube-cert mkdir -p "$cert_dir" +use_cn=false + # TODO: Add support for discovery on other providers? if [ "$cert_ip" == "_use_gce_external_ip_" ]; then cert_ip=$(curl -s -H Metadata-Flavor:Google http://metadata.google.internal./computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip) @@ -33,6 +35,11 @@ if [ "$cert_ip" == "_use_aws_external_ip_" ]; then cert_ip=$(curl -s http://169.254.169.254/latest/meta-data/public-ipv4) fi +if [ "$cert_ip" == "_use_azure_dns_name_" ]; then + cert_ip=$(hostname -f | awk -F. '{ print $2 }').cloudapp.net + use_cn=true +fi + tmpdir=$(mktemp -d --tmpdir kubernetes_cacert.XXXXXX) trap 'rm -rf "${tmpdir}"' EXIT cd "${tmpdir}" @@ -55,10 +62,16 @@ tar xzf easy-rsa.tar.gz > /dev/null 2>&1 cd easy-rsa-master/easyrsa3 ./easyrsa init-pki > /dev/null 2>&1 ./easyrsa --batch build-ca nopass > /dev/null 2>&1 -./easyrsa --subject-alt-name=IP:$cert_ip build-server-full kubernetes-master nopass > /dev/null 2>&1 +if [ $use_cn = "true" ]; then + ./easyrsa build-server-full $cert_ip nopass > /dev/null 2>&1 + cp -p pki/issued/$cert_ip.crt "${cert_dir}/server.cert" > /dev/null 2>&1 + cp -p pki/private/$cert_ip.key "${cert_dir}/server.key" > /dev/null 2>&1 +else + ./easyrsa --subject-alt-name=IP:$cert_ip build-server-full kubernetes-master nopass > /dev/null 2>&1 + cp -p pki/issued/kubernetes-master.crt "${cert_dir}/server.cert" > /dev/null 2>&1 + cp -p pki/private/kubernetes-master.key "${cert_dir}/server.key" > /dev/null 2>&1 +fi ./easyrsa build-client-full kubecfg nopass > /dev/null 2>&1 -cp -p pki/issued/kubernetes-master.crt "${cert_dir}/server.cert" > /dev/null 2>&1 -cp -p pki/private/kubernetes-master.key "${cert_dir}/server.key" > /dev/null 2>&1 cp -p pki/ca.crt "${cert_dir}/ca.crt" cp -p pki/issued/kubecfg.crt "${cert_dir}/kubecfg.crt" cp -p pki/private/kubecfg.key "${cert_dir}/kubecfg.key" From f8df2d3478af2299ce5a976845e4035e8dcbf152 Mon Sep 17 00:00:00 2001 From: Jeff Mendoza Date: Tue, 18 Nov 2014 14:11:42 -0800 Subject: [PATCH 4/9] Remove Azure from icebox. 
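These icebox copies of the Azure scripts are superseded by the cluster/azure
implementation added and updated earlier in this series, so they are deleted
here.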
--- icebox/cluster/azure/config-default.sh | 34 -- .../azure/templates/download-release.sh | 31 -- icebox/cluster/azure/templates/salt-master.sh | 60 ---- icebox/cluster/azure/templates/salt-minion.sh | 53 ---- icebox/cluster/azure/util.sh | 294 ------------------ icebox/release/azure/config.sh | 40 --- icebox/release/azure/release.sh | 63 ---- 7 files changed, 575 deletions(-) delete mode 100644 icebox/cluster/azure/config-default.sh delete mode 100644 icebox/cluster/azure/templates/download-release.sh delete mode 100644 icebox/cluster/azure/templates/salt-master.sh delete mode 100644 icebox/cluster/azure/templates/salt-minion.sh delete mode 100644 icebox/cluster/azure/util.sh delete mode 100644 icebox/release/azure/config.sh delete mode 100755 icebox/release/azure/release.sh diff --git a/icebox/cluster/azure/config-default.sh b/icebox/cluster/azure/config-default.sh deleted file mode 100644 index 64d8f0cf45c..00000000000 --- a/icebox/cluster/azure/config-default.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash - -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -SCRIPT_DIR=$(CDPATH="" cd $(dirname $0); pwd) -source $SCRIPT_DIR/../release/azure/config.sh - -AZ_SSH_KEY=$HOME/.ssh/azure_rsa -AZ_SSH_CERT=$HOME/.ssh/azure.pem -AZ_IMAGE=b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04_1-LTS-amd64-server-20140926-en-us-30GB -AZ_SUBNET=Subnet-1 -AZ_VNET=kube-$AZ_HSH -AZ_CS=kube-$AZ_HSH - -NUM_MINIONS=4 - -MASTER_NAME="${INSTANCE_PREFIX}-master" -MASTER_TAG="${INSTANCE_PREFIX}-master" -MINION_TAG="${INSTANCE_PREFIX}-minion" -MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}})) -MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24")) -MINION_SCOPES="" diff --git a/icebox/cluster/azure/templates/download-release.sh b/icebox/cluster/azure/templates/download-release.sh deleted file mode 100644 index 0eed27faf65..00000000000 --- a/icebox/cluster/azure/templates/download-release.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash - -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Download and install release - -# This script assumes that the environment variable MASTER_RELEASE_TAR contains -# the release tar to download and unpack. It is meant to be pushed to the -# master and run. 
- -echo "Downloading release ($MASTER_RELEASE_TAR)" -wget $MASTER_RELEASE_TAR - -echo "Unpacking release" -rm -rf master-release || false -tar xzf master-release.tgz - -echo "Running release install script" -sudo master-release/src/scripts/master-release-install.sh diff --git a/icebox/cluster/azure/templates/salt-master.sh b/icebox/cluster/azure/templates/salt-master.sh deleted file mode 100644 index d9da3e6b07d..00000000000 --- a/icebox/cluster/azure/templates/salt-master.sh +++ /dev/null @@ -1,60 +0,0 @@ -#!/bin/bash - -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Prepopulate the name of the Master -mkdir -p /etc/salt/minion.d -echo "master: $MASTER_NAME" > /etc/salt/minion.d/master.conf - -cat </etc/salt/minion.d/grains.conf -grains: - roles: - - kubernetes-master - cloud: azure -EOF - -# Auto accept all keys from minions that try to join -mkdir -p /etc/salt/master.d -cat </etc/salt/master.d/auto-accept.conf -auto_accept: True -EOF - -cat </etc/salt/master.d/reactor.conf -# React to new minions starting by running highstate on them. -reactor: - - 'salt/minion/*/start': - - /srv/reactor/start.sls -EOF - -mkdir -p /srv/salt/nginx -echo $MASTER_HTPASSWD > /srv/salt/nginx/htpasswd - -mkdir -p /etc/openvpn -umask=$(umask) -umask 0066 -echo "$CA_CRT" > /etc/openvpn/ca.crt -echo "$SERVER_CRT" > /etc/openvpn/server.crt -echo "$SERVER_KEY" > /etc/openvpn/server.key -umask $umask - -# Install Salt -# -# We specify -X to avoid a race condition that can cause minion failure to -# install. See https://github.com/saltstack/salt-bootstrap/issues/270 -# -# -M installs the master -curl -L http://bootstrap.saltstack.com | sh -s -- -M -X - -echo $MASTER_HTPASSWD > /srv/salt/nginx/htpasswd diff --git a/icebox/cluster/azure/templates/salt-minion.sh b/icebox/cluster/azure/templates/salt-minion.sh deleted file mode 100644 index e60152ef7e6..00000000000 --- a/icebox/cluster/azure/templates/salt-minion.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/bin/bash - -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -mkdir -p /etc/openvpn -umask=$(umask) -umask 0066 -echo "$CA_CRT" > /etc/openvpn/ca.crt -echo "$CLIENT_CRT" > /etc/openvpn/client.crt -echo "$CLIENT_KEY" > /etc/openvpn/client.key -umask $umask - -# Prepopulate the name of the Master -mkdir -p /etc/salt/minion.d -echo "master: $MASTER_NAME" > /etc/salt/minion.d/master.conf - -# Turn on debugging for salt-minion -# echo "DAEMON_ARGS=\"\$DAEMON_ARGS --log-file-level=debug\"" > /etc/default/salt-minion - -hostnamef=$(hostname -f) -sudo apt-get install ipcalc -netmask=$(ipcalc $MINION_IP_RANGE | grep Netmask | awk '{ print $2 }') -network=$(ipcalc $MINION_IP_RANGE | grep Address | awk '{ print $2 }') -cbrstring="$network $netmask" - -# Our minions will have a pool role to distinguish them from the master. -cat </etc/salt/minion.d/grains.conf -grains: - roles: - - kubernetes-pool - cbr-cidr: $MINION_IP_RANGE - cloud: azure - hostnamef: $hostnamef - cbr-string: $cbrstring -EOF - -# Install Salt -# -# We specify -X to avoid a race condition that can cause minion failure to -# install. See https://github.com/saltstack/salt-bootstrap/issues/270 -curl -L http://bootstrap.saltstack.com | sh -s -- -X diff --git a/icebox/cluster/azure/util.sh b/icebox/cluster/azure/util.sh deleted file mode 100644 index 3dfc61cfa46..00000000000 --- a/icebox/cluster/azure/util.sh +++ /dev/null @@ -1,294 +0,0 @@ -#!/bin/bash - -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -SCRIPT_DIR=$(CDPATH="" cd $(dirname $0); pwd) - -# Use the config file specified in $KUBE_CONFIG_FILE, or default to -# config-default.sh. -source ${SCRIPT_DIR}/azure/${KUBE_CONFIG_FILE-"config-default.sh"} - -function detect-minions () { - ssh_ports=($(eval echo "2200{1..$NUM_MINIONS}")) - for (( i=0; i<${#MINION_NAMES[@]}; i++)); do - MINION_NAMES[$i]=$(ssh -i $AZ_SSH_KEY -p ${ssh_ports[$i]} $AZ_CS.cloudapp.net hostname -f) - done -} - -function detect-master () { - KUBE_MASTER_IP=${AZ_CS}.cloudapp.net - echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)" -} - -function get-password { - file=${HOME}/.kubernetes_auth - if [ -e ${file} ]; then - user=$(cat $file | python -c 'import json,sys;print json.load(sys.stdin)["User"]') - passwd=$(cat $file | python -c 'import json,sys;print json.load(sys.stdin)["Password"]') - return - fi - user=admin - passwd=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))') - - # Store password for reuse. 
- cat << EOF > ~/.kubernetes_auth -{ - "User": "$user", - "Password": "$passwd" -} -EOF - chmod 0600 ~/.kubernetes_auth -} - -# Verify prereqs -function verify-prereqs { - echo "OK" - # Already done in sourcing config-default, which sources - # release/azure/config.sh -} - -# Instantiate a kubernetes cluster -function kube-up { - KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX) - trap "rm -rf ${KUBE_TEMP}" EXIT - - get-password - python $SCRIPT_DIR/../third_party/htpasswd/htpasswd.py -b -c \ - ${KUBE_TEMP}/htpasswd $user $passwd - HTPASSWD=$(cat ${KUBE_TEMP}/htpasswd) - - # Generate openvpn certs - echo 01 > ${KUBE_TEMP}/ca.srl - openssl genrsa -out ${KUBE_TEMP}/ca.key - openssl req -new -x509 -days 1095 \ - -key ${KUBE_TEMP}/ca.key \ - -out ${KUBE_TEMP}/ca.crt \ - -subj "/CN=openvpn-ca" - openssl genrsa -out ${KUBE_TEMP}/server.key - openssl req -new \ - -key ${KUBE_TEMP}/server.key \ - -out ${KUBE_TEMP}/server.csr \ - -subj "/CN=server" - openssl x509 -req -days 1095 \ - -in ${KUBE_TEMP}/server.csr \ - -CA ${KUBE_TEMP}/ca.crt \ - -CAkey ${KUBE_TEMP}/ca.key \ - -CAserial ${KUBE_TEMP}/ca.srl \ - -out ${KUBE_TEMP}/server.crt - for (( i=0; i<${#MINION_NAMES[@]}; i++)); do - openssl genrsa -out ${KUBE_TEMP}/${MINION_NAMES[$i]}.key - openssl req -new \ - -key ${KUBE_TEMP}/${MINION_NAMES[$i]}.key \ - -out ${KUBE_TEMP}/${MINION_NAMES[$i]}.csr \ - -subj "/CN=${MINION_NAMES[$i]}" - openssl x509 -req -days 1095 \ - -in ${KUBE_TEMP}/${MINION_NAMES[$i]}.csr \ - -CA ${KUBE_TEMP}/ca.crt \ - -CAkey ${KUBE_TEMP}/ca.key \ - -CAserial ${KUBE_TEMP}/ca.srl \ - -out ${KUBE_TEMP}/${MINION_NAMES[$i]}.crt - done - - # Build up start up script for master - ( - echo "#!/bin/bash" - echo "MASTER_NAME=${MASTER_NAME}" - echo "MASTER_RELEASE_TAR=${FULL_URL}" - echo "MASTER_HTPASSWD='${HTPASSWD}'" - echo "CA_CRT=\"$(cat ${KUBE_TEMP}/ca.crt)\"" - echo "SERVER_CRT=\"$(cat ${KUBE_TEMP}/server.crt)\"" - echo "SERVER_KEY=\"$(cat ${KUBE_TEMP}/server.key)\"" - grep -v "^#" $SCRIPT_DIR/azure/templates/download-release.sh - grep -v "^#" $SCRIPT_DIR/azure/templates/salt-master.sh - ) > ${KUBE_TEMP}/master-start.sh - - echo "Starting VMs" - - if [ ! -f $AZ_SSH_KEY ]; then - ssh-keygen -f $AZ_SSH_KEY -N '' - fi - - if [ ! 
-f $AZ_SSH_CERT ]; then - openssl req -new -x509 -days 1095 -key $AZ_SSH_KEY -out $AZ_SSH_CERT \ - -subj "/CN=azure-ssh-key" - fi - - if [ -z "$(azure network vnet show $AZ_VNET 2>/dev/null | grep data)" ]; then - #azure network vnet create with $AZ_SUBNET - #FIXME not working - echo error create vnet $AZ_VNET with subnet $AZ_SUBNET - exit 1 - fi - - azure vm create \ - -w $AZ_VNET \ - -n $MASTER_NAME \ - -l "$AZ_LOCATION" \ - -t $AZ_SSH_CERT \ - -e 22000 -P \ - -d ${KUBE_TEMP}/master-start.sh \ - -b $AZ_SUBNET \ - $AZ_CS $AZ_IMAGE $USER - - ssh_ports=($(eval echo "2200{1..$NUM_MINIONS}")) - - for (( i=0; i<${#MINION_NAMES[@]}; i++)); do - ( - echo "#!/bin/bash" - echo "MASTER_NAME=${MASTER_NAME}" - echo "MINION_IP_RANGE=${MINION_IP_RANGES[$i]}" - echo "CA_CRT=\"$(cat ${KUBE_TEMP}/ca.crt)\"" - echo "CLIENT_CRT=\"$(cat ${KUBE_TEMP}/${MINION_NAMES[$i]}.crt)\"" - echo "CLIENT_KEY=\"$(cat ${KUBE_TEMP}/${MINION_NAMES[$i]}.key)\"" - grep -v "^#" $SCRIPT_DIR/azure/templates/salt-minion.sh - ) > ${KUBE_TEMP}/minion-start-${i}.sh - - azure vm create \ - -c -w $AZ_VNET \ - -n ${MINION_NAMES[$i]} \ - -l "$AZ_LOCATION" \ - -t $AZ_SSH_CERT \ - -e ${ssh_ports[$i]} -P \ - -d ${KUBE_TEMP}/minion-start-${i}.sh \ - -b $AZ_SUBNET \ - $AZ_CS $AZ_IMAGE $USER - done - - azure vm endpoint create $MASTER_NAME 443 - - echo "Waiting for cluster initialization." - echo - echo " This will continually check to see if the API for kubernetes is reachable." - echo " This might loop forever if there was some uncaught error during start" - echo " up." - echo - - until $(curl --insecure --user ${user}:${passwd} --max-time 5 \ - --fail --output /dev/null --silent https://$AZ_CS.cloudapp.net/api/v1beta1/pods); do - printf "." - sleep 2 - done - - # Basic sanity checking - for (( i=0; i<${#MINION_NAMES[@]}; i++)); do - # Make sure docker is installed - ssh -i $AZ_SSH_KEY -p ${ssh_ports[$i]} $AZ_CS.cloudapp.net which docker > /dev/null - if [ "$?" != "0" ]; then - echo "Docker failed to install on ${MINION_NAMES[$i]}. Your cluster is unlikely to work correctly." - echo "Please run ./cluster/kube-down.sh and re-create the cluster. (sorry!)" - exit 1 - fi - - # Make sure the kubelet is running - ssh -i $AZ_SSH_KEY -p ${ssh_ports[$i]} $AZ_CS.cloudapp.net /etc/init.d/kubelet status - if [ "$?" != "0" ]; then - echo "Kubelet failed to install on ${MINION_NAMES[$i]}. Your cluster is unlikely to work correctly." - echo "Please run ./cluster/kube-down.sh and re-create the cluster. (sorry!)" - exit 1 - fi - done - - echo - echo "Kubernetes cluster is running. The master is running at:" - echo - echo " https://$AZ_CS.cloudapp.net" - echo - echo "The user name and password to use is located in ~/.kubernetes_auth." - echo - echo "Security note: The server above uses a self signed certificate. This is" - echo " subject to \"Man in the middle\" type attacks." - echo -} - -# Delete a kubernetes cluster -function kube-down { - echo "Bringing down cluster" - set +e - azure vm delete $MASTER_NAME -b -q - for (( i=0; i<${#MINION_NAMES[@]}; i++)); do - azure vm delete ${MINION_NAMES[$i]} -b -q - done -} - -# # Update a kubernetes cluster with latest source -# function kube-push { - -# # Find the release to use. Generally it will be passed when doing a 'prod' -# # install and will default to the release/config.sh version when doing a -# # developer up. 
-# find-release $1 - -# # Detect the project into $PROJECT -# detect-master - -# ( -# echo MASTER_RELEASE_TAR=$RELEASE_NORMALIZED/master-release.tgz -# grep -v "^#" $(dirname $0)/templates/download-release.sh -# echo "echo Executing configuration" -# echo "sudo salt '*' mine.update" -# echo "sudo salt --force-color '*' state.highstate" -# ) | gcutil ssh --project ${PROJECT} --zone ${ZONE} $KUBE_MASTER bash - -# get-password - -# echo "Kubernetes cluster is updated. The master is running at:" -# echo -# echo " https://${KUBE_MASTER_IP}" -# echo -# echo "The user name and password to use is located in ~/.kubernetes_auth." -# echo -# echo "Security note: The server above uses a self signed certificate. This is" -# echo " subject to \"Man in the middle\" type attacks." -# echo -# } - -# # Execute prior to running tests to build a release if required for env -# function test-build-release { -# # Build source -# ${KUBE_REPO_ROOT}/hack/build-go.sh -# # Make a release -# $(dirname $0)/../release/release.sh -# } - -# # Execute prior to running tests to initialize required structure -# function test-setup { - -# # Detect the project into $PROJECT if it isn't set -# # gce specific -# detect-project - -# if [[ ${ALREADY_UP} -ne 1 ]]; then -# # Open up port 80 & 8080 so common containers on minions can be reached -# gcutil addfirewall \ -# --norespect_terminal_width \ -# --project ${PROJECT} \ -# --target_tags ${MINION_TAG} \ -# --allowed tcp:80,tcp:8080 \ -# --network ${NETWORK} \ -# ${MINION_TAG}-${INSTANCE_PREFIX}-http-alt -# fi - -# } - -# # Execute after running tests to perform any required clean-up -# function test-teardown { -# echo "Shutting down test cluster in background." -# gcutil deletefirewall \ -# --project ${PROJECT} \ -# --norespect_terminal_width \ -# --force \ -# ${MINION_TAG}-${INSTANCE_PREFIX}-http-alt || true > /dev/null -# $(dirname $0)/../cluster/kube-down.sh > /dev/null -# } diff --git a/icebox/release/azure/config.sh b/icebox/release/azure/config.sh deleted file mode 100644 index 19738bd82fd..00000000000 --- a/icebox/release/azure/config.sh +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -INSTANCE_PREFIX=kubenertes -AZ_LOCATION='West US' -TAG=testing - -if [ -z "$(which azure)" ]; then - echo "Couldn't find azure in PATH" - echo " please install with 'npm install azure-cli'" - exit 1 -fi - -if [ -z "$(azure account list | grep true)" ]; then - echo "Default azure account not set" - echo " please set with 'azure account set'" - exit 1 -fi - -account=$(azure account list | grep true | awk '{ print $2 }') -if which md5 > /dev/null 2>&1; then - AZ_HSH=$(md5 -q -s $account) -else - AZ_HSH=$(echo -n "$account" | md5sum) -fi -AZ_HSH=${AZ_HSH:0:7} -AZ_STG=kube$AZ_HSH -CONTAINER=kube-$TAG -FULL_URL="https://${AZ_STG}.blob.core.windows.net/$CONTAINER/master-release.tgz" diff --git a/icebox/release/azure/release.sh b/icebox/release/azure/release.sh deleted file mode 100755 index c6bc000112b..00000000000 --- a/icebox/release/azure/release.sh +++ /dev/null @@ -1,63 +0,0 @@ -#!/bin/bash - -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This script will build and release Kubernetes. - -set -eu -set -o pipefail -IFS=$'\n\t' -SCRIPT_DIR=$(CDPATH="" cd $(dirname $0); pwd) - -function json_val () { - python -c 'import json,sys;obj=json.load(sys.stdin);print obj'$1''; -} - -source $SCRIPT_DIR/config.sh - -$SCRIPT_DIR/../build-release.sh $INSTANCE_PREFIX - -if [ -z "$(azure storage account show $AZ_STG 2>/dev/null | \ - grep data)" ]; then - azure storage account create -l "$AZ_LOCATION" $AZ_STG -fi - -stg_key=$(azure storage account keys list $AZ_STG --json | \ - json_val '["primaryKey"]') - -if [ -z "$(azure storage container show -a $AZ_STG -k "$stg_key" \ - $CONTAINER 2>/dev/null | grep data)" ]; then - azure storage container create \ - -a $AZ_STG \ - -k "$stg_key" \ - -p Blob \ - $CONTAINER -fi - -if [ -n "$(azure storage blob show -a $AZ_STG -k "$stg_key" \ - $CONTAINER master-release.tgz 2>/dev/null | grep data)" ]; then - azure storage blob delete \ - -a $AZ_STG \ - -k "$stg_key" \ - $CONTAINER \ - master-release.tgz -fi - -azure storage blob upload \ - -a $AZ_STG \ - -k "$stg_key" \ - $SCRIPT_DIR/../../_output/release/master-release.tgz \ - $CONTAINER \ - master-release.tgz From 84f4f001bfb63ba28e232a256aa220c3570ab36c Mon Sep 17 00:00:00 2001 From: Jeff Mendoza Date: Tue, 18 Nov 2014 14:15:51 -0800 Subject: [PATCH 5/9] Update README.md Update Azure. --- README.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/README.md b/README.md index c48b43758b3..ac0591d260d 100644 --- a/README.md +++ b/README.md @@ -28,8 +28,6 @@ While the concepts and architecture in Kubernetes represent years of experience * [CloudStack](docs/getting-started-guides/cloudstack.md) * [Rackspace](docs/getting-started-guides/rackspace.md) * [vSphere](docs/getting-started-guides/vsphere.md) - -* The following clouds are currently broken at Kubernetes head. 
Please sync your client to `v0.3` (`git checkout v0.3`) to use these: * [Microsoft Azure](docs/getting-started-guides/azure.md) * [Kubernetes 101](examples/walkthrough) From cea70d51e7ece9de401e263bc314781896336725 Mon Sep 17 00:00:00 2001 From: Jeff Mendoza Date: Tue, 18 Nov 2014 14:23:03 -0800 Subject: [PATCH 6/9] Bring Azure guide up to date. --- docs/getting-started-guides/azure.md | 31 +++++++++++----------------- 1 file changed, 12 insertions(+), 19 deletions(-) diff --git a/docs/getting-started-guides/azure.md b/docs/getting-started-guides/azure.md index b46b433a02e..5bfb55d29e1 100644 --- a/docs/getting-started-guides/azure.md +++ b/docs/getting-started-guides/azure.md @@ -1,30 +1,24 @@ -# WARNING -These instructions are broken at git HEAD. Please either: -* Sync back to `v0.3` with `git checkout v0.3` -* Download a [snapshot of `v0.3`](https://github.com/GoogleCloudPlatform/kubernetes/archive/v0.3.tar.gz) - ## Getting started on Microsoft Azure -### Prerequisites +### Azure Prerequisites 1. You need an Azure account. Visit http://azure.microsoft.com/ to get started. 2. Install and configure the Azure cross-platform command-line interface. http://azure.microsoft.com/en-us/documentation/articles/xplat-cli/ 3. Make sure you have a default account set in the Azure cli, using `azure account set` -4. You must have Go (version 1.2 or later) installed: [www.golang.org](http://www.golang.org). -5. Get the Kubernetes source: - git clone https://github.com/GoogleCloudPlatform/kubernetes.git +### Prerequisites for your workstation + +1. Be running a Linux or Mac OS X. +5. Get or build a [binary release](binary_release.md) +4. If you want to build your own release, you need to have [Docker +installed](https://docs.docker.com/installation/). On Mac OS X you can use +boot2docker. ### Setup The cluster setup scripts can setup Kubernetes for multiple targets. First modify `cluster/kube-env.sh` to specify azure: KUBERNETES_PROVIDER="azure" -Next build Kubernetes, package the release, and upload to Azure Storage: - - cd kubernetes - release/azure/release.sh - Next, specify an existing virtual network in `cluster/azure/config-defualt.sh`: AZ_VNET= @@ -35,16 +29,15 @@ You can then use the `cluster/kube-*.sh` scripts to manage your azure cluster, s cluster/kube-up.sh +The script above will start (by default) a single master VM along with 4 worker VMs. You +can tweak some of these parameters by editing `cluster/azure/config-default.sh`. + ### Running a container (simple version) -Once you have your instances up and running, the `hack/build-go.sh` script sets up -your Go workspace and builds the Go components. - -The `cluster/kubecfg.sh` script spins up two containers, running [Nginx](http://nginx.org/en/) and with port 80 mapped to 8080: +The `cluster/kubecfg.sh` command below spins up two containers, running [Nginx](http://nginx.org/en/) and with port 80 mapped to 8080: ``` cd kubernetes -hack/build-go.sh cluster/kubecfg.sh -p 8080:80 run dockerfile/nginx 2 myNginx ``` From 8b0f263c00caba5c5cbb2d0784fbeb80b890e33b Mon Sep 17 00:00:00 2001 From: Jeff Mendoza Date: Tue, 18 Nov 2014 22:37:06 -0800 Subject: [PATCH 7/9] Rearrange cluster sanity checks for Azure. 
--- cluster/azure/util.sh | 54 +++++++++++++++++++++---------------------- 1 file changed, 26 insertions(+), 28 deletions(-) diff --git a/cluster/azure/util.sh b/cluster/azure/util.sh index 6830bda1494..d163ea2f425 100644 --- a/cluster/azure/util.sh +++ b/cluster/azure/util.sh @@ -426,40 +426,13 @@ function kube-up { printf "\n" echo "Kubernetes cluster created." - echo "Sanity checking cluster..." - - # Wait for salt on the minions - sleep 30 - - # Basic sanity checking - for (( i=0; i<${#MINION_NAMES[@]}; i++)); do - # Make sure docker is installed - echo "--> Making sure docker is installed on ${MINION_NAMES[$i]}." - ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} \ - $AZ_CS.cloudapp.net which docker > /dev/null || { - echo "Docker failed to install on ${MINION_NAMES[$i]}. Your cluster is unlikely" >&2 - echo "to work correctly. Please run ./cluster/kube-down.sh and re-create the" >&2 - echo "cluster. (sorry!)" >&2 - exit 1 - } - done - - echo - echo "Kubernetes cluster is running. The master is running at:" - echo - echo " https://${KUBE_MASTER_IP}" - echo - echo "The user name and password to use is located in ~/.kubernetes_auth." - echo - - echo "--> nginx auth" local kube_cert=".kubecfg.crt" local kube_key=".kubecfg.key" local ca_cert=".kubernetes.ca.crt" # TODO: generate ADMIN (and KUBELET) tokens and put those in the master's -# config file. Distribute the same way the htpasswd is done. + # config file. Distribute the same way the htpasswd is done. (umask 077 ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \ sudo cat /srv/kubernetes/kubecfg.crt >"${HOME}/${kube_cert}" 2>/dev/null @@ -481,6 +454,31 @@ EOF chmod 0600 ~/.kubernetes_auth "${HOME}/${kube_cert}" \ "${HOME}/${kube_key}" "${HOME}/${ca_cert}" ) + + # Wait for salt on the minions + sleep 30 + + echo "Sanity checking cluster..." + # Basic sanity checking + for (( i=0; i<${#MINION_NAMES[@]}; i++)); do + # Make sure docker is installed + echo "--> Making sure docker is installed on ${MINION_NAMES[$i]}." + ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} \ + $AZ_CS.cloudapp.net which docker > /dev/null || { + echo "Docker failed to install on ${MINION_NAMES[$i]}. Your cluster is unlikely" >&2 + echo "to work correctly. Please run ./cluster/kube-down.sh and re-create the" >&2 + echo "cluster. (sorry!)" >&2 + exit 1 + } + done + + echo + echo "Kubernetes cluster is running. The master is running at:" + echo + echo " https://${KUBE_MASTER_IP}" + echo + echo "The user name and password to use is located in ~/.kubernetes_auth." + echo } # Delete a kubernetes cluster From 5d62ac1a56fb7a366e8932be95ed707434727403 Mon Sep 17 00:00:00 2001 From: Jeff Mendoza Date: Wed, 19 Nov 2014 10:25:27 -0800 Subject: [PATCH 8/9] Fix Azure util.sh boilerplate. --- cluster/azure/util.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/cluster/azure/util.sh b/cluster/azure/util.sh index d163ea2f425..8664f4d58df 100644 --- a/cluster/azure/util.sh +++ b/cluster/azure/util.sh @@ -1,4 +1,5 @@ #!/bin/bash + # Copyright 2014 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); From d0586ed066a4e7cfeb5f4dc55a3a7bf9b684f64c Mon Sep 17 00:00:00 2001 From: Jeff Mendoza Date: Thu, 20 Nov 2014 16:40:35 -0800 Subject: [PATCH 9/9] Azure review fixes. 
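For reviewers, a brief usage sketch of the reworked azure_call wrapper. The invocation shown is the one kube-down already uses; the retry behavior described in the comments is taken from the diff below.

```bash
# Call it exactly like the azure CLI; arguments are kept in an array and
# expanded as "${params[@]}", so values containing spaces ("West US") survive intact.
azure_call vm delete "${MASTER_NAME}" -b -q
# The wrapper retries up to 10 times, but only while stderr contains
# "getaddrinfo ENOTFOUND"; any other failure prints stderr and returns
# the CLI's exit code to the caller.
```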
--- cluster/azure/templates/common.sh | 3 +- cluster/azure/templates/salt-master.sh | 1 - cluster/azure/templates/salt-minion.sh | 11 +---- cluster/azure/util.sh | 64 ++++++++++++++------------ docs/getting-started-guides/azure.md | 4 +- 5 files changed, 38 insertions(+), 45 deletions(-) diff --git a/cluster/azure/templates/common.sh b/cluster/azure/templates/common.sh index b5d3a025803..f5b1847dd96 100644 --- a/cluster/azure/templates/common.sh +++ b/cluster/azure/templates/common.sh @@ -53,7 +53,6 @@ install-salt() { dpkg -i "${tar}" done -# This will install any of the unmet dependencies from above. + # This will install any of the unmet dependencies from above. apt-get install -f -y - } diff --git a/cluster/azure/templates/salt-master.sh b/cluster/azure/templates/salt-master.sh index 3f80592837e..8dc169c826c 100644 --- a/cluster/azure/templates/salt-master.sh +++ b/cluster/azure/templates/salt-master.sh @@ -66,4 +66,3 @@ install-salt --master echo "Sleeping 180" sleep 180 salt-call state.highstate || true - diff --git a/cluster/azure/templates/salt-minion.sh b/cluster/azure/templates/salt-minion.sh index 6e8c5a4d36e..e22509cefec 100644 --- a/cluster/azure/templates/salt-minion.sh +++ b/cluster/azure/templates/salt-minion.sh @@ -31,17 +31,8 @@ log_level: debug log_level_logfile: debug EOF -# Our minions will have a pool role to distinguish them from the master. -#cat </etc/salt/minion.d/grains.conf -#grains: -# roles: -# - kubernetes-pool -# cbr-cidr: $MINION_IP_RANGE -# cloud: gce -#EOF - hostnamef=$(hostname -f) -sudo apt-get install ipcalc +apt-get install -y ipcalc netmask=$(ipcalc $MINION_IP_RANGE | grep Netmask | awk '{ print $2 }') network=$(ipcalc $MINION_IP_RANGE | grep Address | awk '{ print $2 }') cbrstring="$network $netmask" diff --git a/cluster/azure/util.sh b/cluster/azure/util.sh index 8664f4d58df..5a5c228c82a 100644 --- a/cluster/azure/util.sh +++ b/cluster/azure/util.sh @@ -23,22 +23,26 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. source "${KUBE_ROOT}/cluster/azure/${KUBE_CONFIG_FILE-"config-default.sh"}" function azure_call { - params="" - for param in "$@" - do - params="${params} \"${param}\"" + local -a params=() + local param + # the '... in "$@"' is implicit on a for, so doesn't need to be stated. + for param; do + params+=("${param}") done - rv=1 - stderr="getaddrinfo ENOTFOUND" - while [ $rv -ne 0 -a -n "$(echo $stderr | grep "getaddrinfo ENOTFOUND")" ]; do - set +e - { stderr=$(bash -c "azure $params" 2>&1 1>&3-) ;} 3>&1 - rv=$? - set -e - done - if [ $rv -ne 0 ]; then - echo $stderr 1>&2 - exit + local rc=0 + local stderr + local count=0 + while [[ count -lt 10 ]]; do + stderr=$(azure "${params[@]}" 2>&1 >&3) && break + rc=$? + if [[ "${stderr}" != *"getaddrinfo ENOTFOUND"* ]]; then + break + fi + count=$(($count + 1)) + done 3>&1 + if [[ "${rc}" -ne 0 ]]; then + echo "${stderr}" >&2 + return "${rc}" fi } @@ -48,13 +52,13 @@ function json_val () { # Verify prereqs function verify-prereqs { - if [ -z "$(which azure)" ]; then + if [[ -z "$(which azure)" ]]; then echo "Couldn't find azure in PATH" echo " please install with 'npm install azure-cli'" exit 1 fi - if [ -z "$(azure_call account list | grep true)" ]; then + if [[ -z "$(azure_call account list | grep true)" ]]; then echo "Default azure account not set" echo " please set with 'azure account set'" exit 1 @@ -142,8 +146,8 @@ function upload-server-tars() { echo "==> SALT_TAR_URL: $SALT_TAR_URL" echo "--> Checking storage exsists..." 
- if [ -z "$(azure_call storage account show $AZ_STG 2>/dev/null | \ - grep data)" ]; then + if [[ -z "$(azure_call storage account show $AZ_STG 2>/dev/null | \ + grep data)" ]]; then echo "--> Creating storage..." azure_call storage account create -l "$AZ_LOCATION" $AZ_STG fi @@ -153,8 +157,8 @@ function upload-server-tars() { json_val '["primaryKey"]') echo "--> Checking storage container exsists..." - if [ -z "$(azure_call storage container show -a $AZ_STG -k "$stg_key" \ - $CONTAINER 2>/dev/null | grep data)" ]; then + if [[ -z "$(azure_call storage container show -a $AZ_STG -k "$stg_key" \ + $CONTAINER 2>/dev/null | grep data)" ]]; then echo "--> Creating storage container..." azure_call storage container create \ -a $AZ_STG \ @@ -164,8 +168,8 @@ function upload-server-tars() { fi echo "--> Checking server binary exists in the container..." - if [ -n "$(azure_call storage blob show -a $AZ_STG -k "$stg_key" \ - $CONTAINER $server_binary_url 2>/dev/null | grep data)" ]; then + if [[ -n "$(azure_call storage blob show -a $AZ_STG -k "$stg_key" \ + $CONTAINER $server_binary_url 2>/dev/null | grep data)" ]]; then echo "--> Deleting server binary in the container..." azure_call storage blob delete \ -a $AZ_STG \ @@ -183,8 +187,8 @@ function upload-server-tars() { $server_binary_url echo "--> Checking salt data exists in the container..." - if [ -n "$(azure_call storage blob show -a $AZ_STG -k "$stg_key" \ - $CONTAINER $salt_url 2>/dev/null | grep data)" ]; then + if [[ -n "$(azure_call storage blob show -a $AZ_STG -k "$stg_key" \ + $CONTAINER $salt_url 2>/dev/null | grep data)" ]]; then echo "--> Deleting salt data in the container..." azure_call storage blob delete \ -a $AZ_STG \ @@ -210,7 +214,7 @@ function upload-server-tars() { # Vars set: # function detect-minions () { - if [ -z "$AZ_CS" ]; then + if [[ -z "$AZ_CS" ]]; then verify-prereqs fi ssh_ports=($(eval echo "2200{1..$NUM_MINIONS}")) @@ -228,7 +232,7 @@ function detect-minions () { # KUBE_MASTER # KUBE_MASTER_IP function detect-master () { - if [ -z "$AZ_CS" ]; then + if [[ -z "$AZ_CS" ]]; then verify-prereqs fi @@ -350,16 +354,16 @@ function kube-up { grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/salt-master.sh" ) > "${KUBE_TEMP}/master-start.sh" - if [ ! -f $AZ_SSH_KEY ]; then + if [[ ! -f $AZ_SSH_KEY ]]; then ssh-keygen -f $AZ_SSH_KEY -N '' fi - if [ ! -f $AZ_SSH_CERT ]; then + if [[ ! -f $AZ_SSH_CERT ]]; then openssl req -new -x509 -days 1095 -key $AZ_SSH_KEY -out $AZ_SSH_CERT \ -subj "/CN=azure-ssh-key" fi - if [ -z "$(azure_call network vnet show $AZ_VNET 2>/dev/null | grep data)" ]; then + if [[ -z "$(azure_call network vnet show $AZ_VNET 2>/dev/null | grep data)" ]]; then #azure network vnet create with $AZ_SUBNET #FIXME not working echo error create vnet $AZ_VNET with subnet $AZ_SUBNET diff --git a/docs/getting-started-guides/azure.md b/docs/getting-started-guides/azure.md index 5bfb55d29e1..7cb55400843 100644 --- a/docs/getting-started-guides/azure.md +++ b/docs/getting-started-guides/azure.md @@ -9,8 +9,8 @@ ### Prerequisites for your workstation 1. Be running a Linux or Mac OS X. -5. Get or build a [binary release](binary_release.md) -4. If you want to build your own release, you need to have [Docker +2. Get or build a [binary release](binary_release.md) +3. If you want to build your own release, you need to have [Docker installed](https://docs.docker.com/installation/). On Mac OS X you can use boot2docker.