Updates and formatting to azure scripts.

This commit is contained in:
Jeff Mendoza 2014-11-18 14:10:18 -08:00
parent 6ea7191592
commit 1ee5ab5e5e
3 changed files with 365 additions and 365 deletions

View File

@ -14,19 +14,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
INSTANCE_PREFIX=kubenertes
INSTANCE_PREFIX=kubernetes
AZ_LOCATION='West US'
TAG=testing
AZ_CS_PREFIX=kube
AZ_VNET=shchTest
AZ_VNET=MyVnet
AZ_SUBNET=Subnet-1
AZ_IMAGE=b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04-LTS-amd64-server-20140618.1-en-us-30GB
AZ_IMAGE=b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04_1-LTS-amd64-server-20140927-en-us-30GB
AZ_CS="" # is set in azure/util.sh verify-prereqs
AZ_SSH_KEY=$HOME/.ssh/azure_rsa
AZ_SSH_CERT=$HOME/.ssh/azure.pem
NUM_MINIONS=2
NUM_MINIONS=4
MASTER_NAME="${INSTANCE_PREFIX}-master"
MASTER_TAG="${INSTANCE_PREFIX}-master"
@ -35,3 +35,4 @@ MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}}))
MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24"))
MINION_SCOPES=""
PORTAL_NET="10.250.0.0/16"

View File

@ -21,6 +21,7 @@
mkdir -p /srv/salt-overlay/pillar
cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
node_instance_prefix: $NODE_INSTANCE_PREFIX
portal_net: $PORTAL_NET
EOF
mkdir -p /srv/salt-overlay/salt/nginx

View File

@ -21,6 +21,26 @@
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/azure/${KUBE_CONFIG_FILE-"config-default.sh"}"
# Invoke the azure CLI, retrying as long as the failure looks like a
# transient DNS error ("getaddrinfo ENOTFOUND").
# Arguments: passed through verbatim to the azure CLI (previously they were
#   re-quoted into a string and run via `bash -c`, which broke on arguments
#   containing double quotes; "$@" preserves each argument exactly).
# Outputs: the CLI's stdout flows through to the caller; its stderr is
#   captured and only echoed (to our stderr) on a non-retryable failure.
# Exits: the whole script, with the CLI's status, on non-retryable failure.
function azure_call {
  local rv=1
  # Seed stderr with the retryable marker so the loop body runs at least once.
  local stderr="getaddrinfo ENOTFOUND"
  while [ $rv -ne 0 ] && [[ "$stderr" == *"getaddrinfo ENOTFOUND"* ]]; do
    set +e
    # Capture stderr while letting stdout pass through: fd 3 is a copy of our
    # stdout, and the CLI's stdout is moved back onto it inside the $( ).
    { stderr=$(azure "$@" 2>&1 1>&3-) ;} 3>&1
    rv=$?
    set -e
  done
  if [ $rv -ne 0 ]; then
    echo "$stderr" 1>&2
    # Bug fix: a bare 'exit' here returned the status of the echo above (0),
    # so callers saw success even though the CLI failed.
    exit $rv
  fi
}
# Read JSON from stdin and print the value addressed by $1, where $1 is a
# Python subscript expression such as '["primaryKey"]' or '["a"]["b"]'.
function json_val () {
  # print(...) is valid under both Python 2 and Python 3; the original
  # 'print obj…' statement form only worked under Python 2.
  python -c 'import json,sys;obj=json.load(sys.stdin);print(obj'$1')';
}
@ -33,17 +53,17 @@ function verify-prereqs {
exit 1
fi
if [ -z "$(azure account list | grep true)" ]; then
if [ -z "$(azure_call account list | grep true)" ]; then
echo "Default azure account not set"
echo " please set with 'azure account set'"
exit 1
fi
account=$(azure account list | grep true | awk '{ print $2 }')
account=$(azure_call account list | grep true)
if which md5 > /dev/null 2>&1; then
AZ_HSH=$(md5 -q -s "$AZ_SUBNET@$account")
AZ_HSH=$(md5 -q -s "$account")
else
AZ_HSH=$(echo -n "$AZ_SUBNET@$account" | md5sum)
AZ_HSH=$(echo -n "$account" | md5sum)
fi
AZ_HSH=${AZ_HSH:0:7}
@ -120,23 +140,22 @@ function upload-server-tars() {
echo "==> SERVER_BINARY_TAR_URL: $SERVER_BINARY_TAR_URL"
echo "==> SALT_TAR_URL: $SALT_TAR_URL"
echo "--> Checking storage exists..."
if [ -z "$(azure storage account show $AZ_STG 2>/dev/null | \
if [ -z "$(azure_call storage account show $AZ_STG 2>/dev/null | \
grep data)" ]; then
echo "--> Creating storage..."
azure storage account create -l "$AZ_LOCATION" $AZ_STG
azure_call storage account create -l "$AZ_LOCATION" $AZ_STG
fi
echo "--> Getting storage key..."
stg_key=$(azure storage account keys list $AZ_STG --json | \
stg_key=$(azure_call storage account keys list $AZ_STG --json | \
json_val '["primaryKey"]')
echo "--> Checking storage container exists..."
if [ -z "$(azure storage container show -a $AZ_STG -k "$stg_key" \
if [ -z "$(azure_call storage container show -a $AZ_STG -k "$stg_key" \
$CONTAINER 2>/dev/null | grep data)" ]; then
echo "--> Creating storage container..."
azure storage container create \
azure_call storage container create \
-a $AZ_STG \
-k "$stg_key" \
-p Blob \
@ -144,10 +163,10 @@ function upload-server-tars() {
fi
echo "--> Checking server binary exists in the container..."
if [ -n "$(azure storage blob show -a $AZ_STG -k "$stg_key" \
if [ -n "$(azure_call storage blob show -a $AZ_STG -k "$stg_key" \
$CONTAINER $server_binary_url 2>/dev/null | grep data)" ]; then
echo "--> Deleting server binary in the container..."
azure storage blob delete \
azure_call storage blob delete \
-a $AZ_STG \
-k "$stg_key" \
$CONTAINER \
@ -155,7 +174,7 @@ function upload-server-tars() {
fi
echo "--> Uploading server binary to the container..."
azure storage blob upload \
azure_call storage blob upload \
-a $AZ_STG \
-k "$stg_key" \
$SERVER_BINARY_TAR \
@ -163,10 +182,10 @@ function upload-server-tars() {
$server_binary_url
echo "--> Checking salt data exists in the container..."
if [ -n "$(azure storage blob show -a $AZ_STG -k "$stg_key" \
if [ -n "$(azure_call storage blob show -a $AZ_STG -k "$stg_key" \
$CONTAINER $salt_url 2>/dev/null | grep data)" ]; then
echo "--> Deleting salt data in the container..."
azure storage blob delete \
azure_call storage blob delete \
-a $AZ_STG \
-k "$stg_key" \
$CONTAINER \
@ -174,7 +193,7 @@ function upload-server-tars() {
fi
echo "--> Uploading salt data to the container..."
azure storage blob upload \
azure_call storage blob upload \
-a $AZ_STG \
-k "$stg_key" \
$SALT_TAR \
@ -197,7 +216,6 @@ function detect-minions () {
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
MINION_NAMES[$i]=$(ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} $AZ_CS.cloudapp.net hostname -f)
done
}
# Detect the IP for the master
@ -216,7 +234,6 @@ function detect-master () {
KUBE_MASTER=${MASTER_NAME}
KUBE_MASTER_IP="${AZ_CS}.cloudapp.net"
echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)"
}
# Ensure that we have a password created for validating to the master. Will
@ -278,7 +295,6 @@ function kube-up {
local htpasswd
htpasswd=$(cat "${KUBE_TEMP}/htpasswd")
# Generate openvpn certs
echo "--> Generating openvpn certs"
echo 01 > ${KUBE_TEMP}/ca.srl
@ -326,16 +342,13 @@ function kube-up {
echo "readonly SERVER_BINARY_TAR_URL='${SERVER_BINARY_TAR_URL}'"
echo "readonly SALT_TAR_URL='${SALT_TAR_URL}'"
echo "readonly MASTER_HTPASSWD='${htpasswd}'"
echo "readonly PORTAL_NET='${PORTAL_NET}'"
grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/common.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/create-dynamic-salt-files.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/download-release.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/salt-master.sh"
) > "${KUBE_TEMP}/master-start.sh"
cat "${KUBE_TEMP}/master-start.sh" > ~/kubernetes/start-files/master-start.sh
if [ ! -f $AZ_SSH_KEY ]; then
ssh-keygen -f $AZ_SSH_KEY -N ''
fi
@ -345,7 +358,7 @@ function kube-up {
-subj "/CN=azure-ssh-key"
fi
if [ -z "$(azure network vnet show $AZ_VNET 2>/dev/null | grep data)" ]; then
if [ -z "$(azure_call network vnet show $AZ_VNET 2>/dev/null | grep data)" ]; then
#azure network vnet create with $AZ_SUBNET
#FIXME not working
echo error create vnet $AZ_VNET with subnet $AZ_SUBNET
@ -353,7 +366,7 @@ function kube-up {
fi
echo "--> Starting VM"
azure vm create \
azure_call vm create \
-w $AZ_VNET \
-n $MASTER_NAME \
-l "$AZ_LOCATION" \
@ -365,7 +378,6 @@ function kube-up {
ssh_ports=($(eval echo "2200{1..$NUM_MINIONS}"))
#Build up start up script for minions
echo "--> Building up start up script for minions"
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
@ -380,10 +392,8 @@ function kube-up {
grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/salt-minion.sh"
) > "${KUBE_TEMP}/minion-start-${i}.sh"
cat "${KUBE_TEMP}/minion-start-${i}.sh" > ~/kubernetes/start-files/minion-start-${i}.sh
echo "--> Starting VM"
azure vm create \
azure_call vm create \
-c -w $AZ_VNET \
-n ${MINION_NAMES[$i]} \
-l "$AZ_LOCATION" \
@ -395,22 +405,10 @@ function kube-up {
done
echo "--> Creating endpoint"
azure vm endpoint create $MASTER_NAME 443
local fail=0
local job
for job in $(jobs -p); do
wait "${job}" || fail=$((fail + 1))
done
if (( $fail != 0 )); then
echo "${fail} commands failed. Exiting." >&2
exit 2
fi
azure_call vm endpoint create $MASTER_NAME 443
detect-master > /dev/null
echo "==> KUBE_USER:KUBE_PASSWORD: ${KUBE_USER}:${KUBE_PASSWORD}"
echo "==> KUBE_MASTER_IP: ${KUBE_MASTER_IP}"
echo "Waiting for cluster initialization."
@ -420,7 +418,6 @@ function kube-up {
echo " up."
echo
until curl --insecure --user "${KUBE_USER}:${KUBE_PASSWORD}" --max-time 5 \
--fail --output /dev/null --silent "https://${KUBE_MASTER_IP}/api/v1beta1/pods"; do
printf "."
@ -431,13 +428,15 @@ function kube-up {
echo "Kubernetes cluster created."
echo "Sanity checking cluster..."
sleep 5
# Wait for salt on the minions
sleep 30
# Basic sanity checking
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
# Make sure docker is installed
echo "--> Making sure docker is installed on ${MINION_NAMES[$i]}."
ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} $AZ_CS.cloudapp.net which docker > /dev/null || {
ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} \
$AZ_CS.cloudapp.net which docker > /dev/null || {
echo "Docker failed to install on ${MINION_NAMES[$i]}. Your cluster is unlikely" >&2
echo "to work correctly. Please run ./cluster/kube-down.sh and re-create the" >&2
echo "cluster. (sorry!)" >&2
@ -462,9 +461,12 @@ function kube-up {
# TODO: generate ADMIN (and KUBELET) tokens and put those in the master's
# config file. Distribute the same way the htpasswd is done.
(umask 077
ssh -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net sudo cat /srv/kubernetes/kubecfg.crt >"${HOME}/${kube_cert}" 2>/dev/null
ssh -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net sudo cat /srv/kubernetes/kubecfg.key >"${HOME}/${kube_key}" 2>/dev/null
ssh -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net sudo cat /srv/kubernetes/ca.crt >"${HOME}/${ca_cert}" 2>/dev/null
ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
sudo cat /srv/kubernetes/kubecfg.crt >"${HOME}/${kube_cert}" 2>/dev/null
ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
sudo cat /srv/kubernetes/kubecfg.key >"${HOME}/${kube_key}" 2>/dev/null
ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
sudo cat /srv/kubernetes/ca.crt >"${HOME}/${ca_cert}" 2>/dev/null
cat << EOF > ~/.kubernetes_auth
{
@ -486,13 +488,12 @@ function kube-down {
echo "Bringing down cluster"
set +e
azure vm delete $MASTER_NAME -b -q
azure_call vm delete $MASTER_NAME -b -q
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
azure vm delete ${MINION_NAMES[$i]} -b -q
azure_call vm delete ${MINION_NAMES[$i]} -b -q
done
wait
}
# Update a kubernetes cluster with latest source
@ -561,6 +562,3 @@ function setup-monitoring {
function teardown-monitoring {
echo "not implemented" >/dev/null
}