From e90f98a52c4dd47409fba6edeaa51e929c9ee904 Mon Sep 17 00:00:00 2001
From: Pieter Noordhuis
Date: Fri, 10 Oct 2014 18:52:02 -0700
Subject: [PATCH] Make vSphere scripts work for binary deploys

Fixes #1732.
---
 cluster/saltbase/reactor/highstate-all.sls |   9 +
 cluster/saltbase/salt/base.sls             |   1 +
 cluster/saltbase/salt/nginx/init.sls       |   5 +
 cluster/validate-cluster.sh                |   2 +-
 .../vsphere/config-common.sh               |   2 -
 .../vsphere/config-default.sh              |   7 +-
 .../vsphere/config-test.sh                 |   7 +-
 .../vsphere/templates/hostname.sh          |   0
 .../vsphere/templates/install-release.sh   |  12 +-
 .../vsphere/templates/salt-master.sh       |  15 +-
 .../vsphere/templates/salt-minion.sh       |  14 +-
 cluster/vsphere/util.sh                    | 421 ++++++++++++++++++
 icebox/cluster/vsphere/util.sh             | 306 -------------
 13 files changed, 467 insertions(+), 334 deletions(-)
 create mode 100644 cluster/saltbase/reactor/highstate-all.sls
 rename {icebox/cluster => cluster}/vsphere/config-common.sh (96%)
 rename {icebox/cluster => cluster}/vsphere/config-default.sh (87%)
 rename {icebox/cluster => cluster}/vsphere/config-test.sh (87%)
 rename {icebox/cluster => cluster}/vsphere/templates/hostname.sh (100%)
 rename {icebox/cluster => cluster}/vsphere/templates/install-release.sh (68%)
 rename {icebox/cluster => cluster}/vsphere/templates/salt-master.sh (88%)
 rename {icebox/cluster => cluster}/vsphere/templates/salt-minion.sh (81%)
 create mode 100755 cluster/vsphere/util.sh
 delete mode 100644 icebox/cluster/vsphere/util.sh

diff --git a/cluster/saltbase/reactor/highstate-all.sls b/cluster/saltbase/reactor/highstate-all.sls
new file mode 100644
index 00000000000..d9b1263474d
--- /dev/null
+++ b/cluster/saltbase/reactor/highstate-all.sls
@@ -0,0 +1,9 @@
+# This runs highstate on ALL nodes.
+#
+# Used from the vSphere provider. The IP addresses of the minions are passed to
+# the apiserver as arguments and every minion has static routes to every other
+# minion. This means every node should be refreshed when a node is added.
+#
+highstate_run:
+  cmd.state.highstate:
+    - tgt: '*'
diff --git a/cluster/saltbase/salt/base.sls b/cluster/saltbase/salt/base.sls
index e0418f7acc5..cad04ee75f0 100644
--- a/cluster/saltbase/salt/base.sls
+++ b/cluster/saltbase/salt/base.sls
@@ -1,6 +1,7 @@
 pkg-core:
   pkg.installed:
     - names:
+      - curl
 {% if grains['os_family'] == 'RedHat' %}
       - python
       - git
diff --git a/cluster/saltbase/salt/nginx/init.sls b/cluster/saltbase/salt/nginx/init.sls
index 08879167538..edbad35960a 100644
--- a/cluster/saltbase/salt/nginx/init.sls
+++ b/cluster/saltbase/salt/nginx/init.sls
@@ -17,6 +17,9 @@ nginx:
   {% if grains.cloud == 'vagrant' %}
     {% set cert_ip=grains.fqdn_ip4 %}
   {% endif %}
+  {% if grains.cloud == 'vsphere' %}
+    {% set cert_ip=grains.ip_interfaces.eth0[0] %}
+  {% endif %}
 {% endif %}
 # If there is a pillar defined, override any defaults.
{% if pillar['cert_ip'] is defined %} @@ -34,6 +37,8 @@ nginx: - source: salt://nginx/{{certgen}} {% if cert_ip is defined %} - args: {{cert_ip}} + - require: + - pkg: curl {% endif %} - cwd: / - user: root diff --git a/cluster/validate-cluster.sh b/cluster/validate-cluster.sh index 0c793e0f0a6..82cca633d6d 100755 --- a/cluster/validate-cluster.sh +++ b/cluster/validate-cluster.sh @@ -36,7 +36,7 @@ MINIONS_FILE=/tmp/minions "${KUBE_ROOT}/cluster/kubecfg.sh" -template $'{{range.Items}}{{.Name}}\n{{end}}' list minions > ${MINIONS_FILE} # On vSphere, use minion IPs as their names -if [ "$KUBERNETES_PROVIDER" == "vsphere" ]; then +if [[ "${KUBERNETES_PROVIDER}" == "vsphere" ]]; then for (( i=0; i<${#MINION_NAMES[@]}; i++)); do MINION_NAMES[i]=${KUBE_MINION_IP_ADDRESSES[i]} done diff --git a/icebox/cluster/vsphere/config-common.sh b/cluster/vsphere/config-common.sh similarity index 96% rename from icebox/cluster/vsphere/config-common.sh rename to cluster/vsphere/config-common.sh index 0c918486a46..a2169b92871 100644 --- a/icebox/cluster/vsphere/config-common.sh +++ b/cluster/vsphere/config-common.sh @@ -28,8 +28,6 @@ function public-key { exit 1 } -DISK=./kube/kube.vmdk -GUEST_ID=debian7_64Guest PUBLIC_KEY_FILE=${PUBLIC_KEY_FILE-$(public-key)} SSH_OPTS="-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null" diff --git a/icebox/cluster/vsphere/config-default.sh b/cluster/vsphere/config-default.sh similarity index 87% rename from icebox/cluster/vsphere/config-default.sh rename to cluster/vsphere/config-default.sh index fb5e6e321c2..aacbb2ac066 100755 --- a/icebox/cluster/vsphere/config-default.sh +++ b/cluster/vsphere/config-default.sh @@ -14,10 +14,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -source $(dirname ${BASH_SOURCE})/config-common.sh - NUM_MINIONS=4 +DISK=./kube/kube.vmdk +GUEST_ID=debian7_64Guest + INSTANCE_PREFIX=kubernetes +MASTER_TAG="${INSTANCE_PREFIX}-master" +MINION_TAG="${INSTANCE_PREFIX}-minion" MASTER_NAME="${INSTANCE_PREFIX}-master" MASTER_MEMORY_MB=1024 diff --git a/icebox/cluster/vsphere/config-test.sh b/cluster/vsphere/config-test.sh similarity index 87% rename from icebox/cluster/vsphere/config-test.sh rename to cluster/vsphere/config-test.sh index 3d36bc7cde2..46430466ec0 100755 --- a/icebox/cluster/vsphere/config-test.sh +++ b/cluster/vsphere/config-test.sh @@ -14,10 +14,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -source $(dirname ${BASH_SOURCE})/config-common.sh - NUM_MINIONS=2 +DISK=./kube/kube.vmdk +GUEST_ID=debian7_64Guest + INSTANCE_PREFIX="e2e-test-${USER}" +MASTER_TAG="${INSTANCE_PREFIX}-master" +MINION_TAG="${INSTANCE_PREFIX}-minion" MASTER_NAME="${INSTANCE_PREFIX}-master" MASTER_MEMORY_MB=1024 diff --git a/icebox/cluster/vsphere/templates/hostname.sh b/cluster/vsphere/templates/hostname.sh similarity index 100% rename from icebox/cluster/vsphere/templates/hostname.sh rename to cluster/vsphere/templates/hostname.sh diff --git a/icebox/cluster/vsphere/templates/install-release.sh b/cluster/vsphere/templates/install-release.sh similarity index 68% rename from icebox/cluster/vsphere/templates/install-release.sh rename to cluster/vsphere/templates/install-release.sh index 877f150f13a..5984ec1ff54 100755 --- a/icebox/cluster/vsphere/templates/install-release.sh +++ b/cluster/vsphere/templates/install-release.sh @@ -14,11 +14,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-# Install release
+# This script assumes the environment variables SALT_TAR and SERVER_BINARY_TAR
+# contain the names of the Salt tree and server binary tarballs, which have
+# already been uploaded to the master. It is meant to be pushed to the master and run.
 
-echo "Unpacking release"
-rm -rf master-release || false
-tar xzf master-release.tgz
+echo "Unpacking Salt tree"
+rm -rf kubernetes
+tar xzf "${SALT_TAR}"
 
 echo "Running release install script"
-sudo master-release/src/scripts/master-release-install.sh
+sudo kubernetes/saltbase/install.sh "${SERVER_BINARY_TAR}"
diff --git a/icebox/cluster/vsphere/templates/salt-master.sh b/cluster/vsphere/templates/salt-master.sh
similarity index 88%
rename from icebox/cluster/vsphere/templates/salt-master.sh
rename to cluster/vsphere/templates/salt-master.sh
index 92b526fc3f2..19f1cab7f04 100755
--- a/icebox/cluster/vsphere/templates/salt-master.sh
+++ b/cluster/vsphere/templates/salt-master.sh
@@ -28,6 +28,10 @@ grains:
   cloud: vsphere
 EOF
 
+cat <<EOF >/srv/pillar/cluster-params.sls
+node_instance_prefix: $NODE_INSTANCE_PREFIX
+EOF
+
 # Auto accept all keys from minions that try to join
 mkdir -p /etc/salt/master.d
 cat <<EOF >/etc/salt/master.d/auto-accept.conf
@@ -38,7 +42,7 @@ cat <<EOF >/etc/salt/master.d/reactor.conf
 # React to new minions starting by running highstate on them.
 reactor:
   - 'salt/minion/*/start':
-    - /srv/reactor/start.sls
+    - /srv/reactor/highstate-all.sls
 EOF
 mkdir -p /srv/salt/nginx
 echo $MASTER_HTPASSWD > /srv/salt/nginx/htpasswd
@@ -50,11 +54,8 @@
 # install. See https://github.com/saltstack/salt-bootstrap/issues/270
 #
 # -M installs the master
-if [ ! -x /etc/init.d/salt-master ]; then
-  wget -q -O - https://bootstrap.saltstack.com | sh -s -- -M -X
-else
-  /etc/init.d/salt-master restart
-  /etc/init.d/salt-minion restart
-fi
+set +x
+wget -q -O - https://bootstrap.saltstack.com | sh -s -- -M -X
+set -x
 
 echo $MASTER_HTPASSWD > /srv/salt/nginx/htpasswd
diff --git a/icebox/cluster/vsphere/templates/salt-minion.sh b/cluster/vsphere/templates/salt-minion.sh
similarity index 81%
rename from icebox/cluster/vsphere/templates/salt-minion.sh
rename to cluster/vsphere/templates/salt-minion.sh
index 75a6aab2631..40983c6bfa2 100755
--- a/icebox/cluster/vsphere/templates/salt-minion.sh
+++ b/cluster/vsphere/templates/salt-minion.sh
@@ -18,14 +18,14 @@
 sed -i -e "s/http.us.debian.org/mirrors.kernel.org/" /etc/apt/sources.list
 
 # Resolve hostname of master
-if ! grep -q $MASTER_NAME /etc/hosts; then
-  echo "Adding host entry for $MASTER_NAME"
-  echo "$MASTER_IP $MASTER_NAME" >> /etc/hosts
+if ! grep -q $KUBE_MASTER /etc/hosts; then
+  echo "Adding host entry for $KUBE_MASTER"
+  echo "$KUBE_MASTER_IP $KUBE_MASTER" >> /etc/hosts
 fi
 
 # Prepopulate the name of the Master
 mkdir -p /etc/salt/minion.d
-echo "master: $MASTER_NAME" > /etc/salt/minion.d/master.conf
+echo "master: $KUBE_MASTER" > /etc/salt/minion.d/master.conf
 
 # Turn on debugging for salt-minion
 # echo "DAEMON_ARGS=\"\$DAEMON_ARGS --log-file-level=debug\"" > /etc/default/salt-minion
@@ -48,8 +48,4 @@ EOF
 #
 # We specify -X to avoid a race condition that can cause minion failure to
 # install. See https://github.com/saltstack/salt-bootstrap/issues/270
-if [ ! -x /etc/init.d/salt-minion ]; then
-  wget -q -O - https://bootstrap.saltstack.com | sh -s -- -X
-else
-  /etc/init.d/salt-minion restart
-fi
+wget -q -O - https://bootstrap.saltstack.com | sh -s -- -X
diff --git a/cluster/vsphere/util.sh b/cluster/vsphere/util.sh
new file mode 100755
index 00000000000..0c9163b588f
--- /dev/null
+++ b/cluster/vsphere/util.sh
@@ -0,0 +1,421 @@
+#!/bin/bash
+
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# A library of helper functions and constants for the local config.
+
+# Use the config file specified in $KUBE_CONFIG_FILE, or default to
+# config-default.sh.
+KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
+source "${KUBE_ROOT}/cluster/vsphere/config-common.sh"
+source "${KUBE_ROOT}/cluster/vsphere/${KUBE_CONFIG_FILE-"config-default.sh"}"
+
+# Detect the IP for the master
+#
+# Assumed vars:
+#   MASTER_NAME
+# Vars set:
+#   KUBE_MASTER
+#   KUBE_MASTER_IP
+function detect-master {
+  KUBE_MASTER=${MASTER_NAME}
+  if [[ -z "${KUBE_MASTER_IP-}" ]]; then
+    KUBE_MASTER_IP=$(govc vm.ip ${MASTER_NAME})
+  fi
+  if [[ -z "${KUBE_MASTER_IP-}" ]]; then
+    echo "Could not detect Kubernetes master node. Make sure you've launched a cluster with 'kube-up.sh'" >&2
+    exit 1
+  fi
+  echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)"
+}
+
+# Detect the information about the minions
+#
+# Assumed vars:
+#   MINION_NAMES
+# Vars set:
+#   KUBE_MINION_IP_ADDRESSES (array)
+function detect-minions {
+  KUBE_MINION_IP_ADDRESSES=()
+  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
+    local minion_ip=$(govc vm.ip ${MINION_NAMES[$i]})
+    if [[ -z "${minion_ip-}" ]] ; then
+      echo "Did not find ${MINION_NAMES[$i]}" >&2
+    else
+      echo "Found ${MINION_NAMES[$i]} at ${minion_ip}"
+      KUBE_MINION_IP_ADDRESSES+=("${minion_ip}")
+    fi
+  done
+  if [[ -z "${KUBE_MINION_IP_ADDRESSES-}" ]]; then
+    echo "Could not detect Kubernetes minion nodes. Make sure you've launched a cluster with 'kube-up.sh'" >&2
+    exit 1
+  fi
+}
+
+
+# Verify prereqs
+function verify-prereqs {
+  which "govc" >/dev/null || {
+    echo "Can't find govc in PATH, please install and retry."
+    echo ""
+    echo "    go install github.com/vmware/govmomi/govc"
+    echo ""
+    exit 1
+  }
+}
+
+# Create a temp dir that'll be deleted at the end of this bash session.
+#
+# Vars set:
+#   KUBE_TEMP
+function ensure-temp-dir {
+  if [[ -z ${KUBE_TEMP-} ]]; then
+    KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX)
+    trap 'rm -rf "${KUBE_TEMP}"' EXIT
+  fi
+}
+
+# Verify and find the various tar files that we are going to use on the server.
+#
+# Vars set:
+#   SERVER_BINARY_TAR
+#   SALT_TAR
+function find-release-tars {
+  SERVER_BINARY_TAR="${KUBE_ROOT}/server/kubernetes-server-linux-amd64.tar.gz"
+  if [[ ! -f "$SERVER_BINARY_TAR" ]]; then
+    SERVER_BINARY_TAR="${KUBE_ROOT}/_output/release-tars/kubernetes-server-linux-amd64.tar.gz"
+  fi
+  if [[ ! -f "$SERVER_BINARY_TAR" ]]; then
+    echo "!!! Cannot find kubernetes-server-linux-amd64.tar.gz"
+    exit 1
+  fi
+
+  SALT_TAR="${KUBE_ROOT}/server/kubernetes-salt.tar.gz"
+  if [[ ! -f "$SALT_TAR" ]]; then
+    SALT_TAR="${KUBE_ROOT}/_output/release-tars/kubernetes-salt.tar.gz"
+  fi
+  if [[ ! -f "$SALT_TAR" ]]; then
+    echo "!!! Cannot find kubernetes-salt.tar.gz"
+    exit 1
+  fi
+}
+
+# Take the local tar files and upload them to the master.
+#
+# Assumed vars:
+#   MASTER_NAME
+#   SERVER_BINARY_TAR
+#   SALT_TAR
+function upload-server-tars {
+  local args="-vm ${MASTER_NAME}"
+
+  govc guest.mkdir ${args} -p /home/kube/cache/kubernetes-install
+
+  local tar
+  for tar in "${SERVER_BINARY_TAR}" "${SALT_TAR}"; do
+    govc guest.upload ${args} -f "${tar}" "/home/kube/cache/kubernetes-install/${tar##*/}"
+  done
+}
+
+# Ensure that we have a password created for authenticating to the master. Will
+# read from $HOME/.kubernetes_auth if available.
+#
+# Vars set:
+#   KUBE_USER
+#   KUBE_PASSWORD
+function get-password {
+  local file="$HOME/.kubernetes_auth"
+  if [[ -r "$file" ]]; then
+    KUBE_USER=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["User"]')
+    KUBE_PASSWORD=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["Password"]')
+    return
+  fi
+  KUBE_USER=admin
+  KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
+
+  # Store password for reuse.
+  cat << EOF > "$file"
+{
+  "User": "$KUBE_USER",
+  "Password": "$KUBE_PASSWORD"
+}
+EOF
+  chmod 0600 "$file"
+}
+
+# Run command over ssh
+function kube-ssh {
+  local host="$1"
+  shift
+  ssh ${SSH_OPTS-} "kube@${host}" "$@" 2> /dev/null
+}
+
+# Instantiate a generic kubernetes virtual machine (master or minion)
+#
+# Usage:
+#   kube-up-vm VM_NAME [options to pass to govc vm.create]
+#
+# Example:
+#   kube-up-vm "vm-name" -c 2 -m 4096
+#
+# Assumed vars:
+#   DISK
+#   GUEST_ID
+#   PUBLIC_KEY_FILE
+function kube-up-vm {
+  local vm_name="$1"
+  shift
+
+  govc vm.create \
+    -debug \
+    -disk="${DISK}" \
+    -g="${GUEST_ID}" \
+    -link=true \
+    "$@" \
+    "${vm_name}"
+
+  # Retrieve IP first, to confirm the guest operations agent is running.
+  govc vm.ip "${vm_name}" > /dev/null
+
+  govc guest.mkdir \
+    -vm="${vm_name}" \
+    -p \
+    /home/kube/.ssh
+
+  govc guest.upload \
+    -vm="${vm_name}" \
+    -f \
+    "${PUBLIC_KEY_FILE}" \
+    /home/kube/.ssh/authorized_keys
+}
+
+# Kick off a local script on a kubernetes virtual machine (master or minion)
+#
+# Usage:
+#   kube-run VM_NAME LOCAL_FILE
function kube-run {
+  local vm_name="$1"
+  local file="$2"
+  local dst="/tmp/$(basename "${file}")"
+  govc guest.upload -vm="${vm_name}" -f -perm=0755 "${file}" "${dst}"
+
+  local vm_ip
+  vm_ip=$(govc vm.ip "${vm_name}")
+  kube-ssh ${vm_ip} "nohup sudo ${dst} < /dev/null 1> ${dst}.out 2> ${dst}.err &"
+}
+
+# Instantiate a kubernetes cluster
+#
+# Assumed vars:
+#   KUBE_ROOT
+#
+function kube-up {
+  find-release-tars
+
+  ensure-temp-dir
+
+  get-password
+  python "${KUBE_ROOT}/third_party/htpasswd/htpasswd.py" \
+    -b -c "${KUBE_TEMP}/htpasswd" "$KUBE_USER" "$KUBE_PASSWORD"
+  local htpasswd
+  htpasswd=$(cat "${KUBE_TEMP}/htpasswd")
+
+  echo "Starting master VM (this can take a minute)..."
+
+  (
+    echo "#!
/bin/bash" + echo "readonly MY_NAME=${MASTER_NAME}" + grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/hostname.sh" + echo "cd /home/kube/cache/kubernetes-install" + echo "readonly MASTER_NAME='${MASTER_NAME}'" + echo "readonly NODE_INSTANCE_PREFIX='${INSTANCE_PREFIX}-minion'" + echo "readonly SERVER_BINARY_TAR='${SERVER_BINARY_TAR##*/}'" + echo "readonly SALT_TAR='${SALT_TAR##*/}'" + echo "readonly MASTER_HTPASSWD='${htpasswd}'" + grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/install-release.sh" + grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/salt-master.sh" + ) > "${KUBE_TEMP}/master-start.sh" + + kube-up-vm ${MASTER_NAME} -c ${MASTER_CPU-1} -m ${MASTER_MEMORY_MB-1024} + upload-server-tars + kube-run ${MASTER_NAME} "${KUBE_TEMP}/master-start.sh" + + # Print master IP, so user can log in for debugging. + detect-master + echo + + echo "Starting minion VMs (this can take a minute)..." + + for (( i=0; i<${#MINION_NAMES[@]}; i++)); do + ( + echo "#! /bin/bash" + echo "readonly MY_NAME=${MINION_NAMES[$i]}" + grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/hostname.sh" + echo "KUBE_MASTER=${KUBE_MASTER}" + echo "KUBE_MASTER_IP=${KUBE_MASTER_IP}" + echo "MINION_IP_RANGE=${MINION_IP_RANGES[$i]}" + grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/salt-minion.sh" + ) > "${KUBE_TEMP}/minion-start-${i}.sh" + + ( + kube-up-vm "${MINION_NAMES[$i]}" -c ${MINION_CPU-1} -m ${MINION_MEMORY_MB-1024} + kube-run "${MINION_NAMES[$i]}" "${KUBE_TEMP}/minion-start-${i}.sh" + ) & + done + + local fail=0 + local job + for job in $(jobs -p); do + wait "${job}" || fail=$((fail + 1)) + done + if (( $fail != 0 )); then + echo "${fail} commands failed. Exiting." >&2 + exit 2 + fi + + # Print minion IPs, so user can log in for debugging. + detect-minions + echo + + echo "Waiting for master and minion initialization." + echo + echo " This will continually check to see if the API for kubernetes is reachable." + echo " This might loop forever if there was some uncaught error during start up." + echo + + printf "Waiting for ${KUBE_MASTER} to become available..." + until curl --insecure --user "${KUBE_USER}:${KUBE_PASSWORD}" --max-time 5 \ + --fail --output /dev/null --silent "https://${KUBE_MASTER_IP}/api/v1beta1/pods"; do + printf "." + sleep 2 + done + printf " OK\n" + + local i + for (( i=0; i<${#MINION_NAMES[@]}; i++)); do + printf "Waiting for ${MINION_NAMES[$i]} to become available..." + until curl --max-time 5 \ + --fail --output /dev/null --silent "http://${KUBE_MINION_IP_ADDRESSES[$i]}:10250/healthz"; do + printf "." + sleep 2 + done + printf " OK\n" + done + + echo + echo "Sanity checking cluster..." + + sleep 5 + + # Basic sanity checking + local i + for (( i=0; i<${#MINION_NAMES[@]}; i++)); do + # Make sure docker is installed + kube-ssh "${KUBE_MINION_IP_ADDRESSES[$i]}" which docker > /dev/null || { + echo "Docker failed to install on ${MINION_NAMES[$i]}. Your cluster is unlikely" >&2 + echo "to work correctly. Please run ./cluster/kube-down.sh and re-create the" >&2 + echo "cluster. (sorry!)" >&2 + exit 1 + } + done + + echo + echo "Kubernetes cluster is running. The master is running at:" + echo + echo " https://${KUBE_MASTER_IP}" + echo + echo "The user name and password to use is located in ~/.kubernetes_auth." 
+  echo
+
+  local kube_cert=".kubecfg.crt"
+  local kube_key=".kubecfg.key"
+  local ca_cert=".kubernetes.ca.crt"
+
+  (
+    umask 077
+
+    kube-ssh "${KUBE_MASTER_IP}" sudo cat /usr/share/nginx/kubecfg.crt >"${HOME}/${kube_cert}" 2>/dev/null
+    kube-ssh "${KUBE_MASTER_IP}" sudo cat /usr/share/nginx/kubecfg.key >"${HOME}/${kube_key}" 2>/dev/null
+    kube-ssh "${KUBE_MASTER_IP}" sudo cat /usr/share/nginx/ca.crt >"${HOME}/${ca_cert}" 2>/dev/null
+
+    cat << EOF > ~/.kubernetes_auth
+{
+  "User": "$KUBE_USER",
+  "Password": "$KUBE_PASSWORD",
+  "CAFile": "$HOME/$ca_cert",
+  "CertFile": "$HOME/$kube_cert",
+  "KeyFile": "$HOME/$kube_key"
+}
+EOF
+
+    chmod 0600 ~/.kubernetes_auth "${HOME}/${kube_cert}" \
+      "${HOME}/${kube_key}" "${HOME}/${ca_cert}"
+  )
+}
+
+# Delete a kubernetes cluster
+function kube-down {
+  govc vm.destroy ${MASTER_NAME} &
+
+  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
+    govc vm.destroy ${MINION_NAMES[i]} &
+  done
+
+  wait
+}
+
+# Update a kubernetes cluster with latest source
+function kube-push {
+  find-release-tars
+
+  detect-master
+  upload-server-tars
+
+  (
+    echo "#! /bin/bash"
+    echo "cd /home/kube/cache/kubernetes-install"
+    echo "readonly SERVER_BINARY_TAR='${SERVER_BINARY_TAR##*/}'"
+    echo "readonly SALT_TAR='${SALT_TAR##*/}'"
+    grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/install-release.sh"
+    echo "echo Executing configuration"
+    echo "sudo salt '*' mine.update"
+    echo "sudo salt --force-color '*' state.highstate"
+  ) | kube-ssh "${KUBE_MASTER_IP}"
+
+  get-password
+
+  echo
+  echo "Kubernetes cluster is updated. The master is running at:"
+  echo
+  echo "  https://${KUBE_MASTER_IP}"
+  echo
+  echo "The user name and password to use is located in ~/.kubernetes_auth."
+  echo
+}
+
+# Execute prior to running tests to build a release if required for env
+function test-build-release {
+  echo "TODO"
+}
+
+# Execute prior to running tests to initialize required structure
+function test-setup {
+  echo "TODO"
+}
+
+# Execute after running tests to perform any required clean-up
+function test-teardown {
+  echo "TODO"
+}
diff --git a/icebox/cluster/vsphere/util.sh b/icebox/cluster/vsphere/util.sh
deleted file mode 100644
index 8e5fb15055d..00000000000
--- a/icebox/cluster/vsphere/util.sh
+++ /dev/null
@@ -1,306 +0,0 @@
-#!/bin/bash
-
-# Copyright 2014 Google Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# A library of helper functions and constants for the local config.
-
-# Use the config file specified in $KUBE_CONFIG_FILE, or default to
-# config-default.sh.
-source $(dirname ${BASH_SOURCE})/${KUBE_CONFIG_FILE-"config-default.sh"}
-
-function detect-master {
-  KUBE_MASTER=${MASTER_NAME}
-  if [ -z "$KUBE_MASTER_IP" ]; then
-    KUBE_MASTER_IP=$(govc vm.ip ${MASTER_NAME})
-  fi
-  if [ -z "$KUBE_MASTER_IP" ]; then
-    echo "Could not detect Kubernetes master node.
Make sure you've launched a cluster with 'kube-up.sh'" - exit 1 - fi - echo "Found ${KUBE_MASTER} at ${KUBE_MASTER_IP}" -} - -function detect-minions { - KUBE_MINION_IP_ADDRESSES=() - for (( i=0; i<${#MINION_NAMES[@]}; i++)); do - local minion_ip=$(govc vm.ip ${MINION_NAMES[$i]}) - echo "Found ${MINION_NAMES[$i]} at ${minion_ip}" - KUBE_MINION_IP_ADDRESSES+=("${minion_ip}") - done - if [ -z "$KUBE_MINION_IP_ADDRESSES" ]; then - echo "Could not detect Kubernetes minion nodes. Make sure you've launched a cluster with 'kube-up.sh'" - exit 1 - fi -} - -# Verify prereqs on host machine -function verify-prereqs { - if [ "$(which govc)" == "" ]; then - echo "Can't find govc in PATH, please install and retry." - echo "" - echo " go install github.com/vmware/govmomi/govc" - echo "" - exit 1 - fi -} - -# Run command over ssh -function kube-ssh { - local host=$1 - shift - ssh ${SSH_OPTS} kube@${host} "$*" 2> /dev/null -} - -# Instantiate a generic kubernetes virtual machine (master or minion) -function kube-up-vm { - local vm_name=$1 - local vm_memory=$2 - local vm_cpu=$3 - local vm_ip= - - govc vm.create \ - -debug \ - -m ${vm_memory} \ - -c ${vm_cpu} \ - -disk ${DISK} \ - -g ${GUEST_ID} \ - -link=true \ - ${vm_name} - - # Retrieve IP first, to confirm the guest operations agent is running. - vm_ip=$(govc vm.ip ${vm_name}) - - govc guest.mkdir \ - -vm ${vm_name} \ - -p \ - /home/kube/.ssh - - govc guest.upload \ - -vm ${vm_name} \ - -f \ - ${PUBLIC_KEY_FILE} \ - /home/kube/.ssh/authorized_keys -} - -# Instantiate a kubernetes cluster -function kube-up { - # Build up start up script for master - KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX) - trap "rm -rf ${KUBE_TEMP}" EXIT - - get-password - python $(dirname $0)/../third_party/htpasswd/htpasswd.py -b -c ${KUBE_TEMP}/htpasswd $user $passwd - HTPASSWD=$(cat ${KUBE_TEMP}/htpasswd) - - echo "Starting master VM (this can take a minute)..." - - kube-up-vm ${MASTER_NAME} ${MASTER_MEMORY_MB-1024} ${MASTER_CPU-1} - - # Prints master IP, so user can log in for debugging. - detect-master - echo - - echo "Starting minion VMs (this can take a minute)..." - - for (( i=0; i<${#MINION_NAMES[@]}; i++)); do - ( - echo "#! /bin/bash" - echo "MY_NAME=${MINION_NAMES[$i]}" - grep -v "^#" $(dirname $0)/vsphere/templates/hostname.sh - echo "MASTER_NAME=${MASTER_NAME}" - echo "MASTER_IP=${KUBE_MASTER_IP}" - echo "MINION_IP_RANGE=${MINION_IP_RANGES[$i]}" - grep -v "^#" $(dirname $0)/vsphere/templates/salt-minion.sh - ) > ${KUBE_TEMP}/minion-start-${i}.sh - - ( - kube-up-vm ${MINION_NAMES[$i]} ${MINION_MEMORY_MB-1024} ${MINION_CPU-1} - - MINION_IP=$(govc vm.ip ${MINION_NAMES[$i]}) - - govc guest.upload \ - -vm ${MINION_NAMES[$i]} \ - -perm 0700 \ - -f \ - ${KUBE_TEMP}/minion-start-${i}.sh \ - /home/kube/minion-start.sh - - # Kickstart start script - kube-ssh ${MINION_IP} "nohup sudo ~/minion-start.sh < /dev/null 1> minion-start.out 2> minion-start.err &" - ) & - done - - FAIL=0 - for job in `jobs -p` - do - wait $job || let "FAIL+=1" - done - if (( $FAIL != 0 )); then - echo "${FAIL} commands failed. Exiting." - exit 2 - fi - - # Print minion IPs, so user can log in for debugging. - detect-minions - echo - - # Continue provisioning the master. - - ( - echo "#! 
/bin/bash" - echo "MY_NAME=${MASTER_NAME}" - grep -v "^#" $(dirname $0)/vsphere/templates/hostname.sh - echo "MASTER_NAME=${MASTER_NAME}" - echo "MASTER_HTPASSWD='${HTPASSWD}'" - grep -v "^#" $(dirname $0)/vsphere/templates/install-release.sh - grep -v "^#" $(dirname $0)/vsphere/templates/salt-master.sh - ) > ${KUBE_TEMP}/master-start.sh - - govc guest.upload \ - -vm ${MASTER_NAME} \ - -perm 0700 \ - -f \ - ${KUBE_TEMP}/master-start.sh \ - /home/kube/master-start.sh - - govc guest.upload \ - -vm ${MASTER_NAME} \ - -f \ - ./_output/release/master-release.tgz \ - /home/kube/master-release.tgz - - # Kickstart start script - kube-ssh ${KUBE_MASTER_IP} "nohup sudo ~/master-start.sh < /dev/null 1> master-start.out 2> master-start.err &" - - echo "Waiting for cluster initialization." - echo - echo " This will continually check to see if the API for kubernetes is reachable." - echo " This might loop forever if there was some uncaught error during start up." - echo - - until $(curl --insecure --user ${user}:${passwd} --max-time 5 \ - --fail --output /dev/null --silent https://${KUBE_MASTER_IP}/api/v1beta1/pods); do - printf "." - sleep 2 - done - - echo "Kubernetes cluster created." - echo - - echo "Sanity checking cluster..." - - sleep 5 - - # Don't bail on errors, we want to be able to print some info. - set +e - - # Basic sanity checking - for (( i=0; i<${#MINION_NAMES[@]}; i++)); do - # Make sure docker is installed - kube-ssh ${KUBE_MINION_IP_ADDRESSES[$i]} which docker > /dev/null - if [ "$?" != "0" ]; then - echo "Docker failed to install on ${MINION_NAMES[$i]}. Your cluster is unlikely to work correctly." - echo "Please run ./cluster/kube-down.sh and re-create the cluster. (sorry!)" - exit 1 - fi - done - - echo - echo "Kubernetes cluster is running. The master is running at:" - echo - echo " https://${KUBE_MASTER_IP}" - echo - echo "The user name and password to use is located in ~/.kubernetes_auth." - echo - echo "Security note: The server above uses a self signed certificate." - echo "This is subject to \"Man in the middle\" type attacks." - echo -} - -# Delete a kubernetes cluster -function kube-down { - govc vm.destroy ${MASTER_NAME} & - - for (( i=0; i<${#MINION_NAMES[@]}; i++)); do - govc vm.destroy ${MINION_NAMES[i]} & - done - - wait - -} - -# Update a kubernetes cluster with latest source -function kube-push { - detect-master - - govc guest.upload \ - -vm ${MASTER_NAME} \ - -f \ - ./_output/release/master-release.tgz \ - /home/kube/master-release.tgz - - ( - grep -v "^#" $(dirname $0)/vsphere/templates/install-release.sh - echo "echo Executing configuration" - echo "sudo salt '*' mine.update" - echo "sudo salt --force-color '*' state.highstate" - ) | kube-ssh ${KUBE_MASTER_IP} bash - - get-password - - echo - echo "Kubernetes cluster is updated. The master is running at:" - echo - echo " https://${KUBE_MASTER_IP}" - echo - echo "The user name and password to use is located in ~/.kubernetes_auth." 
- echo -} - -# Execute prior to running tests to build a release if required for env -function test-build-release { - echo "TODO" -} - -# Execute prior to running tests to initialize required structure -function test-setup { - echo "TODO" -} - -# Execute after running tests to perform any required clean-up -function test-teardown { - echo "TODO" -} - -# Set the {user} and {password} environment values required to interact with provider -function get-password { - file=${HOME}/.kubernetes_auth - if [ -e ${file} ]; then - user=$(cat $file | python -c 'import json,sys;print(json.load(sys.stdin)["User"])') - passwd=$(cat $file | python -c 'import json,sys;print(json.load(sys.stdin)["Password"])') - return - fi - user=admin - passwd=$(python -c 'import string,random; print("".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16)))') - - # Store password for reuse. - cat << EOF > ~/.kubernetes_auth -{ - "User": "$user", - "Password": "$passwd" -} -EOF - chmod 0600 ~/.kubernetes_auth -}
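
Usage note for anyone exercising this provider end to end: a minimal sketch, assuming govc reads its connection settings from the usual GOVC_* environment variables, that a bootable template disk exists at the datastore path named by DISK in config-default.sh (./kube/kube.vmdk), and that build/release.sh produces the tarballs find-release-tars looks for. The endpoint and credential values below are placeholders, not part of this patch:

    # Placeholder vCenter/ESX connection settings for govc (adjust for your setup).
    export GOVC_URL='https://user:password@vcenter.example.com/sdk'
    export GOVC_DATASTORE='datastore1'
    export GOVC_GUEST_LOGIN='kube:kube'   # guest credentials for the govc guest.* operations
    export GOVC_INSECURE=1                # only if the endpoint uses a self-signed certificate

    # Select the vSphere provider; setting KUBE_CONFIG_FILE=config-test.sh
    # picks the e2e config instead of config-default.sh.
    export KUBERNETES_PROVIDER=vsphere

    # Build the server and Salt tarballs under _output/release-tars/,
    # then bring the cluster up and tear it down again.
    build/release.sh
    cluster/kube-up.sh
    cluster/kube-down.sh

Because minion IPs are baked into the apiserver arguments and the static routes, adding a node re-runs highstate everywhere via the new /srv/reactor/highstate-all.sls reactor rather than only on the joining minion.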