Merge pull request #1747 from pietern/vsphere-binary-deploy

vSphere support for binary deploys
Joe Beda 2014-10-29 16:52:12 -07:00
commit 78df01172a
23 changed files with 593 additions and 387 deletions

View File

@ -12,7 +12,7 @@ least one maintainer on relevant issues and PRs.
* GCE: [Brendan Burns](https://github.com/brendandburns), [Joe Beda](https://github.com/jbeda), [Daniel Smith](https://github.com/lavalamp), [Tim Hockin](https://github.com/thockin)
* Azure: [Jeff Mendoza](https://github.com/jeffmendoza)
* VSphere: [Pieter Noordhuis](https://github.com/pietern)
* vSphere: [Pieter Noordhuis](https://github.com/pietern)
* Rackspace: [Ryan Richard](https://github.com/doublerr)
* oVirt: [Federico Simoncelli](https://github.com/simon3z)
* Local: [Derek Carr](https://github.com/derekwaynecarr)

View File

@ -24,10 +24,10 @@ While the concepts and architecture in Kubernetes represent years of experience
* [OpenStack](https://developer.rackspace.com/blog/running-coreos-and-kubernetes/)
* [CloudStack](docs/getting-started-guides/cloudstack.md)
* [Rackspace](docs/getting-started-guides/rackspace.md)
* [vSphere](docs/getting-started-guides/vsphere.md)
* The following clouds are currently broken at Kubernetes head. Please sync your client to `v0.3` (`git checkout v0.3`) to use these:
* [Locally](docs/getting-started-guides/locally.md)
* [vSphere](docs/getting-started-guides/vsphere.md)
* [Microsoft Azure](docs/getting-started-guides/azure.md)
* [Kubernetes 101](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/examples/walkthrough)
* [kubecfg command line tool](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/cli.md)

View File

@ -38,7 +38,7 @@ cat <<EOF >/etc/salt/master.d/reactor.conf
# React to new minions starting by running highstate on them.
reactor:
- 'salt/minion/*/start':
- /srv/reactor/start.sls
- /srv/reactor/highstate-new.sls
EOF
# Install Salt

View File

@ -0,0 +1,10 @@
# This runs highstate on the master node(s).
#
# Some of the cluster deployment scripts pass the list of minion addresses to
# the apiserver as a command line argument. This list needs to be updated if a
# new minion is started, so run highstate on the master(s) when this happens.
#
highstate_master:
cmd.state.highstate:
- tgt: 'roles:kubernetes-master'
- expr_form: grain

View File

@ -0,0 +1,11 @@
# This runs highstate on the minion nodes.
#
# Some of the cluster deployment scripts use the list of minions on the minions
# themselves (for example: every minion is configured with static routes to
# every other minion on a vSphere deployment). To propagate changes throughout
# the pool, run highstate on all minions whenever a single minion starts.
#
highstate_minions:
cmd.state.highstate:
- tgt: 'roles:kubernetes-pool'
- expr_form: grain
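Both reactor states above target the Salt `roles` grain rather than individual minion IDs. For reference, a hedged sketch of the equivalent one-off commands you could run by hand on the Salt master, assuming the role grains are set as above:

```sh
# Manual equivalents of the two grain-targeted reactor states.
salt -G 'roles:kubernetes-master' state.highstate
salt -G 'roles:kubernetes-pool' state.highstate
```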

View File

@ -0,0 +1,4 @@
# This runs highstate only on the NEW node, regardless of type.
highstate_new:
cmd.state.highstate:
- tgt: {{ data['id'] }}
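Here `data['id']` is the minion ID carried by the `salt/minion/*/start` event that fires the reactor, so only the node that just came up is highstated. A hedged manual equivalent for a hypothetical new minion:

```sh
# Manual equivalent for one freshly started node (the name is hypothetical).
salt 'kubernetes-minion-5' state.highstate
```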

View File

@ -1,5 +0,0 @@
# This runs highstate on the target node
highstate_run:
cmd.state.highstate:
- tgt: {{ data['id'] }}

View File

@ -1,6 +1,7 @@
pkg-core:
pkg.installed:
- names:
- curl
{% if grains['os_family'] == 'RedHat' %}
- python
- git

View File

@ -17,12 +17,16 @@ docker-repo:
- require:
- pkg: pkg-core
{% if grains.cloud is defined %}
{% if grains.cloud == 'gce' %}
# The default GCE images have ip_forwarding explicitly set to 0.
# Here we take care of commenting that out.
/etc/sysctl.d/11-gce-network-security.conf:
file.replace:
- pattern: '^net.ipv4.ip_forward=0'
- repl: '# net.ipv4.ip_forward=0'
{% endif %}
{% endif %}
net.ipv4.ip_forward:
sysctl.present:
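For context, a hedged sketch of what the `file.replace` state above changes on a stock GCE image so that IP forwarding can then be enabled:

```sh
# /etc/sysctl.d/11-gce-network-security.conf
#   before:  net.ipv4.ip_forward=0
#   after:   # net.ipv4.ip_forward=0
# With the hard-coded 0 commented out, forwarding can be switched on, e.g.:
sudo sysctl -w net.ipv4.ip_forward=1
```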

View File

@ -17,6 +17,9 @@ nginx:
{% if grains.cloud == 'vagrant' %}
{% set cert_ip=grains.fqdn_ip4 %}
{% endif %}
{% if grains.cloud == 'vsphere' %}
{% set cert_ip=grains.ip_interfaces.eth0[0] %}
{% endif %}
{% endif %}
# If there is a pillar defined, override any defaults.
{% if pillar['cert_ip'] is defined %}
@ -34,6 +37,8 @@ nginx:
- source: salt://nginx/{{certgen}}
{% if cert_ip is defined %}
- args: {{cert_ip}}
- require:
- pkg: curl
{% endif %}
- cwd: /
- user: root
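The vSphere branch above derives the certificate IP from the first address on `eth0`. A hedged way to check what that grain resolves to on a given node:

```sh
# Inspect the grain the vSphere cert_ip expression reads (run on the node).
salt-call --local grains.get ip_interfaces:eth0
```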

View File

@ -92,7 +92,7 @@ cat <<EOF >/etc/salt/master.d/reactor.conf
# React to new minions starting by running highstate on them.
reactor:
- 'salt/minion/*/start':
- /srv/reactor/start.sls
- /srv/reactor/highstate-new.sls
EOF
cat <<EOF >/etc/salt/master.d/salt-output.conf

View File

@ -36,7 +36,7 @@ MINIONS_FILE=/tmp/minions
"${KUBE_ROOT}/cluster/kubecfg.sh" -template $'{{range.Items}}{{.Name}}\n{{end}}' list minions > ${MINIONS_FILE}
# On vSphere, use minion IPs as their names
if [ "$KUBERNETES_PROVIDER" == "vsphere" ]; then
if [[ "${KUBERNETES_PROVIDER}" == "vsphere" ]]; then
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
MINION_NAMES[i]=${KUBE_MINION_IP_ADDRESSES[i]}
done

View File

@ -14,24 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
function public-key {
local dir=${HOME}/.ssh
for f in $HOME/.ssh/{id_{rsa,dsa},*}.pub; do
if [ -r $f ]; then
echo $f
return
fi
done
echo "Can't find public key file..." 1>&2
exit 1
}
DISK=./kube/kube.vmdk
GUEST_ID=debian7_64Guest
PUBLIC_KEY_FILE=${PUBLIC_KEY_FILE-$(public-key)}
SSH_OPTS="-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null"
SSH_OPTS="-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oLogLevel=ERROR"
# These need to be set
#export GOVC_URL=
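With the `public-key` helper and `PUBLIC_KEY_FILE` gone, the deploy now relies on whatever identities your ssh-agent holds (see `verify-ssh-prereqs` and `kube-up-vm` in `cluster/vsphere/util.sh`). A hedged sketch of getting an agent into the expected state; the key path is only an example:

```sh
eval "$(ssh-agent)"    # start an agent if one is not already running
ssh-add ~/.ssh/id_rsa  # add the key that should land in each VM's authorized_keys
ssh-add -L             # this listing is what util.sh uploads to the VMs
```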

View File

@ -14,10 +14,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
source $(dirname ${BASH_SOURCE})/config-common.sh
NUM_MINIONS=4
DISK=./kube/kube.vmdk
GUEST_ID=debian7_64Guest
INSTANCE_PREFIX=kubernetes
MASTER_TAG="${INSTANCE_PREFIX}-master"
MINION_TAG="${INSTANCE_PREFIX}-minion"
MASTER_NAME="${INSTANCE_PREFIX}-master"
MASTER_MEMORY_MB=1024
@ -27,3 +30,5 @@ MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}}))
MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24"))
MINION_MEMORY_MB=2048
MINION_CPU=1
PORTAL_NET="10.244.240.0/20"
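`util.sh` sources whichever profile `KUBE_CONFIG_FILE` names, falling back to `config-default.sh`, so the e2e profile shown in the next file can be selected without editing these defaults. A hedged usage sketch; the profile filename `config-test.sh` is assumed from the upstream layout:

```sh
export KUBERNETES_PROVIDER=vsphere
export KUBE_CONFIG_FILE=config-test.sh   # defaults to config-default.sh if unset
cluster/kube-up.sh
```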

View File

@ -14,10 +14,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
source $(dirname ${BASH_SOURCE})/config-common.sh
NUM_MINIONS=2
DISK=./kube/kube.vmdk
GUEST_ID=debian7_64Guest
INSTANCE_PREFIX="e2e-test-${USER}"
MASTER_TAG="${INSTANCE_PREFIX}-master"
MINION_TAG="${INSTANCE_PREFIX}-minion"
MASTER_NAME="${INSTANCE_PREFIX}-master"
MASTER_MEMORY_MB=1024
@ -27,3 +30,5 @@ MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}}))
MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24"))
MINION_MEMORY_MB=1024
MINION_CPU=1
PORTAL_NET="10.244.240.0/20"

View File

@ -0,0 +1,28 @@
#!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Create the overlay files for the salt tree. We create these in a separate
# place so that we can blow away the rest of the salt configs on a kube-push and
# re-apply these.
mkdir -p /srv/salt-overlay/pillar
cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
node_instance_prefix: $NODE_INSTANCE_PREFIX
portal_net: $PORTAL_NET
EOF
mkdir -p /srv/salt-overlay/salt/nginx
echo $MASTER_HTPASSWD > /srv/salt-overlay/salt/nginx/htpasswd
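A hedged check, run on the master, that the overlay written by this script is in place (and therefore survives the salt-tree refresh done by a later `kube-push`):

```sh
cat /srv/salt-overlay/pillar/cluster-params.sls
sudo cat /srv/salt-overlay/salt/nginx/htpasswd
```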

View File

@ -14,11 +14,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Install release
# This script assumes that the environment variable SERVER_BINARY_TAR contains
# the release tar to download and unpack. It is meant to be pushed to the
# master and run.
echo "Unpacking release"
rm -rf master-release || false
tar xzf master-release.tgz
echo "Unpacking Salt tree"
rm -rf kubernetes
tar xzf "${SALT_TAR}"
echo "Running release install script"
sudo master-release/src/scripts/master-release-install.sh
sudo kubernetes/saltbase/install.sh "${SERVER_BINARY_TAR}"

View File

@ -38,23 +38,19 @@ cat <<EOF >/etc/salt/master.d/reactor.conf
# React to new minions starting by running highstate on them.
reactor:
- 'salt/minion/*/start':
- /srv/reactor/start.sls
- /srv/reactor/highstate-new.sls
- /srv/reactor/highstate-masters.sls
- /srv/reactor/highstate-minions.sls
EOF
mkdir -p /srv/salt/nginx
echo $MASTER_HTPASSWD > /srv/salt/nginx/htpasswd
# Install Salt
#
# We specify -X to avoid a race condition that can cause minion failure to
# install. See https://github.com/saltstack/salt-bootstrap/issues/270
#
# -M installs the master
if [ ! -x /etc/init.d/salt-master ]; then
wget -q -O - https://bootstrap.saltstack.com | sh -s -- -M -X
else
/etc/init.d/salt-master restart
/etc/init.d/salt-minion restart
fi
set +x
wget -q -O - https://bootstrap.saltstack.com | sh -s -- -M -X
set -x
echo $MASTER_HTPASSWD > /srv/salt/nginx/htpasswd
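Once the bootstrap finishes, a hedged way to confirm the master came up with the reactor wired in and is accepting minions:

```sh
sudo cat /etc/salt/master.d/reactor.conf   # should list the three highstate-*.sls files
sudo salt-key -L                           # new minions appear here as they start
```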

View File

@ -18,14 +18,14 @@
sed -i -e "s/http.us.debian.org/mirrors.kernel.org/" /etc/apt/sources.list
# Resolve hostname of master
if ! grep -q $MASTER_NAME /etc/hosts; then
echo "Adding host entry for $MASTER_NAME"
echo "$MASTER_IP $MASTER_NAME" >> /etc/hosts
if ! grep -q $KUBE_MASTER /etc/hosts; then
echo "Adding host entry for $KUBE_MASTER"
echo "$KUBE_MASTER_IP $KUBE_MASTER" >> /etc/hosts
fi
# Prepopulate the name of the Master
mkdir -p /etc/salt/minion.d
echo "master: $MASTER_NAME" > /etc/salt/minion.d/master.conf
echo "master: $KUBE_MASTER" > /etc/salt/minion.d/master.conf
# Turn on debugging for salt-minion
# echo "DAEMON_ARGS=\"\$DAEMON_ARGS --log-file-level=debug\"" > /etc/default/salt-minion
@ -48,8 +48,4 @@ EOF
#
# We specify -X to avoid a race condition that can cause minion failure to
# install. See https://github.com/saltstack/salt-bootstrap/issues/270
if [ ! -x /etc/init.d/salt-minion ]; then
wget -q -O - https://bootstrap.saltstack.com | sh -s -- -X
else
/etc/init.d/salt-minion restart
fi
wget -q -O - https://bootstrap.saltstack.com | sh -s -- -X
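The `KUBE_MASTER` and `KUBE_MASTER_IP` values used above are injected into this template by `kube-up` in `util.sh`. A hedged check on a freshly booted minion; the master name shown is the default from `config-default.sh`:

```sh
grep kubernetes-master /etc/hosts     # the host entry added above
cat /etc/salt/minion.d/master.conf    # should read: master: kubernetes-master
```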

cluster/vsphere/util.sh (new executable file, 473 lines added)
View File

@ -0,0 +1,473 @@
#!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions and constants for the local config.
# Use the config file specified in $KUBE_CONFIG_FILE, or default to
# config-default.sh.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/vsphere/config-common.sh"
source "${KUBE_ROOT}/cluster/vsphere/${KUBE_CONFIG_FILE-"config-default.sh"}"
# Detect the IP for the master
#
# Assumed vars:
# MASTER_NAME
# Vars set:
# KUBE_MASTER
# KUBE_MASTER_IP
function detect-master {
KUBE_MASTER=${MASTER_NAME}
if [[ -z "${KUBE_MASTER_IP-}" ]]; then
KUBE_MASTER_IP=$(govc vm.ip ${MASTER_NAME})
fi
if [[ -z "${KUBE_MASTER_IP-}" ]]; then
echo "Could not detect Kubernetes master node. Make sure you've launched a cluster with 'kube-up.sh'" >&2
exit 1
fi
echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)"
}
# Detect the information about the minions
#
# Assumed vars:
# MINION_NAMES
# Vars set:
# KUBE_MINION_IP_ADDRESSES (array)
function detect-minions {
KUBE_MINION_IP_ADDRESSES=()
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
local minion_ip=$(govc vm.ip ${MINION_NAMES[$i]})
if [[ -z "${minion_ip-}" ]] ; then
echo "Did not find ${MINION_NAMES[$i]}" >&2
else
echo "Found ${MINION_NAMES[$i]} at ${minion_ip}"
KUBE_MINION_IP_ADDRESSES+=("${minion_ip}")
fi
done
if [[ -z "${KUBE_MINION_IP_ADDRESSES-}" ]]; then
echo "Could not detect Kubernetes minion nodes. Make sure you've launched a cluster with 'kube-up.sh'" >&2
exit 1
fi
}
function trap-add {
local handler="$1"
local signal="${2-EXIT}"
local cur
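# `trap -p EXIT` prints a line of the form: trap -- '<current handler>' EXIT
# Re-parsing that line through `sh -c 'echo $3' --` leaves the quoted handler
# body in $3 (empty when no trap is set), so the existing handler is kept and
# the new one appended after it.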
cur="$(eval "sh -c 'echo \$3' -- $(trap -p ${signal})")"
if [[ -n "${cur}" ]]; then
handler="${cur}; ${handler}"
fi
trap "${handler}" ${signal}
}
function verify-prereqs {
which "govc" >/dev/null || {
echo "Can't find govc in PATH, please install and retry."
echo ""
echo " go install github.com/vmware/govmomi/govc"
echo ""
exit 1
}
}
function verify-ssh-prereqs {
local rc
rc=0
ssh-add -L 1> /dev/null 2> /dev/null || rc="$?"
# "Could not open a connection to your authentication agent."
if [[ "${rc}" -eq 2 ]]; then
eval "$(ssh-agent)" > /dev/null
trap-add "kill ${SSH_AGENT_PID}" EXIT
fi
rc=0
ssh-add -L 1> /dev/null 2> /dev/null || rc="$?"
# "The agent has no identities."
if [[ "${rc}" -eq 1 ]]; then
# Try adding one of the default identities, with or without passphrase.
ssh-add || true
fi
# Expect at least one identity to be available.
if ! ssh-add -L 1> /dev/null 2> /dev/null; then
echo "Could not find or add an SSH identity."
echo "Please start ssh-agent, add your identity, and retry."
exit 1
fi
}
# Create a temp dir that'll be deleted at the end of this bash session.
#
# Vars set:
# KUBE_TEMP
function ensure-temp-dir {
if [[ -z ${KUBE_TEMP-} ]]; then
KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX)
trap-add 'rm -rf "${KUBE_TEMP}"' EXIT
fi
}
# Verify and find the various tar files that we are going to use on the server.
#
# Vars set:
# SERVER_BINARY_TAR
# SALT_TAR
function find-release-tars {
SERVER_BINARY_TAR="${KUBE_ROOT}/server/kubernetes-server-linux-amd64.tar.gz"
if [[ ! -f "$SERVER_BINARY_TAR" ]]; then
SERVER_BINARY_TAR="${KUBE_ROOT}/_output/release-tars/kubernetes-server-linux-amd64.tar.gz"
fi
if [[ ! -f "$SERVER_BINARY_TAR" ]]; then
echo "!!! Cannot find kubernetes-server-linux-amd64.tar.gz"
exit 1
fi
SALT_TAR="${KUBE_ROOT}/server/kubernetes-salt.tar.gz"
if [[ ! -f "$SALT_TAR" ]]; then
SALT_TAR="${KUBE_ROOT}/_output/release-tars/kubernetes-salt.tar.gz"
fi
if [[ ! -f "$SALT_TAR" ]]; then
echo "!!! Cannot find kubernetes-salt.tar.gz"
exit 1
fi
}
# Take the local tar files and upload them to the master.
#
# Assumed vars:
# MASTER_NAME
# SERVER_BINARY_TAR
# SALT_TAR
function upload-server-tars {
local vm_ip
vm_ip=$(govc vm.ip "${MASTER_NAME}")
kube-ssh ${vm_ip} "mkdir -p /home/kube/cache/kubernetes-install"
local tar
for tar in "${SERVER_BINARY_TAR}" "${SALT_TAR}"; do
kube-scp ${vm_ip} "${tar}" "/home/kube/cache/kubernetes-install/${tar##*/}"
done
}
# Ensure that we have a password created for authenticating to the master. Will
# read from $HOME/.kubernetes_auth if available.
#
# Vars set:
# KUBE_USER
# KUBE_PASSWORD
function get-password {
local file="$HOME/.kubernetes_auth"
if [[ -r "$file" ]]; then
KUBE_USER=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["User"]')
KUBE_PASSWORD=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["Password"]')
return
fi
KUBE_USER=admin
KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
# Store password for reuse.
cat << EOF > "$file"
{
"User": "$KUBE_USER",
"Password": "$KUBE_PASSWORD"
}
EOF
chmod 0600 "$file"
}
# Run command over ssh
function kube-ssh {
local host="$1"
shift
ssh ${SSH_OPTS-} "kube@${host}" "$@" 2> /dev/null
}
# Copy file over ssh
function kube-scp {
local host="$1"
local src="$2"
local dst="$3"
scp ${SSH_OPTS-} "${src}" "kube@${host}:${dst}"
}
# Instantiate a generic kubernetes virtual machine (master or minion)
#
# Usage:
# kube-up-vm VM_NAME [options to pass to govc vm.create]
#
# Example:
# kube-up-vm "vm-name" -c 2 -m 4096
#
# Assumed vars:
# DISK
# GUEST_ID
function kube-up-vm {
local vm_name="$1"
shift
govc vm.create \
-debug \
-disk="${DISK}" \
-g="${GUEST_ID}" \
-link=true \
"$@" \
"${vm_name}"
# Retrieve IP first, to confirm the guest operations agent is running.
govc vm.ip "${vm_name}" > /dev/null
govc guest.mkdir \
-vm="${vm_name}" \
-p \
/home/kube/.ssh
ssh-add -L > "${KUBE_TEMP}/${vm_name}-authorized_keys"
govc guest.upload \
-vm="${vm_name}" \
-f \
"${KUBE_TEMP}/${vm_name}-authorized_keys" \
/home/kube/.ssh/authorized_keys
}
# Kick off a local script on a kubernetes virtual machine (master or minion)
#
# Usage:
# kube-run VM_NAME LOCAL_FILE
function kube-run {
local vm_name="$1"
local file="$2"
local dst="/tmp/$(basename "${file}")"
govc guest.upload -vm="${vm_name}" -f -perm=0755 "${file}" "${dst}"
local vm_ip
vm_ip=$(govc vm.ip "${vm_name}")
kube-ssh ${vm_ip} "nohup sudo ${dst} < /dev/null 1> ${dst}.out 2> ${dst}.err &"
}
# Instantiate a kubernetes cluster
#
# Assumed vars:
# KUBE_ROOT
# <Various vars set in config file>
function kube-up {
verify-ssh-prereqs
find-release-tars
ensure-temp-dir
get-password
python "${KUBE_ROOT}/third_party/htpasswd/htpasswd.py" \
-b -c "${KUBE_TEMP}/htpasswd" "$KUBE_USER" "$KUBE_PASSWORD"
local htpasswd
htpasswd=$(cat "${KUBE_TEMP}/htpasswd")
echo "Starting master VM (this can take a minute)..."
(
echo "#! /bin/bash"
echo "readonly MY_NAME=${MASTER_NAME}"
grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/hostname.sh"
echo "cd /home/kube/cache/kubernetes-install"
echo "readonly MASTER_NAME='${MASTER_NAME}'"
echo "readonly NODE_INSTANCE_PREFIX='${INSTANCE_PREFIX}-minion'"
echo "readonly PORTAL_NET='${PORTAL_NET}'"
echo "readonly SERVER_BINARY_TAR='${SERVER_BINARY_TAR##*/}'"
echo "readonly SALT_TAR='${SALT_TAR##*/}'"
echo "readonly MASTER_HTPASSWD='${htpasswd}'"
grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/create-dynamic-salt-files.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/install-release.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/salt-master.sh"
) > "${KUBE_TEMP}/master-start.sh"
kube-up-vm ${MASTER_NAME} -c ${MASTER_CPU-1} -m ${MASTER_MEMORY_MB-1024}
upload-server-tars
kube-run ${MASTER_NAME} "${KUBE_TEMP}/master-start.sh"
# Print master IP, so user can log in for debugging.
detect-master
echo
echo "Starting minion VMs (this can take a minute)..."
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
(
echo "#! /bin/bash"
echo "readonly MY_NAME=${MINION_NAMES[$i]}"
grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/hostname.sh"
echo "KUBE_MASTER=${KUBE_MASTER}"
echo "KUBE_MASTER_IP=${KUBE_MASTER_IP}"
echo "MINION_IP_RANGE=${MINION_IP_RANGES[$i]}"
grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/salt-minion.sh"
) > "${KUBE_TEMP}/minion-start-${i}.sh"
(
kube-up-vm "${MINION_NAMES[$i]}" -c ${MINION_CPU-1} -m ${MINION_MEMORY_MB-1024}
kube-run "${MINION_NAMES[$i]}" "${KUBE_TEMP}/minion-start-${i}.sh"
) &
done
local fail=0
local job
for job in $(jobs -p); do
wait "${job}" || fail=$((fail + 1))
done
if (( $fail != 0 )); then
echo "${fail} commands failed. Exiting." >&2
exit 2
fi
# Print minion IPs, so user can log in for debugging.
detect-minions
echo
echo "Waiting for master and minion initialization."
echo
echo " This will continually check to see if the API for kubernetes is reachable."
echo " This might loop forever if there was some uncaught error during start up."
echo
printf "Waiting for ${KUBE_MASTER} to become available..."
until curl --insecure --user "${KUBE_USER}:${KUBE_PASSWORD}" --max-time 5 \
--fail --output /dev/null --silent "https://${KUBE_MASTER_IP}/api/v1beta1/pods"; do
printf "."
sleep 2
done
printf " OK\n"
local i
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
printf "Waiting for ${MINION_NAMES[$i]} to become available..."
until curl --max-time 5 \
--fail --output /dev/null --silent "http://${KUBE_MINION_IP_ADDRESSES[$i]}:10250/healthz"; do
printf "."
sleep 2
done
printf " OK\n"
done
echo
echo "Sanity checking cluster..."
sleep 5
# Basic sanity checking
local i
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
# Make sure docker is installed
kube-ssh "${KUBE_MINION_IP_ADDRESSES[$i]}" which docker > /dev/null || {
echo "Docker failed to install on ${MINION_NAMES[$i]}. Your cluster is unlikely" >&2
echo "to work correctly. Please run ./cluster/kube-down.sh and re-create the" >&2
echo "cluster. (sorry!)" >&2
exit 1
}
done
echo
echo "Kubernetes cluster is running. The master is running at:"
echo
echo " https://${KUBE_MASTER_IP}"
echo
echo "The user name and password to use is located in ~/.kubernetes_auth."
echo
local kube_cert=".kubecfg.crt"
local kube_key=".kubecfg.key"
local ca_cert=".kubernetes.ca.crt"
(
umask 077
kube-ssh "${KUBE_MASTER_IP}" sudo cat /usr/share/nginx/kubecfg.crt >"${HOME}/${kube_cert}" 2>/dev/null
kube-ssh "${KUBE_MASTER_IP}" sudo cat /usr/share/nginx/kubecfg.key >"${HOME}/${kube_key}" 2>/dev/null
kube-ssh "${KUBE_MASTER_IP}" sudo cat /usr/share/nginx/ca.crt >"${HOME}/${ca_cert}" 2>/dev/null
cat << EOF > ~/.kubernetes_auth
{
"User": "$KUBE_USER",
"Password": "$KUBE_PASSWORD",
"CAFile": "$HOME/$ca_cert",
"CertFile": "$HOME/$kube_cert",
"KeyFile": "$HOME/$kube_key"
}
EOF
chmod 0600 ~/.kubernetes_auth "${HOME}/${kube_cert}" \
"${HOME}/${kube_key}" "${HOME}/${ca_cert}"
)
}
# Delete a kubernetes cluster
function kube-down {
govc vm.destroy ${MASTER_NAME} &
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
govc vm.destroy ${MINION_NAMES[i]} &
done
wait
}
# Update a kubernetes cluster with latest source
function kube-push {
verify-ssh-prereqs
find-release-tars
detect-master
upload-server-tars
(
echo "#! /bin/bash"
echo "cd /home/kube/cache/kubernetes-install"
echo "readonly SERVER_BINARY_TAR='${SERVER_BINARY_TAR##*/}'"
echo "readonly SALT_TAR='${SALT_TAR##*/}'"
grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/install-release.sh"
echo "echo Executing configuration"
echo "sudo salt '*' mine.update"
echo "sudo salt --force-color '*' state.highstate"
) | kube-ssh "${KUBE_MASTER_IP}"
get-password
echo
echo "Kubernetes cluster is running. The master is running at:"
echo
echo " https://${KUBE_MASTER_IP}"
echo
echo "The user name and password to use is located in ~/.kubernetes_auth."
echo
}
# Execute prior to running tests to build a release if required for env
function test-build-release {
echo "TODO"
}
# Execute prior to running tests to initialize required structure
function test-setup {
echo "TODO"
}
# Execute after running tests to perform any required clean-up
function test-teardown {
echo "TODO"
}
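The `kube-push` and `kube-down` functions above are the provider hooks behind the generic cluster scripts. A hedged sketch of the update and teardown flow once a cluster is running (assumes `KUBERNETES_PROVIDER=vsphere` is already exported):

```sh
cluster/kube-push.sh   # re-uploads the release tars and re-runs highstate on '*'
cluster/kube-down.sh   # destroys the master and all minion VMs via govc vm.destroy
```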

View File

@ -1,10 +1,10 @@
# WARNING
These instructions are broken at git HEAD. Please either:
* Sync back to `v0.3` with `git checkout v0.3`
* Download a [snapshot of `v0.3`](https://github.com/GoogleCloudPlatform/kubernetes/archive/v0.3.tar.gz)
## Getting started with vSphere
The example below creates a Kubernetes cluster with 4 worker node Virtual
Machines and a master Virtual Machine (i.e. 5 VMs in your cluster). This
cluster is set up and controlled from your workstation (or wherever you find it
convenient).
### Prerequisites
1. You need administrator credentials to an ESXi machine or vCenter instance.
@ -23,15 +23,7 @@ These instructions are broken at git HEAD. Please either:
go get github.com/vmware/govmomi/govc
```
5. Install godep (optional, only required when modifying package dependencies). [Instructions here](https://github.com/GoogleCloudPlatform/kubernetes#installing-godep)
6. Get the Kubernetes source:
```sh
mkdir -p $GOPATH/src/github.com/GoogleCloudPlatform
git clone https://github.com/GoogleCloudPlatform/kubernetes.git
cd kubernetes
```
5. Get or build a [binary release](binary_release.md)
### Setup
@ -46,7 +38,7 @@ gzip -d kube.vmdk.gz
Upload this VMDK to your vSphere instance:
```sh
export GOVC_URL='https://user:pass@hostname/sdk'
export GOVC_URL='user:pass@hostname'
export GOVC_INSECURE=1 # If the host above uses a self-signed cert
export GOVC_DATASTORE='target datastore'
export GOVC_RESOURCE_POOL='resource pool or cluster with access to datastore'
@ -63,18 +55,13 @@ govc datastore.ls ./kube/
Take a look at the file `cluster/vsphere/config-common.sh` and fill in the required
parameters. The guest login for the image that you imported is `kube:kube`.
Now, let's continue with deploying Kubernetes:
### Starting a cluster
Now, let's continue with deploying Kubernetes.
This process takes roughly 10 minutes.
```sh
cd kubernetes
# Build source
hack/build-go.sh
# Build a release (argument is the instance prefix)
release/build-release.sh kubernetes
# Deploy Kubernetes (takes ~5 minutes, provided everything works out)
cd kubernetes # Extracted binary release OR repository root
export KUBERNETES_PROVIDER=vsphere
cluster/kube-up.sh
```
@ -84,3 +71,10 @@ Engine. Once you have successfully reached this point, your vSphere Kubernetes
deployment works just as any other one!
**Enjoy!**
### Extra: debugging deployment failure
The output of `kube-up.sh` displays the IP addresses of the VMs it deploys. You
can log into any VM as the `kube` user to poke around and figure out what is
going on (your SSH key authorizes you automatically; otherwise, log in with the
password `kube`).
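For example, the start scripts pushed by `kube-run` leave their output next to themselves in `/tmp`, so a quick look there usually shows where provisioning stopped (replace the address with one printed by `kube-up.sh`):

```sh
ssh kube@<vm-ip> 'tail -n 20 /tmp/*-start*.err'
```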

View File

@ -1,306 +0,0 @@
#!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions and constants for the local config.
# Use the config file specified in $KUBE_CONFIG_FILE, or default to
# config-default.sh.
source $(dirname ${BASH_SOURCE})/${KUBE_CONFIG_FILE-"config-default.sh"}
function detect-master {
KUBE_MASTER=${MASTER_NAME}
if [ -z "$KUBE_MASTER_IP" ]; then
KUBE_MASTER_IP=$(govc vm.ip ${MASTER_NAME})
fi
if [ -z "$KUBE_MASTER_IP" ]; then
echo "Could not detect Kubernetes master node. Make sure you've launched a cluster with 'kube-up.sh'"
exit 1
fi
echo "Found ${KUBE_MASTER} at ${KUBE_MASTER_IP}"
}
function detect-minions {
KUBE_MINION_IP_ADDRESSES=()
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
local minion_ip=$(govc vm.ip ${MINION_NAMES[$i]})
echo "Found ${MINION_NAMES[$i]} at ${minion_ip}"
KUBE_MINION_IP_ADDRESSES+=("${minion_ip}")
done
if [ -z "$KUBE_MINION_IP_ADDRESSES" ]; then
echo "Could not detect Kubernetes minion nodes. Make sure you've launched a cluster with 'kube-up.sh'"
exit 1
fi
}
# Verify prereqs on host machine
function verify-prereqs {
if [ "$(which govc)" == "" ]; then
echo "Can't find govc in PATH, please install and retry."
echo ""
echo " go install github.com/vmware/govmomi/govc"
echo ""
exit 1
fi
}
# Run command over ssh
function kube-ssh {
local host=$1
shift
ssh ${SSH_OPTS} kube@${host} "$*" 2> /dev/null
}
# Instantiate a generic kubernetes virtual machine (master or minion)
function kube-up-vm {
local vm_name=$1
local vm_memory=$2
local vm_cpu=$3
local vm_ip=
govc vm.create \
-debug \
-m ${vm_memory} \
-c ${vm_cpu} \
-disk ${DISK} \
-g ${GUEST_ID} \
-link=true \
${vm_name}
# Retrieve IP first, to confirm the guest operations agent is running.
vm_ip=$(govc vm.ip ${vm_name})
govc guest.mkdir \
-vm ${vm_name} \
-p \
/home/kube/.ssh
govc guest.upload \
-vm ${vm_name} \
-f \
${PUBLIC_KEY_FILE} \
/home/kube/.ssh/authorized_keys
}
# Instantiate a kubernetes cluster
function kube-up {
# Build up start up script for master
KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX)
trap "rm -rf ${KUBE_TEMP}" EXIT
get-password
python $(dirname $0)/../third_party/htpasswd/htpasswd.py -b -c ${KUBE_TEMP}/htpasswd $user $passwd
HTPASSWD=$(cat ${KUBE_TEMP}/htpasswd)
echo "Starting master VM (this can take a minute)..."
kube-up-vm ${MASTER_NAME} ${MASTER_MEMORY_MB-1024} ${MASTER_CPU-1}
# Prints master IP, so user can log in for debugging.
detect-master
echo
echo "Starting minion VMs (this can take a minute)..."
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
(
echo "#! /bin/bash"
echo "MY_NAME=${MINION_NAMES[$i]}"
grep -v "^#" $(dirname $0)/vsphere/templates/hostname.sh
echo "MASTER_NAME=${MASTER_NAME}"
echo "MASTER_IP=${KUBE_MASTER_IP}"
echo "MINION_IP_RANGE=${MINION_IP_RANGES[$i]}"
grep -v "^#" $(dirname $0)/vsphere/templates/salt-minion.sh
) > ${KUBE_TEMP}/minion-start-${i}.sh
(
kube-up-vm ${MINION_NAMES[$i]} ${MINION_MEMORY_MB-1024} ${MINION_CPU-1}
MINION_IP=$(govc vm.ip ${MINION_NAMES[$i]})
govc guest.upload \
-vm ${MINION_NAMES[$i]} \
-perm 0700 \
-f \
${KUBE_TEMP}/minion-start-${i}.sh \
/home/kube/minion-start.sh
# Kickstart start script
kube-ssh ${MINION_IP} "nohup sudo ~/minion-start.sh < /dev/null 1> minion-start.out 2> minion-start.err &"
) &
done
FAIL=0
for job in `jobs -p`
do
wait $job || let "FAIL+=1"
done
if (( $FAIL != 0 )); then
echo "${FAIL} commands failed. Exiting."
exit 2
fi
# Print minion IPs, so user can log in for debugging.
detect-minions
echo
# Continue provisioning the master.
(
echo "#! /bin/bash"
echo "MY_NAME=${MASTER_NAME}"
grep -v "^#" $(dirname $0)/vsphere/templates/hostname.sh
echo "MASTER_NAME=${MASTER_NAME}"
echo "MASTER_HTPASSWD='${HTPASSWD}'"
grep -v "^#" $(dirname $0)/vsphere/templates/install-release.sh
grep -v "^#" $(dirname $0)/vsphere/templates/salt-master.sh
) > ${KUBE_TEMP}/master-start.sh
govc guest.upload \
-vm ${MASTER_NAME} \
-perm 0700 \
-f \
${KUBE_TEMP}/master-start.sh \
/home/kube/master-start.sh
govc guest.upload \
-vm ${MASTER_NAME} \
-f \
./_output/release/master-release.tgz \
/home/kube/master-release.tgz
# Kickstart start script
kube-ssh ${KUBE_MASTER_IP} "nohup sudo ~/master-start.sh < /dev/null 1> master-start.out 2> master-start.err &"
echo "Waiting for cluster initialization."
echo
echo " This will continually check to see if the API for kubernetes is reachable."
echo " This might loop forever if there was some uncaught error during start up."
echo
until $(curl --insecure --user ${user}:${passwd} --max-time 5 \
--fail --output /dev/null --silent https://${KUBE_MASTER_IP}/api/v1beta1/pods); do
printf "."
sleep 2
done
echo "Kubernetes cluster created."
echo
echo "Sanity checking cluster..."
sleep 5
# Don't bail on errors, we want to be able to print some info.
set +e
# Basic sanity checking
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
# Make sure docker is installed
kube-ssh ${KUBE_MINION_IP_ADDRESSES[$i]} which docker > /dev/null
if [ "$?" != "0" ]; then
echo "Docker failed to install on ${MINION_NAMES[$i]}. Your cluster is unlikely to work correctly."
echo "Please run ./cluster/kube-down.sh and re-create the cluster. (sorry!)"
exit 1
fi
done
echo
echo "Kubernetes cluster is running. The master is running at:"
echo
echo " https://${KUBE_MASTER_IP}"
echo
echo "The user name and password to use is located in ~/.kubernetes_auth."
echo
echo "Security note: The server above uses a self signed certificate."
echo "This is subject to \"Man in the middle\" type attacks."
echo
}
# Delete a kubernetes cluster
function kube-down {
govc vm.destroy ${MASTER_NAME} &
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
govc vm.destroy ${MINION_NAMES[i]} &
done
wait
}
# Update a kubernetes cluster with latest source
function kube-push {
detect-master
govc guest.upload \
-vm ${MASTER_NAME} \
-f \
./_output/release/master-release.tgz \
/home/kube/master-release.tgz
(
grep -v "^#" $(dirname $0)/vsphere/templates/install-release.sh
echo "echo Executing configuration"
echo "sudo salt '*' mine.update"
echo "sudo salt --force-color '*' state.highstate"
) | kube-ssh ${KUBE_MASTER_IP} bash
get-password
echo
echo "Kubernetes cluster is updated. The master is running at:"
echo
echo " https://${KUBE_MASTER_IP}"
echo
echo "The user name and password to use is located in ~/.kubernetes_auth."
echo
}
# Execute prior to running tests to build a release if required for env
function test-build-release {
echo "TODO"
}
# Execute prior to running tests to initialize required structure
function test-setup {
echo "TODO"
}
# Execute after running tests to perform any required clean-up
function test-teardown {
echo "TODO"
}
# Set the {user} and {password} environment values required to interact with provider
function get-password {
file=${HOME}/.kubernetes_auth
if [ -e ${file} ]; then
user=$(cat $file | python -c 'import json,sys;print(json.load(sys.stdin)["User"])')
passwd=$(cat $file | python -c 'import json,sys;print(json.load(sys.stdin)["Password"])')
return
fi
user=admin
passwd=$(python -c 'import string,random; print("".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16)))')
# Store password for reuse.
cat << EOF > ~/.kubernetes_auth
{
"User": "$user",
"Password": "$passwd"
}
EOF
chmod 0600 ~/.kubernetes_auth
}