Rework Vagrant cluster setup.

* Have a single config file that mirrors other cluster providers
* Warn users not to use 'vagrant up' directly
* Allow 'extra' parameters to the Docker daemon (see the usage sketch below). Fixes #2685
* Renumber things so that they are more sane. Master/minions are 10.245.1.x, container subnets are 10.246.x.1/24, and the portal is 10.247.0.0/16
Joe Beda
2014-12-12 11:08:22 -08:00
parent ff305003f0
commit 428aeacf6d
10 changed files with 214 additions and 168 deletions
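
As a usage sketch of the new hook (the EXTRA_DOCKER_OPTS name comes from the util.sh change below; the registry flag value and address are illustrative only, not part of this commit):

    # Pass extra flags through to each minion's Docker daemon.
    # EXTRA_DOCKER_OPTS is picked up by create-provision-scripts in
    # cluster/vagrant/util.sh and lands in OPTIONS= in /etc/sysconfig/docker.
    export KUBERNETES_PROVIDER=vagrant
    export EXTRA_DOCKER_OPTS="--insecure-registry myregistry.local:5000"  # hypothetical registry
    cluster/kube-up.sh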

View File

@@ -16,31 +16,43 @@
 ## Contains configuration values for interacting with the Vagrant cluster

-# NUMBER OF MINIONS IN THE CLUSTER
-NUM_MINIONS=${KUBERNETES_NUM_MINIONS-"3"}
+# Number of minions in the cluster
+NUM_MINIONS=${NUM_MINIONS-"3"}
+export NUM_MINIONS

-# IP LOCATIONS FOR INTERACTING WITH THE MASTER
-export KUBE_MASTER_IP="10.245.1.2"
+# The IP of the master
+export MASTER_IP="10.245.1.2"

-INSTANCE_PREFIX=kubernetes
-MASTER_NAME="${INSTANCE_PREFIX}-master"
-MASTER_TAG="${INSTANCE_PREFIX}-master"
-MINION_TAG="${INSTANCE_PREFIX}-minion"
-# Unable to use hostnames yet because DNS is not in cluster, so we revert external look-up name to use the minion IP
-#MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}}))
+export INSTANCE_PREFIX=kubernetes
+export MASTER_NAME="${INSTANCE_PREFIX}-master"

-# IP LOCATIONS FOR INTERACTING WITH THE MINIONS
-MINION_IP_BASE="10.245.2."
+# Map out the IPs, names and container subnets of each minion
+export MINION_IP_BASE="10.245.1."
+MINION_CONTAINER_SUBNET_BASE="10.246"
+CONTAINER_SUBNET="${MINION_CONTAINER_SUBNET_BASE}.0.0/16"
 for ((i=0; i < NUM_MINIONS; i++)) do
-  KUBE_MINION_IP_ADDRESSES[$i]="${MINION_IP_BASE}$[$i+2]"
-  MINION_IP[$i]="${MINION_IP_BASE}$[$i+2]"
-  MINION_NAMES[$i]="${MINION_IP[$i]}"
-  VAGRANT_MINION_NAMES[$i]="minion-$[$i+1]"
+  MINION_IPS[$i]="${MINION_IP_BASE}$((i+3))"
+  MINION_NAMES[$i]="${INSTANCE_PREFIX}-minion-$((i+1))"
+  MINION_CONTAINER_SUBNETS[$i]="${MINION_CONTAINER_SUBNET_BASE}.${i}.1/24"
+  MINION_CONTAINER_ADDRS[$i]="${MINION_CONTAINER_SUBNET_BASE}.${i}.1"
+  MINION_CONTAINER_NETMASKS[$i]="255.255.255.0"
+  VAGRANT_MINION_NAMES[$i]="minion-$((i+1))"
 done

+PORTAL_NET=10.247.0.0/16
+
+# Since this isn't exposed on the network, default to a simple user/passwd
+MASTER_USER=vagrant
+MASTER_PASSWD=vagrant
+
+# Optional: Install node monitoring.
+ENABLE_NODE_MONITORING=true
+
+# Optional: Enable node logging.
+ENABLE_NODE_LOGGING=true
+LOGGING_DESTINATION=elasticsearch
+
+# Extra options to set on the Docker command line. This is useful for setting
+# --insecure-registry for local registries.
+DOCKER_OPTS=""
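
For reference, the loop above works out to the following layout under the default NUM_MINIONS=3; this is just the committed arithmetic replayed in isolation:

    MINION_IP_BASE="10.245.1."
    MINION_CONTAINER_SUBNET_BASE="10.246"
    for ((i=0; i < 3; i++)) do
      echo "minion-$((i+1)): node IP ${MINION_IP_BASE}$((i+3)), container subnet ${MINION_CONTAINER_SUBNET_BASE}.${i}.1/24"
    done
    # minion-1: node IP 10.245.1.3, container subnet 10.246.0.1/24
    # minion-2: node IP 10.245.1.4, container subnet 10.246.1.1/24
    # minion-3: node IP 10.245.1.5, container subnet 10.246.2.1/24
    # (master is 10.245.1.2; the portal range is 10.247.0.0/16)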

View File

@@ -59,47 +59,47 @@ cd "${KUBE_ROOT}"
 echo All verbose output will be redirected to $logfile, use --logfile option to change.
 printf "Start the cluster with 2 minions .. "
-export KUBERNETES_NUM_MINIONS=2
+export NUM_MINIONS=2
 export KUBERNETES_PROVIDER=vagrant
-(cluster/kube-up.sh &>> $logfile) || true
+(cluster/kube-up.sh >>"$logfile" 2>&1) || true
 echoOK $?
 printf "Check if minion-1 can reach kubernetes master .. "
-vagrant ssh minion-1 -- ping -c 10 kubernetes-master &>> $logfile
+vagrant ssh minion-1 -- ping -c 10 kubernetes-master >>"$logfile" 2>&1
 echoOK $?
 printf "Check if minion-2 can reach kubernetes master .. "
-vagrant ssh minion-2 -- ping -c 10 kubernetes-master &>> $logfile
+vagrant ssh minion-2 -- ping -c 10 kubernetes-master >>"$logfile" 2>&1
 echoOK $?
 printf "Pull an image that runs a web server on minion-1 .. "
-vagrant ssh minion-1 -- 'sudo docker pull dockerfile/nginx' &>> $logfile
+vagrant ssh minion-1 -- 'sudo docker pull kubernetes/serve_hostname' >>"$logfile" 2>&1
 echoOK $?
 printf "Pull an image that runs a web server on minion-2 .. "
-vagrant ssh minion-2 -- 'sudo docker pull dockerfile/nginx' &>> $logfile
+vagrant ssh minion-2 -- 'sudo docker pull kubernetes/serve_hostname' >>"$logfile" 2>&1
 echoOK $?
 printf "Run the server on minion-1 .. "
-vagrant ssh minion-1 -- sudo docker run -d dockerfile/nginx &>> $logfile
+vagrant ssh minion-1 -- sudo docker run -d kubernetes/serve_hostname >>"$logfile" 2>&1
 echoOK $?
 printf "Run the server on minion-2 .. "
-vagrant ssh minion-2 -- sudo docker run -d dockerfile/nginx &>> $logfile
+vagrant ssh minion-2 -- sudo docker run -d kubernetes/serve_hostname >>"$logfile" 2>&1
 echoOK $?
 printf "Run ping from minion-1 to docker bridges and to the containers on both minions .. "
-vagrant ssh minion-1 -- 'ping -c 20 10.244.1.1 && ping -c 20 10.244.2.1 && ping -c 20 10.244.1.3 && ping -c 20 10.244.2.3' &>> $logfile
+vagrant ssh minion-1 -- 'ping -c 20 10.246.0.1 && ping -c 20 10.246.1.1 && ping -c 20 10.246.0.2 && ping -c 20 10.246.1.2' >>"$logfile" 2>&1
 echoOK $?
 printf "Same pinch from minion-2 .. "
-vagrant ssh minion-2 -- 'ping -c 20 10.244.1.1 && ping -c 20 10.244.2.1 && ping -c 20 10.244.1.3 && ping -c 20 10.244.2.3' &>> $logfile
+vagrant ssh minion-2 -- 'ping -c 20 10.246.0.1 && ping -c 20 10.246.1.1 && ping -c 20 10.246.0.2 && ping -c 20 10.246.1.2' >>"$logfile" 2>&1
 echoOK $?
 printf "tcp check, curl to both the running webservers from minion-1 .. "
-vagrant ssh minion-1 -- 'curl 10.244.1.3:80 && curl 10.244.2.3:80' &>> $logfile
+vagrant ssh minion-1 -- 'curl -sS 10.246.0.2:9376 && curl -sS 10.246.1.2:9376' >>"$logfile" 2>&1
 echoOK $?
 printf "tcp check, curl to both the running webservers from minion-2 .. "
-vagrant ssh minion-2 -- 'curl 10.244.1.3:80 && curl 10.244.2.3:80' &>> $logfile
+vagrant ssh minion-2 -- 'curl -sS 10.246.0.2:9376 && curl -sS 10.246.1.2:9376' >>"$logfile" 2>&1
 echoOK $?
 printf "All good, destroy the cluster .. "
-vagrant destroy -f &>> $logfile
+vagrant destroy -f >>"$logfile" 2>&1
 echoOK $?
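
The redirection rewrite is behavior-preserving; the two forms below are equivalent under bash 4+, with the new one also quoting $logfile (some_command stands in for any of the checks above):

    some_command &>> $logfile         # bash-only shorthand: append stdout and stderr
    some_command >>"$logfile" 2>&1    # portable spelling of the same redirection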

View File

@@ -1,37 +0,0 @@
-#!/bin/bash
-
-# Copyright 2014 Google Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Passed as arguments to provisioning from Vagrantfile
-MASTER_IP=$1
-NUM_MINIONS=$2
-MINION_IPS=$3
-
-INSTANCE_PREFIX=kubernetes
-MASTER_NAME="${INSTANCE_PREFIX}-master"
-MASTER_TAG="${INSTANCE_PREFIX}-master"
-MINION_TAG="${INSTANCE_PREFIX}-minion"
-MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}}))
-MINION_IP_RANGES=($(eval echo "10.245.{2..${NUM_MINIONS}}.2/24"))
-MINION_SCOPES=""
-
-# simplified setup for local vagrant 2 node cluster
-MASTER_USER=vagrant
-MASTER_PASSWD=vagrant
-
-# Location to hold temp files for provision process
-KUBE_TEMP=/var/kube-temp
-PORTAL_NET=10.0.0.0/16

View File

@@ -17,9 +17,6 @@
 # exit on any error
 set -e

-KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
-source "${KUBE_ROOT}/cluster/vagrant/provision-config.sh"
-
 function release_not_found() {
   echo "It looks as if you don't have a compiled version of Kubernetes. If you" >&2
   echo "are running from a clone of the git repo, please run ./build/release.sh." >&2

@@ -50,10 +47,9 @@ fi
 # Setup hosts file to support ping by hostname to each minion in the cluster from apiserver
-minion_ip_array=(${MINION_IPS//,/ })
 for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
   minion=${MINION_NAMES[$i]}
-  ip=${minion_ip_array[$i]}
+  ip=${MINION_IPS[$i]}
   if [ ! "$(cat /etc/hosts | grep $minion)" ]; then
     echo "Adding $minion to hosts file"
     echo "$ip $minion" >> /etc/hosts

@@ -109,27 +105,39 @@ cat <<EOF >/etc/salt/master.d/salt-output.conf
 # Minimize the amount of output to terminal
 state_verbose: False
 state_output: mixed
+log_level: debug
+log_level_logfile: debug
 EOF

+cat <<EOF >/etc/salt/minion.d/log-level-debug.conf
+log_level: debug
+log_level_logfile: debug
+EOF
+
 # Generate and distribute a shared secret (bearer token) to
 # apiserver and kubelet so that kubelet can authenticate to
 # apiserver to send events.
-kubelet_token=$(cat /dev/urandom | base64 | tr -d "=+/" | dd bs=32 count=1 2> /dev/null)
-mkdir -p /srv/salt-overlay/salt/kube-apiserver
-known_tokens_file="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv"
-(umask u=rw,go= ; echo "$kubelet_token,kubelet,kubelet" > $known_tokens_file)
+if [[ ! -f "${known_tokens_file}" ]]; then
+  kubelet_token=$(cat /dev/urandom | base64 | tr -d "=+/" | dd bs=32 count=1 2> /dev/null)

-mkdir -p /srv/salt-overlay/salt/kubelet
-kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth"
-(umask u=rw,go= ; echo "{\"BearerToken\": \"$kubelet_token\", \"Insecure\": true }" > $kubelet_auth_file)
+  mkdir -p /srv/salt-overlay/salt/kube-apiserver
+  known_tokens_file="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv"
+  (umask u=rw,go= ; echo "$kubelet_token,kubelet,kubelet" > $known_tokens_file)
+
+  mkdir -p /srv/salt-overlay/salt/kubelet
+  kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth"
+  (umask u=rw,go= ; echo "{\"BearerToken\": \"$kubelet_token\", \"Insecure\": true }" > $kubelet_auth_file)
+fi

 # Configure nginx authorization
-mkdir -p "$KUBE_TEMP"
 mkdir -p /srv/salt-overlay/salt/nginx
-python "${KUBE_ROOT}/third_party/htpasswd/htpasswd.py" -b -c "${KUBE_TEMP}/htpasswd" "$MASTER_USER" "$MASTER_PASSWD"
-MASTER_HTPASSWD=$(cat "${KUBE_TEMP}/htpasswd")
-echo $MASTER_HTPASSWD > /srv/salt-overlay/salt/nginx/htpasswd
+if [[ ! -f /srv/salt-overlay/salt/nginx/htpasswd ]]; then
+  python "${KUBE_ROOT}/third_party/htpasswd/htpasswd.py" \
+    -b -c "/srv/salt-overlay/salt/nginx/htpasswd" \
+    "$MASTER_USER" "$MASTER_PASSWD"
+fi

 echo "Running release install script"
 rm -rf /kube-install

@@ -141,7 +149,7 @@ pushd /kube-install
 popd

 # we will run provision to update code each time we test, so we do not want to do salt installs each time
-if ! which salt-master >/dev/null 2>&1; then
+if ! which salt-master &>/dev/null; then

   # Configure the salt-api
   cat <<EOF >/etc/salt/master.d/salt-api.conf

@@ -173,7 +181,6 @@ EOF
 # enabling the service (which is not an error) from being printed to stderr.
 SYSTEMD_LOG_LEVEL=notice systemctl enable salt-api
 systemctl start salt-api
-
 fi

 if ! which salt-minion >/dev/null 2>&1; then

@@ -186,5 +193,5 @@ else
   # set up to run highstate as new minions join for the first time.
   echo "Executing configuration"
   salt '*' mine.update
-  salt --force-color '*' state.highstate
+  salt --show-timeout --force-color '*' state.highstate
 fi
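
The token pipeline guarded by the new if-block can be sanity-checked on its own; this is the committed command run in isolation:

    kubelet_token=$(cat /dev/urandom | base64 | tr -d "=+/" | dd bs=32 count=1 2> /dev/null)
    echo "${#kubelet_token}"  # 32: base64 of random bytes, padding/symbols stripped, first 32 chars kept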

View File

@@ -16,10 +16,6 @@
 # exit on any error
 set -e

-KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
-source "${KUBE_ROOT}/cluster/vagrant/provision-config.sh"
-
-MINION_IP=$4
-
 # Setup hosts file to support ping by hostname to master
 if [ ! "$(cat /etc/hosts | grep $MASTER_NAME)" ]; then

@@ -28,10 +24,9 @@ if [ ! "$(cat /etc/hosts | grep $MASTER_NAME)" ]; then
 fi

 # Setup hosts file to support ping by hostname to each minion in the cluster
-minion_ip_array=(${MINION_IPS//,/ })
 for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
   minion=${MINION_NAMES[$i]}
-  ip=${minion_ip_array[$i]}
+  ip=${MINION_IPS[$i]}
   if [ ! "$(cat /etc/hosts | grep $minion)" ]; then
     echo "Adding $minion to hosts file"
     echo "$ip $minion" >> /etc/hosts

@@ -44,6 +39,11 @@ cat <<EOF >/etc/salt/minion.d/master.conf
 master: '$(echo "$MASTER_NAME" | sed -e "s/'/''/g")'
 EOF

+cat <<EOF >/etc/salt/minion.d/log-level-debug.conf
+log_level: debug
+log_level_logfile: debug
+EOF
+
 # Our minions will have a pool role to distinguish them from the master.
 cat <<EOF >/etc/salt/minion.d/grains.conf
 grains:

@@ -56,7 +56,7 @@ grains:
   roles:
     - kubernetes-pool
     - kubernetes-pool-vagrant
-  cbr-cidr: '$(echo "$MINION_IP_RANGE" | sed -e "s/'/''/g")'
+  cbr-cidr: '$(echo "$CONTAINER_SUBNET" | sed -e "s/'/''/g")'
   minion_ip: '$(echo "$MINION_IP" | sed -e "s/'/''/g")'
 EOF

@@ -64,7 +64,8 @@ EOF
 if ! which salt-minion >/dev/null 2>&1; then
   # Install Salt
   curl -sS -L --connect-timeout 20 --retry 6 --retry-delay 10 https://bootstrap.saltstack.com | sh -s
+else
+  # Sometimes the minion gets wedged when it comes up along with the master.
+  # Restarting it here un-wedges it.
+  systemctl restart salt-minion.service
 fi
-
-# run the networking setup
-"${KUBE_ROOT}/cluster/vagrant/provision-network.sh" $@

View File

@@ -14,20 +14,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

 # exit on any error
 set -e

-KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
-source "${KUBE_ROOT}/cluster/vagrant/provision-config.sh"
-
-MINION_IP=$4
-MINION_ID=$5
-
 DOCKER_BRIDGE=kbr0
 OVS_SWITCH=obr0
 GRE_TUNNEL_BASE=gre
-BRIDGE_BASE=10.244
-BRIDGE_ADDRESS=${BRIDGE_BASE}.${MINION_ID}.1
-BRIDGE_NETWORK=${BRIDGE_ADDRESS}/24
-BRIDGE_NETMASK=255.255.255.0
 NETWORK_CONF_PATH=/etc/sysconfig/network-scripts/
 POST_NETWORK_SCRIPT_DIR=/kubernetes-vagrant
 POST_NETWORK_SCRIPT=${POST_NETWORK_SCRIPT_DIR}/network_closure.sh

@@ -42,8 +31,8 @@ DEVICE=${DOCKER_BRIDGE}
 ONBOOT=yes
 TYPE=Bridge
 BOOTPROTO=static
-IPADDR=${BRIDGE_ADDRESS}
-NETMASK=${BRIDGE_NETMASK}
+IPADDR=${MINION_CONTAINER_ADDR}
+NETMASK=${MINION_CONTAINER_NETMASK}
 STP=yes
 EOF

@@ -59,10 +48,8 @@ BRIDGE=${DOCKER_BRIDGE}
 EOF

 # now loop through all other minions and create persistent gre tunnels
-MINION_IPS=$3
-MINION_IP_ARRAY=(`echo ${MINION_IPS} | tr "," "\n"`)
 GRE_NUM=0
-for remote_ip in "${MINION_IP_ARRAY[@]}"
+for remote_ip in "${MINION_IPS[@]}"
 do
   if [ "${remote_ip}" == "${MINION_IP}" ]; then
     continue

@@ -82,8 +69,8 @@ EOF
 done

 # add ip route rules such that all pod traffic flows through docker bridge and consequently to the gre tunnels
-cat <<EOF > /${NETWORK_CONF_PATH}route-${DOCKER_BRIDGE}
-${BRIDGE_BASE}.0.0/16 dev ${DOCKER_BRIDGE} scope link src ${BRIDGE_ADDRESS}
+cat <<EOF > ${NETWORK_CONF_PATH}route-${DOCKER_BRIDGE}
+${CONTAINER_SUBNET} dev ${DOCKER_BRIDGE} scope link src ${MINION_CONTAINER_ADDR}
 EOF

 # generate the post-configure script to be called by salt as cmd.wait
@@ -92,7 +79,7 @@ cat <<EOF > ${POST_NETWORK_SCRIPT}
 set -e

-# Only do this operation once, otherwise, we get docker.servicee files output on disk, and the command line arguments get applied multiple times
+# Only do this operation once, otherwise, we get docker.service files output on disk, and the command line arguments get applied multiple times
 grep -q kbr0 /etc/sysconfig/docker || {
   # Stop docker before making these updates
   systemctl stop docker

@@ -106,7 +93,7 @@ grep -q kbr0 /etc/sysconfig/docker || {
   # modify the docker service file such that it uses the kube docker bridge and not its own
   #echo "OPTIONS=-b=kbr0 --iptables=false --selinux-enabled" > /etc/sysconfig/docker
-  echo "OPTIONS='-b=kbr0 --iptables=false --selinux-enabled'" >/etc/sysconfig/docker
+  echo "OPTIONS='-b=kbr0 --iptables=false --selinux-enabled ${DOCKER_OPTS}'" >/etc/sysconfig/docker
   systemctl daemon-reload
   systemctl restart docker.service
View File

@@ -20,15 +20,18 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
 source "${KUBE_ROOT}/cluster/vagrant/${KUBE_CONFIG_FILE-"config-default.sh"}"

 function detect-master () {
+  KUBE_MASTER_IP=$MASTER_IP
   echo "KUBE_MASTER_IP: ${KUBE_MASTER_IP}"
 }

 # Get minion IP addresses and store in KUBE_MINION_IP_ADDRESSES[]
 function detect-minions {
   echo "Minions already detected"
+  KUBE_MINION_IP_ADDRESSES=("${MINION_IPS[@]}")
 }

-# Verify prereqs on host machine
+# Verify prereqs on host machine  Also sets exports USING_KUBE_SCRIPTS=true so
+# that our Vagrantfile doesn't error out.
 function verify-prereqs {
   for x in vagrant virtualbox; do
     if ! which "$x" >/dev/null; then

@@ -36,37 +39,59 @@ function verify-prereqs {
       exit 1
     fi
   done
+  export USING_KUBE_SCRIPTS=true
 }

-# Instantiate a kubernetes cluster
-function kube-up {
-  get-password
-  vagrant up
-
-  local kube_cert=".kubecfg.vagrant.crt"
-  local kube_key=".kubecfg.vagrant.key"
-  local ca_cert=".kubernetes.vagrant.ca.crt"
-
-  (umask 077
-   vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.crt >"${HOME}/${kube_cert}" 2>/dev/null
-   vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.key >"${HOME}/${kube_key}" 2>/dev/null
-   vagrant ssh master -- sudo cat /srv/kubernetes/ca.crt >"${HOME}/${ca_cert}" 2>/dev/null
-
-   cat << EOF > ~/.kubernetes_vagrant_auth
-{
-  "User": "$KUBE_USER",
-  "Password": "$KUBE_PASSWORD",
-  "CAFile": "$HOME/$ca_cert",
-  "CertFile": "$HOME/$kube_cert",
-  "KeyFile": "$HOME/$kube_key"
+# Create a temp dir that'll be deleted at the end of this bash session.
+#
+# Vars set:
+#   KUBE_TEMP
+function ensure-temp-dir {
+  if [[ -z ${KUBE_TEMP-} ]]; then
+    export KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX)
+    trap 'rm -rf "${KUBE_TEMP}"' EXIT
+  fi
+}
-}
-EOF
-
-  chmod 0600 ~/.kubernetes_vagrant_auth "${HOME}/${kube_cert}" \
-    "${HOME}/${kube_key}" "${HOME}/${ca_cert}"
-  )

+# Create a set of provision scripts for the master and each of the minions
+function create-provision-scripts {
+  ensure-temp-dir
-  echo "Each machine instance has been created."
+
+  (
+    echo "#! /bin/bash"
+    echo "KUBE_ROOT=/vagrant"
+    echo "MASTER_NAME='${INSTANCE_PREFIX}-master'"
+    echo "MASTER_IP='${MASTER_IP}'"
+    echo "MINION_NAMES=(${MINION_NAMES[@]})"
+    echo "MINION_IPS=(${MINION_IPS[@]})"
+    echo "PORTAL_NET='${PORTAL_NET}'"
+    echo "MASTER_USER='${MASTER_USER}'"
+    echo "MASTER_PASSWD='${MASTER_PASSWD}'"
+    grep -v "^#" "${KUBE_ROOT}/cluster/vagrant/provision-master.sh"
+  ) > "${KUBE_TEMP}/master-start.sh"
+
+  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
+    (
+      echo "#! /bin/bash"
+      echo "MASTER_NAME='${MASTER_NAME}'"
+      echo "MASTER_IP='${MASTER_IP}'"
+      echo "MINION_NAMES=(${MINION_NAMES[@]})"
+      echo "MINION_IPS=(${MINION_IPS[@]})"
+      echo "MINION_IP='${MINION_IPS[$i]}'"
+      echo "MINION_ID='$i'"
+      echo "MINION_CONTAINER_ADDR='${MINION_CONTAINER_ADDRS[$i]}'"
+      echo "MINION_CONTAINER_NETMASK='${MINION_CONTAINER_NETMASKS[$i]}'"
+      echo "CONTAINER_SUBNET='${CONTAINER_SUBNET}'"
+      echo "DOCKER_OPTS='${EXTRA_DOCKER_OPTS-}'"
+      grep -v "^#" "${KUBE_ROOT}/cluster/vagrant/provision-minion.sh"
+      grep -v "^#" "${KUBE_ROOT}/cluster/vagrant/provision-network.sh"
+    ) > "${KUBE_TEMP}/minion-start-${i}.sh"
+  done
+}

+function verify-cluster {
+  echo "Each machine instance has been created/updated."
   echo "  Now waiting for the Salt provisioning process to complete on each machine."
   echo "  This can take some time based on your network, disk, and cpu speed."
   echo "  It is possible for an error to occur during Salt provision of cluster and this could loop forever."

@@ -110,13 +135,13 @@ EOF
   echo
   echo "Waiting for each minion to be registered with cloud provider"
-  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
-    local machine="${MINION_NAMES[$i]}"
+  for (( i=0; i<${#MINION_IPS[@]}; i++)); do
+    local machine="${MINION_IPS[$i]}"
     local count="0"
     until [[ "$count" == "1" ]]; do
       local minions
       minions=$("${KUBE_ROOT}/cluster/kubecfg.sh" -template '{{range.items}}{{.id}}:{{end}}' list minions)
-      count=$(echo $minions | grep -c "${MINION_NAMES[i]}") || {
+      count=$(echo $minions | grep -c "${MINION_IPS[i]}") || {
        printf "."
        sleep 2
        count="0"

@@ -124,17 +149,49 @@ EOF
     done
   done

   echo
-  echo "Kubernetes cluster created."
   echo
+  echo "Kubernetes cluster is running. The master is running at:"
   echo
-  echo "  https://${KUBE_MASTER_IP}"
+  echo "  https://${MASTER_IP}"
   echo
   echo "The user name and password to use is located in ~/.kubernetes_vagrant_auth."
   echo
 }

+# Instantiate a kubernetes cluster
+function kube-up {
+  get-password
+  create-provision-scripts
+
+  vagrant up
+
+  local kube_cert=".kubecfg.vagrant.crt"
+  local kube_key=".kubecfg.vagrant.key"
+  local ca_cert=".kubernetes.vagrant.ca.crt"
+
+  (umask 077
+   vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.crt >"${HOME}/${kube_cert}" 2>/dev/null
+   vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.key >"${HOME}/${kube_key}" 2>/dev/null
+   vagrant ssh master -- sudo cat /srv/kubernetes/ca.crt >"${HOME}/${ca_cert}" 2>/dev/null
+
+   cat <<EOF >"${HOME}/.kubernetes_vagrant_auth"
+{
+  "User": "$KUBE_USER",
+  "Password": "$KUBE_PASSWORD",
+  "CAFile": "$HOME/$ca_cert",
+  "CertFile": "$HOME/$kube_cert",
+  "KeyFile": "$HOME/$kube_key"
+}
+EOF
+
+   chmod 0600 ~/.kubernetes_vagrant_auth "${HOME}/${kube_cert}" \
+     "${HOME}/${kube_key}" "${HOME}/${ca_cert}"
+  )
+
+  verify-cluster
+}

 # Delete a kubernetes cluster
 function kube-down {
   vagrant destroy -f

@@ -142,6 +199,8 @@ function kube-down {
 # Update a kubernetes cluster with latest source
 function kube-push {
   get-password
+  create-provision-scripts
   vagrant provision
 }