Azure/Ubuntu/SaltStack support reinstated

This first reverts commit 8e8437dad8.
It also resolves conflicts with the docs on f334fc41 and with
https://github.com/kubernetes/kubernetes/pull/22231/commits, so that people
can switch between the two different setup methods by setting env variables.
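
For illustration, selecting this Azure path works the same way as the other
providers in get-kube.sh (a hypothetical invocation, not part of this commit):

    export KUBERNETES_PROVIDER=azure
    wget -q -O - https://get.k8s.io | bash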

Conflicts:
	cluster/get-kube.sh
	cluster/saltbase/salt/README.md
	cluster/saltbase/salt/kube-proxy/default
	cluster/saltbase/salt/top.sls
Authored by Weixu Zhuang on 2016-01-11 19:11:52 -08:00; committed by weixu
parent 076bf81b8b
commit 4523429b20
19 changed files with 1107 additions and 21 deletions

cluster/azure-legacy/.gitignore (new file, vendored)

@@ -0,0 +1,2 @@
_deployments
config-real.sh


@@ -0,0 +1,58 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
INSTANCE_PREFIX=kubernetes
AZ_LOCATION='West US'
TAG=testing
AZ_CS_PREFIX=kube
AZ_VNET=MyVnet
AZ_SUBNET=Subnet-1
AZ_IMAGE=b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04_1-LTS-amd64-server-20140927-en-us-30GB
AZ_CS="" # is set in azure/util.sh verify-prereqs
AZ_SSH_KEY=$HOME/.ssh/azure_rsa
AZ_SSH_CERT=$HOME/.ssh/azure.pem
NUM_MINIONS=4
MASTER_NAME="${INSTANCE_PREFIX}-master"
MASTER_TAG="${INSTANCE_PREFIX}-master"
MINION_TAG="${INSTANCE_PREFIX}-minion"
MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}}))
MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24"))
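# Note (hypothetical expansion, for illustration): with the defaults above
# (NUM_MINIONS=4), the two eval lines yield kubernetes-minion-1 ...
# kubernetes-minion-4 and 10.244.1.0/24 ... 10.244.4.0/24 respectively.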
MINION_SCOPES=""
SERVICE_CLUSTER_IP_RANGE="10.250.0.0/16" # formerly PORTAL_NET
# Optional: Install node logging
ENABLE_NODE_LOGGING=false
LOGGING_DESTINATION=elasticsearch # options: elasticsearch, gcp
# Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up.
ENABLE_CLUSTER_LOGGING=false
ELASTICSEARCH_LOGGING_REPLICAS=1
# Optional: Cluster monitoring to setup as part of the cluster bring up:
# none - No cluster monitoring setup
# influxdb - Heapster, InfluxDB, and Grafana
# google - Heapster, Google Cloud Monitoring, and Google Cloud Logging
ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}"
# Optional: Install Kubernetes UI
ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"
# Admission Controllers to invoke prior to persisting objects in cluster
ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota


@@ -0,0 +1,58 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Retry a download until we get it.
#
# $1 is the URL to download
download-or-bust() {
  local -r url="$1"
  local -r file="${url##*/}"
  rm -f "$file"
  until [[ -e "${file}" ]]; do
    curl --ipv4 -Lo "$file" --connect-timeout 20 --retry 6 --retry-delay 10 "$url"
    md5sum "$file"
  done
}
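# Usage sketch (hypothetical URL, for illustration):
#   download-or-bust "https://storage.googleapis.com/kubernetes-release/salt/salt-common_2014.1.13+ds-1~bpo70+1_all.deb"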
# Install salt from GCS. See README.md for instructions on how to update these
# debs.
#
# $1 If set to --master, also install the master
install-salt() {
  apt-get update

  mkdir -p /var/cache/salt-install
  cd /var/cache/salt-install

  TARS=(
    libzmq3_3.2.3+dfsg-1~bpo70~dst+1_amd64.deb
    python-zmq_13.1.0-1~bpo70~dst+1_amd64.deb
    salt-common_2014.1.13+ds-1~bpo70+1_all.deb
    salt-minion_2014.1.13+ds-1~bpo70+1_all.deb
  )
  if [[ ${1-} == '--master' ]]; then
    TARS+=(salt-master_2014.1.13+ds-1~bpo70+1_all.deb)
  fi
  URL_BASE="https://storage.googleapis.com/kubernetes-release/salt"

  for tar in "${TARS[@]}"; do
    download-or-bust "${URL_BASE}/${tar}"
    dpkg -i "${tar}"
  done

  # This will install any of the unmet dependencies from above.
  apt-get install -f -y
}
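# Usage sketch: the master startup script calls `install-salt --master`, while
# minions call plain `install-salt` (see salt-master.sh and salt-minion.sh below).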


@@ -0,0 +1,30 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Create the overlay files for the salt tree. We create these in a separate
# place so that we can blow away the rest of the salt configs on a kube-push and
# re-apply these.
mkdir -p /srv/salt-overlay/pillar
cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")'
node_instance_prefix: $NODE_INSTANCE_PREFIX
service_cluster_ip_range: $SERVICE_CLUSTER_IP_RANGE
admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")'
EOF
mkdir -p /srv/salt-overlay/salt/nginx
echo $MASTER_HTPASSWD > /srv/salt-overlay/salt/nginx/htpasswd
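# With the defaults from config-default.sh, the generated cluster-params.sls
# would look roughly like this (hypothetical rendering, for illustration):
#   instance_prefix: 'kubernetes'
#   node_instance_prefix: kubernetes-minion
#   service_cluster_ip_range: 10.250.0.0/16
#   admission_control: 'NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota'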


@@ -0,0 +1,35 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Download and install release
# This script assumes that the environment variable MASTER_RELEASE_TAR contains
# the release tar to download and unpack. It is meant to be pushed to the
# master and run.
echo "Downloading binary release tar ($SERVER_BINARY_TAR_URL)"
download-or-bust "$SERVER_BINARY_TAR_URL"
echo "Downloading binary release tar ($SALT_TAR_URL)"
download-or-bust "$SALT_TAR_URL"
echo "Unpacking Salt tree"
rm -rf kubernetes
tar xzf "${SALT_TAR_URL##*/}"
echo "Running release install script"
sudo kubernetes/saltbase/install.sh "${SERVER_BINARY_TAR_URL##*/}"


@@ -0,0 +1,68 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Prepopulate the name of the Master
mkdir -p /etc/salt/minion.d
echo "master: $MASTER_NAME" > /etc/salt/minion.d/master.conf
cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
  roles:
    - kubernetes-master
  cloud: azure
EOF
# Auto accept all keys from minions that try to join
mkdir -p /etc/salt/master.d
cat <<EOF >/etc/salt/master.d/auto-accept.conf
auto_accept: True
EOF
cat <<EOF >/etc/salt/master.d/reactor.conf
# React to new minions starting by running highstate on them.
reactor:
  - 'salt/minion/*/start':
    - /srv/reactor/highstate-new.sls
EOF
mkdir -p /srv/salt/nginx
echo $MASTER_HTPASSWD > /srv/salt/nginx/htpasswd
mkdir -p /etc/openvpn
umask=$(umask)
umask 0066
echo "$CA_CRT" > /etc/openvpn/ca.crt
echo "$SERVER_CRT" > /etc/openvpn/server.crt
echo "$SERVER_KEY" > /etc/openvpn/server.key
umask $umask
cat <<EOF >/etc/salt/minion.d/log-level-debug.conf
log_level: debug
log_level_logfile: debug
EOF
cat <<EOF >/etc/salt/master.d/log-level-debug.conf
log_level: debug
log_level_logfile: debug
EOF
install-salt --master
# Wait a few minutes and trigger another Salt run to better recover from
# any transient errors.
echo "Sleeping 180"
sleep 180
salt-call state.highstate || true


@@ -0,0 +1,57 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
mkdir -p /etc/openvpn
umask=$(umask)
umask 0066
echo "$CA_CRT" > /etc/openvpn/ca.crt
echo "$CLIENT_CRT" > /etc/openvpn/client.crt
echo "$CLIENT_KEY" > /etc/openvpn/client.key
umask $umask
# Prepopulate the name of the Master
mkdir -p /etc/salt/minion.d
echo "master: $MASTER_NAME" > /etc/salt/minion.d/master.conf
cat <<EOF >/etc/salt/minion.d/log-level-debug.conf
log_level: debug
log_level_logfile: debug
EOF
hostnamef=$(uname -n)
apt-get install -y ipcalc
netmask=$(ipcalc $MINION_IP_RANGE | grep Netmask | awk '{ print $2 }')
network=$(ipcalc $MINION_IP_RANGE | grep Address | awk '{ print $2 }')
cbrstring="$network $netmask"
# Our minions will have a pool role to distinguish them from the master.
cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
  roles:
    - kubernetes-pool
  cbr-cidr: $MINION_IP_RANGE
  cloud: azure
  hostnamef: $hostnamef
  cbr-string: $cbrstring
EOF
install-salt
# Wait a few minutes and trigger another Salt run to better recover from
# any transient errors.
echo "Sleeping 180"
sleep 180
salt-call state.highstate || true


@@ -0,0 +1,501 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions and constants for the local config.
# Use the config file specified in $KUBE_CONFIG_FILE, or default to
# config-default.sh.
set -e

# Resolve symlinks to find the real directory this script lives in.
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do
  DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
  SOURCE="$(readlink "$SOURCE")"
  [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE"
done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/azure-legacy/${KUBE_CONFIG_FILE-"config-default.sh"}"
source "${KUBE_ROOT}/cluster/common.sh"
function azure_call {
  local -a params=()
  local param
  # the '... in "$@"' is implicit on a for, so doesn't need to be stated.
  for param; do
    params+=("${param}")
  done
  local rc=0
  local stderr
  local count=0
  while [[ count -lt 10 ]]; do
    stderr=$(azure "${params[@]}" 2>&1 >&3) && break
    rc=$?
    if [[ "${stderr}" != *"getaddrinfo ENOTFOUND"* ]]; then
      break
    fi
    count=$(($count + 1))
  done 3>&1
  if [[ "${rc}" -ne 0 ]]; then
    echo "${stderr}" >&2
    return "${rc}"
  fi
}
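# Usage sketch: any azure-cli invocation can be wrapped to get the retry
# behavior, e.g. (as used in kube-up below):
#   azure_call vm endpoint create $MASTER_NAME 443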
function json_val () {
  python -c 'import json,sys;obj=json.load(sys.stdin);print obj'$1'';
}
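# Example (as used in upload-server-tars below): pull a key out of --json output.
#   stg_key=$(azure_call storage account keys list $AZ_STG --json | json_val '["primaryKey"]')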
# Verify prereqs
function verify-prereqs {
  if [[ -z "$(which azure)" ]]; then
    echo "Couldn't find azure in PATH"
    echo " please install with 'npm install azure-cli'"
    exit 1
  fi

  if [[ -z "$(azure_call account list | grep true)" ]]; then
    echo "Default azure account not set"
    echo " please set with 'azure account set'"
    exit 1
  fi

  account=$(azure_call account list | grep true)
  if which md5 > /dev/null 2>&1; then
    AZ_HSH=$(md5 -q -s "$account")
  else
    AZ_HSH=$(echo -n "$account" | md5sum)
  fi

  AZ_HSH=${AZ_HSH:0:7}
  AZ_STG=kube$AZ_HSH
  echo "==> AZ_STG: $AZ_STG"

  AZ_CS="$AZ_CS_PREFIX-$AZ_HSH"
  echo "==> AZ_CS: $AZ_CS"

  CONTAINER=kube-$TAG
  echo "==> CONTAINER: $CONTAINER"
}
# Create a temp dir that'll be deleted at the end of this bash session.
#
# Vars set:
#   KUBE_TEMP
function ensure-temp-dir {
  if [[ -z ${KUBE_TEMP-} ]]; then
    KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX)
    trap 'rm -rf "${KUBE_TEMP}"' EXIT
  fi
}
# Take the local tar files and upload them to Azure Storage. They will then be
# downloaded by the master as part of the start up script for the master.
#
# Assumed vars:
# SERVER_BINARY_TAR
# SALT_TAR
# Vars set:
# SERVER_BINARY_TAR_URL
# SALT_TAR_URL
function upload-server-tars() {
  SERVER_BINARY_TAR_URL=
  SALT_TAR_URL=

  echo "==> SERVER_BINARY_TAR: $SERVER_BINARY_TAR"
  echo "==> SALT_TAR: $SALT_TAR"

  echo "+++ Staging server tars to Azure Storage: $AZ_STG"
  local server_binary_url="${SERVER_BINARY_TAR##*/}"
  local salt_url="${SALT_TAR##*/}"

  SERVER_BINARY_TAR_URL="https://${AZ_STG}.blob.core.windows.net/$CONTAINER/$server_binary_url"
  SALT_TAR_URL="https://${AZ_STG}.blob.core.windows.net/$CONTAINER/$salt_url"

  echo "==> SERVER_BINARY_TAR_URL: $SERVER_BINARY_TAR_URL"
  echo "==> SALT_TAR_URL: $SALT_TAR_URL"

  echo "--> Checking storage exists..."
  if [[ -z "$(azure_call storage account show $AZ_STG 2>/dev/null | \
      grep data)" ]]; then
    echo "--> Creating storage..."
    azure_call storage account create -l "$AZ_LOCATION" $AZ_STG --type LRS
  fi

  echo "--> Getting storage key..."
  stg_key=$(azure_call storage account keys list $AZ_STG --json | \
    json_val '["primaryKey"]')

  echo "--> Checking storage container exists..."
  if [[ -z "$(azure_call storage container show -a $AZ_STG -k "$stg_key" \
      $CONTAINER 2>/dev/null | grep data)" ]]; then
    echo "--> Creating storage container..."
    azure_call storage container create \
      -a $AZ_STG \
      -k "$stg_key" \
      -p Blob \
      $CONTAINER
  fi

  echo "--> Checking server binary exists in the container..."
  if [[ -n "$(azure_call storage blob show -a $AZ_STG -k "$stg_key" \
      $CONTAINER $server_binary_url 2>/dev/null | grep data)" ]]; then
    echo "--> Deleting server binary in the container..."
    azure_call storage blob delete \
      -a $AZ_STG \
      -k "$stg_key" \
      $CONTAINER \
      $server_binary_url
  fi

  echo "--> Uploading server binary to the container..."
  azure_call storage blob upload \
    -a $AZ_STG \
    -k "$stg_key" \
    $SERVER_BINARY_TAR \
    $CONTAINER \
    $server_binary_url

  echo "--> Checking salt data exists in the container..."
  if [[ -n "$(azure_call storage blob show -a $AZ_STG -k "$stg_key" \
      $CONTAINER $salt_url 2>/dev/null | grep data)" ]]; then
    echo "--> Deleting salt data in the container..."
    azure_call storage blob delete \
      -a $AZ_STG \
      -k "$stg_key" \
      $CONTAINER \
      $salt_url
  fi

  echo "--> Uploading salt data to the container..."
  azure_call storage blob upload \
    -a $AZ_STG \
    -k "$stg_key" \
    $SALT_TAR \
    $CONTAINER \
    $salt_url
}
# Detect the information about the minions
#
# Assumed vars:
# MINION_NAMES
# ZONE
# Vars set:
#
function detect-minions () {
  if [[ -z "$AZ_CS" ]]; then
    verify-prereqs-local
  fi
  ssh_ports=($(eval echo "2200{1..$NUM_MINIONS}"))
  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
    MINION_NAMES[$i]=$(ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} $AZ_CS.cloudapp.net hostname -f)
  done
}
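# Example: with NUM_MINIONS=4 the eval above expands to
# ssh_ports=(22001 22002 22003 22004), one forwarded SSH endpoint per minion
# on the shared cloud service.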
# Detect the IP for the master
#
# Assumed vars:
# MASTER_NAME
# ZONE
# Vars set:
# KUBE_MASTER
# KUBE_MASTER_IP
function detect-master () {
  if [[ -z "$AZ_CS" ]]; then
    verify-prereqs-local
  fi
  KUBE_MASTER=${MASTER_NAME}
  KUBE_MASTER_IP="${AZ_CS}.cloudapp.net"
  echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)"
}
# Instantiate a kubernetes cluster
#
# Assumed vars
# KUBE_ROOT
# <Various vars set in config file>
function kube-up {
  # Make sure we have the tar files staged on Azure Storage
  find-release-tars
  upload-server-tars

  ensure-temp-dir

  gen-kube-basicauth
  python "${KUBE_ROOT}/third_party/htpasswd/htpasswd.py" \
    -b -c "${KUBE_TEMP}/htpasswd" "$KUBE_USER" "$KUBE_PASSWORD"
  local htpasswd
  htpasswd=$(cat "${KUBE_TEMP}/htpasswd")

  # Generate openvpn certs
  echo "--> Generating openvpn certs"
  echo 01 > ${KUBE_TEMP}/ca.srl
  openssl genrsa -out ${KUBE_TEMP}/ca.key
  openssl req -new -x509 -days 1095 \
    -key ${KUBE_TEMP}/ca.key \
    -out ${KUBE_TEMP}/ca.crt \
    -subj "/CN=openvpn-ca"

  openssl genrsa -out ${KUBE_TEMP}/server.key
  openssl req -new \
    -key ${KUBE_TEMP}/server.key \
    -out ${KUBE_TEMP}/server.csr \
    -subj "/CN=server"
  openssl x509 -req -days 1095 \
    -in ${KUBE_TEMP}/server.csr \
    -CA ${KUBE_TEMP}/ca.crt \
    -CAkey ${KUBE_TEMP}/ca.key \
    -CAserial ${KUBE_TEMP}/ca.srl \
    -out ${KUBE_TEMP}/server.crt

  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
    openssl genrsa -out ${KUBE_TEMP}/${MINION_NAMES[$i]}.key
    openssl req -new \
      -key ${KUBE_TEMP}/${MINION_NAMES[$i]}.key \
      -out ${KUBE_TEMP}/${MINION_NAMES[$i]}.csr \
      -subj "/CN=${MINION_NAMES[$i]}"
    openssl x509 -req -days 1095 \
      -in ${KUBE_TEMP}/${MINION_NAMES[$i]}.csr \
      -CA ${KUBE_TEMP}/ca.crt \
      -CAkey ${KUBE_TEMP}/ca.key \
      -CAserial ${KUBE_TEMP}/ca.srl \
      -out ${KUBE_TEMP}/${MINION_NAMES[$i]}.crt
  done

  # Build up start up script for master
  echo "--> Building up start up script for master"
  (
    echo "#!/bin/bash"
    echo "CA_CRT=\"$(cat ${KUBE_TEMP}/ca.crt)\""
    echo "SERVER_CRT=\"$(cat ${KUBE_TEMP}/server.crt)\""
    echo "SERVER_KEY=\"$(cat ${KUBE_TEMP}/server.key)\""
    echo "mkdir -p /var/cache/kubernetes-install"
    echo "cd /var/cache/kubernetes-install"
    echo "readonly MASTER_NAME='${MASTER_NAME}'"
    echo "readonly INSTANCE_PREFIX='${INSTANCE_PREFIX}'"
    echo "readonly NODE_INSTANCE_PREFIX='${INSTANCE_PREFIX}-minion'"
    echo "readonly SERVER_BINARY_TAR_URL='${SERVER_BINARY_TAR_URL}'"
    echo "readonly SALT_TAR_URL='${SALT_TAR_URL}'"
    echo "readonly MASTER_HTPASSWD='${htpasswd}'"
    echo "readonly SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'"
    echo "readonly ADMISSION_CONTROL='${ADMISSION_CONTROL:-}'"
    grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/common.sh"
    grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/create-dynamic-salt-files.sh"
    grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/download-release.sh"
    grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/salt-master.sh"
  ) > "${KUBE_TEMP}/master-start.sh"

  if [[ ! -f $AZ_SSH_KEY ]]; then
    ssh-keygen -f $AZ_SSH_KEY -N ''
  fi

  if [[ ! -f $AZ_SSH_CERT ]]; then
    openssl req -new -x509 -days 1095 -key $AZ_SSH_KEY -out $AZ_SSH_CERT \
      -subj "/CN=azure-ssh-key"
  fi

  if [[ -z "$(azure_call network vnet show "$AZ_VNET" 2>/dev/null | grep data)" ]]; then
    echo "Error: please create vnet $AZ_VNET with subnet $AZ_SUBNET before running kube-up" >&2
    exit 1
  fi

  echo "--> Starting VM"
  azure_call vm create \
    -w "$AZ_VNET" \
    -n $MASTER_NAME \
    -l "$AZ_LOCATION" \
    -t $AZ_SSH_CERT \
    -e 22000 -P \
    -d ${KUBE_TEMP}/master-start.sh \
    -b $AZ_SUBNET \
    $AZ_CS $AZ_IMAGE $USER

  ssh_ports=($(eval echo "2200{1..$NUM_MINIONS}"))

  # Build up start up script for minions
  echo "--> Building up start up script for minions"
  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
    (
      echo "#!/bin/bash"
      echo "MASTER_NAME='${MASTER_NAME}'"
      echo "CA_CRT=\"$(cat ${KUBE_TEMP}/ca.crt)\""
      echo "CLIENT_CRT=\"$(cat ${KUBE_TEMP}/${MINION_NAMES[$i]}.crt)\""
      echo "CLIENT_KEY=\"$(cat ${KUBE_TEMP}/${MINION_NAMES[$i]}.key)\""
      echo "MINION_IP_RANGE='${MINION_IP_RANGES[$i]}'"
      grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/common.sh"
      grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/salt-minion.sh"
    ) > "${KUBE_TEMP}/minion-start-${i}.sh"

    echo "--> Starting VM"
    azure_call vm create \
      -c -w "$AZ_VNET" \
      -n ${MINION_NAMES[$i]} \
      -l "$AZ_LOCATION" \
      -t $AZ_SSH_CERT \
      -e ${ssh_ports[$i]} -P \
      -d ${KUBE_TEMP}/minion-start-${i}.sh \
      -b $AZ_SUBNET \
      $AZ_CS $AZ_IMAGE $USER
  done

  echo "--> Creating endpoint"
  azure_call vm endpoint create $MASTER_NAME 443

  detect-master > /dev/null
  echo "==> KUBE_MASTER_IP: ${KUBE_MASTER_IP}"

  echo "Waiting for cluster initialization."
  echo
  echo " This will continually check to see if the API for kubernetes is reachable."
  echo " This might loop forever if there was some uncaught error during start"
  echo " up."
  echo

  until curl --insecure --user "${KUBE_USER}:${KUBE_PASSWORD}" --max-time 5 \
      --fail --output /dev/null --silent "https://${KUBE_MASTER_IP}/healthz"; do
    printf "."
    sleep 2
  done
  printf "\n"

  echo "Kubernetes cluster created."

  export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt"
  export KUBE_KEY="/tmp/$RANDOM-kubecfg.key"
  export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt"
  export CONTEXT="azure_${INSTANCE_PREFIX}"

  # TODO: generate ADMIN (and KUBELET) tokens and put those in the master's
  # config file. Distribute the same way the htpasswd is done.
  (
    umask 077
    ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
      sudo cat /srv/kubernetes/kubecfg.crt >"${KUBE_CERT}" 2>/dev/null
    ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
      sudo cat /srv/kubernetes/kubecfg.key >"${KUBE_KEY}" 2>/dev/null
    ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
      sudo cat /srv/kubernetes/ca.crt >"${CA_CERT}" 2>/dev/null

    create-kubeconfig
  )

  echo "Sanity checking cluster..."
  echo
  echo " This will continually check the minions to ensure docker is"
  echo " installed. This is usually a good indicator that salt has"
  echo " successfully provisioned. This might loop forever if there was"
  echo " some uncaught error during start up."
  echo

  # Basic sanity checking
  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
    # Make sure docker is installed
    echo "--> Making sure docker is installed on ${MINION_NAMES[$i]}."
    until ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} \
        $AZ_CS.cloudapp.net which docker > /dev/null 2>&1; do
      printf "."
      sleep 2
    done
  done

  # ensures KUBECONFIG is set
  get-kubeconfig-basicauth
  echo
  echo "Kubernetes cluster is running. The master is running at:"
  echo
  echo " https://${KUBE_MASTER_IP}"
  echo
  echo "The user name and password to use is located in ${KUBECONFIG}."
  echo
}
# Delete a kubernetes cluster
function kube-down {
  echo "Bringing down cluster"
  set +e
  azure_call vm delete $MASTER_NAME -b -q
  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
    azure_call vm delete ${MINION_NAMES[$i]} -b -q
  done
  wait
}
# Update a kubernetes cluster with latest source
#function kube-push {
# detect-project
# detect-master
# Make sure we have the tar files staged on Azure Storage
# find-release-tars
# upload-server-tars
# (
# echo "#! /bin/bash"
# echo "mkdir -p /var/cache/kubernetes-install"
# echo "cd /var/cache/kubernetes-install"
# echo "readonly SERVER_BINARY_TAR_URL='${SERVER_BINARY_TAR_URL}'"
# echo "readonly SALT_TAR_URL='${SALT_TAR_URL}'"
# grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/common.sh"
# grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/download-release.sh"
# echo "echo Executing configuration"
# echo "sudo salt '*' mine.update"
# echo "sudo salt --force-color '*' state.highstate"
# ) | gcutil ssh --project "$PROJECT" --zone "$ZONE" "$KUBE_MASTER" sudo bash
# get-kubeconfig-basicauth
# echo
# echo "Kubernetes cluster is running. The master is running at:"
# echo
# echo " https://${KUBE_MASTER_IP}"
# echo
# echo "The user name and password to use is located in ${KUBECONFIG:-$DEFAULT_KUBECONFIG}."
# echo
#}
# -----------------------------------------------------------------------------
# Cluster specific test helpers used from hack/e2e-test.sh
# Execute prior to running tests to build a release if required for env.
#
# Assumed Vars:
# KUBE_ROOT
function test-build-release {
  # Make a release
  "${KUBE_ROOT}/build/release.sh"
}

# SSH to a node by name ($1) and run a command ($2).
function ssh-to-node {
  local node="$1"
  local cmd="$2"
  ssh -o LogLevel=quiet "${node}" "${cmd}"
}

# Restart the kube-proxy on a node ($1)
function restart-kube-proxy {
  ssh-to-node "$1" "sudo /etc/init.d/kube-proxy restart"
}

# Restart the kube-apiserver on the master ($1)
function restart-apiserver {
  ssh-to-node "$1" "sudo /etc/init.d/kube-apiserver restart"
}


@@ -30,6 +30,8 @@
# * export KUBERNETES_PROVIDER=aws; wget -q -O - https://get.k8s.io | bash
# Libvirt (with CoreOS as a guest operating system)
# * export KUBERNETES_PROVIDER=libvirt-coreos; wget -q -O - https://get.k8s.io | bash
# Microsoft Azure
# * export KUBERNETES_PROVIDER=azure; wget -q -O - https://get.k8s.io | bash
# Vagrant (local virtual machines)
# * export KUBERNETES_PROVIDER=vagrant; wget -q -O - https://get.k8s.io | bash
# VMWare VSphere


@@ -4,11 +4,11 @@ This is the root of the SaltStack configuration for Kubernetes. A high
level overview for the Kubernetes SaltStack configuration can be found [in the docs tree.](../../docs/admin/salt.md)
This SaltStack configuration currently applies to default
-configurations for Debian-on-GCE, Fedora-on-Vagrant, and Ubuntu-on-AWS.
-(That doesn't mean it can't be made to apply to an
+configurations for Debian-on-GCE, Fedora-on-Vagrant, Ubuntu-on-AWS and
+Ubuntu-on-Azure. (That doesn't mean it can't be made to apply to an
arbitrary configuration, but those are only the in-tree OS/IaaS
combinations supported today.) As you peruse the configuration, these
-are shorthanded as `gce`, `vagrant`, `aws` in `grains.cloud`;
+are shorthanded as `gce`, `vagrant`, `aws`, `azure` in `grains.cloud`;
the documentation in this tree uses this same shorthand for convenience.
See more:


@@ -8,24 +8,27 @@ and is only used for the [docker](docker/) config.)
Key: M = Config applies to master, n = config applies to nodes
-Config | GCE | Vagrant | AWS |
-----------------------------------------------------|-------|---------|-----|
-[debian-auto-upgrades](debian-auto-upgrades/) | M n | M n | M n |
-[docker](docker/) | M n | M n | M n |
-[etcd](etcd/) | M | M | M |
-[fluentd-es](fluentd-es/) (pillar conditional) | M n | M n | M n |
-[fluentd-gcp](fluentd-gcp/) (pillar conditional) | M n | M n | M n |
-[generate-cert](generate-cert/) | M | M | M |
-[kube-addons](kube-addons/) | M | M | M |
-[kube-apiserver](kube-apiserver/) | M | M | M |
-[kube-controller-manager](kube-controller-manager/) | M | M | M |
-[kube-proxy](kube-proxy/) | n | n | n |
-[kube-scheduler](kube-scheduler/) | M | M | M |
-[kubelet](kubelet/) | M n | M n | M n |
-[logrotate](logrotate/) | M n | n | M n |
-[supervisord](supervisor/) | M n | M n | M n |
-[base](base.sls) | M n | M n | M n |
-[kube-client-tools](kube-client-tools.sls) | M | M | M |
+Config | GCE | Vagrant | AWS | Azure
+----------------------------------------------------|-------|---------|-----|------
+[debian-auto-upgrades](debian-auto-upgrades/) | M n | M n | M n | M n
+[docker](docker/) | M n | M n | M n | M n
+[etcd](etcd/) | M | M | M | M
+[fluentd-es](fluentd-es/) (pillar conditional) | M n | M n | M n | M n
+[fluentd-gcp](fluentd-gcp/) (pillar conditional) | M n | M n | M n | M n
+[generate-cert](generate-cert/) | M | M | M | M
+[kube-addons](kube-addons/) | M | M | M | M
+[kube-apiserver](kube-apiserver/) | M | M | M | M
+[kube-controller-manager](kube-controller-manager/) | M | M | M | M
+[kube-proxy](kube-proxy/) | n | n | n | n
+[kube-scheduler](kube-scheduler/) | M | M | M | M
+[kubelet](kubelet/) | M n | M n | M n | M n
+[logrotate](logrotate/) | M n | n | M n | M n
+[supervisord](supervisor/) | M n | M n | M n | M n
+[nginx](nginx/) | | | | M
+[openvpn-client](openvpn-client/) | | | | n
+[openvpn](openvpn/) | | | | M
+[base](base.sls) | M n | M n | M n | M n
+[kube-client-tools](kube-client-tools.sls) | M | M | M | M
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/saltbase/salt/README.md?pixel)]()


@@ -6,6 +6,9 @@
{% if grains.cloud == 'aws' %}
{% set cert_ip='_use_aws_external_ip_' %}
{% endif %}
{% if grains.cloud == 'azure' %}
{% set cert_ip='_use_azure_dns_name_' %}
{% endif %}
{% if grains.cloud == 'vsphere' or grains.cloud == 'photon-controller' %}
{% set cert_ip=grains.ip_interfaces.eth0[0] %}
{% endif %}


@@ -51,6 +51,11 @@ if [ "$cert_ip" == "_use_aws_external_ip_" ]; then
fi
fi
if [ "$cert_ip" == "_use_azure_dns_name_" ]; then
cert_ip=$(uname -n | awk -F. '{ print $2 }').cloudapp.net
use_cn=true
fi
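# Worked example (hypothetical hostname): if `uname -n` returns
# "kube-master.kube-1a2b3c4.cloudapp.net", the awk field is "kube-1a2b3c4",
# so cert_ip becomes "kube-1a2b3c4.cloudapp.net", i.e. the cloud service DNS
# name that fronts the master.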
sans="IP:${cert_ip}"
if [[ -n "${extra_sans}" ]]; then
sans="${sans},${extra_sans}"


@@ -0,0 +1,35 @@
{% set daemon_args = "$DAEMON_ARGS" -%}
{% if grains['os_family'] == 'RedHat' -%}
{% set daemon_args = "" -%}
{% endif -%}
{# TODO(azure-maintainer): add support for distributing kubeconfig with token to kube-proxy #}
{# so it can use https #}
{% if grains['cloud'] is defined and grains['cloud'] == 'azure' -%}
{% set api_servers = "--master=http://" + ips[0][0] -%}
{% set api_servers_with_port = api_servers + ":7080" -%}
{% set kubeconfig = "" -%}
{% else -%}
{% set kubeconfig = "--kubeconfig=/var/lib/kube-proxy/kubeconfig" -%}
{% if grains.api_servers is defined -%}
{% set api_servers = "--master=https://" + grains.api_servers -%}
{% else -%}
{% set ips = salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() -%}
{% set api_servers = "--master=https://" + ips[0][0] -%}
{% endif -%}
# TODO: remove nginx for other cloud providers.
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant' ] %}
{% set api_servers_with_port = api_servers -%}
{% else -%}
{% set api_servers_with_port = api_servers + ":6443" -%}
{% endif -%}
{% endif -%}
{% set test_args = "" -%}
{% if pillar['kubeproxy_test_args'] is defined -%}
{% set test_args=pillar['kubeproxy_test_args'] %}
{% endif -%}
# test_args has to be kept at the end, so they'll overwrite any prior configuration
DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{kubeconfig}} {{pillar['log_level']}} {{test_args}}"
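# Rendered sketch for an Azure minion (hypothetical master IP 10.1.0.4,
# assuming log_level renders as --v=2): the azure branch drops kubeconfig and
# points at the insecure master port, roughly:
#   DAEMON_ARGS="$DAEMON_ARGS --master=http://10.1.0.4:7080 --v=2"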


@@ -0,0 +1,53 @@
# Specify that we are a client and that we
# will be pulling certain config file directives
# from the server.
client
# Use the same setting as you are using on
# the server.
# On most systems, the VPN will not function
# unless you partially or fully disable
# the firewall for the TUN/TAP interface.
dev tun
# Are we connecting to a TCP or
# UDP server? Use the same setting as
# on the server.
proto udp
# The hostname/IP and port of the server.
# You can have multiple remote entries
# to load balance between the servers.
remote {{ salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').keys()[0] }} 1194
# Keep trying indefinitely to resolve the
# host name of the OpenVPN server. Very useful
# on machines which are not permanently connected
# to the internet such as laptops.
resolv-retry infinite
# Most clients don't need to bind to
# a specific local port number.
nobind
# Try to preserve some state across restarts.
persist-key
persist-tun
# SSL/TLS parms.
# See the server config file for more
# description. It's best to use
# a separate .crt/.key file pair
# for each client. A single ca
# file can be used for all clients.
ca /etc/openvpn/ca.crt
cert /etc/openvpn/client.crt
key /etc/openvpn/client.key
# Enable compression on the VPN link.
# Don't enable this unless it is also
# enabled in the server config file.
comp-lzo
# Set log file verbosity.
verb 3


@@ -0,0 +1,16 @@
/etc/openvpn/client.conf:
  file.managed:
    - source: salt://openvpn-client/client.conf
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - makedirs: True

openvpn:
  pkg:
    - latest
  service.running:
    - enable: True
    - watch:
      - file: /etc/openvpn/client.conf


@@ -0,0 +1,31 @@
/etc/openvpn/server.conf:
  file.managed:
    - source: salt://openvpn/server.conf
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - makedirs: True

{% for (minion, grains) in salt['mine.get']('roles:kubernetes-pool', 'grains.items', expr_form='grain').items() %}
/etc/openvpn/ccd/{{ minion }}:
  file.managed:
    - contents: "iroute {{ grains['cbr-string'] }}\n"
    - user: root
    - group: root
    - mode: 644
    - makedirs: True
{% endfor %}

openssl dhparam -out /etc/openvpn/dh1024.pem 1024:
  cmd.run:
    - creates: /etc/openvpn/dh1024.pem
    - unless: file /etc/openvpn/dh1024.pem

openvpn:
  pkg:
    - latest
  service.running:
    - enable: True
    - watch:
      - file: /etc/openvpn/server.conf
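
# Rendered sketch (hypothetical minion grains): for a minion whose cbr-string
# is "10.244.1.0 255.255.255.0", the loop above manages a file
# /etc/openvpn/ccd/kubernetes-minion-1 containing:
#   iroute 10.244.1.0 255.255.255.0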


@@ -0,0 +1,123 @@
# Which TCP/UDP port should OpenVPN listen on?
# If you want to run multiple OpenVPN instances
# on the same machine, use a different port
# number for each one. You will need to
# open up this port on your firewall.
port 1194
# TCP or UDP server?
proto udp
# "dev tun" will create a routed IP tunnel,
# "dev tap" will create an ethernet tunnel.
# Use "dev tap0" if you are ethernet bridging
# and have precreated a tap0 virtual interface
# and bridged it with your ethernet interface.
# If you want to control access policies
# over the VPN, you must create firewall
# rules for the TUN/TAP interface.
# On non-Windows systems, you can give
# an explicit unit number, such as tun0.
# On Windows, use "dev-node" for this.
# On most systems, the VPN will not function
# unless you partially or fully disable
# the firewall for the TUN/TAP interface.
dev tun
# SSL/TLS root certificate (ca), certificate
# (cert), and private key (key). Each client
# and the server must have their own cert and
# key file. The server and all clients will
# use the same ca file.
#
# See the "easy-rsa" directory for a series
# of scripts for generating RSA certificates
# and private keys. Remember to use
# a unique Common Name for the server
# and each of the client certificates.
#
# Any X509 key management system can be used.
# OpenVPN can also use a PKCS #12 formatted key file
# (see "pkcs12" directive in man page).
ca /etc/openvpn/ca.crt
cert /etc/openvpn/server.crt
key /etc/openvpn/server.key # This file should be kept secret
# Diffie-Hellman parameters.
# Generate your own with:
# openssl dhparam -out dh1024.pem 1024
# Substitute 2048 for 1024 if you are using
# 2048 bit keys.
dh /etc/openvpn/dh1024.pem
# Configure server mode and supply a VPN subnet
# for OpenVPN to draw client addresses from.
# The server will take 10.8.0.1 for itself,
# the rest will be made available to clients.
# Each client will be able to reach the server
# on 10.8.0.1. Comment this line out if you are
# ethernet bridging. See the man page for more info.
server 10.8.0.0 255.255.255.0
# Maintain a record of client <-> virtual IP address
# associations in this file. If OpenVPN goes down or
# is restarted, reconnecting clients can be assigned
# the same virtual IP address from the pool that was
# previously assigned.
ifconfig-pool-persist ipp.txt
# To assign specific IP addresses to specific
# clients or if a connecting client has a private
# subnet behind it that should also have VPN access,
# use the subdirectory "ccd" for client-specific
# configuration files (see man page for more info).
client-config-dir /etc/openvpn/ccd
{% for minion in salt['mine.get']('roles:kubernetes-pool', 'grains.items', expr_form='grain').values() %}
push "route {{ minion['cbr-string'] }}"
route {{ minion['cbr-string'] }}
{% endfor %}
# Uncomment this directive to allow different
# clients to be able to "see" each other.
# By default, clients will only see the server.
# To force clients to only see the server, you
# will also need to appropriately firewall the
# server's TUN/TAP interface.
client-to-client
# The keepalive directive causes ping-like
# messages to be sent back and forth over
# the link so that each side knows when
# the other side has gone down.
# Ping every 10 seconds, assume that remote
# peer is down if no ping received during
# a 120 second time period.
keepalive 10 120
# Enable compression on the VPN link.
# If you enable it here, you must also
# enable it in the client config file.
comp-lzo
# The persist options will try to avoid
# accessing certain resources on restart
# that may no longer be accessible because
# of the privilege downgrade.
persist-key
persist-tun
# Output a short status file showing
# current connections, truncated
# and rewritten every minute.
status openvpn-status.log
# Set the appropriate level of log
# file verbosity.
#
# 0 is silent, except for fatal errors
# 4 is reasonable for general usage
# 5 and 6 can help to debug connection problems
# 9 is extremely verbose
verb 3
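
# Rendered sketch of the client-config loop above (hypothetical pool with one
# minion whose cbr-string is "10.244.1.0 255.255.255.0"):
#   push "route 10.244.1.0 255.255.255.0"
#   route 10.244.1.0 255.255.255.0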


@@ -19,6 +19,8 @@ base:
    - cni
{% elif pillar.get('network_provider', '').lower() == 'cni' %}
    - cni
{% endif %}
{% if grains['cloud'] is defined and grains['cloud'] == 'azure' %}
    - openvpn-client
{% endif %}
    - helpers
    - kube-client-tools
@@ -78,6 +81,9 @@ base:
    - logrotate
{% endif %}
    - kube-addons
{% if grains['cloud'] is defined and grains['cloud'] == 'azure' %}
    - openvpn
{% endif %}
{% if grains['cloud'] is defined and grains['cloud'] in [ 'vagrant', 'gce', 'aws', 'vsphere', 'photon-controller', 'openstack'] %}
    - docker
    - kubelet