Merge pull request #21207 from WeixuZhuang/azure-push

Automatic merge from submit-queue

Enable setting up Kubernetes cluster in Ubuntu on Azure

Implement basic cloud-provider functionality to deploy Kubernetes on
Azure. SaltStack deploys Kubernetes on top of Ubuntu virtual machines,
and OpenVPN provides network connectivity between the nodes. Kubelet
authentication uses HTTP basic authentication (username and password).
The scripts use the legacy Azure Service Management APIs.

We have set up a nightly federated-testing job on our Jenkins server to
run the e2e test suite on Azure. With the cloud-provider scripts in this
commit, 14 e2e test cases pass in that environment. We plan to implement
additional Azure functionality to support more test cases.
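
For reference, a minimal bring-up sketch using these scripts (the vnet,
subnet, and minion count shown are illustrative; util.sh exits early if
the vnet does not already exist):

```bash
# Assumes the azure CLI is installed (npm install azure-cli) and a default
# account has been selected with `azure account set`.
export KUBERNETES_PROVIDER=azure-legacy
export AZ_VNET=MyVnet AZ_SUBNET=Subnet-1   # pre-created vnet and subnet
export NUM_MINIONS=4
cluster/kube-up.sh
```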

This commit is contained in:
k8s-merge-robot 2016-06-27 11:11:45 -07:00 committed by GitHub
commit 95a3737305
30 changed files with 1595 additions and 33 deletions

cluster/azure-legacy/.gitignore (vendored, new file)

@ -0,0 +1,2 @@
_deployments
config-real.sh


@ -0,0 +1,60 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
INSTANCE_PREFIX=kubernetes
AZ_LOCATION='West US'
TAG=testing
AZ_CS_PREFIX=kube
AZ_VNET=${AZ_VNET:-MyVnet}
AZ_SUBNET=${AZ_SUBNET:-Subnet-1}
AZ_IMAGE=b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04_1-LTS-amd64-server-20140927-en-us-30GB
AZ_CS="" # is set in azure-legacy/util.sh verify-prereqs
AZ_SSH_KEY=$HOME/.ssh/azure_rsa
AZ_SSH_CERT=$HOME/.ssh/azure.pem
NUM_MINIONS=${NUM_MINIONS:-4}
MASTER_SIZE='Medium'
MINION_SIZE='Medium'
MASTER_NAME="${INSTANCE_PREFIX}-master"
MASTER_TAG="${INSTANCE_PREFIX}-master"
MINION_TAG="${INSTANCE_PREFIX}-minion"
MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}}))
MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24"))
MINION_SCOPES=""
SERVICE_CLUSTER_IP_RANGE="10.244.244.0/16" # formerly PORTAL_NET
# Optional: Install node logging
ENABLE_NODE_LOGGING=false
LOGGING_DESTINATION=elasticsearch # options: elasticsearch, gcp
# Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up.
ENABLE_CLUSTER_LOGGING=false
ELASTICSEARCH_LOGGING_REPLICAS=1
# Optional: Cluster monitoring to setup as part of the cluster bring up:
# none - No cluster monitoring setup
# influxdb - Heapster, InfluxDB, and Grafana
# google - Heapster, Google Cloud Monitoring, and Google Cloud Logging
ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}"
# Optional: Install Kubernetes UI
ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"
# Admission Controllers to invoke prior to persisting objects in cluster
ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota


@ -0,0 +1,58 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Retry a download until we get it.
#
# $1 is the URL to download
download-or-bust() {
local -r url="$1"
local -r file="${url##*/}"
rm -f "$file"
until [[ -e "${file}" ]]; do
curl --ipv4 -Lo "$file" --connect-timeout 20 --retry 6 --retry-delay 10 "$url"
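# Note: the md5sum below is informational only; the until-loop retries until the file exists.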
md5sum "$file"
done
}
# Install salt from GCS. See README.md for instructions on how to update these
# debs.
#
# $1 If set to --master, also install the master
install-salt() {
apt-get update
mkdir -p /var/cache/salt-install
cd /var/cache/salt-install
TARS=(
libzmq3_3.2.3+dfsg-1~bpo70~dst+1_amd64.deb
python-zmq_13.1.0-1~bpo70~dst+1_amd64.deb
salt-common_2014.1.13+ds-1~bpo70+1_all.deb
salt-minion_2014.1.13+ds-1~bpo70+1_all.deb
)
if [[ ${1-} == '--master' ]]; then
TARS+=(salt-master_2014.1.13+ds-1~bpo70+1_all.deb)
fi
URL_BASE="https://storage.googleapis.com/kubernetes-release/salt"
for tar in "${TARS[@]}"; do
download-or-bust "${URL_BASE}/${tar}"
dpkg -i "${tar}"
done
# This will install any of the unmet dependencies from above.
apt-get install -f -y
}


@ -0,0 +1,30 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Create the overlay files for the salt tree. We create these in a separate
# place so that we can blow away the rest of the salt configs on a kube-push and
# re-apply these.
mkdir -p /srv/salt-overlay/pillar
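# Single quotes in values are doubled (sed "s/'/''/g") so the generated pillar stays valid quoted YAML.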
cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")'
node_instance_prefix: $NODE_INSTANCE_PREFIX
service_cluster_ip_range: $SERVICE_CLUSTER_IP_RANGE
admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")'
EOF
mkdir -p /srv/salt-overlay/salt/nginx
echo $MASTER_HTPASSWD > /srv/salt-overlay/salt/nginx/htpasswd


@ -0,0 +1,77 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Create the kubeconfig files for kubelet and kube-proxy on the minions.
# KUBE_USER and KUBE_PASSWORD must be set.
function create-salt-kubelet-auth() {
local -r kubelet_kubeconfig_file="/srv/salt-overlay/salt/kubelet/kubeconfig"
mkdir -p /srv/salt-overlay/salt/kubelet
(umask 077;
cat > "${kubelet_kubeconfig_file}" <<EOF
apiVersion: v1
clusters:
- cluster:
insecure-skip-tls-verify: true
server: https://${KUBE_MASTER_IP}
name: azure_kubernetes
contexts:
- context:
cluster: azure_kubernetes
user: kubelet
name: azure_kubernetes
current-context: azure_kubernetes
kind: Config
preferences: {}
users:
- name: kubelet
user:
password: ${KUBE_PASSWORD}
username: ${KUBE_USER}
EOF
)
}
function create-salt-kube-proxy-auth() {
local -r kube_proxy_kubeconfig_file="/srv/salt-overlay/salt/kube-proxy/kubeconfig"
mkdir -p /srv/salt-overlay/salt/kube-proxy
(umask 077;
cat > "${kubelet_kubeconfig_file}" <<EOF
apiVersion: v1
clusters:
- cluster:
insecure-skip-tls-verify: true
server: https://${KUBE_MASTER_IP}
name: azure_kubernetes
contexts:
- context:
cluster: azure_kubernetes
user: kube-proxy
name: azure_kubernetes
current-context: azure_kubernetes
kind: Config
preferences: {}
users:
- name: kube-proxy
user:
password: ${KUBE_PASSWORD}
username: ${KUBE_USER}
EOF
)
}
create-salt-kubelet-auth
create-salt-kube-proxy-auth


@ -0,0 +1,35 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Download and install release
# This script assumes that the environment variable MASTER_RELEASE_TAR contains
# the release tar to download and unpack. It is meant to be pushed to the
# master and run.
echo "Downloading binary release tar ($SERVER_BINARY_TAR_URL)"
download-or-bust "$SERVER_BINARY_TAR_URL"
echo "Downloading binary release tar ($SALT_TAR_URL)"
download-or-bust "$SALT_TAR_URL"
echo "Unpacking Salt tree"
rm -rf kubernetes
tar xzf "${SALT_TAR_URL##*/}"
echo "Running release install script"
sudo kubernetes/saltbase/install.sh "${SERVER_BINARY_TAR_URL##*/}"


@ -0,0 +1,92 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Prepopulate the name of the Master
mkdir -p /etc/salt/minion.d
echo "master: $MASTER_NAME" > /etc/salt/minion.d/master.conf
cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
roles:
- kubernetes-master
cloud: azure-legacy
EOF
# Helper that sets a salt grain in grains.conf, if the upper-cased key is a non-empty env
function env_to_salt {
local key=$1
local env_key=`echo $key | tr '[:lower:]' '[:upper:]'`
local value=${!env_key}
if [[ -n "${value}" ]]; then
# Note this is yaml, so indentation matters
cat <<EOF >>/etc/salt/minion.d/grains.conf
${key}: '$(echo "${value}" | sed -e "s/'/''/g")'
EOF
fi
}
env_to_salt docker_opts
env_to_salt docker_root
env_to_salt kubelet_root
env_to_salt master_extra_sans
env_to_salt runtime_config
# Auto accept all keys from minions that try to join
mkdir -p /etc/salt/master.d
cat <<EOF >/etc/salt/master.d/auto-accept.conf
auto_accept: True
EOF
cat <<EOF >/etc/salt/master.d/reactor.conf
# React to new minions starting by running highstate on them.
reactor:
- 'salt/minion/*/start':
- /srv/reactor/highstate-new.sls
EOF
mkdir -p /srv/salt/nginx
echo $MASTER_HTPASSWD > /srv/salt/nginx/htpasswd
mkdir -p /etc/openvpn
umask=$(umask)
umask 0066
echo "$CA_CRT" > /etc/openvpn/ca.crt
echo "$SERVER_CRT" > /etc/openvpn/server.crt
echo "$SERVER_KEY" > /etc/openvpn/server.key
umask $umask
cat <<EOF >/etc/salt/minion.d/log-level-debug.conf
log_level: debug
log_level_logfile: debug
EOF
cat <<EOF >/etc/salt/master.d/log-level-debug.conf
log_level: debug
log_level_logfile: debug
EOF
echo "Sleep 150 to wait minion to be up"
sleep 150
install-salt --master
# Wait a few minutes and trigger another Salt run to better recover from
# any transient errors.
echo "Sleeping 180"
sleep 180
salt-call state.highstate || true


@ -0,0 +1,75 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
mkdir -p /etc/openvpn
umask=$(umask)
umask 0066
echo "$CA_CRT" > /etc/openvpn/ca.crt
echo "$CLIENT_CRT" > /etc/openvpn/client.crt
echo "$CLIENT_KEY" > /etc/openvpn/client.key
umask $umask
# Prepopulate the name of the Master
mkdir -p /etc/salt/minion.d
echo "master: $MASTER_NAME" > /etc/salt/minion.d/master.conf
cat <<EOF >/etc/salt/minion.d/log-level-debug.conf
log_level: debug
log_level_logfile: debug
EOF
hostnamef=$(uname -n)
apt-get install -y ipcalc
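# Derive this minion's pod bridge network and netmask from its CIDR (e.g. 10.244.1.0/24 -> 10.244.1.0 255.255.255.0).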
netmask=$(ipcalc $MINION_IP_RANGE | grep Netmask | awk '{ print $2 }')
network=$(ipcalc $MINION_IP_RANGE | grep Address | awk '{ print $2 }')
cbrstring="$network $netmask"
# Our minions will have a pool role to distinguish them from the master.
cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
roles:
- kubernetes-pool
cbr-cidr: $MINION_IP_RANGE
cloud: azure-legacy
hostnamef: $hostnamef
cbr-string: $cbrstring
EOF
if [[ -n "${DOCKER_OPTS}" ]]; then
cat <<EOF >>/etc/salt/minion.d/grains.conf
docker_opts: '$(echo "$DOCKER_OPTS" | sed -e "s/'/''/g")'
EOF
fi
if [[ -n "${DOCKER_ROOT}" ]]; then
cat <<EOF >>/etc/salt/minion.d/grains.conf
docker_root: '$(echo "$DOCKER_ROOT" | sed -e "s/'/''/g")'
EOF
fi
if [[ -n "${KUBELET_ROOT}" ]]; then
cat <<EOF >>/etc/salt/minion.d/grains.conf
kubelet_root: '$(echo "$KUBELET_ROOT" | sed -e "s/'/''/g")'
EOF
fi
install-salt
# Wait a few minutes and trigger another Salt run to better recover from
# any transient errors.
echo "Sleeping 180"
sleep 180
salt-call state.highstate || true


@ -0,0 +1,551 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions and constants for the local config.
# Use the config file specified in $KUBE_CONFIG_FILE, or default to
# config-default.sh.
set -e
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE"
done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/azure-legacy/${KUBE_CONFIG_FILE-"config-default.sh"}"
source "${KUBE_ROOT}/cluster/common.sh"
function prepare-e2e() {
# (the e2e script runs detect-project; I don't think we need to do anything here)
# Note: we can't print anything here, or else the test tools will break with the extra output
return
}
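# Run the azure CLI with the given arguments, retrying up to 10 times on
# transient DNS failures ("getaddrinfo ENOTFOUND"). stderr is captured for
# inspection while stdout passes straight through to the caller on fd 3.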
function azure_call {
local -a params=()
local param
# the '... in "$@"' is implicit on a for, so doesn't need to be stated.
for param; do
params+=("${param}")
done
local rc=0
local stderr
local count=0
while [[ count -lt 10 ]]; do
stderr=$(azure "${params[@]}" 2>&1 >&3) && break
rc=$?
if [[ "${stderr}" != *"getaddrinfo ENOTFOUND"* ]]; then
break
fi
count=$(($count + 1))
done 3>&1
if [[ "${rc}" -ne 0 ]]; then
echo "${stderr}" >&2
return "${rc}"
fi
}
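# Extract a value from the JSON document on stdin by appending a Python
# subscript, e.g.: azure_call storage account keys list $AZ_STG --json | json_val '["primaryKey"]'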
function json_val () {
python -c 'import json,sys;obj=json.load(sys.stdin);print obj'$1'';
}
# Verify prereqs
function verify-prereqs {
if [[ -z "$(which azure)" ]]; then
echo "Couldn't find azure in PATH"
echo " please install with 'npm install azure-cli'"
exit 1
fi
if [[ -z "$(azure_call account list | grep true)" ]]; then
echo "Default azure account not set"
echo " please set with 'azure account set'"
exit 1
fi
account=$(azure_call account list | grep true)
if which md5 > /dev/null 2>&1; then
AZ_HSH=$(md5 -q -s "$account")
else
AZ_HSH=$(echo -n "$account" | md5sum)
fi
AZ_HSH=${AZ_HSH:0:7}
AZ_STG=kube$AZ_HSH
echo "==> AZ_STG: $AZ_STG"
AZ_CS="$AZ_CS_PREFIX-$AZ_HSH"
echo "==> AZ_CS: $AZ_CS"
CONTAINER=kube-$TAG
echo "==> CONTAINER: $CONTAINER"
}
# Create a temp dir that'll be deleted at the end of this bash session.
#
# Vars set:
# KUBE_TEMP
function ensure-temp-dir {
if [[ -z ${KUBE_TEMP-} ]]; then
KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX)
trap 'rm -rf "${KUBE_TEMP}"' EXIT
fi
}
# Take the local tar files and upload them to Azure Storage. They will then be
# downloaded by the master as part of the start up script for the master.
#
# Assumed vars:
# SERVER_BINARY_TAR
# SALT_TAR
# Vars set:
# SERVER_BINARY_TAR_URL
# SALT_TAR_URL
function upload-server-tars() {
SERVER_BINARY_TAR_URL=
SALT_TAR_URL=
echo "==> SERVER_BINARY_TAR: $SERVER_BINARY_TAR"
echo "==> SALT_TAR: $SALT_TAR"
echo "+++ Staging server tars to Azure Storage: $AZ_STG"
local server_binary_url="${SERVER_BINARY_TAR##*/}"
local salt_url="${SALT_TAR##*/}"
SERVER_BINARY_TAR_URL="https://${AZ_STG}.blob.core.windows.net/$CONTAINER/$server_binary_url"
SALT_TAR_URL="https://${AZ_STG}.blob.core.windows.net/$CONTAINER/$salt_url"
echo "==> SERVER_BINARY_TAR_URL: $SERVER_BINARY_TAR_URL"
echo "==> SALT_TAR_URL: $SALT_TAR_URL"
echo "--> Checking storage exists..."
if [[ -z "$(azure_call storage account show $AZ_STG 2>/dev/null | \
grep data)" ]]; then
echo "--> Creating storage..."
azure_call storage account create -l "$AZ_LOCATION" $AZ_STG --type LRS
fi
echo "--> Getting storage key..."
stg_key=$(azure_call storage account keys list $AZ_STG --json | \
json_val '["primaryKey"]')
echo "--> Checking storage container exists..."
if [[ -z "$(azure_call storage container show -a $AZ_STG -k "$stg_key" \
$CONTAINER 2>/dev/null | grep data)" ]]; then
echo "--> Creating storage container..."
azure_call storage container create \
-a $AZ_STG \
-k "$stg_key" \
-p Blob \
$CONTAINER
fi
echo "--> Checking server binary exists in the container..."
if [[ -n "$(azure_call storage blob show -a $AZ_STG -k "$stg_key" \
$CONTAINER $server_binary_url 2>/dev/null | grep data)" ]]; then
echo "--> Deleting server binary in the container..."
azure_call storage blob delete \
-a $AZ_STG \
-k "$stg_key" \
$CONTAINER \
$server_binary_url
fi
echo "--> Uploading server binary to the container..."
azure_call storage blob upload \
-a $AZ_STG \
-k "$stg_key" \
$SERVER_BINARY_TAR \
$CONTAINER \
$server_binary_url
echo "--> Checking salt data exists in the container..."
if [[ -n "$(azure_call storage blob show -a $AZ_STG -k "$stg_key" \
$CONTAINER $salt_url 2>/dev/null | grep data)" ]]; then
echo "--> Deleting salt data in the container..."
azure_call storage blob delete \
-a $AZ_STG \
-k "$stg_key" \
$CONTAINER \
$salt_url
fi
echo "--> Uploading salt data to the container..."
azure_call storage blob upload \
-a $AZ_STG \
-k "$stg_key" \
$SALT_TAR \
$CONTAINER \
$salt_url
}
# Detect the information about the minions
#
# Assumed vars:
# MINION_NAMES
# NUM_MINIONS
# Vars set:
# MINION_NAMES (replaced with each minion's detected hostname)
function detect-minions () {
if [[ -z "$AZ_CS" ]]; then
verify-prereqs
fi
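# Minions are reachable through the cloud service's public endpoints on SSH ports 22001, 22002, ... (created in kube-up).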
ssh_ports=($(eval echo "2200{1..$NUM_MINIONS}"))
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
MINION_NAMES[$i]=$(ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} $AZ_CS.cloudapp.net hostname -f)
done
}
# Detect the IP for the master
#
# Assumed vars:
# MASTER_NAME
# AZ_CS
# Vars set:
# KUBE_MASTER
# KUBE_MASTER_IP
function detect-master () {
if [[ -z "$AZ_CS" ]]; then
verify-prereqs
fi
KUBE_MASTER=${MASTER_NAME}
KUBE_MASTER_IP="${AZ_CS}.cloudapp.net"
echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)"
}
# Instantiate a kubernetes cluster
#
# Assumed vars
# KUBE_ROOT
# <Various vars set in config file>
function kube-up {
# Make sure we have the tar files staged on Azure Storage
find-release-tars
upload-server-tars
ensure-temp-dir
gen-kube-basicauth
python "${KUBE_ROOT}/third_party/htpasswd/htpasswd.py" \
-b -c "${KUBE_TEMP}/htpasswd" "$KUBE_USER" "$KUBE_PASSWORD"
local htpasswd
htpasswd=$(cat "${KUBE_TEMP}/htpasswd")
# Generate openvpn certs
echo "--> Generating openvpn certs"
echo 01 > ${KUBE_TEMP}/ca.srl
openssl genrsa -out ${KUBE_TEMP}/ca.key
openssl req -new -x509 -days 1095 \
-key ${KUBE_TEMP}/ca.key \
-out ${KUBE_TEMP}/ca.crt \
-subj "/CN=openvpn-ca"
openssl genrsa -out ${KUBE_TEMP}/server.key
openssl req -new \
-key ${KUBE_TEMP}/server.key \
-out ${KUBE_TEMP}/server.csr \
-subj "/CN=server"
openssl x509 -req -days 1095 \
-in ${KUBE_TEMP}/server.csr \
-CA ${KUBE_TEMP}/ca.crt \
-CAkey ${KUBE_TEMP}/ca.key \
-CAserial ${KUBE_TEMP}/ca.srl \
-out ${KUBE_TEMP}/server.crt
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
openssl genrsa -out ${KUBE_TEMP}/${MINION_NAMES[$i]}.key
openssl req -new \
-key ${KUBE_TEMP}/${MINION_NAMES[$i]}.key \
-out ${KUBE_TEMP}/${MINION_NAMES[$i]}.csr \
-subj "/CN=${MINION_NAMES[$i]}"
openssl x509 -req -days 1095 \
-in ${KUBE_TEMP}/${MINION_NAMES[$i]}.csr \
-CA ${KUBE_TEMP}/ca.crt \
-CAkey ${KUBE_TEMP}/ca.key \
-CAserial ${KUBE_TEMP}/ca.srl \
-out ${KUBE_TEMP}/${MINION_NAMES[$i]}.crt
done
KUBE_MASTER_IP="${AZ_CS}.cloudapp.net"
# Build up start up script for master
echo "--> Building up start up script for master"
(
echo "#!/bin/bash"
echo "CA_CRT=\"$(cat ${KUBE_TEMP}/ca.crt)\""
echo "SERVER_CRT=\"$(cat ${KUBE_TEMP}/server.crt)\""
echo "SERVER_KEY=\"$(cat ${KUBE_TEMP}/server.key)\""
echo "mkdir -p /var/cache/kubernetes-install"
echo "cd /var/cache/kubernetes-install"
echo "readonly MASTER_NAME='${MASTER_NAME}'"
echo "readonly INSTANCE_PREFIX='${INSTANCE_PREFIX}'"
echo "readonly NODE_INSTANCE_PREFIX='${INSTANCE_PREFIX}-minion'"
echo "readonly SERVER_BINARY_TAR_URL='${SERVER_BINARY_TAR_URL}'"
echo "readonly SALT_TAR_URL='${SALT_TAR_URL}'"
echo "readonly MASTER_HTPASSWD='${htpasswd}'"
echo "readonly SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'"
echo "readonly ADMISSION_CONTROL='${ADMISSION_CONTROL:-}'"
echo "readonly KUBE_USER='${KUBE_USER}'"
echo "readonly KUBE_PASSWORD='${KUBE_PASSWORD}'"
echo "readonly KUBE_MASTER_IP='${KUBE_MASTER_IP}'"
grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/common.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/create-dynamic-salt-files.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/create-kubeconfig.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/download-release.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/salt-master.sh"
) > "${KUBE_TEMP}/master-start.sh"
if [[ ! -f $AZ_SSH_KEY ]]; then
ssh-keygen -f $AZ_SSH_KEY -N ''
fi
if [[ ! -f $AZ_SSH_CERT ]]; then
openssl req -new -x509 -days 1095 -key $AZ_SSH_KEY -out $AZ_SSH_CERT \
-subj "/CN=azure-ssh-key"
fi
if [[ -z "$(azure_call network vnet show "$AZ_VNET" 2>/dev/null | grep data)" ]]; then
echo "Error: vnet $AZ_VNET with subnet $AZ_SUBNET does not exist; create it first"
exit 1
fi
echo "--> Starting VM"
azure_call vm create \
-z "$MASTER_SIZE" \
-w "$AZ_VNET" \
-n $MASTER_NAME \
-l "$AZ_LOCATION" \
-t $AZ_SSH_CERT \
-e 22000 -P \
-d ${KUBE_TEMP}/master-start.sh \
-b $AZ_SUBNET \
$AZ_CS $AZ_IMAGE $USER
ssh_ports=($(eval echo "2200{1..$NUM_MINIONS}"))
# Build up start up script for minions
echo "--> Building up start up script for minions"
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
(
echo "#!/bin/bash"
echo "MASTER_NAME='${MASTER_NAME}'"
echo "CA_CRT=\"$(cat ${KUBE_TEMP}/ca.crt)\""
echo "CLIENT_CRT=\"$(cat ${KUBE_TEMP}/${MINION_NAMES[$i]}.crt)\""
echo "CLIENT_KEY=\"$(cat ${KUBE_TEMP}/${MINION_NAMES[$i]}.key)\""
echo "MINION_IP_RANGE='${MINION_IP_RANGES[$i]}'"
echo "readonly KUBE_USER='${KUBE_USER}'"
echo "readonly KUBE_PASSWORD='${KUBE_PASSWORD}'"
echo "readonly KUBE_MASTER_IP='${KUBE_MASTER_IP}'"
grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/common.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/create-kubeconfig.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/salt-minion.sh"
) > "${KUBE_TEMP}/minion-start-${i}.sh"
echo "--> Starting VM"
azure_call vm create \
-z "$MINION_SIZE" \
-c -w "$AZ_VNET" \
-n ${MINION_NAMES[$i]} \
-l "$AZ_LOCATION" \
-t $AZ_SSH_CERT \
-e ${ssh_ports[$i]} -P \
-d ${KUBE_TEMP}/minion-start-${i}.sh \
-b $AZ_SUBNET \
$AZ_CS $AZ_IMAGE $USER
done
echo "--> Creating endpoint"
azure_call vm endpoint create $MASTER_NAME 443
detect-master > /dev/null
echo "==> KUBE_MASTER_IP: ${KUBE_MASTER_IP}"
echo "Waiting for cluster initialization."
echo
echo " This will continually check to see if the API for kubernetes is reachable."
echo " This might loop forever if there was some uncaught error during start"
echo " up."
echo
until curl --insecure --user "${KUBE_USER}:${KUBE_PASSWORD}" --max-time 5 \
--fail --output /dev/null --silent "https://${KUBE_MASTER_IP}/healthz"; do
printf "."
sleep 2
done
printf "\n"
echo "Kubernetes cluster created."
export CONTEXT="azure_${INSTANCE_PREFIX}"
create-kubeconfig
export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt"
export KUBE_KEY="/tmp/$RANDOM-kubecfg.key"
export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt"
# TODO: generate ADMIN (and KUBELET) tokens and put those in the master's
# config file. Distribute the same way the htpasswd is done.
(umask 077
ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
sudo cat /srv/kubernetes/kubecfg.crt >"${KUBE_CERT}" 2>/dev/null
ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
sudo cat /srv/kubernetes/kubecfg.key >"${KUBE_KEY}" 2>/dev/null
ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
sudo cat /srv/kubernetes/ca.crt >"${CA_CERT}" 2>/dev/null
)
echo "Sanity checking cluster..."
echo
echo " This will continually check the minions to ensure docker is"
echo " installed. This is usually a good indicator that salt has"
echo " successfully provisioned. This might loop forever if there was"
echo " some uncaught error during start up."
echo
# Basic sanity checking
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
# Make sure docker is installed
echo "--> Making sure docker is installed on ${MINION_NAMES[$i]}."
until ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} \
$AZ_CS.cloudapp.net which docker > /dev/null 2>&1; do
printf "."
sleep 2
done
done
sleep 60
KUBECONFIG_NAME="kubeconfig"
KUBECONFIG="${HOME}/.kube/config"
echo "Distributing kubeconfig for kubelet to master kubelet"
scp -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -P 22000 ${KUBECONFIG} \
$AZ_CS.cloudapp.net:${KUBECONFIG_NAME}
ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
sudo cp ${KUBECONFIG_NAME} /var/lib/kubelet/${KUBECONFIG_NAME}
ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
sudo service kubelet restart
echo "Distributing kubeconfig for kubelet to all minions"
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
scp -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -P ${ssh_ports[$i]} ${KUBECONFIG} \
$AZ_CS.cloudapp.net:${KUBECONFIG_NAME}
ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} $AZ_CS.cloudapp.net \
sudo cp ${KUBECONFIG_NAME} /var/lib/kubelet/${KUBECONFIG_NAME}
ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} $AZ_CS.cloudapp.net \
sudo cp ${KUBECONFIG_NAME} /var/lib/kube-proxy/${KUBECONFIG_NAME}
ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} $AZ_CS.cloudapp.net \
sudo service kubelet restart
ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} $AZ_CS.cloudapp.net \
sudo killall kube-proxy
done
# ensures KUBECONFIG is set
get-kubeconfig-basicauth
echo
echo "Kubernetes cluster is running. The master is running at:"
echo
echo " https://${KUBE_MASTER_IP}"
echo
echo "The user name and password to use is located in ${KUBECONFIG}."
echo
}
# Delete a kubernetes cluster
function kube-down {
echo "Bringing down cluster"
set +e
azure_call vm delete $MASTER_NAME -b -q
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
azure_call vm delete ${MINION_NAMES[$i]} -b -q
done
wait
}
# Update a kubernetes cluster with latest source
#function kube-push {
# detect-project
# detect-master
# Make sure we have the tar files staged on Azure Storage
# find-release-tars
# upload-server-tars
# (
# echo "#! /bin/bash"
# echo "mkdir -p /var/cache/kubernetes-install"
# echo "cd /var/cache/kubernetes-install"
# echo "readonly SERVER_BINARY_TAR_URL='${SERVER_BINARY_TAR_URL}'"
# echo "readonly SALT_TAR_URL='${SALT_TAR_URL}'"
# grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/common.sh"
# grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/download-release.sh"
# echo "echo Executing configuration"
# echo "sudo salt '*' mine.update"
# echo "sudo salt --force-color '*' state.highstate"
# ) | gcutil ssh --project "$PROJECT" --zone "$ZONE" "$KUBE_MASTER" sudo bash
# get-kubeconfig-basicauth
# echo
# echo "Kubernetes cluster is running. The master is running at:"
# echo
# echo " https://${KUBE_MASTER_IP}"
# echo
# echo "The user name and password to use is located in ${KUBECONFIG:-$DEFAULT_KUBECONFIG}."
# echo
#}
# -----------------------------------------------------------------------------
# Cluster specific test helpers used from hack/e2e-test.sh
# Execute prior to running tests to build a release if required for env.
#
# Assumed Vars:
# KUBE_ROOT
function test-build-release {
# Make a release
"${KUBE_ROOT}/build/release.sh"
}
# SSH to a node by name ($1) and run a command ($2).
function ssh-to-node {
local node="$1"
local cmd="$2"
ssh -o LogLevel=quiet "${node}" "${cmd}"
}
# Restart the kube-proxy on a node ($1)
function restart-kube-proxy {
ssh-to-node "$1" "sudo /etc/init.d/kube-proxy restart"
}
# Restart the kube-apiserver on the master ($1)
function restart-apiserver {
ssh-to-node "$1" "sudo /etc/init.d/kube-apiserver restart"
}
function test-setup {
"${KUBE_ROOT}/cluster/kube-up.sh"
}
function test-teardown {
"${KUBE_ROOT}/cluster/kube-down.sh"
}


@ -30,6 +30,8 @@
# * export KUBERNETES_PROVIDER=aws; wget -q -O - https://get.k8s.io | bash
# Libvirt (with CoreOS as a guest operating system)
# * export KUBERNETES_PROVIDER=libvirt-coreos; wget -q -O - https://get.k8s.io | bash
# Microsoft Azure
# * export KUBERNETES_PROVIDER=azure-legacy; wget -q -O - https://get.k8s.io | bash
# Vagrant (local virtual machines)
# * export KUBERNETES_PROVIDER=vagrant; wget -q -O - https://get.k8s.io | bash
# VMWare VSphere


@ -4,11 +4,11 @@ This is the root of the SaltStack configuration for Kubernetes. A high
level overview for the Kubernetes SaltStack configuration can be found [in the docs tree.](../../docs/admin/salt.md)
This SaltStack configuration currently applies to default
configurations for Debian-on-GCE, Fedora-on-Vagrant, and Ubuntu-on-AWS.
(That doesn't mean it can't be made to apply to an
configurations for Debian-on-GCE, Fedora-on-Vagrant, Ubuntu-on-AWS and
Ubuntu-on-Azure. (That doesn't mean it can't be made to apply to an
arbitrary configuration, but those are only the in-tree OS/IaaS
combinations supported today.) As you peruse the configuration, these
are shorthanded as `gce`, `vagrant`, `aws` in `grains.cloud`;
are shorthanded as `gce`, `vagrant`, `aws`, `azure-legacy` in `grains.cloud`;
the documentation in this tree uses this same shorthand for convenience.
See more:


@ -8,24 +8,27 @@ and is only used for the [docker](docker/) config.)
Key: M = Config applies to master, n = config applies to nodes
Config | GCE | Vagrant | AWS |
----------------------------------------------------|-------|---------|-----|
[debian-auto-upgrades](debian-auto-upgrades/) | M n | M n | M n |
[docker](docker/) | M n | M n | M n |
[etcd](etcd/) | M | M | M |
[fluentd-es](fluentd-es/) (pillar conditional) | M n | M n | M n |
[fluentd-gcp](fluentd-gcp/) (pillar conditional) | M n | M n | M n |
[generate-cert](generate-cert/) | M | M | M |
[kube-addons](kube-addons/) | M | M | M |
[kube-apiserver](kube-apiserver/) | M | M | M |
[kube-controller-manager](kube-controller-manager/) | M | M | M |
[kube-proxy](kube-proxy/) | n | n | n |
[kube-scheduler](kube-scheduler/) | M | M | M |
[kubelet](kubelet/) | M n | M n | M n |
[logrotate](logrotate/) | M n | n | M n |
[supervisord](supervisor/) | M n | M n | M n |
[base](base.sls) | M n | M n | M n |
[kube-client-tools](kube-client-tools.sls) | M | M | M |
Config | GCE | Vagrant | AWS | Azure
----------------------------------------------------|-------|---------|-----|------
[debian-auto-upgrades](debian-auto-upgrades/) | M n | M n | M n | M n
[docker](docker/) | M n | M n | M n | M n
[etcd](etcd/) | M | M | M | M
[fluentd-es](fluentd-es/) (pillar conditional) | M n | M n | M n | M n
[fluentd-gcp](fluentd-gcp/) (pillar conditional) | M n | M n | M n | M n
[generate-cert](generate-cert/) | M | M | M | M
[kube-addons](kube-addons/) | M | M | M | M
[kube-apiserver](kube-apiserver/) | M | M | M | M
[kube-controller-manager](kube-controller-manager/) | M | M | M | M
[kube-proxy](kube-proxy/) | n | n | n | n
[kube-scheduler](kube-scheduler/) | M | M | M | M
[kubelet](kubelet/) | M n | M n | M n | M n
[logrotate](logrotate/) | M n | n | M n | M n
[supervisord](supervisor/) | M n | M n | M n | M n
[nginx](nginx/) | | | | M
[openvpn-client](openvpn-client/) | | | | n
[openvpn](openvpn/) | | | | M
[base](base.sls) | M n | M n | M n | M n
[kube-client-tools](kube-client-tools.sls) | M | M | M | M
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/saltbase/salt/README.md?pixel)]()


@ -47,6 +47,93 @@ docker:
- pkg: docker-io
{% endif %}
{% elif grains.cloud is defined and grains.cloud == 'azure-legacy' %}
{% if pillar.get('is_systemd') %}
{{ pillar.get('systemd_system_path') }}/docker.service:
file.managed:
- source: salt://docker/docker.service
- template: jinja
- user: root
- group: root
- mode: 644
- defaults:
environment_file: {{ environment_file }}
# The docker service.running block below doesn't work reliably
# Instead we run our script which e.g. does a systemd daemon-reload
# But we keep the service block below, so it can be used by dependencies
# TODO: Fix this
fix-service-docker:
cmd.wait:
- name: /opt/kubernetes/helpers/services bounce docker
- watch:
- file: {{ pillar.get('systemd_system_path') }}/docker.service
- file: {{ environment_file }}
{% endif %}
{{ environment_file }}:
file.managed:
- source: salt://docker/docker-defaults
- template: jinja
- user: root
- group: root
- mode: 644
- makedirs: true
- require:
- pkg: docker-engine
apt-key:
pkgrepo.managed:
- humanname: Docker Repository
- name: deb https://apt.dockerproject.org/repo ubuntu-trusty main
- dist: ubuntu-trusty
- file: /etc/apt/sources.list.d/docker.list
- keyid: 58118E89F3A912897C070ADBF76221572C52609D
- keyserver: hkp://p80.pool.sks-keyservers.net:80
lxc-docker:
pkg:
- purged
docker-io:
pkg:
- purged
cbr0:
network.managed:
- enabled: True
- type: bridge
{% if grains['roles'][0] == 'kubernetes-pool' %}
- proto: none
{% else %}
- proto: dhcp
{% endif %}
- ports: none
- bridge: cbr0
{% if grains['roles'][0] == 'kubernetes-pool' %}
- ipaddr: {{ grains['cbr-cidr'] }}
{% endif %}
- delay: 0
- bypassfirewall: True
- require_in:
- service: docker
docker-engine:
pkg:
- installed
- require:
- pkgrepo: 'apt-key'
docker:
service.running:
- enable: True
- require:
- file: {{ environment_file }}
- watch:
- file: {{ environment_file }}
{% elif grains.cloud is defined and grains.cloud in ['vsphere', 'photon-controller'] and grains.os == 'Debian' and grains.osrelease_info[0] >=8 %}
{% if pillar.get('is_systemd') %}
@ -294,6 +381,7 @@ docker-upgrade:
- name: /opt/kubernetes/helpers/pkg install-no-start {{ docker_pkg_name }} {{ override_docker_ver }} /var/cache/docker-install/{{ override_deb }}
- require:
- file: /var/cache/docker-install/{{ override_deb }}
{% endif %} # end override_docker_ver != ''
{% if pillar.get('is_systemd') %}


@ -6,6 +6,9 @@
{% if grains.cloud == 'aws' %}
{% set cert_ip='_use_aws_external_ip_' %}
{% endif %}
{% if grains.cloud == 'azure-legacy' %}
{% set cert_ip='_use_azure_dns_name_' %}
{% endif %}
{% if grains.cloud == 'vsphere' or grains.cloud == 'photon-controller' %}
{% set cert_ip=grains.ip_interfaces.eth0[0] %}
{% endif %}


@ -51,6 +51,11 @@ if [ "$cert_ip" == "_use_aws_external_ip_" ]; then
fi
fi
if [ "$cert_ip" == "_use_azure_dns_name_" ]; then
cert_ip=$(uname -n | awk -F. '{ print $2 }').cloudapp.net
use_cn=true
fi
sans="IP:${cert_ip}"
if [[ -n "${extra_sans}" ]]; then
sans="${sans},${extra_sans}"


@ -14,7 +14,7 @@
{% set srv_sshproxy_path = "/srv/sshproxy" -%}
{% if grains.cloud is defined -%}
{% if grains.cloud not in ['vagrant', 'vsphere', 'photon-controller'] -%}
{% if grains.cloud not in ['vagrant', 'vsphere', 'photon-controller', 'azure-legacy'] -%}
{% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
{% endif -%}


@ -36,7 +36,7 @@
{% set srv_kube_path = "/srv/kubernetes" -%}
{% if grains.cloud is defined -%}
{% if grains.cloud not in ['vagrant', 'vsphere', 'photon-controller'] -%}
{% if grains.cloud not in ['vagrant', 'vsphere', 'photon-controller', 'azure-legacy'] -%}
{% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
{% endif -%}
{% set service_account_key = "--service-account-private-key-file=/srv/kubernetes/server.key" -%}
@ -59,7 +59,7 @@
{% set root_ca_file = "" -%}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'vsphere', 'photon-controller', 'openstack'] %}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'vsphere', 'photon-controller', 'openstack', 'azure-legacy'] %}
{% set root_ca_file = "--root-ca-file=/srv/kubernetes/ca.crt" -%}
{% endif -%}


@ -24,7 +24,7 @@ function load-docker-images() {
if which docker 1>/dev/null 2>&1; then
timeout 30 docker load -i /srv/salt/kube-bins/kube-apiserver.tar 1>/dev/null 2>&1
timeout 120 docker load -i /srv/salt/kube-bins/kube-apiserver.tar 1>/dev/null 2>&1
rc=$?
if [[ $rc == 0 ]]; then
let loadedImageFlags="$loadedImageFlags|1"
@ -32,7 +32,7 @@ function load-docker-images() {
restart_docker=true
fi
timeout 30 docker load -i /srv/salt/kube-bins/kube-scheduler.tar 1>/dev/null 2>&1
timeout 120 docker load -i /srv/salt/kube-bins/kube-scheduler.tar 1>/dev/null 2>&1
rc=$?
if [[ $rc == 0 ]]; then
let loadedImageFlags="$loadedImageFlags|2"
@ -40,7 +40,7 @@ function load-docker-images() {
restart_docker=true
fi
timeout 30 docker load -i /srv/salt/kube-bins/kube-controller-manager.tar 1>/dev/null 2>&1
timeout 120 docker load -i /srv/salt/kube-bins/kube-controller-manager.tar 1>/dev/null 2>&1
rc=$?
if [[ $rc == 0 ]]; then
let loadedImageFlags="$loadedImageFlags|4"


@ -22,7 +22,7 @@ while true; do
if which docker 1>/dev/null 2>&1; then
timeout 30 docker load -i /srv/salt/kube-bins/kube-proxy.tar 1>/dev/null 2>&1
timeout 120 docker load -i /srv/salt/kube-bins/kube-proxy.tar 1>/dev/null 2>&1
rc=$?
if [[ "${rc}" == 0 ]]; then
let loadedImageFlags="${loadedImageFlags}|1"


@ -5,7 +5,7 @@
{% set ips = salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() -%}
{% set api_servers = "--master=https://" + ips[0][0] -%}
{% endif -%}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'vsphere', 'photon-controller', 'openstack' ] %}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'vsphere', 'photon-controller', 'openstack', 'azure-legacy' ] %}
{% set api_servers_with_port = api_servers -%}
{% else -%}
{% set api_servers_with_port = api_servers + ":6443" -%}


@ -16,7 +16,7 @@
{% endif -%}
# TODO: remove nginx for other cloud providers.
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'vsphere', 'photon-controller', 'openstack'] %}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'vsphere', 'photon-controller', 'openstack', 'azure-legacy'] %}
{% set api_servers_with_port = api_servers -%}
{% else -%}
{% set api_servers_with_port = api_servers + ":6443" -%}
@ -28,7 +28,7 @@
{% set reconcile_cidr_args = "" -%}
{% if grains['roles'][0] == 'kubernetes-master' -%}
{% if grains.cloud in ['aws', 'gce', 'vagrant', 'vsphere', 'photon-controller', 'openstack'] -%}
{% if grains.cloud in ['aws', 'gce', 'vagrant', 'vsphere', 'photon-controller', 'openstack', 'azure-legacy'] -%}
# Unless given a specific directive, disable registration for the kubelet
# running on the master.
@ -48,7 +48,7 @@
{% endif -%}
{% set cloud_provider = "" -%}
{% if grains.cloud is defined and grains.cloud not in ['vagrant', 'vsphere', 'photon-controller'] -%}
{% if grains.cloud is defined and grains.cloud not in ['vagrant', 'vsphere', 'photon-controller', 'azure-legacy'] -%}
{% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
{% endif -%}


@ -0,0 +1,64 @@
nginx:
pkg:
- installed
/etc/nginx/nginx.conf:
file:
- managed
- source: salt://nginx/nginx.conf
- template: jinja
- user: root
- group: root
- mode: 644
/etc/nginx/sites-enabled/default:
file:
- managed
- makedirs: true
- source: salt://nginx/kubernetes-site
- user: root
- group: root
- mode: 644
/usr/share/nginx/htpasswd:
file:
- managed
- source: salt://nginx/htpasswd
- user: root
- group: root
- mode: 644
{% if grains.cloud is defined and grains.cloud in ['gce'] %}
/etc/kubernetes/manifests/nginx.json:
file:
- managed
- source: salt://nginx/nginx.json
- user: root
- group: root
- mode: 644
- require:
- file: /etc/nginx/nginx.conf
- file: /etc/nginx/sites-enabled/default
- file: /usr/share/nginx/htpasswd
- cmd: kubernetes-cert
# stop legacy nginx service
stop_nginx-service:
service.dead:
- name: nginx
- enable: None
{% else %}
nginx-service:
service:
- running
- name: nginx
- watch:
- pkg: nginx
- file: /etc/nginx/nginx.conf
- file: /etc/nginx/sites-enabled/default
- file: /usr/share/nginx/htpasswd
- cmd: kubernetes-cert
{% endif %}


@ -0,0 +1,66 @@
#server {
#listen 80; ## listen for ipv4; this line is default and implied
#listen [::]:80 default_server ipv6only=on; ## listen for ipv6
# root /usr/share/nginx/www;
# index index.html index.htm;
# Make site accessible from http://localhost/
# server_name localhost;
# location / {
# auth_basic "Restricted";
# auth_basic_user_file /usr/share/nginx/htpasswd;
# Proxy settings.
# proxy_pass http://localhost:8080/;
# proxy_connect_timeout 159s;
# proxy_send_timeout 600s;
# proxy_read_timeout 600s;
# proxy_buffer_size 64k;
# proxy_buffers 16 32k;
# proxy_busy_buffers_size 64k;
# proxy_temp_file_write_size 64k;
# }
#}
# HTTPS server
#
server {
listen 443;
server_name localhost;
root html;
index index.html index.htm;
ssl on;
ssl_certificate /srv/kubernetes/server.cert;
ssl_certificate_key /srv/kubernetes/server.key;
ssl_session_timeout 5m;
# don't use SSLv3 because of POODLE
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS;
ssl_prefer_server_ciphers on;
location / {
auth_basic "Restricted";
auth_basic_user_file /usr/share/nginx/htpasswd;
# Proxy settings
# disable buffering so that watch works
proxy_buffering off;
proxy_pass http://127.0.0.1:8080/;
proxy_connect_timeout 159s;
proxy_send_timeout 600s;
proxy_read_timeout 600s;
# Disable retry
proxy_next_upstream off;
# Support web sockets
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
}


@ -0,0 +1,61 @@
{% if grains['os_family'] == 'RedHat' %}
user nginx;
{% else %}
user www-data;
{% endif %}
worker_processes 4;
pid /var/run/nginx.pid;
events {
worker_connections 768;
# multi_accept on;
}
http {
##
# Basic Settings
##
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
# server_tokens off;
# server_names_hash_bucket_size 64;
# server_name_in_redirect off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# Logging Settings
##
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
##
# Gzip Settings
##
gzip on;
gzip_disable "msie6";
# gzip_vary on;
# gzip_proxied any;
# gzip_comp_level 6;
# gzip_buffers 16 8k;
# gzip_http_version 1.1;
# gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
##
# Virtual Host Configs
##
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/*;
}


@ -0,0 +1,60 @@
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {"name":"nginx"},
"spec":{
"hostNetwork": true,
"containers":[
{
"name": "nginx",
"image": "gcr.io/google-containers/nginx:v1",
"resources": {
"limits": {
"cpu": "200m"
}
},
"command": [
"nginx",
"-g",
"daemon off;"
],
"ports":[
{ "name": "https",
"containerPort": 443,
"hostPort": 443}
],
"volumeMounts": [
{ "name": "nginx",
"mountPath": "/etc/nginx",
"readOnly": true},
{ "name": "k8s",
"mountPath": "/srv/kubernetes",
"readOnly": true},
{ "name": "logs",
"mountPath": "/var/log/nginx",
"readOnly": false},
{ "name": "passwd",
"mountPath": "/usr/share/nginx",
"readOnly": true}
]
}
],
"volumes":[
{ "name": "nginx",
"hostPath": {
"path": "/etc/nginx"}
},
{ "name": "k8s",
"hostPath": {
"path": "/srv/kubernetes"}
},
{ "name": "passwd",
"hostPath": {
"path": "/usr/share/nginx"}
},
{ "name": "logs",
"hostPath": {
"path": "/var/logs/nginx"}
}
]
}}


@ -0,0 +1,53 @@
# Specify that we are a client and that we
# will be pulling certain config file directives
# from the server.
client
# Use the same setting as you are using on
# the server.
# On most systems, the VPN will not function
# unless you partially or fully disable
# the firewall for the TUN/TAP interface.
dev tun
# Are we connecting to a TCP or
# UDP server? Use the same setting as
# on the server.
proto udp
# The hostname/IP and port of the server.
# You can have multiple remote entries
# to load balance between the servers.
remote {{ salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').keys()[0] }} 1194
# Keep trying indefinitely to resolve the
# host name of the OpenVPN server. Very useful
# on machines which are not permanently connected
# to the internet such as laptops.
resolv-retry infinite
# Most clients don't need to bind to
# a specific local port number.
nobind
# Try to preserve some state across restarts.
persist-key
persist-tun
# SSL/TLS parms.
# See the server config file for more
# description. It's best to use
# a separate .crt/.key file pair
# for each client. A single ca
# file can be used for all clients.
ca /etc/openvpn/ca.crt
cert /etc/openvpn/client.crt
key /etc/openvpn/client.key
# Enable compression on the VPN link.
# Don't enable this unless it is also
# enabled in the server config file.
comp-lzo
# Set log file verbosity.
verb 3


@ -0,0 +1,16 @@
/etc/openvpn/client.conf:
file.managed:
- source: salt://openvpn-client/client.conf
- template: jinja
- user: root
- group: root
- mode: 644
- makedirs: True
openvpn:
pkg:
- latest
service.running:
- enable: True
- watch:
- file: /etc/openvpn/client.conf


@ -0,0 +1,31 @@
/etc/openvpn/server.conf:
file.managed:
- source: salt://openvpn/server.conf
- template: jinja
- user: root
- group: root
- mode: 644
- makedirs: True
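# Write one client-config-dir entry per minion so the server routes that
# minion's cbr0 pod CIDR (the cbr-string grain) back over the VPN to it.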
{% for minion in salt['mine.get']('roles:kubernetes-pool', 'grains.items', expr_form='grain').values() %}
/etc/openvpn/ccd/{{ minion['hostnamef'] }}:
file.managed:
- contents: "iroute {{ minion['cbr-string'] }}\n"
- user: root
- group: root
- mode: 644
- makedirs: True
{% endfor %}
openssl dhparam -out /etc/openvpn/dh1024.pem 1024:
cmd.run:
- creates: /etc/openvpn/dh1024.pem
- unless: test -f /etc/openvpn/dh1024.pem
openvpn:
pkg:
- latest
service.running:
- enable: True
- watch:
- file: /etc/openvpn/server.conf


@ -0,0 +1,123 @@
# Which TCP/UDP port should OpenVPN listen on?
# If you want to run multiple OpenVPN instances
# on the same machine, use a different port
# number for each one. You will need to
# open up this port on your firewall.
port 1194
# TCP or UDP server?
proto udp
# "dev tun" will create a routed IP tunnel,
# "dev tap" will create an ethernet tunnel.
# Use "dev tap0" if you are ethernet bridging
# and have precreated a tap0 virtual interface
# and bridged it with your ethernet interface.
# If you want to control access policies
# over the VPN, you must create firewall
# rules for the TUN/TAP interface.
# On non-Windows systems, you can give
# an explicit unit number, such as tun0.
# On Windows, use "dev-node" for this.
# On most systems, the VPN will not function
# unless you partially or fully disable
# the firewall for the TUN/TAP interface.
dev tun
# SSL/TLS root certificate (ca), certificate
# (cert), and private key (key). Each client
# and the server must have their own cert and
# key file. The server and all clients will
# use the same ca file.
#
# See the "easy-rsa" directory for a series
# of scripts for generating RSA certificates
# and private keys. Remember to use
# a unique Common Name for the server
# and each of the client certificates.
#
# Any X509 key management system can be used.
# OpenVPN can also use a PKCS #12 formatted key file
# (see "pkcs12" directive in man page).
ca /etc/openvpn/ca.crt
cert /etc/openvpn/server.crt
key /etc/openvpn/server.key # This file should be kept secret
# Diffie hellman parameters.
# Generate your own with:
# openssl dhparam -out dh1024.pem 1024
# Substitute 2048 for 1024 if you are using
# 2048 bit keys.
dh /etc/openvpn/dh1024.pem
# Configure server mode and supply a VPN subnet
# for OpenVPN to draw client addresses from.
# The server will take 10.8.0.1 for itself,
# the rest will be made available to clients.
# Each client will be able to reach the server
# on 10.8.0.1. Comment this line out if you are
# ethernet bridging. See the man page for more info.
server 10.8.0.0 255.255.255.0
# Maintain a record of client <-> virtual IP address
# associations in this file. If OpenVPN goes down or
# is restarted, reconnecting clients can be assigned
# the same virtual IP address from the pool that was
# previously assigned.
ifconfig-pool-persist ipp.txt
# To assign specific IP addresses to specific
# clients or if a connecting client has a private
# subnet behind it that should also have VPN access,
# use the subdirectory "ccd" for client-specific
# configuration files (see man page for more info).
client-config-dir /etc/openvpn/ccd
{% for minion in salt['mine.get']('roles:kubernetes-pool', 'grains.items', expr_form='grain').values() %}
push "route {{ minion['cbr-string'] }}"
route {{ minion['cbr-string'] }}
{% endfor %}
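# Each minion's pod CIDR is thus pushed to every client and routed back
# to the owning minion via its ccd iroute entry.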
# Uncomment this directive to allow different
# clients to be able to "see" each other.
# By default, clients will only see the server.
# To force clients to only see the server, you
# will also need to appropriately firewall the
# server's TUN/TAP interface.
client-to-client
# The keepalive directive causes ping-like
# messages to be sent back and forth over
# the link so that each side knows when
# the other side has gone down.
# Ping every 10 seconds, assume that remote
# peer is down if no ping received during
# a 120 second time period.
keepalive 10 120
# Enable compression on the VPN link.
# If you enable it here, you must also
# enable it in the client config file.
comp-lzo
# The persist options will try to avoid
# accessing certain resources on restart
# that may no longer be accessible because
# of the privilege downgrade.
persist-key
persist-tun
# Output a short status file showing
# current connections, truncated
# and rewritten every minute.
status openvpn-status.log
# Set the appropriate level of log
# file verbosity.
#
# 0 is silent, except for fatal errors
# 4 is reasonable for general usage
# 5 and 6 can help to debug connection problems
# 9 is extremely verbose
verb 3


@ -22,6 +22,9 @@ base:
- cni
{% elif pillar.get('network_provider', '').lower() == 'cni' %}
- cni
{% endif %}
{% if grains['cloud'] is defined and grains['cloud'] == 'azure-legacy' %}
- openvpn-client
{% endif %}
- helpers
- kube-client-tools
@ -84,7 +87,11 @@ base:
- logrotate
{% endif %}
- kube-addons
{% if grains['cloud'] is defined and grains['cloud'] in [ 'vagrant', 'gce', 'aws', 'vsphere', 'photon-controller', 'openstack'] %}
{% if grains['cloud'] is defined and grains['cloud'] == 'azure-legacy' %}
- openvpn
- nginx
{% endif %}
{% if grains['cloud'] is defined and grains['cloud'] in [ 'vagrant', 'gce', 'aws', 'vsphere', 'photon-controller', 'openstack', 'azure-legacy'] %}
- docker
- kubelet
{% endif %}