Implement Azure cloud provider scripts
Implement basic cloud provider functionality to deploy Kubernetes on Azure. SaltStack is used to deploy Kubernetes on top of Ubuntu virtual machines, and OpenVPN provides network connectivity. For kubelet authentication, we use basic authentication (username and password). The scripts use the legacy Azure Service Management APIs. We have set up a nightly federated test job on our Jenkins server to run the e2e test suite against Azure; with the cloud provider scripts in this commit, 14 e2e test cases pass in this environment. We plan to implement additional Azure functionality to support more test cases.
parent 4523429b20
commit e35c1ccba2
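
For orientation, a minimal sketch of how the scripts below are driven, assuming the defaults in cluster/azure-legacy/config-default.sh and an account already set up for the legacy Service Management APIs:

    # Select the provider added by this commit and bring up a cluster.
    export KUBERNETES_PROVIDER=azure-legacy
    export AZ_VNET=MyVnet        # overridable defaults, see config-default.sh below
    export AZ_SUBNET=Subnet-1
    export NUM_MINIONS=4
    cluster/kube-up.sh           # test-setup in util.sh wraps this same script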
cluster/azure-legacy/config-default.sh:
@@ -18,15 +18,17 @@ INSTANCE_PREFIX=kubernetes
 AZ_LOCATION='West US'
 TAG=testing
 AZ_CS_PREFIX=kube
-AZ_VNET=MyVnet
-AZ_SUBNET=Subnet-1
+AZ_VNET=${AZ_VNET:-MyVnet}
+AZ_SUBNET=${AZ_SUBNET:-Subnet-1}
 AZ_IMAGE=b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04_1-LTS-amd64-server-20140927-en-us-30GB
-AZ_CS="" # is set in azure/util.sh verify-prereqs
+AZ_CS="" # is set in azure-legacy/util.sh verify-prereqs

 AZ_SSH_KEY=$HOME/.ssh/azure_rsa
 AZ_SSH_CERT=$HOME/.ssh/azure.pem

-NUM_MINIONS=4
+NUM_MINIONS=${NUM_MINIONS:-4}
 MASTER_SIZE='Medium'
 MINION_SIZE='Medium'

 MASTER_NAME="${INSTANCE_PREFIX}-master"
 MASTER_TAG="${INSTANCE_PREFIX}-master"
@@ -35,7 +37,7 @@ MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}}))
 MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24"))
 MINION_SCOPES=""

-SERVICE_CLUSTER_IP_RANGE="10.250.0.0/16"  # formerly PORTAL_NET
+SERVICE_CLUSTER_IP_RANGE="10.244.244.0/16"  # formerly PORTAL_NET

 # Optional: Install node logging
 ENABLE_NODE_LOGGING=false
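
The ${VAR:-default} rewrites above make each setting overridable from the caller's environment; the bash idiom in isolation:

    unset NUM_MINIONS
    echo "${NUM_MINIONS:-4}"     # prints 4: the default applies
    NUM_MINIONS=2
    echo "${NUM_MINIONS:-4}"     # prints 2: the exported value wins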
cluster/azure-legacy/templates/create-kubeconfig.sh (new file, 77 lines):
@@ -0,0 +1,77 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Create the kubeconfig files for kubelet and kube-proxy on the minions.
# KUBE_USER and KUBE_PASSWORD are required.

function create-salt-kubelet-auth() {
  local -r kubelet_kubeconfig_file="/srv/salt-overlay/salt/kubelet/kubeconfig"
  mkdir -p /srv/salt-overlay/salt/kubelet
  (umask 077;
  cat > "${kubelet_kubeconfig_file}" <<EOF
apiVersion: v1
clusters:
- cluster:
    insecure-skip-tls-verify: true
    server: https://${KUBE_MASTER_IP}
  name: azure_kubernetes
contexts:
- context:
    cluster: azure_kubernetes
    user: kubelet
  name: azure_kubernetes
current-context: azure_kubernetes
kind: Config
preferences: {}
users:
- name: kubelet
  user:
    password: ${KUBE_PASSWORD}
    username: ${KUBE_USER}
EOF
  )
}

function create-salt-kube-proxy-auth() {
  local -r kube_proxy_kubeconfig_file="/srv/salt-overlay/salt/kube-proxy/kubeconfig"
  mkdir -p /srv/salt-overlay/salt/kube-proxy
  (umask 077;
  cat > "${kube_proxy_kubeconfig_file}" <<EOF
apiVersion: v1
clusters:
- cluster:
    insecure-skip-tls-verify: true
    server: https://${KUBE_MASTER_IP}
  name: azure_kubernetes
contexts:
- context:
    cluster: azure_kubernetes
    user: kube-proxy
  name: azure_kubernetes
current-context: azure_kubernetes
kind: Config
preferences: {}
users:
- name: kube-proxy
  user:
    password: ${KUBE_PASSWORD}
    username: ${KUBE_USER}
EOF
  )
}

create-salt-kubelet-auth
create-salt-kube-proxy-auth
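
Because the generated kubeconfig relies on basic auth over TLS with certificate verification skipped, the credentials can be sanity-checked directly; a sketch, assuming KUBE_USER, KUBE_PASSWORD, and KUBE_MASTER_IP are set as in the surrounding scripts:

    # Expect "ok" if the master accepts the same credentials the minions will use.
    curl -sk -u "${KUBE_USER}:${KUBE_PASSWORD}" "https://${KUBE_MASTER_IP}/healthz"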
cluster/azure-legacy/templates/salt-master.sh:
@@ -22,9 +22,30 @@ cat <<EOF >/etc/salt/minion.d/grains.conf
 grains:
   roles:
     - kubernetes-master
-  cloud: azure
+  cloud: azure-legacy
 EOF
+
+
+# Helper that sets a salt grain in grains.conf, if the upper-cased key is a non-empty env var
+function env_to_salt {
+  local key=$1
+  local env_key=`echo $key | tr '[:lower:]' '[:upper:]'`
+  local value=${!env_key}
+  if [[ -n "${value}" ]]; then
+    # Note this is yaml, so indentation matters
+    cat <<EOF >>/etc/salt/minion.d/grains.conf
+  ${key}: '$(echo "${value}" | sed -e "s/'/''/g")'
+EOF
+  fi
+}
+
+env_to_salt docker_opts
+env_to_salt docker_root
+env_to_salt kubelet_root
+env_to_salt master_extra_sans
+env_to_salt runtime_config
+

 # Auto accept all keys from minions that try to join
 mkdir -p /etc/salt/master.d
 cat <<EOF >/etc/salt/master.d/auto-accept.conf
@@ -59,6 +80,9 @@ log_level: debug
 log_level_logfile: debug
 EOF

+echo "Sleep 150 to wait for minions to come up"
+sleep 150
+
 install-salt --master

 # Wait a few minutes and trigger another Salt run to better recover from
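
env_to_salt resolves the upper-cased key through bash indirect expansion; the lookup in isolation:

    key=docker_opts
    env_key=$(echo "$key" | tr '[:lower:]' '[:upper:]')   # -> DOCKER_OPTS
    DOCKER_OPTS="--bridge=cbr0"                           # hypothetical value for illustration
    value=${!env_key}                                     # indirect expansion
    echo "  ${key}: '${value}'"                           # the YAML line appended to grains.conf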
cluster/azure-legacy/templates/salt-minion.sh:
@@ -43,11 +43,29 @@ grains:
   roles:
     - kubernetes-pool
   cbr-cidr: $MINION_IP_RANGE
-  cloud: azure
+  cloud: azure-legacy
+  hostnamef: $hostnamef
+  cbr-string: $cbrstring
 EOF

+if [[ -n "${DOCKER_OPTS}" ]]; then
+  cat <<EOF >>/etc/salt/minion.d/grains.conf
+  docker_opts: '$(echo "$DOCKER_OPTS" | sed -e "s/'/''/g")'
+EOF
+fi
+
+if [[ -n "${DOCKER_ROOT}" ]]; then
+  cat <<EOF >>/etc/salt/minion.d/grains.conf
+  docker_root: '$(echo "$DOCKER_ROOT" | sed -e "s/'/''/g")'
+EOF
+fi
+
+if [[ -n "${KUBELET_ROOT}" ]]; then
+  cat <<EOF >>/etc/salt/minion.d/grains.conf
+  kubelet_root: '$(echo "$KUBELET_ROOT" | sed -e "s/'/''/g")'
+EOF
+fi
+
 install-salt

 # Wait a few minutes and trigger another Salt run to better recover from
cluster/azure-legacy/util.sh:
@@ -34,6 +34,12 @@ source "${KUBE_ROOT}/cluster/azure-legacy/${KUBE_CONFIG_FILE-"config-default.sh"}"
 source "${KUBE_ROOT}/cluster/common.sh"

+
+function prepare-e2e() {
+  # (the e2e script runs detect-project; I don't think we need to do anything here)
+  # Note: we can't print anything here, or else the test tools will break with the extra output
+  return
+}

 function azure_call {
   local -a params=()
   local param
@@ -278,6 +284,8 @@ function kube-up {
       -out ${KUBE_TEMP}/${MINION_NAMES[$i]}.crt
   done

+  KUBE_MASTER_IP="${AZ_CS}.cloudapp.net"
+
   # Build up start up script for master
   echo "--> Building up start up script for master"
   (
@@ -294,9 +302,13 @@ function kube-up {
     echo "readonly SALT_TAR_URL='${SALT_TAR_URL}'"
     echo "readonly MASTER_HTPASSWD='${htpasswd}'"
     echo "readonly SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'"
     echo "readonly ADMISSION_CONTROL='${ADMISSION_CONTROL:-}'"
+    echo "readonly KUBE_USER='${KUBE_USER}'"
+    echo "readonly KUBE_PASSWORD='${KUBE_PASSWORD}'"
+    echo "readonly KUBE_MASTER_IP='${KUBE_MASTER_IP}'"
     grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/common.sh"
     grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/create-dynamic-salt-files.sh"
+    grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/create-kubeconfig.sh"
     grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/download-release.sh"
     grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/salt-master.sh"
   ) > "${KUBE_TEMP}/master-start.sh"
@@ -317,6 +329,7 @@ function kube-up {

   echo "--> Starting VM"
   azure_call vm create \
+    -z "$MASTER_SIZE" \
     -w "$AZ_VNET" \
     -n $MASTER_NAME \
     -l "$AZ_LOCATION" \
@@ -338,12 +351,17 @@ function kube-up {
       echo "CLIENT_CRT=\"$(cat ${KUBE_TEMP}/${MINION_NAMES[$i]}.crt)\""
       echo "CLIENT_KEY=\"$(cat ${KUBE_TEMP}/${MINION_NAMES[$i]}.key)\""
       echo "MINION_IP_RANGE='${MINION_IP_RANGES[$i]}'"
+      echo "readonly KUBE_USER='${KUBE_USER}'"
+      echo "readonly KUBE_PASSWORD='${KUBE_PASSWORD}'"
+      echo "readonly KUBE_MASTER_IP='${KUBE_MASTER_IP}'"
       grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/common.sh"
+      grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/create-kubeconfig.sh"
       grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/salt-minion.sh"
     ) > "${KUBE_TEMP}/minion-start-${i}.sh"

     echo "--> Starting VM"
     azure_call vm create \
+      -z "$MINION_SIZE" \
       -c -w "$AZ_VNET" \
       -n ${MINION_NAMES[$i]} \
       -l "$AZ_LOCATION" \
@@ -377,10 +395,11 @@ function kube-up {
   printf "\n"
   echo "Kubernetes cluster created."

+  export CONTEXT="azure_${INSTANCE_PREFIX}"
+  create-kubeconfig
   export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt"
   export KUBE_KEY="/tmp/$RANDOM-kubecfg.key"
   export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt"
-  export CONTEXT="azure_${INSTANCE_PREFIX}"

   # TODO: generate ADMIN (and KUBELET) tokens and put those in the master's
   # config file. Distribute the same way the htpasswd is done.
@@ -391,8 +410,6 @@ function kube-up {
     sudo cat /srv/kubernetes/kubecfg.key >"${KUBE_KEY}" 2>/dev/null
   ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
     sudo cat /srv/kubernetes/ca.crt >"${CA_CERT}" 2>/dev/null
-
-  create-kubeconfig
   )

   echo "Sanity checking cluster..."
@@ -413,6 +430,31 @@ function kube-up {
     done
   done

+  sleep 60
+  KUBECONFIG_NAME="kubeconfig"
+  KUBECONFIG="${HOME}/.kube/config"
+  echo "Distributing kubeconfig for kubelet to master kubelet"
+  scp -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -P 22000 ${KUBECONFIG} \
+    $AZ_CS.cloudapp.net:${KUBECONFIG_NAME}
+  ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
+    sudo cp ${KUBECONFIG_NAME} /var/lib/kubelet/${KUBECONFIG_NAME}
+  ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
+    sudo service kubelet restart
+
+  echo "Distributing kubeconfig for kubelet to all minions"
+  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
+    scp -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -P ${ssh_ports[$i]} ${KUBECONFIG} \
+      $AZ_CS.cloudapp.net:${KUBECONFIG_NAME}
+    ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} $AZ_CS.cloudapp.net \
+      sudo cp ${KUBECONFIG_NAME} /var/lib/kubelet/${KUBECONFIG_NAME}
+    ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} $AZ_CS.cloudapp.net \
+      sudo cp ${KUBECONFIG_NAME} /var/lib/kube-proxy/${KUBECONFIG_NAME}
+    ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} $AZ_CS.cloudapp.net \
+      sudo service kubelet restart
+    ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} $AZ_CS.cloudapp.net \
+      sudo killall kube-proxy
+  done
+
   # ensures KUBECONFIG is set
   get-kubeconfig-basicauth
   echo
@@ -499,3 +541,11 @@ function restart-kube-proxy {
 function restart-apiserver {
   ssh-to-node "$1" "sudo /etc/init.d/kube-apiserver restart"
 }
+
+function test-setup {
+  "${KUBE_ROOT}/cluster/kube-up.sh"
+}
+
+function test-teardown {
+  "${KUBE_ROOT}/cluster/kube-down.sh"
+}
cluster/get-kube.sh:
@@ -31,7 +31,7 @@
 #  Libvirt (with CoreOS as a guest operating system)
 #  * export KUBERNETES_PROVIDER=libvirt-coreos; wget -q -O - https://get.k8s.io | bash
 #  Microsoft Azure
-#  * export KUBERNETES_PROVIDER=azure; wget -q -O - https://get.k8s.io | bash
+#  * export KUBERNETES_PROVIDER=azure-legacy; wget -q -O - https://get.k8s.io | bash
 #  Vagrant (local virtual machines)
 #  * export KUBERNETES_PROVIDER=vagrant; wget -q -O - https://get.k8s.io | bash
 #  VMWare VSphere
cluster/saltbase/README.md:
@@ -8,7 +8,7 @@ configurations for Debian-on-GCE, Fedora-on-Vagrant, Ubuntu-on-AWS and
 Ubuntu-on-Azure. (That doesn't mean it can't be made to apply to an
 arbitrary configuration, but those are only the in-tree OS/IaaS
 combinations supported today.) As you peruse the configuration, these
-are shorthanded as `gce`, `vagrant`, `aws`, `azure` in `grains.cloud`;
+are shorthanded as `gce`, `vagrant`, `aws`, `azure-legacy` in `grains.cloud`;
 the documentation in this tree uses this same shorthand for convenience.

 See more:
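
On a provisioned node the shorthand is an ordinary Salt grain, so it can be inspected directly; a sketch, assuming salt-call is installed as these scripts arrange:

    salt-call --local grains.get cloud    # expected to print azure-legacy on these VMs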
cluster/saltbase/salt/docker/init.sls:
@@ -47,6 +47,93 @@ docker:
       - pkg: docker-io

 {% endif %}
+{% elif grains.cloud is defined and grains.cloud == 'azure-legacy' %}
+
+{% if pillar.get('is_systemd') %}
+
+{{ pillar.get('systemd_system_path') }}/docker.service:
+  file.managed:
+    - source: salt://docker/docker.service
+    - template: jinja
+    - user: root
+    - group: root
+    - mode: 644
+    - defaults:
+        environment_file: {{ environment_file }}
+
+# The docker service.running block below doesn't work reliably
+# Instead we run our script which e.g. does a systemd daemon-reload
+# But we keep the service block below, so it can be used by dependencies
+# TODO: Fix this
+fix-service-docker:
+  cmd.wait:
+    - name: /opt/kubernetes/helpers/services bounce docker
+    - watch:
+      - file: {{ pillar.get('systemd_system_path') }}/docker.service
+      - file: {{ environment_file }}
+{% endif %}
+
+{{ environment_file }}:
+  file.managed:
+    - source: salt://docker/docker-defaults
+    - template: jinja
+    - user: root
+    - group: root
+    - mode: 644
+    - makedirs: true
+    - require:
+      - pkg: docker-engine
+
+apt-key:
+  pkgrepo.managed:
+    - humanname: Dotdeb
+    - name: deb https://apt.dockerproject.org/repo ubuntu-trusty main
+    - dist: ubuntu-trusty
+    - file: /etc/apt/sources.list.d/docker.list
+    - keyid: 58118E89F3A912897C070ADBF76221572C52609D
+    - keyserver: hkp://p80.pool.sks-keyservers.net:80
+
+lxc-docker:
+  pkg:
+    - purged
+
+docker-io:
+  pkg:
+    - purged
+
+cbr0:
+  network.managed:
+    - enabled: True
+    - type: bridge
+{% if grains['roles'][0] == 'kubernetes-pool' %}
+    - proto: none
+{% else %}
+    - proto: dhcp
+{% endif %}
+    - ports: none
+    - bridge: cbr0
+{% if grains['roles'][0] == 'kubernetes-pool' %}
+    - ipaddr: {{ grains['cbr-cidr'] }}
+{% endif %}
+    - delay: 0
+    - bypassfirewall: True
+    - require_in:
+      - service: docker
+
+docker-engine:
+  pkg:
+    - installed
+    - require:
+      - pkgrepo: 'apt-key'
+
+docker:
+  service.running:
+    - enable: True
+    - require:
+      - file: {{ environment_file }}
+    - watch:
+      - file: {{ environment_file }}
+
 {% elif grains.cloud is defined and grains.cloud in ['vsphere', 'photon-controller'] and grains.os == 'Debian' and grains.osrelease_info[0] >=8 %}

 {% if pillar.get('is_systemd') %}
@@ -304,6 +391,7 @@ docker-upgrade:
     - name: /opt/kubernetes/helpers/pkg install-no-start {{ docker_pkg_name }} {{ override_docker_ver }} /var/cache/docker-install/{{ override_deb }}
     - require:
       - file: /var/cache/docker-install/{{ override_deb }}
+
 {% endif %} # end override_docker_ver != ''

 {% if pillar.get('is_systemd') %}
@@ -6,7 +6,7 @@
 {% if grains.cloud == 'aws' %}
   {% set cert_ip='_use_aws_external_ip_' %}
 {% endif %}
-{% if grains.cloud == 'azure' %}
+{% if grains.cloud == 'azure-legacy' %}
   {% set cert_ip='_use_azure_dns_name_' %}
 {% endif %}
 {% if grains.cloud == 'vsphere' or grains.cloud == 'photon-controller' %}
@@ -14,7 +14,7 @@
 {% set srv_sshproxy_path = "/srv/sshproxy" -%}

 {% if grains.cloud is defined -%}
-  {% if grains.cloud not in ['vagrant', 'vsphere', 'photon-controller'] -%}
+  {% if grains.cloud not in ['vagrant', 'vsphere', 'photon-controller', 'azure-legacy'] -%}
     {% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
   {% endif -%}
@@ -36,7 +36,7 @@
 {% set srv_kube_path = "/srv/kubernetes" -%}

 {% if grains.cloud is defined -%}
-  {% if grains.cloud not in ['vagrant', 'vsphere', 'photon-controller'] -%}
+  {% if grains.cloud not in ['vagrant', 'vsphere', 'photon-controller', 'azure-legacy'] -%}
     {% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
   {% endif -%}
 {% set service_account_key = "--service-account-private-key-file=/srv/kubernetes/server.key" -%}
@@ -54,7 +54,7 @@

 {% set root_ca_file = "" -%}

-{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'vsphere', 'photon-controller', 'openstack'] %}
+{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'vsphere', 'photon-controller', 'openstack', 'azure-legacy'] %}
   {% set root_ca_file = "--root-ca-file=/srv/kubernetes/ca.crt" -%}
 {% endif -%}
cluster/saltbase/salt/kube-master-addons/kube-master-addons.sh:
@@ -24,7 +24,7 @@ function load-docker-images() {
 if which docker 1>/dev/null 2>&1; then

-  timeout 30 docker load -i /srv/salt/kube-bins/kube-apiserver.tar 1>/dev/null 2>&1
+  timeout 120 docker load -i /srv/salt/kube-bins/kube-apiserver.tar 1>/dev/null 2>&1
   rc=$?
   if [[ $rc == 0 ]]; then
     let loadedImageFlags="$loadedImageFlags|1"
@@ -32,7 +32,7 @@ function load-docker-images() {
     restart_docker=true
   fi

-  timeout 30 docker load -i /srv/salt/kube-bins/kube-scheduler.tar 1>/dev/null 2>&1
+  timeout 120 docker load -i /srv/salt/kube-bins/kube-scheduler.tar 1>/dev/null 2>&1
   rc=$?
   if [[ $rc == 0 ]]; then
     let loadedImageFlags="$loadedImageFlags|2"
@@ -40,7 +40,7 @@ function load-docker-images() {
     restart_docker=true
   fi

-  timeout 30 docker load -i /srv/salt/kube-bins/kube-controller-manager.tar 1>/dev/null 2>&1
+  timeout 120 docker load -i /srv/salt/kube-bins/kube-controller-manager.tar 1>/dev/null 2>&1
   rc=$?
   if [[ $rc == 0 ]]; then
     let loadedImageFlags="$loadedImageFlags|4"
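
The surrounding script tracks which images loaded via a bit mask; the bookkeeping in isolation:

    # Each image sets one bit, so "everything loaded" is one integer comparison.
    loadedImageFlags=0
    let loadedImageFlags="$loadedImageFlags|1"   # kube-apiserver
    let loadedImageFlags="$loadedImageFlags|2"   # kube-scheduler
    let loadedImageFlags="$loadedImageFlags|4"   # kube-controller-manager
    (( loadedImageFlags == 7 )) && echo "all master images loaded"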
@@ -22,7 +22,7 @@ while true; do

 if which docker 1>/dev/null 2>&1; then

-  timeout 30 docker load -i /srv/salt/kube-bins/kube-proxy.tar 1>/dev/null 2>&1
+  timeout 120 docker load -i /srv/salt/kube-bins/kube-proxy.tar 1>/dev/null 2>&1
   rc=$?
   if [[ "${rc}" == 0 ]]; then
     let loadedImageFlags="${loadedImageFlags}|1"
Deleted file:
@@ -1,35 +0,0 @@
-{% set daemon_args = "$DAEMON_ARGS" -%}
-{% if grains['os_family'] == 'RedHat' -%}
-  {% set daemon_args = "" -%}
-{% endif -%}
-{# TODO(azure-maintainer): add support for distributing kubeconfig with token to kube-proxy #}
-{# so it can use https #}
-{% if grains['cloud'] is defined and grains['cloud'] == 'azure' -%}
-  {% set api_servers = "--master=http://" + ips[0][0] -%}
-  {% set api_servers_with_port = api_servers + ":7080" -%}
-  {% set kubeconfig = "" -%}
-{% else -%}
-  {% set kubeconfig = "--kubeconfig=/var/lib/kube-proxy/kubeconfig" -%}
-  {% if grains.api_servers is defined -%}
-    {% set api_servers = "--master=https://" + grains.api_servers -%}
-  {% else -%}
-    {% set ips = salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() -%}
-    {% set api_servers = "--master=https://" + ips[0][0] -%}
-  {% endif -%}
-
-  # TODO: remove nginx for other cloud providers.
-  {% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant' ] %}
-    {% set api_servers_with_port = api_servers -%}
-  {% else -%}
-    {% set api_servers_with_port = api_servers + ":6443" -%}
-  {% endif -%}
-
-{% endif -%}
-
-{% set test_args = "" -%}
-{% if pillar['kubeproxy_test_args'] is defined -%}
-  {% set test_args=pillar['kubeproxy_test_args'] %}
-{% endif -%}
-
-# test_args has to be kept at the end, so they'll overwrite any prior configuration
-DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{kubeconfig}} {{pillar['log_level']}} {{test_args}}"
cluster/saltbase/salt/kube-proxy/default:
@@ -5,7 +5,7 @@
   {% set ips = salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() -%}
   {% set api_servers = "--master=https://" + ips[0][0] -%}
 {% endif -%}
-{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'vsphere', 'photon-controller', 'openstack' ] %}
+{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'vsphere', 'photon-controller', 'openstack', 'azure-legacy' ] %}
   {% set api_servers_with_port = api_servers -%}
 {% else -%}
   {% set api_servers_with_port = api_servers + ":6443" -%}
cluster/saltbase/salt/kubelet/default:
@@ -16,7 +16,7 @@
 {% endif -%}

 # TODO: remove nginx for other cloud providers.
-{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'vsphere', 'photon-controller', 'openstack'] %}
+{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'vsphere', 'photon-controller', 'openstack', 'azure-legacy'] %}
   {% set api_servers_with_port = api_servers -%}
 {% else -%}
   {% set api_servers_with_port = api_servers + ":6443" -%}
@@ -28,7 +28,7 @@

 {% set reconcile_cidr_args = "" -%}
 {% if grains['roles'][0] == 'kubernetes-master' -%}
-  {% if grains.cloud in ['aws', 'gce', 'vagrant', 'vsphere', 'photon-controller', 'openstack'] -%}
+  {% if grains.cloud in ['aws', 'gce', 'vagrant', 'vsphere', 'photon-controller', 'openstack', 'azure-legacy'] -%}

     # Unless given a specific directive, disable registration for the kubelet
     # running on the master.
@@ -48,7 +48,7 @@
 {% endif -%}

 {% set cloud_provider = "" -%}
-{% if grains.cloud is defined and grains.cloud not in ['vagrant', 'vsphere', 'photon-controller'] -%}
+{% if grains.cloud is defined and grains.cloud not in ['vagrant', 'vsphere', 'photon-controller', 'azure-legacy'] -%}
   {% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
 {% endif -%}
cluster/saltbase/salt/nginx/init.sls (new file, 64 lines):
@@ -0,0 +1,64 @@
nginx:
  pkg:
    - installed

/etc/nginx/nginx.conf:
  file:
    - managed
    - source: salt://nginx/nginx.conf
    - template: jinja
    - user: root
    - group: root
    - mode: 644

/etc/nginx/sites-enabled/default:
  file:
    - managed
    - makedirs: true
    - source: salt://nginx/kubernetes-site
    - user: root
    - group: root
    - mode: 644

/usr/share/nginx/htpasswd:
  file:
    - managed
    - source: salt://nginx/htpasswd
    - user: root
    - group: root
    - mode: 644

{% if grains.cloud is defined and grains.cloud in ['gce'] %}
/etc/kubernetes/manifests/nginx.json:
  file:
    - managed
    - source: salt://nginx/nginx.json
    - user: root
    - group: root
    - mode: 644
    - require:
      - file: /etc/nginx/nginx.conf
      - file: /etc/nginx/sites-enabled/default
      - file: /usr/share/nginx/htpasswd
      - cmd: kubernetes-cert

# stop legacy nginx_service
stop_nginx-service:
  service.dead:
    - name: nginx
    - enable: None

{% else %}
nginx-service:
  service:
    - running
    - name: nginx
    - watch:
      - pkg: nginx
      - file: /etc/nginx/nginx.conf
      - file: /etc/nginx/sites-enabled/default
      - file: /usr/share/nginx/htpasswd
      - cmd: kubernetes-cert
{% endif %}
cluster/saltbase/salt/nginx/kubernetes-site (new file, 66 lines):
@@ -0,0 +1,66 @@
#server {
#    listen 80; ## listen for ipv4; this line is default and implied
#    listen [::]:80 default_server ipv6only=on; ## listen for ipv6

#    root /usr/share/nginx/www;
#    index index.html index.htm;

#    # Make site accessible from http://localhost/
#    server_name localhost;
#    location / {
#        auth_basic "Restricted";
#        auth_basic_user_file /usr/share/nginx/htpasswd;

#        # Proxy settings.
#        proxy_pass http://localhost:8080/;
#        proxy_connect_timeout 159s;
#        proxy_send_timeout 600s;
#        proxy_read_timeout 600s;
#        proxy_buffer_size 64k;
#        proxy_buffers 16 32k;
#        proxy_busy_buffers_size 64k;
#        proxy_temp_file_write_size 64k;
#    }
#}

# HTTPS server
#
server {
    listen 443;
    server_name localhost;

    root html;
    index index.html index.htm;

    ssl on;
    ssl_certificate /srv/kubernetes/server.cert;
    ssl_certificate_key /srv/kubernetes/server.key;

    ssl_session_timeout 5m;

    # don't use SSLv3 because of POODLE
    ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
    ssl_ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS;
    ssl_prefer_server_ciphers on;

    location / {
        auth_basic "Restricted";
        auth_basic_user_file /usr/share/nginx/htpasswd;

        # Proxy settings
        # disable buffering so that watch works
        proxy_buffering off;
        proxy_pass http://127.0.0.1:8080/;
        proxy_connect_timeout 159s;
        proxy_send_timeout 600s;
        proxy_read_timeout 600s;

        # Disable retry
        proxy_next_upstream off;

        # Support web sockets
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }
}
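
The site above terminates TLS on 443 with basic auth and proxies to the apiserver on 127.0.0.1:8080; a hedged smoke test from the master itself, assuming the htpasswd credentials match KUBE_USER and KUBE_PASSWORD:

    curl -k -u "${KUBE_USER}:${KUBE_PASSWORD}" https://localhost/version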
cluster/saltbase/salt/nginx/nginx.conf (new file, 61 lines):
@@ -0,0 +1,61 @@
{% if grains['os_family'] == 'RedHat' %}
user nginx;
{% else %}
user www-data;
{% endif %}

worker_processes 4;
pid /var/run/nginx.pid;

events {
    worker_connections 768;
    # multi_accept on;
}

http {

    ##
    # Basic Settings
    ##

    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    # server_tokens off;

    # server_names_hash_bucket_size 64;
    # server_name_in_redirect off;

    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    ##
    # Logging Settings
    ##

    access_log /var/log/nginx/access.log;
    error_log /var/log/nginx/error.log;

    ##
    # Gzip Settings
    ##

    gzip on;
    gzip_disable "msie6";

    # gzip_vary on;
    # gzip_proxied any;
    # gzip_comp_level 6;
    # gzip_buffers 16 8k;
    # gzip_http_version 1.1;
    # gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;

    ##
    # Virtual Host Configs
    ##

    include /etc/nginx/conf.d/*.conf;
    include /etc/nginx/sites-enabled/*;
}
cluster/saltbase/salt/nginx/nginx.json (new file, 60 lines):
@@ -0,0 +1,60 @@
{
  "apiVersion": "v1",
  "kind": "Pod",
  "metadata": {"name": "nginx"},
  "spec": {
    "hostNetwork": true,
    "containers": [
      {
        "name": "nginx",
        "image": "gcr.io/google-containers/nginx:v1",
        "resources": {
          "limits": {
            "cpu": "200m"
          }
        },
        "command": [
          "nginx",
          "-g",
          "daemon off;"
        ],
        "ports": [
          {"name": "https",
           "containerPort": 443,
           "hostPort": 443}
        ],
        "volumeMounts": [
          {"name": "nginx",
           "mountPath": "/etc/nginx",
           "readOnly": true},
          {"name": "k8s",
           "mountPath": "/srv/kubernetes",
           "readOnly": true},
          {"name": "logs",
           "mountPath": "/var/log/nginx",
           "readOnly": false},
          {"name": "passwd",
           "mountPath": "/usr/share/nginx",
           "readOnly": true}
        ]
      }
    ],
    "volumes": [
      {"name": "nginx",
       "hostPath": {"path": "/etc/nginx"}},
      {"name": "k8s",
       "hostPath": {"path": "/srv/kubernetes"}},
      {"name": "passwd",
       "hostPath": {"path": "/usr/share/nginx"}},
      {"name": "logs",
       "hostPath": {"path": "/var/log/nginx"}}
    ]
  }
}
cluster/saltbase/salt/openvpn/init.sls:
@@ -7,10 +7,10 @@
     - mode: 644
     - makedirs: True

-{% for (minion, grains) in salt['mine.get']('roles:kubernetes-pool', 'grains.items', expr_form='grain').items() %}
-/etc/openvpn/ccd/{{ minion }}:
+{% for minion in salt['mine.get']('roles:kubernetes-pool', 'grains.items', expr_form='grain').values() %}
+/etc/openvpn/ccd/{{ minion['hostnamef'] }}:
   file.managed:
-    - contents: "iroute {{ grains['cbr-string'] }}\n"
+    - contents: "iroute {{ minion['cbr-string'] }}\n"
     - user: root
     - group: root
     - mode: 644
cluster/saltbase/salt/top.sls:
@@ -20,7 +20,7 @@ base:
 {% elif pillar.get('network_provider', '').lower() == 'cni' %}
     - cni
 {% endif %}
-{% if grains['cloud'] is defined and grains['cloud'] == 'azure' %}
+{% if grains['cloud'] is defined and grains['cloud'] == 'azure-legacy' %}
     - openvpn-client
 {% endif %}
     - helpers
@@ -81,10 +81,11 @@ base:
     - logrotate
 {% endif %}
     - kube-addons
-{% if grains['cloud'] is defined and grains['cloud'] == 'azure' %}
+{% if grains['cloud'] is defined and grains['cloud'] == 'azure-legacy' %}
     - openvpn
+    - nginx
 {% endif %}
-{% if grains['cloud'] is defined and grains['cloud'] in [ 'vagrant', 'gce', 'aws', 'vsphere', 'photon-controller', 'openstack'] %}
+{% if grains['cloud'] is defined and grains['cloud'] in [ 'vagrant', 'gce', 'aws', 'vsphere', 'photon-controller', 'openstack', 'azure-legacy'] %}
    - docker
    - kubelet
 {% endif %}