Add config to run minions on GCE using Ubuntu.

It is for running nodes on Ubuntu images up to 14.04 LTS (Trusty).
The change for running master on Ubuntu will be added later.
The configuration consists of several upstart jobs, which is
passed to node instances through GCE metadata and parsed by cloud-init.
This commit is contained in:
Andy Zheng 2015-08-10 16:59:00 -07:00
parent ea59172874
commit fef1ede240
3 changed files with 447 additions and 2 deletions

View File

@ -0,0 +1,155 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions and constants for the Ubuntu OS distro.
# The code and configuration is for running node instances on Ubuntu images.
# The master is still on Debian. In addition, the configuration is based on
# upstart, which is in Ubuntu up to 14.04 LTS (Trusty). Ubuntu 15.04 and above
# replaced upstart with systemd as the init system. Consequently, the
# configuration cannot work on these images.
# build-kube-env writes the kube-env payload for an instance into a file.
# The master variant is YAML (each value passed through yaml-quote); the node
# variant is plain KEY=VALUE lines sourced by the trusty upstart jobs.
# $1: if 'true', we're building a master yaml, else a node
# $2: path of the output file (recreated from scratch)
function build-kube-env {
  local master="$1"
  local file="$2"
  rm -f "${file}"
  # TODO(andyzheng0831): master node is still running with Debian image. Switch it
  # to Ubuntu trusty.
  if [[ "${master}" == "true" ]]; then
    cat > "${file}" <<EOF
KUBERNETES_MASTER: "true"
ENV_TIMESTAMP: $(yaml-quote $(date -u +%Y-%m-%dT%T%z))
INSTANCE_PREFIX: $(yaml-quote ${INSTANCE_PREFIX})
NODE_INSTANCE_PREFIX: $(yaml-quote ${NODE_INSTANCE_PREFIX})
CLUSTER_IP_RANGE: $(yaml-quote ${CLUSTER_IP_RANGE:-10.244.0.0/16})
SERVER_BINARY_TAR_URL: $(yaml-quote ${SERVER_BINARY_TAR_URL})
SERVER_BINARY_TAR_HASH: $(yaml-quote ${SERVER_BINARY_TAR_HASH})
SALT_TAR_URL: $(yaml-quote ${SALT_TAR_URL})
SALT_TAR_HASH: $(yaml-quote ${SALT_TAR_HASH})
SERVICE_CLUSTER_IP_RANGE: $(yaml-quote ${SERVICE_CLUSTER_IP_RANGE})
ALLOCATE_NODE_CIDRS: $(yaml-quote ${ALLOCATE_NODE_CIDRS:-false})
ENABLE_CLUSTER_MONITORING: $(yaml-quote ${ENABLE_CLUSTER_MONITORING:-none})
ENABLE_NODE_MONITORING: $(yaml-quote ${ENABLE_NODE_MONITORING:-false})
ENABLE_CLUSTER_LOGGING: $(yaml-quote ${ENABLE_CLUSTER_LOGGING:-false})
ENABLE_CLUSTER_UI: $(yaml-quote ${ENABLE_CLUSTER_UI:-false})
ENABLE_NODE_LOGGING: $(yaml-quote ${ENABLE_NODE_LOGGING:-false})
LOGGING_DESTINATION: $(yaml-quote ${LOGGING_DESTINATION:-})
ELASTICSEARCH_LOGGING_REPLICAS: $(yaml-quote ${ELASTICSEARCH_LOGGING_REPLICAS:-})
ENABLE_CLUSTER_DNS: $(yaml-quote ${ENABLE_CLUSTER_DNS:-false})
DNS_REPLICAS: $(yaml-quote ${DNS_REPLICAS:-})
DNS_SERVER_IP: $(yaml-quote ${DNS_SERVER_IP:-})
DNS_DOMAIN: $(yaml-quote ${DNS_DOMAIN:-})
KUBE_USER: $(yaml-quote ${KUBE_USER})
KUBE_PASSWORD: $(yaml-quote ${KUBE_PASSWORD})
KUBE_BEARER_TOKEN: $(yaml-quote ${KUBE_BEARER_TOKEN})
KUBELET_TOKEN: $(yaml-quote ${KUBELET_TOKEN:-})
KUBE_PROXY_TOKEN: $(yaml-quote ${KUBE_PROXY_TOKEN:-})
ADMISSION_CONTROL: $(yaml-quote ${ADMISSION_CONTROL:-})
MASTER_IP_RANGE: $(yaml-quote ${MASTER_IP_RANGE})
KUBERNETES_MASTER_NAME: $(yaml-quote ${MASTER_NAME})
KUBERNETES_CONTAINER_RUNTIME: $(yaml-quote ${CONTAINER_RUNTIME})
RKT_VERSION: $(yaml-quote ${RKT_VERSION})
CA_CERT: $(yaml-quote ${CA_CERT_BASE64})
MASTER_CERT: $(yaml-quote ${MASTER_CERT_BASE64:-})
MASTER_KEY: $(yaml-quote ${MASTER_KEY_BASE64:-})
KUBECFG_CERT: $(yaml-quote ${KUBECFG_CERT_BASE64:-})
KUBECFG_KEY: $(yaml-quote ${KUBECFG_KEY_BASE64:-})
EOF
  else
    # Use '>' (not the previous '>>'): the file was just removed above, and a
    # fresh truncating write keeps both branches consistent and avoids ever
    # appending to a stale file.
    cat > "${file}" <<EOF
ENV_TIMESTAMP="$(date -u +%Y-%m-%dT%T%z)"
INSTANCE_PREFIX=${INSTANCE_PREFIX}
NODE_INSTANCE_PREFIX=${NODE_INSTANCE_PREFIX}
SERVER_BINARY_TAR_URL=${SERVER_BINARY_TAR_URL}
SERVER_BINARY_TAR_HASH=${SERVER_BINARY_TAR_HASH}
SALT_TAR_URL=${SALT_TAR_URL}
SALT_TAR_HASH=${SALT_TAR_HASH}
SERVICE_CLUSTER_IP_RANGE=${SERVICE_CLUSTER_IP_RANGE}
ENABLE_CLUSTER_MONITORING=${ENABLE_CLUSTER_MONITORING:-none}
ENABLE_NODE_MONITORING=${ENABLE_NODE_MONITORING:-false}
ENABLE_CLUSTER_LOGGING=${ENABLE_CLUSTER_LOGGING:-false}
ENABLE_NODE_LOGGING=${ENABLE_NODE_LOGGING:-false}
LOGGING_DESTINATION=${LOGGING_DESTINATION:-}
ELASTICSEARCH_LOGGING_REPLICAS=${ELASTICSEARCH_LOGGING_REPLICAS:-}
ENABLE_CLUSTER_DNS=${ENABLE_CLUSTER_DNS:-false}
DNS_REPLICAS=${DNS_REPLICAS:-}
DNS_SERVER_IP=${DNS_SERVER_IP:-}
DNS_DOMAIN=${DNS_DOMAIN:-}
KUBELET_TOKEN=${KUBELET_TOKEN:-}
KUBE_PROXY_TOKEN=${KUBE_PROXY_TOKEN:-}
ADMISSION_CONTROL=${ADMISSION_CONTROL:-}
MASTER_IP_RANGE=${MASTER_IP_RANGE}
KUBERNETES_MASTER_NAME=${MASTER_NAME}
ZONE=${ZONE}
EXTRA_DOCKER_OPTS=${EXTRA_DOCKER_OPTS:-}
PROJECT_ID=${PROJECT}
CA_CERT=${CA_CERT_BASE64}
KUBELET_CERT=${KUBELET_CERT_BASE64:-}
KUBELET_KEY=${KUBELET_KEY_BASE64:-}
EOF
  fi
}
# create-master-instance creates the master instance. If called with
# an argument, the argument is used as the name of a reserved IP
# address for the master. (In the case of upgrade/repair, we re-use
# the same IP.)
#
# It requires a whole slew of assumed variables, partially due to
# the call to write-master-env. Listing them would be rather
# futile. Instead, we list the required calls to ensure any additional
# variables are set:
#   ensure-temp-dir
#   detect-project
#   get-bearer-token
#
# TODO(andyzheng0831): We are still running master on Debian.
# Convert master node to use Ubuntu trusty image too.
function create-master-instance {
  local address_option=""
  if [[ -n "${1:-}" ]]; then
    address_option="--address ${1}"
  fi
  write-master-env
  # NOTE: ${address_option} is intentionally unquoted so it expands to two
  # words ("--address <name>") or to nothing at all.
  gcloud compute instances create "${MASTER_NAME}" \
    ${address_option} \
    --project "${PROJECT}" \
    --zone "${ZONE}" \
    --machine-type "${MASTER_SIZE}" \
    --image-project="${MASTER_IMAGE_PROJECT}" \
    --image "${MASTER_IMAGE}" \
    --tags "${MASTER_TAG}" \
    --network "${NETWORK}" \
    --scopes "storage-ro,compute-rw" \
    --can-ip-forward \
    --metadata-from-file \
      "startup-script=${KUBE_ROOT}/cluster/gce/configure-vm.sh,kube-env=${KUBE_TEMP}/master-kube-env.yaml" \
    --disk "name=${MASTER_NAME}-pd,device-name=master-pd,mode=rw,boot=no,auto-delete=no"
}
# TODO(andyzheng0831): Make $1 required.
# TODO(andyzheng0831): Document required vars (for this and call chain).
# create-node-instance-template creates the GCE instance template used to
# spawn trusty nodes, attaching the kube-env and cloud-init user-data.
# $1: version (optional); appended to the template name as "-<version>"
function create-node-instance-template {
  local suffix=""
  [[ -n "${1:-}" ]] && suffix="-${1}"
  # NOTE(review): relies on the caller having populated the global
  # 'scope_flags' array — confirm against the calling script.
  create-node-template "${NODE_INSTANCE_PREFIX}-template${suffix}" "${scope_flags[*]}" \
    "kube-env=${KUBE_TEMP}/node-kube-env.yaml" \
    "user-data=${KUBE_ROOT}/cluster/gce/trusty/node.yaml"
}

View File

@ -0,0 +1,286 @@
From nobody Tue Aug 11 10:13:54 2015
Content-Type: multipart/mixed; boundary="===============6024533374511606659=="
MIME-Version: 1.0
--===============6024533374511606659==
MIME-Version: 1.0
Content-Type: text/upstart-job; charset="us-ascii"
Content-Transfer-Encoding: 7bit
Content-Disposition: attachment; filename="kube-env.conf"
#upstart-job
description "Prepare kube node environment"
start on cloud-config
script
# Set the hostname to the short version.
short_hostname=$(hostname -s)
hostname $short_hostname
# We have seen that GCE image may have strict host firewall rules which drop most inbound/forwarded packets. In such a case, add rules to accept all TCP/UDP packets.
if iptables -L INPUT | grep "Chain INPUT (policy DROP)" > /dev/null; then
echo "Add rules to accpet all inbound TCP/UDP packets"
iptables -A INPUT -w -p TCP -j ACCEPT
iptables -A INPUT -w -p UDP -j ACCEPT
fi
if iptables -L FORWARD | grep "Chain FORWARD (policy DROP)" > /dev/null; then
echo "Add rules to accpet all forwarded TCP/UDP packets"
iptables -A FORWARD -w -p TCP -j ACCEPT
iptables -A FORWARD -w -p UDP -j ACCEPT
fi
# Create required directories.
mkdir -p /var/lib/kubelet
mkdir -p /var/lib/kube-proxy
mkdir -p /etc/kubernetes/manifests
# Fetch kube-env from GCE metadata server.
curl --fail --silent --show-error \
-H "X-Google-Metadata-Request: True" \
-o /etc/kube-env \
http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-env
# Create the kubelet kubeconfig file.
. /etc/kube-env
cat > /var/lib/kubelet/kubeconfig << EOF
apiVersion: v1
kind: Config
users:
- name: kubelet
user:
token: ${KUBELET_TOKEN}
clusters:
- name: local
cluster:
insecure-skip-tls-verify: true
contexts:
- context:
cluster: local
user: kubelet
name: service-account-context
current-context: service-account-context
EOF
# Create the kube-proxy config file.
cat > /var/lib/kube-proxy/kubeconfig << EOF
apiVersion: v1
kind: Config
users:
- name: kube-proxy
user:
token: ${KUBE_PROXY_TOKEN}
clusters:
- name: local
cluster:
insecure-skip-tls-verify: true
contexts:
- context:
cluster: local
user: kube-proxy
name: service-account-context
current-context: service-account-context
EOF
end script
--===============6024533374511606659==
MIME-Version: 1.0
Content-Type: text/upstart-job; charset="us-ascii"
Content-Transfer-Encoding: 7bit
Content-Disposition: attachment; filename="kube-install-packages.conf"
#upstart-job
description "Install packages needed to run kubernetes"
start on cloud-config
script
apt-get update
# Install docker, brctl, and socat if they are not in the image.
if ! which docker > /dev/null; then
echo "Do not find docker. Install it."
# We should install the docker that passes qualification. At present, it is version 1.7.1.
curl -sSL https://get.docker.com/ubuntu/ | DOCKER_VERSION=1.7.1 sh
fi
if ! which brctl > /dev/null; then
echo "Do not find brctl. Install it."
apt-get install --yes bridge-utils
fi
if ! which socat > /dev/null; then
echo "Do not find socat. Install it."
apt-get install --yes socat
fi
end script
--===============6024533374511606659==
MIME-Version: 1.0
Content-Type: text/upstart-job; charset="us-ascii"
Content-Transfer-Encoding: 7bit
Content-Disposition: attachment; filename="kube-install-minion.conf"
#upstart-job
description "Download and install k8s binaries and configurations"
start on stopped kube-env
script
. /etc/kube-env
# If kubelet or kube-proxy is not installed in the image, pull release binaries and put them in /usr/bin.
if ! which kubelet > /dev/null || ! which kube-proxy > /dev/null; then
cd /tmp
k8s_sha1="${SERVER_BINARY_TAR_URL##*/}.sha1"
echo "Downloading k8s tar sha1 file ${k8s_sha1}"
curl -Lo "${k8s_sha1}" --connect-timeout 20 --retry 6 --retry-delay 2 "${SERVER_BINARY_TAR_URL}.sha1"
k8s_tar="${SERVER_BINARY_TAR_URL##*/}"
echo "Downloading k8s tar file ${k8s_tar}"
curl -Lo "${k8s_tar}" --connect-timeout 20 --retry 6 --retry-delay 2 "${SERVER_BINARY_TAR_URL}"
# Validate hash.
actual=$(sha1sum ${k8s_tar} | awk '{ print $1 }') || true
if [ "${actual}" != "${SERVER_BINARY_TAR_HASH}" ]; then
echo "== ${k8s_tar} corrupted, sha1 ${actual} doesn't match expected ${SERVER_BINARY_TAR_HASH} =="
else
echo "Validated ${SERVER_BINARY_TAR_URL} SHA1 = ${SERVER_BINARY_TAR_HASH}"
fi
tar xzf "/tmp/${k8s_tar}" -C /tmp/ --overwrite
cp /tmp/kubernetes/server/bin/kubelet /usr/bin/
cp /tmp/kubernetes/server/bin/kube-proxy /usr/bin/
rm -rf "/tmp/kubernetes"
rm "/tmp/${k8s_tar}"
rm "/tmp/${k8s_sha1}"
fi
# Put saltbase configuration files in /etc/saltbase. We will use the add-on yaml files.
mkdir -p /etc/saltbase
cd /etc/saltbase
salt_sha1="${SALT_TAR_URL##*/}.sha1"
echo "Downloading Salt tar sha1 file ${salt_sha1}"
curl -Lo "${salt_sha1}" --connect-timeout 20 --retry 6 --retry-delay 2 "${SALT_TAR_URL}.sha1"
salt_tar="${SALT_TAR_URL##*/}"
echo "Downloading Salt tar file ${salt_tar}"
curl -Lo "${salt_tar}" --connect-timeout 20 --retry 6 --retry-delay 2 "${SALT_TAR_URL}"
# Validate hash.
actual=$(sha1sum ${salt_tar} | awk '{ print $1 }') || true
if [ "${actual}" != "${SALT_TAR_HASH}" ]; then
echo "== ${salt_tar} corrupted, sha1 ${actual} doesn't match expected ${SALT_TAR_HASH} =="
else
echo "Validated ${SALT_TAR_URL} SHA1 = ${SALT_TAR_HASH}"
fi
tar xzf "/etc/saltbase/${salt_tar}" -C /etc/saltbase/ --overwrite
rm "/etc/saltbase/${salt_sha1}"
rm "/etc/saltbase/${salt_tar}"
end script
--===============6024533374511606659==
MIME-Version: 1.0
Content-Type: text/upstart-job; charset="us-ascii"
Content-Transfer-Encoding: 7bit
Content-Disposition: attachment; filename="kubelet.conf"
#upstart-job
description "Run kubelet service"
start on stopped kube-install-minion and stopped kube-install-packages
respawn
script
# TODO(andyzheng0831): Add health check functionality.
. /etc/kube-env
/usr/bin/kubelet \
--api_servers=https://${KUBERNETES_MASTER_NAME} \
--enable-debugging-handlers=true \
--cloud_provider=gce \
--config=/etc/kubernetes/manifests \
--allow_privileged=false \
--v=2 \
--cluster_dns=10.0.0.10 \
--cluster_domain=cluster.local \
--configure-cbr0=true \
--cgroup_root=/ \
--system-container=/system
end script
# Wait for 10s to start kubelet again.
post-stop exec sleep 10
--===============6024533374511606659==
MIME-Version: 1.0
Content-Type: text/upstart-job; charset="us-ascii"
Content-Transfer-Encoding: 7bit
Content-Disposition: attachment; filename="kube-proxy.conf"
#upstart-job
description "Start kube-proxy service"
start on stopped kube-install-minion and stopped kube-install-packages
respawn
script
. /etc/kube-env
/usr/bin/kube-proxy \
--master=https://${KUBERNETES_MASTER_NAME} \
--kubeconfig=/var/lib/kube-proxy/kubeconfig \
--v=2
end script
# Wait for 10s to start kube-proxy again.
post-stop exec sleep 10
--===============6024533374511606659==
MIME-Version: 1.0
Content-Type: text/upstart-job; charset="us-ascii"
Content-Transfer-Encoding: 7bit
Content-Disposition: attachment; filename="kube-docker.conf"
#upstart-job
description "Restart docker daemon"
start on started kubelet
script
. /etc/kube-env
# Assemble docker daemon options
echo "DOCKER_OPTS=\"-p /var/run/docker.pid ${EXTRA_DOCKER_OPTS} --log-level=\"debug\" --bridge cbr0 --iptables=false --ip-masq=false\"" > /etc/default/docker
# Make sure the network interface cbr0 is created before restarting docker daemon
while ! [ -L /sys/class/net/cbr0 ]; do
echo "Sleep 1 second to wait for cbr0"
sleep 1
done
initctl restart docker
# Remove docker0
ifconfig docker0 down
brctl delbr docker0
end script
--===============6024533374511606659==
MIME-Version: 1.0
Content-Type: text/upstart-job; charset="us-ascii"
Content-Transfer-Encoding: 7bit
Content-Disposition: attachment; filename="kube-addons.conf"
#upstart-job
description "Install kubelet add-on manifest files"
start on stopped kube-docker
script
# Configuration files are located at /etc/saltbase.
. /etc/kube-env
if [ "${ENABLE_NODE_LOGGING}" = "true" ]; then
if [ "${LOGGING_DESTINATION}" = "gcp" ]; then
cp /etc/saltbase/kubernetes/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml /etc/kubernetes/manifests/
elif [ "${LOGGING_DESTINATION}" = "elasticsearch" ]; then
cp /etc/saltbase/kubernetes/saltbase/salt/fluentd-es/fluentd-es.yaml /etc/kubernetes/manifests/
fi
fi
end script
--===============6024533374511606659==--

View File

@ -22,7 +22,7 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/gce/${KUBE_CONFIG_FILE-"config-default.sh"}"
source "${KUBE_ROOT}/cluster/common.sh"
if [[ "${OS_DISTRIBUTION}" == "debian" || "${OS_DISTRIBUTION}" == "coreos" ]]; then
if [[ "${OS_DISTRIBUTION}" == "debian" || "${OS_DISTRIBUTION}" == "coreos" || "${OS_DISTRIBUTION}" == "trusty" ]]; then
source "${KUBE_ROOT}/cluster/gce/${OS_DISTRIBUTION}/helper.sh"
else
echo "Cannot operate on cluster using os distro: ${OS_DISTRIBUTION}" >&2
@ -1196,7 +1196,11 @@ function ssh-to-node {
# Restart the kube-proxy on a node ($1)
function restart-kube-proxy {
ssh-to-node "$1" "sudo /etc/init.d/kube-proxy restart"
if [[ "${OS_DISTRIBUTION}" == "trusty" ]]; then
ssh-to-node "$1" "sudo initctl restart kube-proxy"
else
ssh-to-node "$1" "sudo /etc/init.d/kube-proxy restart"
fi
}
# Restart the kube-apiserver on a node ($1)