Merge pull request #33965 from euank/coreos-kube-up-now-with-less-cloud-init

Automatic merge from submit-queue

CoreOS kube-up, now with less cloud-init

This update is a significant refactor: it moves almost all of the provisioning
logic out of cloud-init and into bash scripts modeled after the `gci` cluster scripts.

The reasons for doing this are:
1. Avoid duplicating the saltbase manifests by reusing gci's parsing logic (easier maintenance)
2. Take an incremental step towards sharing more code between gci/trusty/coreos, again for better maintenance
3. Pave the way for making future changes (e.g. improved rkt support, kubelet support) easier to share

The primary differences from the gci scripts are the following:
1. Use of the `/opt/kubernetes` directory over `/home/kubernetes`
2. Support for rkt as a runtime
3. No use of logrotate
4. No use of `/etc/default/`
5. No logic related to noexec mounts or GCI-specific firewall handling

It will make sense to move item 2 (rkt support) over to gci, along with perhaps a few other small improvements; that will be a separate PR for ease of review.

Ref #29720; this is part of that effort because it removes one copy of the duplicated manifests.

Fixes #24165

cc @yifan-gu 

Since this logic largely duplicates logic from the gci folder, it would be nice if someone closely familiar with that code could give it an OK and make sure I didn't fall into any gotchas there; cc @andyzheng0831
This commit was authored by the Kubernetes Submit Queue on 2016-12-20 01:13:45 -08:00 and committed by GitHub.
55 changed files with 1729 additions and 2647 deletions


@@ -331,14 +331,12 @@ function kube::release::package_kube_manifests_tarball() {
cp "${KUBE_ROOT}/cluster/gce/gci/configure-helper.sh" "${dst_dir}/gci-configure-helper.sh"
cp "${KUBE_ROOT}/cluster/gce/gci/mounter/mounter" "${dst_dir}/gci-mounter"
cp "${KUBE_ROOT}/cluster/gce/gci/health-monitor.sh" "${dst_dir}/health-monitor.sh"
cp "${KUBE_ROOT}/cluster/gce/coreos/configure-helper.sh" "${dst_dir}/coreos-configure-helper.sh"
cp -r "${salt_dir}/kube-admission-controls/limit-range" "${dst_dir}"
local objects
objects=$(cd "${KUBE_ROOT}/cluster/addons" && find . \( -name \*.yaml -or -name \*.yaml.in -or -name \*.json \) | grep -v demo)
tar c -C "${KUBE_ROOT}/cluster/addons" ${objects} | tar x -C "${dst_dir}"
# This is for CoreOS only. ContainerVM, GCI, and Trusty do not use it.
cp -r "${KUBE_ROOT}/cluster/gce/coreos/kube-manifests"/* "${release_stage}/"
kube::release::clean_cruft
local package_name="${RELEASE_DIR}/kubernetes-manifests.tar.gz"


@@ -1,4 +1,3 @@
# This file should be kept in sync with cluster/gce/coreos/kube-manifests/addons/dashboard/dashboard-controller.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:


@@ -1,4 +1,3 @@
# This file should be kept in sync with cluster/gce/coreos/kube-manifests/addons/dashboard/dashboard-service.yaml
apiVersion: v1
kind: Service
metadata:


@@ -0,0 +1,8 @@
# CoreOS image
The [CoreOS operating system](https://coreos.com/why/) is a Linux distribution optimized for running containers securely at scale.
CoreOS provides [an image](https://coreos.com/os/docs/latest/booting-on-google-compute-engine.html) for Google Cloud Platform (GCP).
This folder contains configuration and tooling to allow kube-up to create a Kubernetes cluster on Google Cloud Platform running on the official CoreOS image.
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/gce/coreos/README.md?pixel)]()

File diff suppressed because it is too large.


@@ -1,352 +0,0 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
readonly KNOWN_TOKENS_FILE="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv"
readonly BASIC_AUTH_FILE="/srv/salt-overlay/salt/kube-apiserver/basic_auth.csv"
# evaluate-manifest evaluates the source manifest with the environment variables.
function evaluate-manifest() {
local src=$1
local dst=$2
cp ${src} ${dst}
sed -i 's/\"/\\\"/g' ${dst} # eval will remove the double quotes if they are not escaped
eval "echo \"$(< ${dst})\"" > ${dst}
}
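# Illustrative only (value assumed for the example): with DNS_SERVER_IP=10.0.0.10 in the
# environment, a manifest line such as `clusterIP: ${DNS_SERVER_IP}` is rewritten in place
# to `clusterIP: 10.0.0.10`; the quote-escaping above is what keeps double quotes in the
# manifest intact through the eval.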
# evaluate-manifests-dir evaluates the source manifests within $1 and puts the result
# in $2.
function evaluate-manifests-dir() {
local src=$1
local dst=$2
mkdir -p ${dst}
for f in ${src}/*
do
evaluate-manifest $f ${dst}/${f##*/}
done
}
function configure-kube-proxy() {
echo "Configuring kube-proxy"
mkdir -p /var/lib/kube-proxy
evaluate-manifest ${MANIFESTS_DIR}/kubeproxy-config.yaml /var/lib/kube-proxy/kubeconfig
}
function configure-logging() {
if [[ "${LOGGING_DESTINATION}" == "gcp" ]];then
echo "Configuring fluentd-gcp"
# fluentd-gcp
evaluate-manifest ${MANIFESTS_DIR}/fluentd-gcp.yaml /etc/kubernetes/manifests/fluentd-gcp.yaml
elif [[ "${LOGGING_DESTINATION}" == "elasticsearch" ]];then
echo "Configuring fluentd-es"
# fluentd-es
evaluate-manifest ${MANIFESTS_DIR}/fluentd-es.yaml /etc/kubernetes/manifests/fluentd-es.yaml
fi
}
function configure-admission-controls() {
echo "Configuring admission controls"
mkdir -p /etc/kubernetes/admission-controls
cp -r ${SALT_DIR}/salt/kube-admission-controls/limit-range /etc/kubernetes/admission-controls/
}
function configure-etcd() {
echo "Configuring etcd"
touch /var/log/etcd.log
evaluate-manifest ${MANIFESTS_DIR}/etcd.yaml /etc/kubernetes/manifests/etcd.yaml
}
function configure-etcd-events() {
echo "Configuring etcd-events"
touch /var/log/etcd-events.log
evaluate-manifest ${MANIFESTS_DIR}/etcd-events.yaml /etc/kubernetes/manifests/etcd-events.yaml
}
function configure-addon-manager() {
echo "Configuring addon-manager"
evaluate-manifest ${MANIFESTS_DIR}/kube-addon-manager.yaml /etc/kubernetes/manifests/kube-addon-manager.yaml
}
function configure-kube-apiserver() {
echo "Configuring kube-apiserver"
# Wait for etcd to be up.
wait-url-up http://127.0.0.1:2379/version
touch /var/log/kube-apiserver.log
# Copying known_tokens and basic_auth file.
cp ${SALT_OVERLAY}/salt/kube-apiserver/*.csv /srv/kubernetes/
evaluate-manifest ${MANIFESTS_DIR}/kube-apiserver.yaml /etc/kubernetes/manifests/kube-apiserver.yaml
}
function configure-kube-scheduler() {
echo "Configuring kube-scheduler"
touch /var/log/kube-scheduler.log
evaluate-manifest ${MANIFESTS_DIR}/kube-scheduler.yaml /etc/kubernetes/manifests/kube-scheduler.yaml
}
function configure-kube-controller-manager() {
# Wait for api server.
wait-url-up http://127.0.0.1:8080/version
echo "Configuring kube-controller-manager"
touch /var/log/kube-controller-manager.log
evaluate-manifest ${MANIFESTS_DIR}/kube-controller-manager.yaml /etc/kubernetes/manifests/kube-controller-manager.yaml
}
# Wait until $1 become reachable.
function wait-url-up() {
until curl --silent $1
do
sleep 5
done
}
# Configure addon yamls, and run salt/kube-addons/kube-addons.sh
function configure-master-addons() {
echo "Configuring master addons"
local addon_dir=/etc/kubernetes/addons
mkdir -p ${addon_dir}
# Copy namespace.yaml
evaluate-manifest ${MANIFESTS_DIR}/addons/namespace.yaml ${addon_dir}/namespace.yaml
if [[ "${ENABLE_L7_LOADBALANCING}" == "glbc" ]]; then
evaluate-manifests-dir ${MANIFESTS_DIR}/addons/cluster-loadbalancing/glbc ${addon_dir}/cluster-loadbalancing/glbc
fi
if [[ "${ENABLE_CLUSTER_DNS}" == "true" ]]; then
evaluate-manifests-dir ${MANIFESTS_DIR}/addons/dns ${addon_dir}/dns
if [[ "${ENABLE_DNS_HORIZONTAL_AUTOSCALER}" == "true" ]]; then
evaluate-manifests-dir ${MANIFESTS_DIR}/addons/dns-horizontal-autoscaler ${addon_dir}/dns-horizontal-autoscaler
fi
fi
if [[ "${ENABLE_CLUSTER_UI}" == "true" ]]; then
evaluate-manifests-dir ${MANIFESTS_DIR}/addons/dashboard ${addon_dir}/dashboard
fi
if [[ "${ENABLE_CLUSTER_LOGGING}" == "true" ]]; then
evaluate-manifests-dir ${MANIFESTS_DIR}/addons/fluentd-elasticsearch ${addon_dir}/fluentd-elasticsearch
fi
if [[ "${ENABLE_CLUSTER_MONITORING}" == "influxdb" ]]; then
evaluate-manifests-dir ${MANIFESTS_DIR}/addons/cluster-monitoring/influxdb ${addon_dir}/cluster-monitoring/influxdb
elif [[ "${ENABLE_CLUSTER_MONITORING}" == "google" ]]; then
evaluate-manifests-dir ${MANIFESTS_DIR}/addons/cluster-monitoring/google ${addon_dir}/cluster-monitoring/google
elif [[ "${ENABLE_CLUSTER_MONITORING}" == "standalone" ]]; then
evaluate-manifests-dir ${MANIFESTS_DIR}/addons/cluster-monitoring/standalone ${addon_dir}/cluster-monitoring/standalone
elif [[ "${ENABLE_CLUSTER_MONITORING}" == "googleinfluxdb" ]]; then
evaluate-manifests-dir ${MANIFESTS_DIR}/addons/cluster-monitoring/googleinfluxdb ${addon_dir}/cluster-monitoring/googleinfluxdb
fi
# Note that KUBE_ENABLE_INSECURE_REGISTRY is not supported yet.
if [[ "${ENABLE_CLUSTER_REGISTRY}" == "true" ]]; then
CLUSTER_REGISTRY_DISK_SIZE=$(convert-bytes-gce-kube "${CLUSTER_REGISTRY_DISK_SIZE}")
evaluate-manifests-dir ${MANIFESTS_DIR}/addons/registry ${addon_dir}/registry
fi
if [[ "${ENABLE_NODE_PROBLEM_DETECTOR}" == "true" ]]; then
evaluate-manifests-dir ${MANIFESTS_DIR}/addons/node-problem-detector ${addon_dir}/node-problem-detector
fi
if [[ "${ENABLE_DEFAULT_STORAGE_CLASS:-}" == "true" ]]; then
setup-addon-manifests "addons" "storage-class/gce"
fi
evaluate-manifests-dir ${MANIFESTS_DIR}/addons/storage-class/gce ${addon_dir}/storage-class
}
function configure-master-components() {
configure-admission-controls
configure-etcd
configure-etcd-events
configure-kube-apiserver
configure-kube-scheduler
configure-kube-controller-manager
configure-master-addons
configure-addon-manager
}
# TODO(yifan): Merge this with mount-master-pd() in configure-vm.sh
# Pass ${safe_format_and_mount} as an argument.
function mount-master-pd() {
if [[ ! -e /dev/disk/by-id/google-master-pd ]]; then
return
fi
device_info=$(ls -l /dev/disk/by-id/google-master-pd)
relative_path=${device_info##* }
device_path="/dev/disk/by-id/${relative_path}"
# Format and mount the disk, create directories on it for all of the master's
# persistent data, and link them to where they're used.
echo "Mounting master-pd"
mkdir -p /mnt/master-pd
safe_format_and_mount=${SALT_DIR}/salt/helpers/safe_format_and_mount
chmod +x ${safe_format_and_mount}
${safe_format_and_mount} -m "mkfs.ext4 -F" "${device_path}" /mnt/master-pd &>/var/log/master-pd-mount.log || \
{ echo "!!! master-pd mount failed, review /var/log/master-pd-mount.log !!!"; return 1; }
# Contains all the data stored in etcd
mkdir -m 700 -p /mnt/master-pd/var/etcd
# Contains the dynamically generated apiserver auth certs and keys
mkdir -p /mnt/master-pd/srv/kubernetes
# Contains the cluster's initial config parameters and auth tokens
mkdir -p /mnt/master-pd/srv/salt-overlay
# Directory for kube-apiserver to store SSH key (if necessary)
mkdir -p /mnt/master-pd/srv/sshproxy
ln -s -f /mnt/master-pd/var/etcd /var/etcd
ln -s -f /mnt/master-pd/srv/kubernetes /srv/kubernetes
ln -s -f /mnt/master-pd/srv/sshproxy /srv/sshproxy
ln -s -f /mnt/master-pd/srv/salt-overlay /srv/salt-overlay
# This is a bit of a hack to get around the fact that salt has to run after the
# PD and mounted directory are already set up. We can't give ownership of the
# directory to etcd until the etcd user and group exist, but they don't exist
# until salt runs if we don't create them here. We could alternatively make the
# permissions on the directory more permissive, but this seems less bad.
if ! id etcd &>/dev/null; then
useradd -s /sbin/nologin -d /var/etcd etcd
fi
chown -R etcd /mnt/master-pd/var/etcd
chgrp -R etcd /mnt/master-pd/var/etcd
}
# The job of this function is simple, but the basic regular expression syntax makes
# this difficult to read. What we want to do is convert from [0-9]+B, KB, KiB, MB, etc
# into [0-9]+, Ki, Mi, Gi, etc.
# This is done in two steps:
# 1. Convert from [0-9]+X?i?B into [0-9]+X? (X denotes the prefix, ? means the field
# is optional).
# 2. Attach an 'i' to the end of the string if we find a letter.
# The two step process is needed to handle the edge case in which we want to convert
# a raw byte count, as the result should be a simple number (e.g. 5B -> 5).
#
# TODO(yifan): Reuse the one defined in configure-vm.sh to remove duplication.
function convert-bytes-gce-kube() {
local -r storage_space=$1
echo "${storage_space}" | sed -e 's/^\([0-9]\+\)\([A-Z]\)\?i\?B$/\1\2/g' -e 's/\([A-Z]\)$/\1i/'
}
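# Illustrative conversions, derived from the sed above: 5B -> 5, 200GB -> 200Gi, 10GiB -> 10Gi.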
# TODO(yifan): Use create-salt-master-auth() in configure-vm.sh
function create-salt-master-auth() {
if [[ ! -e /srv/kubernetes/ca.crt ]]; then
if [[ ! -z "${CA_CERT:-}" ]] && [[ ! -z "${MASTER_CERT:-}" ]] && [[ ! -z "${MASTER_KEY:-}" ]]; then
mkdir -p /srv/kubernetes
(umask 077;
echo "${CA_CERT}" | base64 --decode > /srv/kubernetes/ca.crt;
echo "${MASTER_CERT}" | base64 --decode > /srv/kubernetes/server.cert;
echo "${MASTER_KEY}" | base64 --decode > /srv/kubernetes/server.key;
# Kubecfg cert/key are optional and included for backwards compatibility.
# TODO(roberthbailey): Remove these two lines once GKE no longer requires
# fetching clients certs from the master VM.
echo "${KUBECFG_CERT:-}" | base64 --decode > /srv/kubernetes/kubecfg.crt;
echo "${KUBECFG_KEY:-}" | base64 --decode > /srv/kubernetes/kubecfg.key)
fi
fi
if [ ! -e "${BASIC_AUTH_FILE}" ]; then
mkdir -p /srv/salt-overlay/salt/kube-apiserver
(umask 077;
echo "${KUBE_PASSWORD},${KUBE_USER},admin" > "${BASIC_AUTH_FILE}")
fi
if [ ! -e "${KNOWN_TOKENS_FILE}" ]; then
mkdir -p /srv/salt-overlay/salt/kube-apiserver
(umask 077;
echo "${KUBE_BEARER_TOKEN},admin,admin" > "${KNOWN_TOKENS_FILE}";
echo "${KUBELET_TOKEN},kubelet,kubelet" >> "${KNOWN_TOKENS_FILE}";
echo "${KUBE_PROXY_TOKEN},kube_proxy,kube_proxy" >> "${KNOWN_TOKENS_FILE}")
# Generate tokens for other "service accounts". Append to known_tokens.
#
# NB: If this list ever changes, this script actually has to
# change to detect the existence of this file, kill any deleted
# old tokens and add any new tokens (to handle the upgrade case).
local -r service_accounts=("system:scheduler" "system:controller_manager" "system:logging" "system:monitoring")
for account in "${service_accounts[@]}"; do
token=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
echo "${token},${account},${account}" >> "${KNOWN_TOKENS_FILE}"
done
fi
}
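# For reference, the CSV files written above contain one "<password>,<user>,admin" line
# (basic_auth.csv) and one "<token>,<name>,<name>" line per account (known_tokens.csv).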
# $1 is the directory containing all of the docker images
function load-docker-images() {
local success
local restart_docker
while true; do
success=true
restart_docker=false
for image in "$1/"*; do
timeout 30 docker load -i "${image}" &>/dev/null
rc=$?
if [[ "$rc" == 124 ]]; then
restart_docker=true
elif [[ "$rc" != 0 ]]; then
success=false
fi
done
if [[ "$success" == "true" ]]; then break; fi
if [[ "$restart_docker" == "true" ]]; then systemctl restart docker; fi
sleep 15
done
}
function load-master-components-images() {
echo "Loading images for master components"
export RKT_BIN=/opt/rkt/rkt
export DOCKER2ACI_BIN=/opt/docker2aci/docker2aci
${SALT_DIR}/install.sh ${KUBE_BIN_TAR}
${SALT_DIR}/salt/kube-master-addons/kube-master-addons.sh
# Get the image tags.
KUBE_APISERVER_DOCKER_TAG=$(cat ${KUBE_BIN_DIR}/kube-apiserver.docker_tag)
KUBE_CONTROLLER_MANAGER_DOCKER_TAG=$(cat ${KUBE_BIN_DIR}/kube-controller-manager.docker_tag)
KUBE_SCHEDULER_DOCKER_TAG=$(cat ${KUBE_BIN_DIR}/kube-scheduler.docker_tag)
}
##########
# main #
##########
KUBE_BIN_TAR=/opt/downloads/kubernetes-server-linux-amd64.tar.gz
KUBE_BIN_DIR=/opt/kubernetes/server/bin
SALT_DIR=/opt/kubernetes/saltbase
SALT_OVERLAY=/srv/salt-overlay
MANIFESTS_DIR=/opt/kube-manifests/kubernetes
# On CoreOS, the hosts file is in /usr/share/baselayout/hosts,
# so we need to manually populate the hosts file here on GCE.
echo "127.0.0.1 localhost" >> /etc/hosts
echo "::1 localhost" >> /etc/hosts
if [[ "${KUBERNETES_MASTER}" == "true" ]]; then
mount-master-pd
create-salt-master-auth
load-master-components-images
configure-master-components
else
configure-kube-proxy
fi
if [[ "${ENABLE_NODE_LOGGING}" == "true" ]];then
configure-logging
fi
echo "Finish configuration successfully!"

cluster/gce/coreos/configure.sh (new executable file, 176 lines)

@@ -0,0 +1,176 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
function download-kube-env {
# Fetch kube-env from GCE metadata server.
local -r tmp_kube_env="/tmp/kube-env.yaml"
curl --fail --retry 5 --retry-delay 3 --silent --show-error \
-H "X-Google-Metadata-Request: True" \
-o "${tmp_kube_env}" \
http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-env
# Convert the yaml format file into a shell-style file.
sed 's/: /=/' < "${tmp_kube_env}" > "${KUBE_HOME}/kube-env"
rm -f "${tmp_kube_env}"
}
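# Illustrative only (value assumed): a kube-env line such as "ENABLE_CLUSTER_DNS: 'true'"
# becomes "ENABLE_CLUSTER_DNS='true'" after the sed above, so the result can simply be sourced.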
function validate-hash {
local -r file="$1"
local -r expected="$2"
actual=$(sha1sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, sha1 ${actual} doesn't match expected ${expected} =="
return 1
fi
}
# Retry a download until we get it. Takes a hash and a set of URLs.
#
# $1 is the sha1 of the URL. Can be "" if the sha1 is unknown.
# $2+ are the URLs to download.
function download-or-bust {
local -r hash="$1"
shift 1
local -r urls=( $* )
while true; do
for url in "${urls[@]}"; do
local file="${url##*/}"
rm -f "${file}"
if ! curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --max-time 300 --retry 6 --retry-delay 10 "${url}"; then
echo "== Failed to download ${url}. Retrying. =="
elif [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
}
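# Illustrative usage, mirroring the CNI download further below:
#   download-or-bust "${cni_sha1}" "https://storage.googleapis.com/kubernetes-release/network-plugins/${cni_tar}"
# Passing "" as the hash skips checksum verification but still retries failed downloads.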
function split-commas {
echo $1 | tr "," "\n"
}
# Downloads the Kubernetes binaries and the kube-system manifests tarball, unpacks them,
# and places them into suitable directories under /opt/kubernetes.
function install-kube-binary-config {
cd "${KUBE_HOME}"
local -r server_binary_tar_urls=( $(split-commas "${SERVER_BINARY_TAR_URL}") )
local -r server_binary_tar="${server_binary_tar_urls[0]##*/}"
if [[ -n "${SERVER_BINARY_TAR_HASH:-}" ]]; then
local -r server_binary_tar_hash="${SERVER_BINARY_TAR_HASH}"
else
echo "Downloading binary release sha1 (not found in env)"
download-or-bust "" "${server_binary_tar_urls[@]/.tar.gz/.tar.gz.sha1}"
local -r server_binary_tar_hash=$(cat "${server_binary_tar}.sha1")
fi
echo "Downloading binary release tar"
download-or-bust "${server_binary_tar_hash}" "${server_binary_tar_urls[@]}"
tar xzf "${KUBE_HOME}/${server_binary_tar}" -C "${KUBE_HOME}" --overwrite
# Copy docker_tag and image files to ${KUBE_HOME}/kube-docker-files.
src_dir="${KUBE_HOME}/kubernetes/server/bin"
dst_dir="${KUBE_HOME}/kube-docker-files"
mkdir -p "${dst_dir}"
cp "${src_dir}/"*.docker_tag "${dst_dir}"
if [[ "${KUBERNETES_MASTER:-}" == "false" ]]; then
cp "${src_dir}/kube-proxy.tar" "${dst_dir}"
else
cp "${src_dir}/kube-apiserver.tar" "${dst_dir}"
cp "${src_dir}/kube-controller-manager.tar" "${dst_dir}"
cp "${src_dir}/kube-scheduler.tar" "${dst_dir}"
cp -r "${KUBE_HOME}/kubernetes/addons" "${dst_dir}"
fi
local -r kube_bin="${KUBE_HOME}/bin"
mv "${src_dir}/kubelet" "${kube_bin}"
mv "${src_dir}/kubectl" "${kube_bin}"
if [[ "${NETWORK_PROVIDER:-}" == "kubenet" ]] || \
[[ "${NETWORK_PROVIDER:-}" == "cni" ]]; then
# TODO(andyzheng0831): We should make the CNI version number a k8s env variable.
local -r cni_tar="cni-07a8a28637e97b22eb8dfe710eeae1344f69d16e.tar.gz"
local -r cni_sha1="19d49f7b2b99cd2493d5ae0ace896c64e289ccbb"
download-or-bust "${cni_sha1}" "https://storage.googleapis.com/kubernetes-release/network-plugins/${cni_tar}"
local -r cni_dir="${KUBE_HOME}/cni"
mkdir -p "${cni_dir}"
tar xzf "${KUBE_HOME}/${cni_tar}" -C "${cni_dir}" --overwrite
mv "${cni_dir}/bin"/* "${kube_bin}"
rmdir "${cni_dir}/bin"
rm -f "${KUBE_HOME}/${cni_tar}"
fi
mv "${KUBE_HOME}/kubernetes/LICENSES" "${KUBE_HOME}"
mv "${KUBE_HOME}/kubernetes/kubernetes-src.tar.gz" "${KUBE_HOME}"
# Put kube-system pod manifests in ${KUBE_HOME}/kube-manifests/.
dst_dir="${KUBE_HOME}/kube-manifests"
mkdir -p "${dst_dir}"
local -r manifests_tar_urls=( $(split-commas "${KUBE_MANIFESTS_TAR_URL}") )
local -r manifests_tar="${manifests_tar_urls[0]##*/}"
if [ -n "${KUBE_MANIFESTS_TAR_HASH:-}" ]; then
local -r manifests_tar_hash="${KUBE_MANIFESTS_TAR_HASH}"
else
echo "Downloading k8s manifests sha1 (not found in env)"
download-or-bust "" "${manifests_tar_urls[@]/.tar.gz/.tar.gz.sha1}"
local -r manifests_tar_hash=$(cat "${manifests_tar}.sha1")
fi
echo "Downloading k8s manifests tar"
download-or-bust "${manifests_tar_hash}" "${manifests_tar_urls[@]}"
tar xzf "${KUBE_HOME}/${manifests_tar}" -C "${dst_dir}" --overwrite
local -r kube_addon_registry="${KUBE_ADDON_REGISTRY:-gcr.io/google_containers}"
if [[ "${kube_addon_registry}" != "gcr.io/google_containers" ]]; then
find "${dst_dir}" -name \*.yaml -or -name \*.yaml.in | \
xargs sed -ri "s@(image:\s.*)gcr.io/google_containers@\1${kube_addon_registry}@"
find "${dst_dir}" -name \*.manifest -or -name \*.json | \
xargs sed -ri "s@(image\":\s+\")gcr.io/google_containers@\1${kube_addon_registry}@"
fi
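# Illustrative only (registry value assumed): with KUBE_ADDON_REGISTRY=eu.gcr.io/my-project, a
# manifest line like "image: gcr.io/google_containers/kubedns-amd64:1.9" would be rewritten by
# the sed above to "image: eu.gcr.io/my-project/kubedns-amd64:1.9".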
cp "${dst_dir}/kubernetes/gci-trusty/coreos-configure-helper.sh" "${KUBE_HOME}/bin/configure-helper.sh"
chmod -R 755 "${kube_bin}"
# Clean up.
rm -rf "${KUBE_HOME}/kubernetes"
rm -f "${KUBE_HOME}/${server_binary_tar}"
rm -f "${KUBE_HOME}/${server_binary_tar}.sha1"
rm -f "${KUBE_HOME}/${manifests_tar}"
rm -f "${KUBE_HOME}/${manifests_tar}.sha1"
}
######### Main Function ##########
echo "Start to install kubernetes files"
KUBE_HOME="/opt/kubernetes"
mkdir -p "${KUBE_HOME}"
download-kube-env
source "${KUBE_HOME}/kube-env"
install-kube-binary-config
echo "Done for installing kubernetes files"
# On CoreOS, the hosts file is in /usr/share/baselayout/hosts,
# so we need to manually populate the hosts file here on GCE.
echo "127.0.0.1 localhost" >> /etc/hosts
echo "::1 localhost" >> /etc/hosts
echo "Configuring hostname"
hostnamectl set-hostname $(hostname | cut -f1 -d.)


@@ -0,0 +1,81 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is for master and node instance health monitoring. It is packaged in
# the kube-manifests tarball and executed through a systemd service in
# cluster/gce/gci/<master/node>.yaml. The env variables come from an env file
# provided by the systemd service.
set -o nounset
set -o pipefail
# We simply kill the process when there is a failure. Another systemd service will
# automatically restart the process.
function docker_monitoring {
while [ 1 ]; do
if ! timeout 10 docker ps > /dev/null; then
echo "Docker daemon failed!"
pkill docker
# Wait for a while, as we don't want to kill it again before it is really up.
sleep 30
else
sleep "${SLEEP_SECONDS}"
fi
done
}
function kubelet_monitoring {
echo "Wait for 2 minutes for kubelet to be fuctional"
# TODO(andyzheng0831): replace it with a more reliable method if possible.
sleep 120
local -r max_seconds=10
while [ 1 ]; do
if ! curl --insecure -m "${max_seconds}" -f -s https://127.0.0.1:${KUBELET_PORT:-10250}/healthz > /dev/null; then
echo "Kubelet is unhealthy!"
curl --insecure https://127.0.0.1:${KUBELET_PORT:-10250}/healthz
pkill kubelet
# Wait for a while, as we don't want to kill it again before it is really up.
sleep 60
else
sleep "${SLEEP_SECONDS}"
fi
done
}
############## Main Function ################
if [[ "$#" -ne 1 ]]; then
echo "Usage: health-monitor.sh <docker/kubelet>"
exit 1
fi
KUBE_ENV="/home/kubernetes/kube-env"
if [[ ! -e "${KUBE_ENV}" ]]; then
echo "The ${KUBE_ENV} file does not exist!! Terminate health monitoring"
exit 1
fi
SLEEP_SECONDS=10
component=$1
echo "Start kubernetes health monitoring for ${component}"
source "${KUBE_ENV}"
if [[ "${component}" == "docker" ]]; then
docker_monitoring
elif [[ "${component}" == "kubelet" ]]; then
kubelet_monitoring
else
echo "Health monitoring for component "${component}" is not supported!"
fi


@@ -1,6 +1,6 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,20 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
# A library of helper functions and constants for the CoreOS distro
MANIFESTS_DIR=/opt/kube-manifests/kubernetes
echo "Configuring hostname"
hostnamectl set-hostname $(hostname | cut -f1 -d.)
echo "Configuring kubelet"
mkdir -p /var/lib/kubelet
mkdir -p /etc/kubernetes/manifests
src=${MANIFESTS_DIR}/kubelet-config.yaml
dst=/var/lib/kubelet/kubeconfig
cp ${src} ${dst}
sed -i 's/\"/\\\"/g' ${dst} # eval will remove the double quotes if they are not escaped
eval "echo \"$(< ${dst})\"" > ${dst}
# This file intentionally left blank


@@ -1,21 +0,0 @@
apiVersion: v1
kind: Service
metadata:
# This must match the --default-backend-service argument of the l7 lb
# controller and is required because GCE mandates a default backend.
name: default-http-backend
namespace: kube-system
labels:
k8s-app: glbc
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "GLBCDefaultBackend"
spec:
# The default backend must be of type NodePort.
type: NodePort
ports:
- port: 80
targetPort: 8080
protocol: TCP
name: http
selector:
k8s-app: glbc


@@ -1,69 +0,0 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: l7-lb-controller-v0.6.0
namespace: kube-system
labels:
k8s-app: glbc
version: v0.6.0
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "GLBC"
spec:
# There should never be more than 1 controller alive simultaneously.
replicas: 1
selector:
k8s-app: glbc
version: v0.6.0
template:
metadata:
labels:
k8s-app: glbc
version: v0.6.0
name: glbc
kubernetes.io/cluster-service: "true"
spec:
terminationGracePeriodSeconds: 600
containers:
- name: default-http-backend
# Any image is permissible as long as:
# 1. It serves a 404 page at /
# 2. It serves 200 on a /healthz endpoint
image: gcr.io/google_containers/defaultbackend:1.0
livenessProbe:
httpGet:
path: /healthz
port: 8080
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 5
ports:
- containerPort: 8080
resources:
limits:
cpu: 10m
memory: 20Mi
requests:
cpu: 10m
memory: 20Mi
- image: gcr.io/google_containers/glbc:0.6.0
livenessProbe:
httpGet:
path: /healthz
port: 8081
scheme: HTTP
initialDelaySeconds: 30
# healthz reaches out to GCE
periodSeconds: 30
timeoutSeconds: 5
name: l7-lb-controller
resources:
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 100m
memory: 50Mi
args:
- --default-backend-service=kube-system/default-http-backend
- --sync-period=60s
- --cluster-uid=${KUBE_UID}


@@ -1,127 +0,0 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heapster-v1.2.0
namespace: kube-system
labels:
k8s-app: heapster
kubernetes.io/cluster-service: "true"
version: v1.2.0
spec:
replicas: 1
selector:
matchLabels:
k8s-app: heapster
version: v1.2.0
template:
metadata:
labels:
k8s-app: heapster
version: v1.2.0
spec:
containers:
- image: gcr.io/google_containers/heapster:v1.2.0
name: heapster
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 300Mi
requests:
cpu: 100m
memory: 300Mi
command:
- /heapster
- --source=kubernetes.summary_api:''
- --sink=gcm
volumeMounts:
- name: ssl-certs
mountPath: /etc/ssl/certs
readOnly: true
- name: usrsharecacerts
mountPath: /usr/share/ca-certificates
readOnly: true
- image: gcr.io/google_containers/heapster:v1.2.0
name: eventer
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 300Mi
requests:
cpu: 100m
memory: 300Mi
command:
- /eventer
- --source=kubernetes:''
- --sink=gcl
volumeMounts:
- name: ssl-certs
mountPath: /etc/ssl/certs
readOnly: true
- name: usrsharecacerts
mountPath: /usr/share/ca-certificates
readOnly: true
- image: gcr.io/google_containers/addon-resizer:1.0
name: heapster-nanny
resources:
limits:
cpu: 50m
memory: 100Mi
requests:
cpu: 50m
memory: 100Mi
env:
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
command:
- /pod_nanny
- --cpu=100m
- --extra-cpu=0m
- --memory=300Mi
- --extra-memory=4Mi
- --threshold=5
- --deployment=heapster-v1.2.0
- --container=heapster
- --poll-period=300000
- image: gcr.io/google_containers/addon-resizer:1.0
name: eventer-nanny
resources:
limits:
cpu: 50m
memory: 100Mi
requests:
cpu: 50m
memory: 100Mi
env:
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
command:
- /pod_nanny
- --cpu=100m
- --extra-cpu=0m
- --memory=300Mi
- --extra-memory=307200Ki
- --threshold=5
- --deployment=heapster-v1.2.0
- --container=eventer
- --poll-period=300000
volumes:
- name: ssl-certs
hostPath:
path: "/etc/ssl/certs"
- name: usrsharecacerts
hostPath:
path: "/usr/share/ca-certificates"


@@ -1,14 +0,0 @@
kind: Service
apiVersion: v1
metadata:
name: heapster
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "Heapster"
spec:
ports:
- port: 80
targetPort: 8082
selector:
k8s-app: heapster


@@ -1,128 +0,0 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heapster-v1.1.0
namespace: kube-system
labels:
k8s-app: heapster
kubernetes.io/cluster-service: "true"
version: v1.1.0
spec:
replicas: 1
selector:
matchLabels:
k8s-app: heapster
version: v1.1.0
template:
metadata:
labels:
k8s-app: heapster
version: v1.1.0
spec:
containers:
- image: gcr.io/google_containers/heapster:v1.2.0
name: heapster
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 300Mi
requests:
cpu: 100m
memory: 300Mi
command:
- /heapster
- --source=kubernetes.summary_api:''
- --sink=influxdb:http://monitoring-influxdb:8086
- --sink=gcm:?metrics=autoscaling
volumeMounts:
- name: ssl-certs
mountPath: /etc/ssl/certs
readOnly: true
- name: usrsharecacerts
mountPath: /usr/share/ca-certificates
readOnly: true
- image: gcr.io/google_containers/heapster:v1.2.0
name: eventer
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 300Mi
requests:
cpu: 100m
memory: 300Mi
command:
- /eventer
- --source=kubernetes:''
- --sink=gcl
volumeMounts:
- name: ssl-certs
mountPath: /etc/ssl/certs
readOnly: true
- name: usrsharecacerts
mountPath: /usr/share/ca-certificates
readOnly: true
- image: gcr.io/google_containers/addon-resizer:1.0
name: heapster-nanny
resources:
limits:
cpu: 50m
memory: 100Mi
requests:
cpu: 50m
memory: 100Mi
env:
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
command:
- /pod_nanny
- --cpu=100m
- --extra-cpu=0m
- --memory=300Mi
- --extra-memory=4Mi
- --threshold=5
- --deployment=heapster-v1.1.0
- --container=heapster
- --poll-period=300000
- image: gcr.io/google_containers/addon-resizer:1.0
name: eventer-nanny
resources:
limits:
cpu: 50m
memory: 100Mi
requests:
cpu: 50m
memory: 100Mi
env:
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
command:
- /pod_nanny
- --cpu=100m
- --extra-cpu=0m
- --memory=300Mi
- --extra-memory=307200Ki
- --threshold=5
- --deployment=heapster-v1.1.0
- --container=eventer
- --poll-period=300000
volumes:
- name: ssl-certs
hostPath:
path: "/etc/ssl/certs"
- name: usrsharecacerts
hostPath:
path: "/usr/share/ca-certificates"


@@ -1,18 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: monitoring-grafana
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "Grafana"
spec:
# On production clusters, consider setting up auth for grafana, and
# exposing Grafana either using a LoadBalancer or a public IP.
# type: LoadBalancer
ports:
- port: 80
targetPort: 3000
selector:
k8s-app: influxGrafana


@@ -1,106 +0,0 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heapster-v1.2.0
namespace: kube-system
labels:
k8s-app: heapster
kubernetes.io/cluster-service: "true"
version: v1.2.0
spec:
replicas: 1
selector:
matchLabels:
k8s-app: heapster
version: v1.2.0
template:
metadata:
labels:
k8s-app: heapster
version: v1.2.0
spec:
containers:
- image: gcr.io/google_containers/heapster:v1.2.0
name: heapster
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 300Mi
requests:
cpu: 100m
memory: 300Mi
command:
- /heapster
- --source=kubernetes.summary_api:''
- --sink=influxdb:http://monitoring-influxdb:8086
- image: gcr.io/google_containers/heapster:v1.2.0
name: eventer
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 300Mi
requests:
cpu: 100m
memory: 300Mi
command:
- /eventer
- --source=kubernetes:''
- --sink=influxdb:http://monitoring-influxdb:8086
- image: gcr.io/google_containers/addon-resizer:1.0
name: heapster-nanny
resources:
limits:
cpu: 50m
memory: 100Mi
requests:
cpu: 50m
memory: 100Mi
env:
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
command:
- /pod_nanny
- --cpu=100m
- --extra-cpu=0m
- --memory=300Mi
- --extra-memory=4Mi
- --threshold=5
- --deployment=heapster-v1.2.0
- --container=heapster
- --poll-period=300000
- image: gcr.io/google_containers/addon-resizer:1.0
name: eventer-nanny
resources:
limits:
cpu: 50m
memory: 100Mi
requests:
cpu: 50m
memory: 100Mi
env:
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
command:
- /pod_nanny
- --cpu=100m
- --extra-cpu=0m
- --memory=300Mi
- --extra-memory=307200Ki
- --threshold=5
- --deployment=heapster-v1.2.0
- --container=eventer
- --poll-period=300000


@@ -1,14 +0,0 @@
kind: Service
apiVersion: v1
metadata:
name: heapster
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "Heapster"
spec:
ports:
- port: 80
targetPort: 8082
selector:
k8s-app: heapster


@@ -1,74 +0,0 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: monitoring-influxdb-grafana-v3
namespace: kube-system
labels:
k8s-app: influxGrafana
version: v3
kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: influxGrafana
version: v3
template:
metadata:
labels:
k8s-app: influxGrafana
version: v3
kubernetes.io/cluster-service: "true"
spec:
containers:
- image: gcr.io/google_containers/heapster_influxdb:v0.7
name: influxdb
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 500Mi
requests:
cpu: 100m
memory: 500Mi
ports:
- containerPort: 8083
- containerPort: 8086
volumeMounts:
- name: influxdb-persistent-storage
mountPath: /data
- image: gcr.io/google_containers/heapster_grafana:v3.1.1
name: grafana
env:
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
env:
# This variable is required to set up templates in Grafana.
- name: INFLUXDB_SERVICE_URL
value: http://monitoring-influxdb:8086
# The following env variables are required to make Grafana accessible via
# the kubernetes api-server proxy. On production clusters, we recommend
# removing these env variables, setting up auth for grafana, and exposing the grafana
# service using a LoadBalancer or a public IP.
- name: GF_AUTH_BASIC_ENABLED
value: "false"
- name: GF_AUTH_ANONYMOUS_ENABLED
value: "true"
- name: GF_AUTH_ANONYMOUS_ORG_ROLE
value: Admin
- name: GF_SERVER_ROOT_URL
value: /api/v1/proxy/namespaces/kube-system/services/monitoring-grafana/
volumeMounts:
- name: grafana-persistent-storage
mountPath: /var
volumes:
- name: influxdb-persistent-storage
emptyDir: {}
- name: grafana-persistent-storage
emptyDir: {}


@@ -1,19 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: monitoring-influxdb
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "InfluxDB"
spec:
ports:
- name: http
port: 8083
targetPort: 8083
- name: api
port: 8086
targetPort: 8086
selector:
k8s-app: influxGrafana


@@ -1,63 +0,0 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heapster-v1.2.0
namespace: kube-system
labels:
k8s-app: heapster
kubernetes.io/cluster-service: "true"
version: v1.2.0
spec:
replicas: 1
selector:
matchLabels:
k8s-app: heapster
version: v1.2.0
template:
metadata:
labels:
k8s-app: heapster
version: v1.2.0
spec:
containers:
- image: gcr.io/google_containers/heapster:v1.2.0
name: heapster
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 300Mi
requests:
cpu: 100m
memory: 300Mi
command:
- /heapster
- --source=kubernetes.summary_api:''
- image: gcr.io/google_containers/addon-resizer:1.0
name: heapster-nanny
resources:
limits:
cpu: 50m
memory: 100Mi
requests:
cpu: 50m
memory: 100Mi
env:
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
command:
- /pod_nanny
- --cpu=100m
- --extra-cpu=0m
- --memory=300Mi
- --extra-memory=4Mi
- --threshold=5
- --deployment=heapster-v1.2.0
- --container=heapster
- --poll-period=300000


@@ -1,14 +0,0 @@
kind: Service
apiVersion: v1
metadata:
name: heapster
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "Heapster"
spec:
ports:
- port: 80
targetPort: 8082
selector:
k8s-app: heapster


@@ -1,40 +0,0 @@
# Keep this file in sync with addons/dashboard/dashboard-controller.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: kubernetes-dashboard
namespace: kube-system
labels:
k8s-app: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
spec:
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
spec:
containers:
- name: kubernetes-dashboard
image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.5.0
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 50Mi
requests:
cpu: 100m
memory: 50Mi
ports:
- containerPort: 9090
livenessProbe:
httpGet:
path: /
port: 9090
initialDelaySeconds: 30
timeoutSeconds: 30


@@ -1,14 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: kubernetes-dashboard
namespace: kube-system
labels:
k8s-app: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
spec:
selector:
k8s-app: kubernetes-dashboard
ports:
- port: 80
targetPort: 9090


@@ -1,145 +0,0 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
spec:
# replicas: not specified here:
# 1. So that the Addon Manager does not reconcile this replicas parameter.
# 2. Default is 1.
# 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
strategy:
rollingUpdate:
maxSurge: 10%
maxUnavailable: 0
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
spec:
containers:
- name: kubedns
image: gcr.io/google_containers/kubedns-amd64:1.9
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
# guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it.
limits:
memory: 200Mi
requests:
cpu: 100m
memory: 100Mi
livenessProbe:
httpGet:
path: /healthz-kubedns
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /readiness
port: 8081
scheme: HTTP
# we poll on pod startup for the Kubernetes master service and
# only set up the /readiness HTTP server once that's available.
initialDelaySeconds: 3
timeoutSeconds: 5
args:
# command = "/kube-dns"
- --domain=${DNS_DOMAIN}.
- --dns-port=10053
- --config-map=kube-dns
- --v=2
env:
- name: PROMETHEUS_PORT
value: "10055"
ports:
- containerPort: 10053
name: dns-local
protocol: UDP
- containerPort: 10053
name: dns-tcp-local
protocol: TCP
- containerPort: 10055
name: metrics
protocol: TCP
- name: dnsmasq
image: gcr.io/google_containers/kube-dnsmasq-amd64:1.4
livenessProbe:
httpGet:
path: /healthz-dnsmasq
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- --cache-size=1000
- --no-resolv
- --server=127.0.0.1#10053
- --log-facility=-
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- name: dnsmasq-metrics
image: gcr.io/google_containers/dnsmasq-metrics-amd64:1.0
livenessProbe:
httpGet:
path: /metrics
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- --v=2
- --logtostderr
ports:
- containerPort: 10054
name: metrics
protocol: TCP
resources:
requests:
memory: 10Mi
- name: healthz
image: gcr.io/google_containers/exechealthz-amd64:1.2
resources:
limits:
memory: 50Mi
requests:
cpu: 10m
# Note that this container shouldn't really need 50Mi of memory. The
# limits are set higher than expected pending investigation on #29688.
# The extra memory was stolen from the kubedns container to keep the
# net memory requested by the pod constant.
memory: 50Mi
args:
- --cmd=nslookup kubernetes.default.svc.${DNS_DOMAIN} 127.0.0.1 >/dev/null
- --url=/healthz-dnsmasq
- --cmd=nslookup kubernetes.default.svc.${DNS_DOMAIN} 127.0.0.1:10053 >/dev/null
- --url=/healthz-kubedns
- --port=8080
- --quiet
ports:
- containerPort: 8080
protocol: TCP
dnsPolicy: Default # Don't use cluster DNS.


@@ -1,20 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "KubeDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: ${DNS_SERVER_IP}
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP


@@ -1,43 +0,0 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: elasticsearch-logging-v1
namespace: kube-system
labels:
k8s-app: elasticsearch-logging
version: v1
kubernetes.io/cluster-service: "true"
spec:
replicas: 2
selector:
k8s-app: elasticsearch-logging
version: v1
template:
metadata:
labels:
k8s-app: elasticsearch-logging
version: v1
kubernetes.io/cluster-service: "true"
spec:
containers:
- image: gcr.io/google_containers/elasticsearch:v2.4.1
name: elasticsearch-logging
resources:
# need more cpu upon initialization, therefore burstable class
limits:
cpu: 1000m
requests:
cpu: 100m
ports:
- containerPort: 9200
name: db
protocol: TCP
- containerPort: 9300
name: transport
protocol: TCP
volumeMounts:
- name: es-persistent-storage
mountPath: /data
volumes:
- name: es-persistent-storage
emptyDir: {}


@@ -1,16 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: elasticsearch-logging
namespace: kube-system
labels:
k8s-app: elasticsearch-logging
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "Elasticsearch"
spec:
ports:
- port: 9200
protocol: TCP
targetPort: db
selector:
k8s-app: elasticsearch-logging


@@ -1,37 +0,0 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: kibana-logging-v1
namespace: kube-system
labels:
k8s-app: kibana-logging
version: v1
kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: kibana-logging
version: v1
template:
metadata:
labels:
k8s-app: kibana-logging
version: v1
kubernetes.io/cluster-service: "true"
spec:
containers:
- name: kibana-logging
image: gcr.io/google_containers/kibana:v4.6.1
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
requests:
cpu: 100m
env:
- name: "ELASTICSEARCH_URL"
value: "http://elasticsearch-logging:9200"
ports:
- containerPort: 5601
name: ui
protocol: TCP


@@ -1,16 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: kibana-logging
namespace: kube-system
labels:
k8s-app: kibana-logging
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "Kibana"
spec:
ports:
- port: 5601
protocol: TCP
targetPort: ui
selector:
k8s-app: kibana-logging


@@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: kube-system


@@ -1,44 +0,0 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: node-problem-detector-v0.1
namespace: kube-system
labels:
k8s-app: node-problem-detector
version: v0.1
kubernetes.io/cluster-service: "true"
spec:
template:
metadata:
labels:
k8s-app: node-problem-detector
version: v0.1
kubernetes.io/cluster-service: "true"
spec:
hostNetwork: true
containers:
- name: node-problem-detector
image: gcr.io/google_containers/node-problem-detector:v0.1
env:
# Config the host ip and port of apiserver.
- name: "KUBERNETES_SERVICE_HOST"
value: ${INSTANCE_PREFIX}-master
- name: "KUBERNETES_SERVICE_PORT"
value: "443"
securityContext:
privileged: true
resources:
limits:
cpu: "200m"
memory: "100Mi"
requests:
cpu: "20m"
memory: "20Mi"
volumeMounts:
- name: log
mountPath: /log
readOnly: true
volumes:
- name: log
hostPath:
path: /var/log/


@@ -1,14 +0,0 @@
kind: PersistentVolume
apiVersion: v1
metadata:
name: kube-system-kube-registry-pv
labels:
kubernetes.io/cluster-service: "true"
spec:
capacity:
storage: ${CLUSTER_REGISTRY_DISK_SIZE}
accessModes:
- ReadWriteOnce
gcePersistentDisk:
pdName: ${CLUSTER_REGISTRY_DISK}
fsType: "ext4"


@@ -1,13 +0,0 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: kube-registry-pvc
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: ${CLUSTER_REGISTRY_DISK_SIZE}


@@ -1,44 +0,0 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-registry-v0
namespace: kube-system
labels:
k8s-app: kube-registry
version: v0
kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: kube-registry
version: v0
template:
metadata:
labels:
k8s-app: kube-registry
version: v0
kubernetes.io/cluster-service: "true"
spec:
containers:
- name: registry
image: registry:2
resources:
limits:
cpu: 100m
memory: 100Mi
env:
- name: REGISTRY_HTTP_ADDR
value: :5000
- name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
value: /var/lib/registry
volumeMounts:
- name: image-store
mountPath: /var/lib/registry
ports:
- containerPort: 5000
name: registry
protocol: TCP
volumes:
- name: image-store
persistentVolumeClaim:
claimName: kube-registry-pvc


@@ -1,16 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: kube-registry
namespace: kube-system
labels:
k8s-app: kube-registry
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "KubeRegistry"
spec:
selector:
k8s-app: kube-registry
ports:
- name: registry
port: 5000
protocol: TCP


@@ -1,58 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: etcd-server-events
namespace: kube-system
spec:
containers:
- command:
- /bin/sh
- -c
- /usr/local/bin/etcd
--listen-peer-urls=http://127.0.0.1:2381
--advertise-client-urls=http://127.0.0.1:4002
--listen-client-urls=http://127.0.0.1:4002
--data-dir=/var/etcd/data-events
1>>/var/log/etcd-events.log 2>&1
image: gcr.io/google_containers/etcd:2.2.1
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
host: 127.0.0.1
path: /health
port: 4002
scheme: HTTP
initialDelaySeconds: 15
timeoutSeconds: 15
name: etcd-container
ports:
- containerPort: 2381
hostPort: 2381
name: serverport
protocol: TCP
- containerPort: 4002
hostPort: 4002
name: clientport
protocol: TCP
resources:
limits:
cpu: 100m
requests:
cpu: 100m
volumeMounts:
- mountPath: /var/etcd
name: varetcd
- mountPath: /var/log/etcd-events.log
name: varlogetcd
dnsPolicy: ClusterFirst
hostNetwork: true
nodeName: ${INSTANCE_PREFIX}-master
restartPolicy: Always
terminationGracePeriodSeconds: 30
volumes:
- hostPath:
path: /mnt/master-pd/var/etcd
name: varetcd
- hostPath:
path: /var/log/etcd-events.log
name: varlogetcd


@@ -1,55 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: etcd-server
namespace: kube-system
spec:
containers:
- command:
- /bin/sh
- -c
- /usr/local/bin/etcd
--listen-peer-urls=http://127.0.0.1:2380
--advertise-client-urls=http://127.0.0.1:2379
--listen-client-urls=http://127.0.0.1:2379
--data-dir=/var/etcd/data
1>>/var/log/etcd.log 2>&1
image: gcr.io/google_containers/etcd:2.2.1
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
host: 127.0.0.1
path: /health
port: 2379
scheme: HTTP
initialDelaySeconds: 15
timeoutSeconds: 15
name: etcd-container
ports:
- containerPort: 2380
hostPort: 2380
name: serverport
protocol: TCP
- containerPort: 2379
hostPort: 2379
name: clientport
protocol: TCP
resources:
requests:
cpu: 200m
volumeMounts:
- mountPath: /var/etcd
name: varetcd
- mountPath: /var/log/etcd.log
name: varlogetcd
dnsPolicy: ClusterFirst
hostNetwork: true
restartPolicy: Always
terminationGracePeriodSeconds: 30
volumes:
- hostPath:
path: /mnt/master-pd/var/etcd
name: varetcd
- hostPath:
path: /var/log/etcd.log
name: varlogetcd


@@ -1,34 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: kube-addon-manager
namespace: kube-system
labels:
component: kube-addon-manager
spec:
hostNetwork: true
containers:
- name: kube-addon-manager
image: gcr.io/google-containers/kube-addon-manager:v6.1
command:
- /bin/bash
- -c
- /opt/kube-addons.sh 1>>/var/log/kube-addon-manager.log 2>&1
resources:
requests:
cpu: 5m
memory: 50Mi
volumeMounts:
- mountPath: /etc/kubernetes/
name: addons
readOnly: true
- mountPath: /var/log
name: varlog
readOnly: false
volumes:
- hostPath:
path: /etc/kubernetes/
name: addons
- hostPath:
path: /var/log
name: varlog


@@ -1,83 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: kube-apiserver
namespace: kube-system
spec:
containers:
- command:
- /bin/sh
- -c
- /usr/local/bin/kube-apiserver
--address=127.0.0.1
--etcd-servers=http://127.0.0.1:2379
--etcd-servers-overrides=/events#http://127.0.0.1:4002
--cloud-provider=gce
--admission-control=${ADMISSION_CONTROL}
--service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}
--client-ca-file=/srv/kubernetes/ca.crt
--basic-auth-file=/srv/kubernetes/basic_auth.csv
--tls-cert-file=/srv/kubernetes/server.cert
--tls-private-key-file=/srv/kubernetes/server.key
--secure-port=443
--token-auth-file=/srv/kubernetes/known_tokens.csv
--v=2
--allow-privileged=True
1>>/var/log/kube-apiserver.log 2>&1
image: gcr.io/google_containers/kube-apiserver:${KUBE_APISERVER_DOCKER_TAG}
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
host: 127.0.0.1
path: /healthz
port: 8080
scheme: HTTP
initialDelaySeconds: 15
timeoutSeconds: 15
name: kube-apiserver
ports:
- containerPort: 443
hostPort: 443
name: https
protocol: TCP
- containerPort: 8080
hostPort: 8080
name: local
protocol: TCP
resources:
requests:
cpu: 250m
volumeMounts:
- mountPath: /srv/kubernetes
name: srvkube
readOnly: true
- mountPath: /var/log/kube-apiserver.log
name: logfile
- mountPath: /etc/ssl
name: etcssl
readOnly: true
- mountPath: /usr/share/ca-certificates
name: usrsharecacerts
readOnly: true
- mountPath: /srv/sshproxy
name: srvsshproxy
dnsPolicy: ClusterFirst
hostNetwork: true
restartPolicy: Always
terminationGracePeriodSeconds: 30
volumes:
- hostPath:
path: /srv/kubernetes
name: srvkube
- hostPath:
path: /var/log/kube-apiserver.log
name: logfile
- hostPath:
path: /etc/ssl
name: etcssl
- hostPath:
path: /usr/share/ca-certificates
name: usrsharecacerts
- hostPath:
path: /srv/sshproxy
name: srvsshproxy


@@ -1,66 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: kube-controller-manager
namespace: kube-system
spec:
containers:
- command:
- /bin/sh
- -c
- /usr/local/bin/kube-controller-manager
--master=127.0.0.1:8080
--cluster-name=${INSTANCE_PREFIX}
--cluster-cidr=${CLUSTER_IP_RANGE}
--service-cluster-ip-range="${SERVICE_CLUSTER_IP_RANGE}"
--allocate-node-cidrs=true
--cloud-provider=gce
--service-account-private-key-file=/srv/kubernetes/server.key
--v=2
--root-ca-file=/srv/kubernetes/ca.crt
1>>/var/log/kube-controller-manager.log 2>&1
image: gcr.io/google_containers/kube-controller-manager:${KUBE_CONTROLLER_MANAGER_DOCKER_TAG}
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
host: 127.0.0.1
path: /healthz
port: 10252
scheme: HTTP
initialDelaySeconds: 15
timeoutSeconds: 15
name: kube-controller-manager
resources:
limits:
cpu: 200m
requests:
cpu: 200m
volumeMounts:
- mountPath: /srv/kubernetes
name: srvkube
readOnly: true
- mountPath: /var/log/kube-controller-manager.log
name: logfile
- mountPath: /etc/ssl
name: etcssl
readOnly: true
- mountPath: /usr/share/ca-certificates
name: usrsharecacerts
readOnly: true
dnsPolicy: ClusterFirst
hostNetwork: true
restartPolicy: Always
terminationGracePeriodSeconds: 30
volumes:
- hostPath:
path: /srv/kubernetes
name: srvkube
- hostPath:
path: /var/log/kube-controller-manager.log
name: logfile
- hostPath:
path: /etc/ssl
name: etcssl
- hostPath:
path: /usr/share/ca-certificates
name: usrsharecacerts


@@ -1,40 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: kube-scheduler
namespace: kube-system
spec:
containers:
- command:
- /bin/sh
- -c
- /usr/local/bin/kube-scheduler
--master=127.0.0.1:8080
--v=2
1>>/var/log/kube-scheduler.log 2>&1
image: gcr.io/google_containers/kube-scheduler:${KUBE_SCHEDULER_DOCKER_TAG}
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
host: 127.0.0.1
path: /healthz
port: 10251
scheme: HTTP
initialDelaySeconds: 15
timeoutSeconds: 15
name: kube-scheduler
resources:
requests:
cpu: 100m
volumeMounts:
- mountPath: /var/log/kube-scheduler.log
name: logfile
dnsPolicy: ClusterFirst
hostNetwork: true
nodeName: ${INSTANCE_PREFIX}-master
restartPolicy: Always
terminationGracePeriodSeconds: 30
volumes:
- hostPath:
path: /var/log/kube-scheduler.log
name: logfile


@@ -1,7 +0,0 @@
{
"apiVersion": "v1",
"kind": "Namespace",
"metadata": {
"name": "kube-system"
}
}


@@ -1,17 +0,0 @@
apiVersion: v1
kind: Config
users:
- name: kubelet
user:
client-certificate-data: ${KUBELET_CERT}
client-key-data: ${KUBELET_KEY}
clusters:
- name: local
cluster:
certificate-authority-data: ${CA_CERT}
contexts:
- context:
cluster: local
user: kubelet
name: service-account-context
current-context: service-account-context


@@ -1,16 +0,0 @@
apiVersion: v1
kind: Config
users:
- name: kube-proxy
user:
token: ${KUBE_PROXY_TOKEN}
clusters:
- name: local
cluster:
certificate-authority-data: ${CA_CERT}
contexts:
- context:
cluster: local
user: kube-proxy
name: service-account-context
current-context: service-account-context


@@ -1,168 +0,0 @@
#cloud-config
coreos:
update:
reboot-strategy: off
units:
- name: locksmithd.service
mask: true
- name: kube-env.service
command: start
content: |
[Unit]
Description=Fetch kubernetes-node-environment
Requires=network-online.target
After=network-online.target
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStartPre=/usr/bin/curl --fail --silent --show-error \
-H "X-Google-Metadata-Request: True" \
-o /etc/kube-env.yaml \
http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-env
# Transform the yaml to env file.
ExecStartPre=/usr/bin/mv /etc/kube-env.yaml /etc/kube-env
ExecStart=/usr/bin/sed -i "s/: '/=/;s/'$//" /etc/kube-env
- name: kubernetes-install-cni.service
command: start
content: |
[Unit]
Description=Download cni
Requires=network-online.target
After=network-online.target
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStartPre=/usr/bin/mkdir -p /opt/cni
ExecStartPre=/usr/bin/curl --fail --silent --show-error --location --create-dirs --output /opt/downloads/cni.tar.gz https://storage.googleapis.com/kubernetes-release/network-plugins/cni-07a8a28637e97b22eb8dfe710eeae1344f69d16e.tar.gz
ExecStart=/usr/bin/tar xf /opt/downloads/cni.tar.gz -C /opt/cni/
- name: kubernetes-download-salt.service
command: start
content: |
[Unit]
Description=Download salt
Requires=network-online.target
After=network-online.target
Requires=kube-env.service
After=kube-env.service
[Service]
Type=oneshot
RemainAfterExit=yes
EnvironmentFile=/etc/kube-env
ExecStartPre=/usr/bin/mkdir -p /opt/downloads
ExecStartPre=/usr/bin/curl --fail --silent --show-error --location --create-dirs --output \
/opt/downloads/kubernetes-salt.tar.gz ${SALT_TAR_URL}
# TODO(yifan): Check hash.
ExecStart=/usr/bin/tar xf /opt/downloads/kubernetes-salt.tar.gz -C /opt --overwrite
- name: kubernetes-download-manifests.service
command: start
content: |
[Unit]
Description=Download manifests
Requires=network-online.target
After=network-online.target
Requires=kube-env.service
After=kube-env.service
[Service]
Type=oneshot
RemainAfterExit=yes
EnvironmentFile=/etc/kube-env
ExecStartPre=/usr/bin/mkdir -p /opt/downloads
ExecStartPre=/usr/bin/curl --fail --silent --show-error --location --create-dirs --output \
/opt/downloads/kubernetes-manifests.tar.gz ${KUBE_MANIFESTS_TAR_URL}
# TODO(yifan): Check hash.
ExecStartPre=/usr/bin/mkdir -p /opt/kube-manifests
ExecStart=/usr/bin/tar xf /opt/downloads/kubernetes-manifests.tar.gz -C /opt/kube-manifests --overwrite
- name: kubernetes-install-node.service
command: start
content: |
[Unit]
Description=Install Kubernetes Server
Requires=network-online.target
After=network-online.target
Requires=kube-env.service
After=kube-env.service
[Service]
Type=oneshot
RemainAfterExit=yes
EnvironmentFile=/etc/kube-env
ExecStartPre=/usr/bin/mkdir -p /opt/downloads
ExecStartPre=/usr/bin/curl --fail --silent --show-error --location --create-dirs --output \
/opt/downloads/kubernetes-server-linux-amd64.tar.gz ${SERVER_BINARY_TAR_URL}
# TODO(yifan): Check hash.
ExecStart=/usr/bin/tar xf /opt/downloads/kubernetes-server-linux-amd64.tar.gz -C /opt --overwrite
- name: kubelet.service
command: start
content: |
[Unit]
Description=Run Kubelet service
Requires=network-online.target kube-env.service kubernetes-download-manifests.service kubernetes-install-cni.service
After=network-online.target kube-env.service kubernetes-download-manifests.service kubernetes-install-cni.service
[Service]
EnvironmentFile=/etc/kube-env
ExecStartPre=/usr/bin/curl --fail --silent --show-error \
-H "X-Google-Metadata-Request: True" \
-o /run/configure-kubelet.sh \
http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-kubelet
ExecStartPre=/usr/bin/chmod 0755 /run/configure-kubelet.sh
ExecStartPre=/run/configure-kubelet.sh
ExecStart=/opt/kubernetes/server/bin/kubelet \
--api-servers=https://${INSTANCE_PREFIX}-master \
--enable-debugging-handlers=false \
--cloud-provider=gce \
--config=/etc/kubernetes/manifests \
--allow-privileged=true \
--v=2 \
--cluster-dns=${DNS_SERVER_IP} \
--cluster-domain=${DNS_DOMAIN} \
--logtostderr=true \
--container-runtime=docker \
--pod-cidr=${MASTER_IP_RANGE} \
--register-schedulable=false
Restart=always
RestartSec=10
KillMode=process
- name: docker.service
drop-ins:
- name: 50-docker-opts.conf
content: |
[Service]
Environment='DOCKER_OPTS=--bridge=cbr0 --iptables=false --ip-masq=false'
MountFlags=slave
LimitNOFILE=1048576
LimitNPROC=1048576
LimitCORE=infinity
Restart=always
RestartSec=2s
StartLimitInterval=0
- name: kubernetes-configure-node.service
command: start
content: |
[Unit]
Description=Configure Node For Kubernetes service
Requires=kubernetes-install-node.service
After=kubernetes-install-node.service
Requires=kubernetes-download-salt.service
After=kubernetes-download-salt.service
Requires=kubernetes-download-manifests.service
After=kubernetes-download-manifests.service
# Need the kubelet/docker running because we will use docker load for docker images.
Requires=kubelet.service
After=kubelet.service
[Service]
Type=oneshot
RemainAfterExit=yes
EnvironmentFile=/etc/kube-env
ExecStartPre=/usr/bin/curl --fail --silent --show-error \
-H "X-Google-Metadata-Request: True" \
-o /run/configure-node.sh \
http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-node
ExecStartPre=/usr/bin/chmod 0755 /run/configure-node.sh
ExecStart=/run/configure-node.sh
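The kube-env.service unit in the (now removed) cloud-config above flattened the kube-env YAML metadata into an environment file with a single sed expression. A minimal sketch of that transformation, using made-up values for two keys that do appear in the unit files:

```bash
#!/bin/bash
# Reproduce the old kube-env.service transform: "KEY: 'value'" -> "KEY=value".
# The sed expression is copied from the removed unit; the values are illustrative.
cat > /tmp/kube-env <<'EOF'
DNS_DOMAIN: 'cluster.local'
DNS_SERVER_IP: '10.0.0.10'
EOF
sed -i "s/: '/=/;s/'$//" /tmp/kube-env
cat /tmp/kube-env
# DNS_DOMAIN=cluster.local
# DNS_SERVER_IP=10.0.0.10
```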


@@ -1,6 +1,6 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,6 +15,7 @@
# limitations under the License.
# A library of helper functions and constant for coreos os distro
source "${KUBE_ROOT}/cluster/gce/coreos/helper.sh"
# create-master-instance creates the master instance. If called with
# an argument, the argument is used as the name to a reserved IP
@@ -24,23 +25,56 @@
# It requires a whole slew of assumed variables, partially due to
# the call to write-master-env. Listing them would be rather
# futile. Instead, we list the required calls to ensure any additional
#
# variables are set:
# ensure-temp-dir
# detect-project
# get-bearer-token
#
function create-master-instance() {
function create-master-instance {
local address_opt=""
[[ -n ${1:-} ]] && address_opt="--address ${1}"
write-master-env
create-master-instance-internal "${MASTER_NAME}" "${address_opt}"
}
function replicate-master-instance() {
local existing_master_zone="${1}"
local existing_master_name="${2}"
local existing_master_replicas="${3}"
local kube_env="$(get-metadata "${existing_master_zone}" "${existing_master_name}" kube-env)"
# Substitute INITIAL_ETCD_CLUSTER to enable etcd clustering.
kube_env="$(echo "${kube_env}" | grep -v "INITIAL_ETCD_CLUSTER")"
kube_env="$(echo -e "${kube_env}\nINITIAL_ETCD_CLUSTER: '${existing_master_replicas},${REPLICA_NAME}'")"
ETCD_CA_KEY="$(echo "${kube_env}" | grep "ETCD_CA_KEY" | sed "s/^.*: '//" | sed "s/'$//")"
ETCD_CA_CERT="$(echo "${kube_env}" | grep "ETCD_CA_CERT" | sed "s/^.*: '//" | sed "s/'$//")"
create-etcd-certs "${REPLICA_NAME}" "${ETCD_CA_CERT}" "${ETCD_CA_KEY}"
kube_env="$(echo "${kube_env}" | grep -v "ETCD_PEER_KEY")"
kube_env="$(echo -e "${kube_env}\nETCD_PEER_KEY: '${ETCD_PEER_KEY_BASE64}'")"
kube_env="$(echo "${kube_env}" | grep -v "ETCD_PEER_CERT")"
kube_env="$(echo -e "${kube_env}\nETCD_PEER_CERT: '${ETCD_PEER_CERT_BASE64}'")"
echo "${kube_env}" > ${KUBE_TEMP}/master-kube-env.yaml
get-metadata "${existing_master_zone}" "${existing_master_name}" cluster-name > "${KUBE_TEMP}/cluster-name.txt"
create-master-instance-internal "${REPLICA_NAME}"
}
function create-master-instance-internal() {
local -r master_name="${1}"
local -r address_option="${2:-}"
local preemptible_master=""
if [[ "${PREEMPTIBLE_MASTER:-}" == "true" ]]; then
preemptible_master="--preemptible --maintenance-policy TERMINATE"
fi
write-master-env
gcloud compute instances create "${MASTER_NAME}" \
${address_opt} \
gcloud compute instances create "${master_name}" \
${address_option} \
--project "${PROJECT}" \
--zone "${ZONE}" \
--machine-type "${MASTER_SIZE}" \
@@ -51,8 +85,18 @@ function create-master-instance() {
--scopes "storage-ro,compute-rw,monitoring,logging-write" \
--can-ip-forward \
--metadata-from-file \
"kube-env=${KUBE_TEMP}/master-kube-env.yaml,user-data=${KUBE_ROOT}/cluster/gce/coreos/master-${CONTAINER_RUNTIME}.yaml,configure-node=${KUBE_ROOT}/cluster/gce/coreos/configure-node.sh,configure-kubelet=${KUBE_ROOT}/cluster/gce/coreos/configure-kubelet.sh,cluster-name=${KUBE_TEMP}/cluster-name.txt" \
--disk "name=${MASTER_NAME}-pd,device-name=master-pd,mode=rw,boot=no,auto-delete=no" \
--boot-disk-size "${MASTER_ROOT_DISK_SIZE:-10}" \
${preemptible_master}
"kube-env=${KUBE_TEMP}/master-kube-env.yaml,user-data=${KUBE_ROOT}/cluster/gce/coreos/master.yaml,configure-sh=${KUBE_ROOT}/cluster/gce/coreos/configure.sh,cluster-name=${KUBE_TEMP}/cluster-name.txt" \
--disk "name=${master_name}-pd,device-name=master-pd,mode=rw,boot=no,auto-delete=no" \
--boot-disk-size "${MASTER_ROOT_DISK_SIZE:-30}" \
${preemptible_master}
}
function get-metadata() {
local zone="${1}"
local name="${2}"
local key="${3}"
gcloud compute ssh "${name}" \
--project "${PROJECT}" \
--zone "${zone}" \
--command "curl \"http://metadata.google.internal/computeMetadata/v1/instance/attributes/${key}\" -H \"Metadata-Flavor: Google\"" 2>/dev/null
}
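get-metadata above shells into the instance and reads a single metadata attribute. From inside a GCE VM, the same lookup is just the curl call it wraps; a self-contained sketch (the kube-env attribute name comes from this PR, the rest is illustrative):

```bash
#!/bin/bash
# Fetch one instance metadata attribute from inside a GCE VM, mirroring the
# command that get-metadata runs over SSH above.
attribute="${1:-kube-env}"
curl --fail --silent --show-error \
  -H "Metadata-Flavor: Google" \
  "http://metadata.google.internal/computeMetadata/v1/instance/attributes/${attribute}"
```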


@@ -1,220 +0,0 @@
#cloud-config
coreos:
update:
reboot-strategy: off
units:
- name: locksmithd.service
mask: true
- name: kube-env.service
command: start
content: |
[Unit]
Description=Fetch kubernetes-node-environment
Requires=network-online.target
After=network-online.target
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStartPre=/usr/bin/curl --fail --silent --show-error \
-H "X-Google-Metadata-Request: True" \
-o /etc/kube-env.yaml \
http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-env
# Transform the yaml to env file.
ExecStartPre=/usr/bin/mv /etc/kube-env.yaml /etc/kube-env
ExecStart=/usr/bin/sed -i "s/: '/=/;s/'$//" /etc/kube-env
- name: kubernetes-install-cni.service
command: start
content: |
[Unit]
Description=Download cni
Requires=network-online.target
After=network-online.target
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStartPre=/usr/bin/mkdir -p /opt/cni
ExecStartPre=/usr/bin/curl --fail --silent --show-error --location --create-dirs --output /opt/downloads/cni.tar.gz https://storage.googleapis.com/kubernetes-release/network-plugins/cni-07a8a28637e97b22eb8dfe710eeae1344f69d16e.tar.gz
ExecStart=/usr/bin/tar xf /opt/downloads/cni.tar.gz -C /opt/cni/
- name: kubernetes-install-docker2aci.service
command: start
content: |
[Unit]
Description=Download docker2aci
Requires=network-online.target
After=network-online.target
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStartPre=/usr/bin/mkdir -p /opt/docker2aci
ExecStartPre=/usr/bin/curl --fail --silent --show-error --location --create-dirs --output /opt/downloads/docker2aci.tar.gz https://github.com/appc/docker2aci/releases/download/v0.11.1/docker2aci-v0.11.1.tar.gz
ExecStart=/usr/bin/tar --strip-components=1 -xf /opt/downloads/docker2aci.tar.gz -C /opt/docker2aci/ --overwrite
- name: kubernetes-install-rkt.service
command: start
content: |
[Unit]
Description=Fetch rkt
Documentation=http://github.com/coreos/rkt
Requires=network-online.target
After=network-online.target
Requires=kube-env.service
After=kube-env.service
[Service]
Type=oneshot
RemainAfterExit=yes
EnvironmentFile=/etc/kube-env
ExecStartPre=/usr/bin/mkdir -p /etc/rkt /opt/downloads /opt/rkt/
ExecStartPre=/usr/bin/curl --fail --silent --location --create-dirs --output /opt/downloads/rkt.tar.gz https://github.com/coreos/rkt/releases/download/v${RKT_VERSION}/rkt-v${RKT_VERSION}.tar.gz
ExecStart=/usr/bin/tar --strip-components=1 -xf /opt/downloads/rkt.tar.gz -C /opt/rkt/ --overwrite
- name: kubernetes-load-rkt-stage1.service
command: start
content: |
[Unit]
Description=Load rkt stage1 images
Documentation=http://github.com/coreos/rkt
Requires=network-online.target
After=network-online.target
Requires=kubernetes-install-rkt.service
After=kubernetes-install-rkt.service
[Service]
Type=oneshot
ExecStart=/opt/rkt/rkt fetch /opt/rkt/stage1-coreos.aci /opt/rkt/stage1-kvm.aci /opt/rkt/stage1-fly.aci --insecure-options=image
- name: kubernetes-download-salt.service
command: start
content: |
[Unit]
Description=Download salt
Requires=network-online.target
After=network-online.target
Requires=kube-env.service
After=kube-env.service
[Service]
Type=oneshot
RemainAfterExit=yes
EnvironmentFile=/etc/kube-env
ExecStartPre=/usr/bin/mkdir -p /opt/downloads
ExecStartPre=/usr/bin/curl --fail --silent --show-error --location --create-dirs --output \
/opt/downloads/kubernetes-salt.tar.gz ${SALT_TAR_URL}
# TODO(yifan): Check hash.
ExecStart=/usr/bin/tar xf /opt/downloads/kubernetes-salt.tar.gz -C /opt --overwrite
- name: kubernetes-download-manifests.service
command: start
content: |
[Unit]
Description=Download manifests
Requires=network-online.target
After=network-online.target
Requires=kube-env.service
After=kube-env.service
[Service]
Type=oneshot
RemainAfterExit=yes
EnvironmentFile=/etc/kube-env
ExecStartPre=/usr/bin/mkdir -p /opt/downloads
ExecStartPre=/usr/bin/curl --fail --silent --show-error --location --create-dirs --output \
/opt/downloads/kubernetes-manifests.tar.gz ${KUBE_MANIFESTS_TAR_URL}
# TODO(yifan): Check hash.
ExecStartPre=/usr/bin/mkdir -p /opt/kube-manifests
ExecStart=/usr/bin/tar xf /opt/downloads/kubernetes-manifests.tar.gz -C /opt/kube-manifests --overwrite
- name: kubernetes-install-node.service
command: start
content: |
[Unit]
Description=Install Kubernetes Server
Requires=network-online.target
After=network-online.target
Requires=kube-env.service
After=kube-env.service
[Service]
Type=oneshot
RemainAfterExit=yes
EnvironmentFile=/etc/kube-env
ExecStartPre=/usr/bin/mkdir -p /opt/downloads
ExecStartPre=/usr/bin/curl --fail --silent --show-error --location --create-dirs --output \
/opt/downloads/kubernetes-server-linux-amd64.tar.gz ${SERVER_BINARY_TAR_URL}
# TODO(yifan): Check hash.
ExecStart=/usr/bin/tar xf /opt/downloads/kubernetes-server-linux-amd64.tar.gz -C /opt --overwrite
- name: rkt-api-service.service
command: start
content: |
[Unit]
Description=Start rkt API service as Daemon
Requires=kubernetes-install-rkt.service
After=kubernetes-install-rkt.service
[Service]
ExecStart=/opt/rkt/rkt api-service
Restart=always
RestartSec=10
- name: kubelet.service
command: start
content: |
[Unit]
Description=Run Kubelet service
Requires=network-online.target kube-env.service kubernetes-download-manifests.service kubernetes-install-cni.service kubernetes-load-rkt-stage1.service
After=network-online.target kube-env.service kubernetes-download-manifests.service kubernetes-install-cni.service kubernetes-load-rkt-stage1.service
[Service]
EnvironmentFile=/etc/kube-env
ExecStartPre=/usr/bin/curl --fail --silent --show-error \
-H "X-Google-Metadata-Request: True" \
-o /run/configure-kubelet.sh \
http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-kubelet
ExecStartPre=/usr/bin/chmod 0755 /run/configure-kubelet.sh
ExecStartPre=/run/configure-kubelet.sh
ExecStart=/opt/kubernetes/server/bin/kubelet \
--api-servers=https://${INSTANCE_PREFIX}-master \
--enable-debugging-handlers=false \
--cloud-provider=gce \
--config=/etc/kubernetes/manifests \
--allow-privileged=true \
--v=2 \
--cluster-dns=${DNS_SERVER_IP} \
--cluster-domain=${DNS_DOMAIN} \
--logtostderr=true \
--container-runtime=rkt \
--rkt-path=/opt/rkt/rkt \
--rkt-stage1-image=${RKT_STAGE_IMAGE} \
--pod-cidr=${MASTER_IP_RANGE} \
--register-schedulable=false
Restart=always
RestartSec=10
KillMode=process
- name: docker.service
command: stop
- name: kubernetes-configure-node.service
command: start
content: |
[Unit]
Description=Configure Node For Kubernetes service
Requires=kubernetes-install-node.service
After=kubernetes-install-node.service
Requires=kubernetes-install-rkt.service
After=kubernetes-install-rkt.service
Requires=kubernetes-download-salt.service
After=kubernetes-download-salt.service
Requires=kubernetes-download-manifests.service
After=kubernetes-download-manifests.service
Requires=kubernetes-install-docker2aci.service
After=kubernetes-install-docker2aci.service
Requires=kubelet.service
After=kubelet.service
[Service]
Type=oneshot
RemainAfterExit=yes
EnvironmentFile=/etc/kube-env
ExecStartPre=/usr/bin/curl --fail --silent --show-error \
-H "X-Google-Metadata-Request: True" \
-o /run/configure-node.sh \
http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-node
ExecStartPre=/usr/bin/chmod 0755 /run/configure-node.sh
ExecStart=/run/configure-node.sh


@@ -0,0 +1,56 @@
#cloud-config
coreos:
update:
reboot-strategy: off
units:
- name: locksmithd.service
mask: true
- name: kube-master-installation.service
command: start
content: |
[Unit]
Description=Download and install k8s binaries and configurations
After=network-online.target
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStartPre=/bin/mkdir -p /opt/kubernetes/bin
ExecStartPre=/usr/bin/curl --fail --retry 5 --retry-delay 3 --silent --show-error -H "X-Google-Metadata-Request: True" -o /opt/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh
ExecStartPre=/bin/chmod 544 /opt/kubernetes/bin/configure.sh
ExecStart=/opt/kubernetes/bin/configure.sh
[Install]
WantedBy=kubernetes.target
- name: kube-master-configuration.service
command: start
content: |
[Unit]
Description=Configure kubernetes master
After=kube-master-installation.service
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStartPre=/bin/chmod 544 /opt/kubernetes/bin/configure-helper.sh
ExecStart=/opt/kubernetes/bin/configure-helper.sh
[Install]
WantedBy=kubernetes.target
- name: kubernetes.target
enable: true
command: start
content: |
[Unit]
Description=Kubernetes
[Install]
WantedBy=multi-user.target
- name: docker.service
drop-ins:
- name: "use-cgroupfs-driver.conf"
# This is required for setting cgroup parent in the current ~1.4 per-pod cgroup impl
content: |
[Service]
Environment="DOCKER_CGROUPS=--exec-opt native.cgroupdriver="


@@ -1,159 +0,0 @@
#cloud-config
coreos:
update:
reboot-strategy: off
units:
- name: locksmithd.service
mask: true
- name: kube-env.service
command: start
content: |
[Unit]
Description=Fetch kubernetes-node-environment
Requires=network-online.target
After=network-online.target
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStartPre=/usr/bin/curl --fail --silent --show-error \
-H "X-Google-Metadata-Request: True" \
-o /etc/kube-env.yaml \
http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-env
# Transform the yaml to env file.
ExecStartPre=/usr/bin/mv /etc/kube-env.yaml /etc/kube-env
ExecStart=/usr/bin/sed -i "s/: '/=/;s/'$//" /etc/kube-env
- name: kubernetes-install-cni.service
command: start
content: |
[Unit]
Description=Download cni
Requires=network-online.target
After=network-online.target
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStartPre=/usr/bin/mkdir -p /opt/cni
ExecStartPre=/usr/bin/curl --fail --silent --show-error --location --create-dirs --output /opt/downloads/cni.tar.gz https://storage.googleapis.com/kubernetes-release/network-plugins/cni-07a8a28637e97b22eb8dfe710eeae1344f69d16e.tar.gz
ExecStart=/usr/bin/tar xf /opt/downloads/cni.tar.gz -C /opt/cni/
- name: kubernetes-download-manifests.service
command: start
content: |
[Unit]
Description=Download manifests
Requires=network-online.target
After=network-online.target
Requires=kube-env.service
After=kube-env.service
[Service]
Type=oneshot
RemainAfterExit=yes
EnvironmentFile=/etc/kube-env
ExecStartPre=/usr/bin/mkdir -p /opt/downloads
ExecStartPre=/usr/bin/curl --fail --silent --show-error --location --create-dirs --output \
/opt/downloads/kubernetes-manifests.tar.gz ${KUBE_MANIFESTS_TAR_URL}
# TODO(yifan): Check hash.
ExecStartPre=/usr/bin/mkdir -p /opt/kube-manifests
ExecStart=/usr/bin/tar xf /opt/downloads/kubernetes-manifests.tar.gz -C /opt/kube-manifests --overwrite
- name: kubernetes-install-node.service
command: start
content: |
[Unit]
Description=Install Kubernetes Server
Requires=network-online.target
After=network-online.target
Requires=kube-env.service
After=kube-env.service
[Service]
Type=oneshot
RemainAfterExit=yes
EnvironmentFile=/etc/kube-env
ExecStartPre=/usr/bin/mkdir -p /opt/kubernetes/pkg
ExecStartPre=/usr/bin/curl --fail --silent --show-error --location --create-dirs --output \
/opt/kubernetes/pkg/kubernetes-server-linux-amd64.tar.gz ${SERVER_BINARY_TAR_URL}
ExecStart=/usr/bin/tar xf /opt/kubernetes/pkg/kubernetes-server-linux-amd64.tar.gz -C /opt --overwrite
- name: kubelet.service
command: start
content: |
[Unit]
Description=Run Kubelet service
Requires=network-online.target kube-env.service kubernetes-download-manifests.service kubernetes-install-cni.service
After=network-online.target kube-env.service kubernetes-download-manifests.service kubernetes-install-cni.service
[Service]
EnvironmentFile=/etc/kube-env
ExecStartPre=/usr/bin/curl --fail --silent --show-error \
-H "X-Google-Metadata-Request: True" \
-o /run/configure-kubelet.sh \
http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-kubelet
ExecStartPre=/usr/bin/chmod 0755 /run/configure-kubelet.sh
ExecStartPre=/run/configure-kubelet.sh
ExecStart=/opt/kubernetes/server/bin/kubelet \
--api-servers=https://${INSTANCE_PREFIX}-master \
--enable-debugging-handlers=true \
--cloud-provider=gce \
--config=/etc/kubernetes/manifests \
--allow-privileged=true \
--v=2 \
--cluster-dns=${DNS_SERVER_IP} \
--cluster-domain=${DNS_DOMAIN} \
--logtostderr=true \
--container-runtime=docker \
--network-plugin=${NETWORK_PROVIDER}
Restart=always
RestartSec=10
KillMode=process
- name: kube-proxy.service
command: start
content: |
[Unit]
Description=Start Kube-proxy service as Daemon
Requires=kubernetes-configure-node.service
After=kubernetes-configure-node.service
[Service]
EnvironmentFile=/etc/kube-env
ExecStart=/opt/kubernetes/server/bin/kube-proxy \
--master=https://${KUBERNETES_MASTER_NAME} \
--kubeconfig=/var/lib/kube-proxy/kubeconfig \
--v=2 \
--logtostderr=true
Restart=always
RestartSec=10
- name: docker.service
drop-ins:
- name: 50-docker-opts.conf
content: |
[Service]
Environment='DOCKER_OPTS= --iptables=false --ip-masq=false'
MountFlags=slave
LimitNOFILE=1048576
LimitNPROC=1048576
LimitCORE=infinity
Restart=always
RestartSec=2s
StartLimitInterval=0
- name: kubernetes-configure-node.service
command: start
content: |
[Unit]
Description=Configure Node For Kubernetes service
Requires=kubernetes-install-node.service
After=kubernetes-install-node.service
Requires=kubernetes-download-manifests.service
After=kubernetes-download-manifests.service
[Service]
Type=oneshot
RemainAfterExit=yes
EnvironmentFile=/etc/kube-env
ExecStartPre=/usr/bin/curl --fail --silent --show-error \
-H "X-Google-Metadata-Request: True" \
-o /run/configure-node.sh \
http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-node
ExecStartPre=/usr/bin/chmod 0755 /run/configure-node.sh
ExecStart=/run/configure-node.sh


@@ -1,6 +1,6 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,15 +14,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions and constant for coreos os distro
# A library of helper functions and constants for the CoreOS distro
source "${KUBE_ROOT}/cluster/gce/coreos/helper.sh"
# $1: template name (required)
function create-node-instance-template() {
# $1: template name (required).
function create-node-instance-template {
local template_name="$1"
create-node-template "$template_name" "${scope_flags}" \
create-node-template "$template_name" "${scope_flags[*]}" \
"kube-env=${KUBE_TEMP}/node-kube-env.yaml" \
"user-data=${KUBE_ROOT}/cluster/gce/coreos/node-${CONTAINER_RUNTIME}.yaml" \
"configure-node=${KUBE_ROOT}/cluster/gce/coreos/configure-node.sh" \
"configure-kubelet=${KUBE_ROOT}/cluster/gce/coreos/configure-kubelet.sh" \
"user-data=${KUBE_ROOT}/cluster/gce/coreos/node.yaml" \
"configure-sh=${KUBE_ROOT}/cluster/gce/coreos/configure.sh" \
"cluster-name=${KUBE_TEMP}/cluster-name.txt"
# TODO(euank): We should include update-strategy here. We should also switch to ignition
}
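One subtle change above is passing "${scope_flags[*]}" instead of "${scope_flags}". Assuming scope_flags is a bash array (as the [*] expansion suggests), the unsubscripted form expands to only the first element, while [*] joins every element into a single word. A small illustration with made-up values:

```bash
#!/bin/bash
# Why "${scope_flags[*]}" matters when scope_flags is an array (values are made up).
scope_flags=(--scopes "storage-ro,compute-rw,monitoring")
printf 'unsubscripted: %s\n' "${scope_flags}"      # -> --scopes (first element only)
printf 'joined:        %s\n' "${scope_flags[*]}"   # -> --scopes storage-ro,compute-rw,monitoring
```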


@@ -1,196 +0,0 @@
#cloud-config
coreos:
update:
reboot-strategy: off
units:
- name: locksmithd.service
mask: true
- name: kube-env.service
command: start
content: |
[Unit]
Description=Fetch kubernetes-node-environment
Requires=network-online.target
After=network-online.target
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStartPre=/usr/bin/curl --fail --silent --show-error \
-H "X-Google-Metadata-Request: True" \
-o /etc/kube-env.yaml \
http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-env
# Transform the yaml to env file.
ExecStartPre=/usr/bin/mv /etc/kube-env.yaml /etc/kube-env
ExecStart=/usr/bin/sed -i "s/: '/=/;s/'$//" /etc/kube-env
- name: kubernetes-install-cni.service
command: start
content: |
[Unit]
Description=Download cni
Requires=network-online.target
After=network-online.target
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStartPre=/usr/bin/mkdir -p /opt/cni
ExecStartPre=/usr/bin/curl --fail --silent --show-error --location --create-dirs --output /opt/downloads/cni.tar.gz https://storage.googleapis.com/kubernetes-release/network-plugins/cni-07a8a28637e97b22eb8dfe710eeae1344f69d16e.tar.gz
ExecStart=/usr/bin/tar xf /opt/downloads/cni.tar.gz -C /opt/cni/
- name: kubernetes-install-rkt.service
command: start
content: |
[Unit]
Description=Fetch rkt
Documentation=http://github.com/coreos/rkt
Requires=network-online.target
After=network-online.target
Requires=kube-env.service
After=kube-env.service
[Service]
Type=oneshot
RemainAfterExit=yes
EnvironmentFile=/etc/kube-env
ExecStartPre=/usr/bin/mkdir -p /etc/rkt /opt/downloads /opt/rkt/
ExecStartPre=/usr/bin/curl --fail --silent --location --create-dirs --output /opt/downloads/rkt.tar.gz https://github.com/coreos/rkt/releases/download/v${RKT_VERSION}/rkt-v${RKT_VERSION}.tar.gz
ExecStart=/usr/bin/tar --strip-components=1 -xf /opt/downloads/rkt.tar.gz -C /opt/rkt/ --overwrite
- name: kubernetes-load-rkt-stage1.service
command: start
content: |
[Unit]
Description=Load rkt stage1 images
Documentation=http://github.com/coreos/rkt
Requires=network-online.target
After=network-online.target
Requires=kubernetes-install-rkt.service
After=kubernetes-install-rkt.service
[Service]
Type=oneshot
ExecStart=/opt/rkt/rkt fetch /opt/rkt/stage1-coreos.aci /opt/rkt/stage1-kvm.aci /opt/rkt/stage1-fly.aci --insecure-options=image
- name: kubernetes-download-manifests.service
command: start
content: |
[Unit]
Description=Download manifests
Requires=network-online.target
After=network-online.target
Requires=kube-env.service
After=kube-env.service
[Service]
Type=oneshot
RemainAfterExit=yes
EnvironmentFile=/etc/kube-env
ExecStartPre=/usr/bin/mkdir -p /opt/downloads
ExecStartPre=/usr/bin/curl --fail --silent --show-error --location --create-dirs --output \
/opt/downloads/kubernetes-manifests.tar.gz ${KUBE_MANIFESTS_TAR_URL}
# TODO(yifan): Check hash.
ExecStartPre=/usr/bin/mkdir -p /opt/kube-manifests
ExecStart=/usr/bin/tar xf /opt/downloads/kubernetes-manifests.tar.gz -C /opt/kube-manifests --overwrite
- name: kubernetes-install-node.service
command: start
content: |
[Unit]
Description=Install Kubernetes Server
Requires=network-online.target
After=network-online.target
Requires=kube-env.service
After=kube-env.service
[Service]
Type=oneshot
RemainAfterExit=yes
EnvironmentFile=/etc/kube-env
ExecStartPre=/usr/bin/mkdir -p /opt/kubernetes/pkg
ExecStartPre=/usr/bin/curl --fail --silent --show-error --location --create-dirs --output \
/opt/kubernetes/pkg/kubernetes-server-linux-amd64.tar.gz ${SERVER_BINARY_TAR_URL}
ExecStart=/usr/bin/tar xf /opt/kubernetes/pkg/kubernetes-server-linux-amd64.tar.gz -C /opt --overwrite
- name: rkt-api-service.service
command: start
content: |
[Unit]
Description=Start rkt API service as Daemon
Requires=kubernetes-install-rkt.service
After=kubernetes-install-rkt.service
[Service]
ExecStart=/opt/rkt/rkt api-service
Restart=always
RestartSec=10
- name: kubelet.service
command: start
content: |
[Unit]
Description=Run Kubelet service
Requires=network-online.target kube-env.service kubernetes-download-manifests.service kubernetes-install-cni.service kubernetes-load-rkt-stage1.service
After=network-online.target kube-env.service kubernetes-download-manifests.service kubernetes-install-cni.service kubernetes-load-rkt-stage1.service
[Service]
EnvironmentFile=/etc/kube-env
ExecStartPre=/usr/bin/curl --fail --silent --show-error \
-H "X-Google-Metadata-Request: True" \
-o /run/configure-kubelet.sh \
http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-kubelet
ExecStartPre=/usr/bin/chmod 0755 /run/configure-kubelet.sh
ExecStartPre=/run/configure-kubelet.sh
ExecStart=/opt/kubernetes/server/bin/kubelet \
--api-servers=https://${INSTANCE_PREFIX}-master \
--enable-debugging-handlers=true \
--cloud-provider=gce \
--config=/etc/kubernetes/manifests \
--allow-privileged=true \
--v=2 \
--cluster-dns=${DNS_SERVER_IP} \
--cluster-domain=${DNS_DOMAIN} \
--logtostderr=true \
--container-runtime=rkt \
--rkt-path=/opt/rkt/rkt \
--rkt-stage1-image=${RKT_STAGE1_IMAGE} \
--network-plugin=kubenet
Restart=always
RestartSec=10
KillMode=process
- name: kube-proxy.service
command: start
content: |
[Unit]
Description=Start Kube-proxy service as Daemon
Requires=kubernetes-configure-node.service
After=kubernetes-configure-node.service
[Service]
EnvironmentFile=/etc/kube-env
ExecStart=/opt/kubernetes/server/bin/kube-proxy \
--master=https://${KUBERNETES_MASTER_NAME} \
--kubeconfig=/var/lib/kube-proxy/kubeconfig \
--v=2 \
--logtostderr=true
Restart=always
RestartSec=10
- name: docker.service
command: stop
- name: kubernetes-configure-node.service
command: start
content: |
[Unit]
Description=Configure Node For Kubernetes service
Requires=kubernetes-install-node.service
After=kubernetes-install-node.service
Requires=kubernetes-install-rkt.service
After=kubernetes-install-rkt.service
Requires=kubernetes-download-manifests.service
After=kubernetes-download-manifests.service
[Service]
Type=oneshot
RemainAfterExit=yes
EnvironmentFile=/etc/kube-env
ExecStartPre=/usr/bin/curl --fail --silent --show-error \
-H "X-Google-Metadata-Request: True" \
-o /run/configure-node.sh \
http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-node
ExecStartPre=/usr/bin/chmod 0755 /run/configure-node.sh
ExecStart=/run/configure-node.sh


@@ -0,0 +1,56 @@
#cloud-config
coreos:
update:
reboot-strategy: off
units:
- name: locksmithd.service
mask: true
- name: kube-node-installation.service
command: start
content: |
[Unit]
Description=Download and install k8s binaries and configurations
After=network-online.target
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStartPre=/bin/mkdir -p /opt/kubernetes/bin
ExecStartPre=/usr/bin/curl --fail --retry 5 --retry-delay 3 --silent --show-error -H "X-Google-Metadata-Request: True" -o /opt/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh
ExecStartPre=/bin/chmod 544 /opt/kubernetes/bin/configure.sh
ExecStart=/opt/kubernetes/bin/configure.sh
[Install]
WantedBy=kubernetes.target
- name: kube-node-configuration.service
command: start
content: |
[Unit]
Description=Configure kubernetes node
After=kube-node-installation.service
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStartPre=/bin/chmod 544 /opt/kubernetes/bin/configure-helper.sh
ExecStart=/opt/kubernetes/bin/configure-helper.sh
[Install]
WantedBy=kubernetes.target
- name: kubernetes.target
enable: true
command: start
content: |
[Unit]
Description=Kubernetes
[Install]
WantedBy=multi-user.target
- name: docker.service
drop-ins:
- name: "use-cgroupfs-driver.conf"
# This is required for setting cgroup parent in the current ~1.4 per-pod cgroup impl
content: |
[Service]
Environment="DOCKER_CGROUPS=--exec-opt native.cgroupdriver="


@@ -202,7 +202,6 @@ function install-kube-binary-config {
rm -f "${KUBE_HOME}/${manifests_tar}.sha1"
}
######### Main Function ##########
echo "Start to install kubernetes files"
set-broken-motd
@@ -211,3 +210,4 @@ download-kube-env
source "${KUBE_HOME}/kube-env"
install-kube-binary-config
echo "Done for installing kubernetes files"


@@ -15,7 +15,6 @@
# limitations under the License.
# A library of helper functions and constant for GCI distro
source "${KUBE_ROOT}/cluster/gce/gci/helper.sh"
# create-master-instance creates the master instance. If called with
@@ -72,6 +71,12 @@ function replicate-master-instance() {
function create-master-instance-internal() {
local -r master_name="${1}"
local -r address_option="${2:-}"
local preemptible_master=""
if [[ "${PREEMPTIBLE_MASTER:-}" == "true" ]]; then
preemptible_master="--preemptible --maintenance-policy TERMINATE"
fi
gcloud compute instances create "${master_name}" \
${address_option} \
--project "${PROJECT}" \
@@ -86,7 +91,8 @@ function create-master-instance-internal() {
--metadata-from-file \
"kube-env=${KUBE_TEMP}/master-kube-env.yaml,user-data=${KUBE_ROOT}/cluster/gce/gci/master.yaml,configure-sh=${KUBE_ROOT}/cluster/gce/gci/configure.sh,cluster-name=${KUBE_TEMP}/cluster-name.txt,gci-update-strategy=${KUBE_TEMP}/gci-update.txt,gci-ensure-gke-docker=${KUBE_TEMP}/gci-ensure-gke-docker.txt,gci-docker-version=${KUBE_TEMP}/gci-docker-version.txt" \
--disk "name=${master_name}-pd,device-name=master-pd,mode=rw,boot=no,auto-delete=no" \
--boot-disk-size "${MASTER_ROOT_DISK_SIZE:-10}"
--boot-disk-size "${MASTER_ROOT_DISK_SIZE:-10}" \
${preemptible_master}
}
function get-metadata() {
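The gci variant above gains the same preemptible-master knob. Illustratively, it would be driven from the environment at kube-up time; the PREEMPTIBLE_MASTER name comes from the diff, while the invocation below is the usual GCE kube-up call and may differ in your checkout:

```bash
#!/bin/bash
# Bring up a GCE cluster whose master runs on a preemptible VM (sketch).
export KUBERNETES_PROVIDER=gce
export PREEMPTIBLE_MASTER=true
./cluster/kube-up.sh
```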


@@ -12,6 +12,9 @@ cluster/gce/configure-vm.sh: cloud_config: ${CLOUD_CONFIG}
cluster/gce/configure-vm.sh: env-to-grains "feature_gates"
cluster/gce/configure-vm.sh: env-to-grains "runtime_config"
cluster/gce/configure-vm.sh: kubelet_api_servers: '${KUBELET_APISERVER}'
cluster/gce/coreos/configure-helper.sh: local api_servers="--master=https://${KUBERNETES_MASTER_NAME}"
cluster/gce/coreos/configure-helper.sh: sed -i -e "s@{{ *storage_backend *}}@${STORAGE_BACKEND:-}@g" "${temp_file}"
cluster/gce/coreos/configure-helper.sh: sed -i -e "s@{{pillar\['allow_privileged'\]}}@true@g" "${src_file}"
cluster/gce/gci/configure-helper.sh: local api_servers="--master=https://${KUBERNETES_MASTER_NAME}"
cluster/gce/gci/configure-helper.sh: sed -i -e "s@{{ *storage_backend *}}@${STORAGE_BACKEND:-}@g" "${temp_file}"
cluster/gce/gci/configure-helper.sh: sed -i -e "s@{{pillar\['allow_privileged'\]}}@true@g" "${src_file}"