Merge pull request #4498 from lhuard1A/libvirt-coreos

Provide a way to create a multi-minion cluster on local VMs faster than with Vagrant.
Brendan Burns committed on 2015-02-26 14:42:05 -08:00
14 changed files with 828 additions and 3 deletions
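In practice, a cluster built with this provider goes through the usual cluster/ entry points (a minimal sketch, assuming a Linux host with libvirt and qemu installed and the defaults defined below):

  export KUBERNETES_PROVIDER=libvirt-coreos
  cluster/kube-up.sh     # boots the kubernetes_master domain plus NUM_MINIONS CoreOS minions
  cluster/kube-down.sh   # destroys the domains; the CoreOS base image is kept for reuse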

cluster/kube-env.sh

@@ -18,7 +18,7 @@
 # You can override the default provider by exporting the KUBERNETES_PROVIDER
 # variable in your bashrc
 #
-# The valid values: 'gce', 'gke', 'aws', 'azure', 'vagrant', 'vsphere'
+# The valid values: 'gce', 'gke', 'aws', 'azure', 'vagrant', 'vsphere', 'libvirt-coreos'
 KUBERNETES_PROVIDER=${KUBERNETES_PROVIDER:-gce}

cluster/kubectl.sh

@@ -120,6 +120,11 @@ elif [[ "$KUBERNETES_PROVIDER" == "vagrant" ]]; then
   config=(
     "--kubeconfig=$HOME/.kubernetes_vagrant_kubeconfig"
   )
+elif [[ "$KUBERNETES_PROVIDER" == "libvirt-coreos" ]]; then
+  detect-master > /dev/null
+  config=(
+    "--server=http://${KUBE_MASTER_IP}:8080"
+  )
 fi
 echo "current-context: \"$(${kubectl} "${config[@]:+${config[@]}}" config view -o template --template='{{index . "current-context"}}')\"" >&2

cluster/libvirt-coreos/.gitignore (new file)

@@ -0,0 +1,2 @@
/libvirt_storage_pool/
/coreos_production_qemu_image.img.bz2

cluster/libvirt-coreos/config-default.sh (new file)

@@ -0,0 +1,21 @@
#!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

## Contains configuration values for interacting with the libvirt CoreOS cluster

# Number of minions in the cluster
NUM_MINIONS=${NUM_MINIONS:-3}
export NUM_MINIONS
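For example, to bring up a five-minion cluster instead of the default three (a sketch using the override mechanism above):

  export NUM_MINIONS=5
  cluster/kube-up.sh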

cluster/libvirt-coreos/coreos.xml (new file)

@@ -0,0 +1,71 @@
<domain type='kvm'>
  <name>${name}</name>
  <memory unit='MiB'>512</memory>
  <currentMemory unit='MiB'>512</currentMemory>
  <vcpu placement='static'>2</vcpu>
  <os>
    <type arch='x86_64' machine='pc'>hvm</type>
    <boot dev='hd'/>
  </os>
  <features>
    <acpi/>
    <apic/>
    <pae/>
  </features>
  <clock offset='utc'/>
  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>restart</on_crash>
  <devices>
    <emulator>$(which qemu-system-$(uname -m))</emulator>
    <disk type='file' device='disk'>
      <driver name='qemu' type='qcow2'/>
      <source file='${POOL_PATH}/${image}'/>
      <target dev='vda' bus='virtio'/>
    </disk>
    <controller type='usb' index='0'>
    </controller>
    <filesystem type='mount' accessmode='squash'>
      <source dir='${POOL_PATH}/${config}'/>
      <target dir='config-2'/>
      <readonly/>
    </filesystem>
    <filesystem type='mount' accessmode='squash'>
      <source dir='${kubernetes_dir}'/>
      <target dir='kubernetes'/>
      <readonly/>
    </filesystem>
    <interface type='network'>
      <mac address='52:54:00:00:00:${i}'/>
      <source network='kubernetes_global'/>
      <model type='virtio'/>
    </interface>
    <interface type='network'>
      <mac address='52:54:00:00:01:${i}'/>
      <source network='kubernetes_pods'/>
      <model type='virtio'/>
    </interface>
    <serial type='pty'>
      <target port='0'/>
    </serial>
    <console type='pty'>
      <target type='serial' port='0'/>
    </console>
    <channel type='spicevmc'>
      <target type='virtio' name='com.redhat.spice.0'/>
    </channel>
    <input type='tablet' bus='usb'/>
    <input type='mouse' bus='ps2'/>
    <input type='keyboard' bus='ps2'/>
    <graphics type='spice' autoport='yes'/>
    <sound model='ich6'>
    </sound>
    <video>
      <model type='qxl' vram='9216' heads='1'/>
    </video>
    <redirdev bus='usb' type='spicevmc'>
    </redirdev>
    <memballoon model='virtio'>
    </memballoon>
  </devices>
</domain>

cluster/libvirt-coreos/network_kubernetes_global.xml (new file)

@@ -0,0 +1,11 @@
<network>
  <name>kubernetes_global</name>
  <forward mode='nat'>
    <nat>
      <port start='1024' end='65535'/>
    </nat>
  </forward>
  <bridge name='virbr_kub_gl' stp='off' delay='0'/>
  <ip address='192.168.10.254' netmask='255.255.255.0'>
  </ip>
</network>

cluster/libvirt-coreos/network_kubernetes_pods.xml (new file)

@@ -0,0 +1,6 @@
<network>
  <name>kubernetes_pods</name>
  <bridge name='virbr_kub_pods' stp='off' delay='0'/>
  <ip address='10.10.0.1' netmask='255.255.0.0'>
  </ip>
</network>
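Both networks are created as transient libvirt networks by util.sh (further down); once the cluster is up they can be inspected with stock virsh commands, for example:

  virsh --connect qemu:///system net-list
  virsh --connect qemu:///system net-dumpxml kubernetes_global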

cluster/libvirt-coreos/user_data.yml (new file)

@@ -0,0 +1,111 @@
#cloud-config

hostname: ${name}

ssh_authorized_keys:
${ssh_keys}

write_files:
  - path: /etc/systemd/journald.conf
    permissions: 0644
    content: |
      [Journal]
      SystemMaxUse=50M
      RuntimeMaxUse=50M

coreos:
  etcd:
    name: ${name}
    addr: 192.168.10.$(($i+1)):4001
    bind-addr: 0.0.0.0
    peer-addr: 192.168.10.$(($i+1)):7001
    # peers: {etcd_peers}
    discovery: ${discovery}
  units:
    - name: static.network
      command: start
      content: |
        [Match]
        # Name=eth0
        MACAddress=52:54:00:00:00:${i}

        [Network]
        Address=192.168.10.$(($i+1))/24
        DNS=192.168.10.254
        Gateway=192.168.10.254
    - name: cbr0.netdev
      command: start
      content: |
        [NetDev]
        Kind=bridge
        Name=cbr0
    - name: cbr0.network
      command: start
      content: |
        [Match]
        Name=cbr0

        [Network]
        Address=10.10.$(($i+1)).1/24

        [Route]
        Destination=10.10.0.0/16
    - name: cbr0-interface.network
      command: start
      content: |
        [Match]
        # Name=eth1
        MACAddress=52:54:00:00:01:${i}

        [Network]
        Bridge=cbr0
    - name: nat.service
      command: start
      content: |
        [Unit]
        Description=NAT non container traffic

        [Service]
        ExecStart=/usr/sbin/iptables -w -t nat -A POSTROUTING -o eth0 -j MASQUERADE ! -d 10.10.0.0/16
        RemainAfterExit=yes
        Type=oneshot
    - name: etcd.service
      command: start
    - name: docker.service
      command: start
      drop-ins:
        - name: 50-opts.conf
          content: |
            [Service]
            Environment=DOCKER_OPTS='--bridge=cbr0 --iptables=false'
    - name: docker-tcp.socket
      command: start
      enable: yes
      content: |
        [Unit]
        Description=Docker Socket for the API

        [Socket]
        ListenStream=2375
        BindIPv6Only=both
        Service=docker.service

        [Install]
        WantedBy=sockets.target
    - name: opt-kubernetes.mount
      command: start
      content: |
        [Unit]
        ConditionVirtualization=|vm

        [Mount]
        What=kubernetes
        Where=/opt/kubernetes
        Options=ro,trans=virtio,version=9p2000.L
        Type=9p
  update:
    group: ${COREOS_CHANNEL:-alpha}
    reboot-strategy: off
$( [[ ${type} =~ "master" ]] && render-template "$ROOT/user_data_master.yml" )
$( [[ ${type} =~ "minion" ]] && render-template "$ROOT/user_data_minion.yml" )

cluster/libvirt-coreos/user_data_master.yml (new file)

@@ -0,0 +1,63 @@
#cloud-config

coreos:
  units:
    - name: kube-apiserver.service
      command: start
      content: |
        [Unit]
        After=opt-kubernetes.mount etcd.service
        ConditionFileIsExecutable=/opt/kubernetes/bin/kube-apiserver
        Description=Kubernetes API Server
        Documentation=https://github.com/GoogleCloudPlatform/kubernetes
        Requires=opt-kubernetes.mount etcd.service

        [Service]
        ExecStart=/opt/kubernetes/bin/kube-apiserver \
            --address=0.0.0.0 \
            --port=8080 \
            --etcd_servers=http://127.0.0.1:4001 \
            --kubelet_port=10250 \
            --portal_net=10.10.254.0/24
        Restart=always
        RestartSec=2

        [Install]
        WantedBy=multi-user.target
    - name: kube-controller-manager.service
      command: start
      content: |
        [Unit]
        After=opt-kubernetes.mount kube-apiserver.service
        ConditionFileIsExecutable=/opt/kubernetes/bin/kube-controller-manager
        Description=Kubernetes Controller Manager
        Documentation=https://github.com/GoogleCloudPlatform/kubernetes
        Requires=opt-kubernetes.mount kube-apiserver.service

        [Service]
        ExecStart=/opt/kubernetes/bin/kube-controller-manager \
            --master=127.0.0.1:8080 \
            --machines=${machines}
        Restart=always
        RestartSec=2

        [Install]
        WantedBy=multi-user.target
    - name: kube-scheduler.service
      command: start
      content: |
        [Unit]
        After=opt-kubernetes.mount kube-apiserver.service
        ConditionFileIsExecutable=/opt/kubernetes/bin/kube-scheduler
        Description=Kubernetes Scheduler
        Documentation=https://github.com/GoogleCloudPlatform/kubernetes
        Requires=opt-kubernetes.mount kube-apiserver.service

        [Service]
        ExecStart=/opt/kubernetes/bin/kube-scheduler \
            --master=127.0.0.1:8080
        Restart=always
        RestartSec=2

        [Install]
        WantedBy=multi-user.target

cluster/libvirt-coreos/user_data_minion.yml (new file)

@@ -0,0 +1,43 @@
#cloud-config

coreos:
  units:
    - name: kubelet.service
      command: start
      content: |
        [Unit]
        After=opt-kubernetes.mount etcd.service docker.socket
        ConditionFileIsExecutable=/opt/kubernetes/bin/kubelet
        Description=Kubernetes Kubelet
        Documentation=https://github.com/GoogleCloudPlatform/kubernetes
        Requires=opt-kubernetes.mount etcd.service docker.socket

        [Service]
        ExecStart=/opt/kubernetes/bin/kubelet \
            --address=0.0.0.0 \
            --hostname_override=192.168.10.$(($i+1)) \
            --etcd_servers=http://127.0.0.1:4001
        Restart=always
        RestartSec=2

        [Install]
        WantedBy=multi-user.target
    - name: kube-proxy.service
      command: start
      content: |
        [Unit]
        After=opt-kubernetes.mount etcd.service
        ConditionFileIsExecutable=/opt/kubernetes/bin/kube-proxy
        Description=Kubernetes Proxy
        Documentation=https://github.com/GoogleCloudPlatform/kubernetes
        Requires=opt-kubernetes.mount etcd.service

        [Service]
        ExecStart=/opt/kubernetes/bin/kube-proxy \
            --etcd_servers=http://127.0.0.1:4001 \
            --master=http://192.168.10.1:7080
        Restart=always
        RestartSec=2

        [Install]
        WantedBy=multi-user.target

cluster/libvirt-coreos/util.sh (new file)

@@ -0,0 +1,256 @@
#!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions that each provider hosting Kubernetes must implement to use cluster/kube-*.sh scripts.
readonly KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
readonly ROOT=$(dirname "${BASH_SOURCE}")
source $ROOT/${KUBE_CONFIG_FILE:-"config-default.sh"}

export LIBVIRT_DEFAULT_URI=qemu:///system

readonly POOL=kubernetes
readonly POOL_PATH="$(cd $ROOT && pwd)/libvirt_storage_pool"

# join <delim> <list...>
# Concatenates the list elements with the delimiter passed as first parameter
#
# Ex: join , a b c
# -> a,b,c
function join {
  local IFS="$1"
  shift
  echo "$*"
}

# Must ensure that the following ENV vars are set
function detect-master {
  KUBE_MASTER_IP=192.168.10.1
  KUBE_MASTER=kubernetes-master
  export KUBERNETES_MASTER=http://$KUBE_MASTER_IP:8080
  echo "KUBE_MASTER_IP: $KUBE_MASTER_IP"
  echo "KUBE_MASTER: $KUBE_MASTER"
}

# Get minion IP addresses and store in KUBE_MINION_IP_ADDRESSES[]
function detect-minions {
  for (( i = 0 ; i < $NUM_MINIONS ; i++ )); do
    KUBE_MINION_IP_ADDRESSES[$i]=192.168.10.$(($i+2))
  done
  echo "KUBE_MINION_IP_ADDRESSES=[${KUBE_MINION_IP_ADDRESSES[@]}]"
}

# Verify prereqs on host machine
function verify-prereqs {
  if ! which virsh >/dev/null; then
    echo "Can't find virsh in PATH, please fix and retry." >&2
    exit 1
  fi
  if ! virsh nodeinfo >/dev/null; then
    exit 1
  fi
  if [[ "$(</sys/kernel/mm/ksm/run)" -ne "1" ]]; then
    echo "KSM is not enabled" >&2
    echo "Enabling it would reduce the memory footprint of large clusters" >&2
    if [[ -t 0 ]]; then
      read -t 5 -n 1 -p "Do you want to enable KSM (requires root password) (y/n)? " answer
      echo ""
      if [[ "$answer" == 'y' ]]; then
        su -c 'echo 1 > /sys/kernel/mm/ksm/run'
      fi
    else
      echo "You can enable it with (as root):" >&2
      echo "" >&2
      echo "  echo 1 > /sys/kernel/mm/ksm/run" >&2
      echo "" >&2
    fi
  fi
}

# Destroy the libvirt storage pool and all the images inside
#
# If 'keep_base_image' is passed as first parameter,
# the base image is kept, as well as the storage pool.
# All the other images are deleted.
function destroy-pool {
  virsh pool-info $POOL >/dev/null 2>&1 || return

  rm -rf "$POOL_PATH"/kubernetes/*
  rm -rf "$POOL_PATH"/kubernetes_config*/*

  local vol
  virsh vol-list $POOL | awk 'NR>2 && !/^$/ && $1 ~ /^kubernetes/ {print $1}' | \
      while read vol; do
        virsh vol-delete $vol --pool $POOL
      done

  [[ "$1" == 'keep_base_image' ]] && return

  set +e
  virsh vol-delete coreos_base.img --pool $POOL
  virsh pool-destroy $POOL
  rmdir "$POOL_PATH"
  set -e
}

# Creates the libvirt storage pool and populate it with
# - the CoreOS base image
# - the kubernetes binaries
function initialize-pool {
  mkdir -p "$POOL_PATH"
  if ! virsh pool-info $POOL >/dev/null 2>&1; then
    virsh pool-create-as $POOL dir --target "$POOL_PATH"
  fi

  wget -N -P "$ROOT" http://${COREOS_CHANNEL:-alpha}.release.core-os.net/amd64-usr/current/coreos_production_qemu_image.img.bz2
  if [ "$ROOT/coreos_production_qemu_image.img.bz2" -nt "$POOL_PATH/coreos_base.img" ]; then
    bunzip2 -f -k "$ROOT/coreos_production_qemu_image.img.bz2"
    virsh vol-delete coreos_base.img --pool $POOL 2> /dev/null || true
    mv "$ROOT/coreos_production_qemu_image.img" "$POOL_PATH/coreos_base.img"
  fi

  # if ! virsh vol-list $POOL | grep -q coreos_base.img; then
  #   virsh vol-create-as $POOL coreos_base.img 10G --format qcow2
  #   virsh vol-upload coreos_base.img "$ROOT/coreos_production_qemu_image.img" --pool $POOL
  # fi

  mkdir -p "$POOL_PATH/kubernetes"
  kube-push

  virsh pool-refresh $POOL
}

function destroy-network {
  set +e
  virsh net-destroy kubernetes_global
  virsh net-destroy kubernetes_pods
  set -e
}

function initialize-network {
  virsh net-create "$ROOT/network_kubernetes_global.xml"
  virsh net-create "$ROOT/network_kubernetes_pods.xml"
}
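
# Expand the template given as first parameter and print the result:
# every $var, ${var} and $(command) occurrence in the template file is
# substituted by this shell. This is how ${name}, ${i}, etc. get filled
# in coreos.xml and user_data.yml.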
function render-template {
  eval "echo \"$(cat $1)\""
}
# Instantiate a kubernetes cluster
function kube-up {
  detect-master
  detect-minions
  initialize-pool keep_base_image
  initialize-network

  readonly ssh_keys="$(cat ~/.ssh/id_*.pub | sed 's/^/ - /')"
  readonly kubernetes_dir="$POOL_PATH/kubernetes"
  readonly discovery=$(curl -s https://discovery.etcd.io/new)
  readonly machines=$(join , "${KUBE_MINION_IP_ADDRESSES[@]}")

  local i
  for (( i = 0 ; i <= $NUM_MINIONS ; i++ )); do
    if [[ $i -eq 0 ]]; then
      type=master
    else
      type=minion-$(printf "%02d" $i)
    fi
    name=kubernetes_$type
    image=$name.img
    config=kubernetes_config_$type

    virsh vol-create-as $POOL $image 10G --format qcow2 --backing-vol coreos_base.img --backing-vol-format qcow2

    mkdir -p "$POOL_PATH/$config/openstack/latest"
    render-template "$ROOT/user_data.yml" > "$POOL_PATH/$config/openstack/latest/user_data"
    virsh pool-refresh $POOL

    domain_xml=$(mktemp)
    render-template $ROOT/coreos.xml > $domain_xml
    virsh create $domain_xml
    rm $domain_xml
  done
}

# Delete a kubernetes cluster
function kube-down {
  virsh list | awk 'NR>2 && !/^$/ && $2 ~ /^kubernetes/ {print $2}' | \
      while read dom; do
        virsh destroy $dom
      done
  destroy-pool keep_base_image
  destroy-network
}

function find-release-tars {
  SERVER_BINARY_TAR="${KUBE_ROOT}/server/kubernetes-server-linux-amd64.tar.gz"
  if [[ ! -f "$SERVER_BINARY_TAR" ]]; then
    SERVER_BINARY_TAR="${KUBE_ROOT}/_output/release-tars/kubernetes-server-linux-amd64.tar.gz"
  fi
  if [[ ! -f "$SERVER_BINARY_TAR" ]]; then
    echo "!!! Cannot find kubernetes-server-linux-amd64.tar.gz"
    exit 1
  fi
}

# The kubernetes binaries are pushed to a host directory which is exposed to the VM
function upload-server-tars {
  tar -x -C "$POOL_PATH/kubernetes" -f "$SERVER_BINARY_TAR" kubernetes
  rm -rf "$POOL_PATH/kubernetes/bin"
  mv "$POOL_PATH/kubernetes/kubernetes/server/bin" "$POOL_PATH/kubernetes/bin"
  rmdir "$POOL_PATH/kubernetes/kubernetes/server" "$POOL_PATH/kubernetes/kubernetes"
}

# Update a kubernetes cluster with latest source
function kube-push {
  find-release-tars
  upload-server-tars
}

# Execute prior to running tests to build a release if required for env
function test-build-release {
  echo "TODO"
}

# Execute prior to running tests to initialize required structure
function test-setup {
  echo "TODO"
}

# Execute after running tests to perform any required clean-up
function test-teardown {
  echo "TODO"
}

# Set the {KUBE_USER} and {KUBE_PASSWORD} environment values required to interact with provider
function get-password {
  export KUBE_USER=core
  echo "TODO get-password"
}

function setup-monitoring-firewall {
  echo "TODO" 1>&2
}

function teardown-monitoring-firewall {
  echo "TODO" 1>&2
}

function setup-logging-firewall {
  echo "TODO: setup logging"
}

function teardown-logging-firewall {
  echo "TODO: teardown logging"
}

cluster/validate-cluster.sh

@@ -55,7 +55,7 @@ echo "Found ${found} nodes."
 cat -n "${MINIONS_FILE}"

 # On vSphere, use minion IPs as their names
-if [[ "${KUBERNETES_PROVIDER}" == "vsphere" ]] || [[ "${KUBERNETES_PROVIDER}" == "vagrant" ]]; then
+if [[ "${KUBERNETES_PROVIDER}" == "vsphere" ]] || [[ "${KUBERNETES_PROVIDER}" == "vagrant" ]] || [[ "${KUBERNETES_PROVIDER}" == "libvirt-coreos" ]]; then
   MINION_NAMES=("${KUBE_MINION_IP_ADDRESSES[@]}")
 fi
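
Once kube-up completes, a quick smoke test can go through the standard wrapper, which supplies the --server flag for this provider as wired into kubectl.sh above (a minimal sketch):

  cluster/kubectl.sh get minions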