diff --git a/cluster/kube-env.sh b/cluster/kube-env.sh
index d719476c465..89be7dc8480 100644
--- a/cluster/kube-env.sh
+++ b/cluster/kube-env.sh
@@ -18,7 +18,7 @@
# You can override the default provider by exporting the KUBERNETES_PROVIDER
# variable in your bashrc
#
-# The valid values: 'gce', 'gke', 'aws', 'azure', 'vagrant', 'vsphere'
+# The valid values: 'gce', 'gke', 'aws', 'azure', 'vagrant', 'vsphere', 'libvirt-coreos'
KUBERNETES_PROVIDER=${KUBERNETES_PROVIDER:-gce}
diff --git a/cluster/kubectl.sh b/cluster/kubectl.sh
index 74a6f364187..4527a57b3b8 100755
--- a/cluster/kubectl.sh
+++ b/cluster/kubectl.sh
@@ -120,6 +120,11 @@ elif [[ "$KUBERNETES_PROVIDER" == "vagrant" ]]; then
config=(
"--kubeconfig=$HOME/.kubernetes_vagrant_kubeconfig"
)
+elif [[ "$KUBERNETES_PROVIDER" == "libvirt-coreos" ]]; then
+ detect-master > /dev/null
+ config=(
+ "--server=http://${KUBE_MASTER_IP}:8080"
+ )
fi
echo "current-context: \"$(${kubectl} "${config[@]:+${config[@]}}" config view -o template --template='{{index . "current-context"}}')\"" >&2
diff --git a/cluster/libvirt-coreos/.gitignore b/cluster/libvirt-coreos/.gitignore
new file mode 100644
index 00000000000..f26d8e4020a
--- /dev/null
+++ b/cluster/libvirt-coreos/.gitignore
@@ -0,0 +1,2 @@
+/libvirt_storage_pool/
+/coreos_production_qemu_image.img.bz2
diff --git a/cluster/libvirt-coreos/config-default.sh b/cluster/libvirt-coreos/config-default.sh
new file mode 100644
index 00000000000..cce5c06c689
--- /dev/null
+++ b/cluster/libvirt-coreos/config-default.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+## Contains configuration values for interacting with the libvirt CoreOS cluster
+
+# Number of minions in the cluster
+NUM_MINIONS=${NUM_MINIONS:-3}
+export NUM_MINIONS
diff --git a/cluster/libvirt-coreos/coreos.xml b/cluster/libvirt-coreos/coreos.xml
new file mode 100644
index 00000000000..041f3a01510
--- /dev/null
+++ b/cluster/libvirt-coreos/coreos.xml
@@ -0,0 +1,71 @@
+
+ ${name}
+ 512
+ 512
+ 2
+
+ hvm
+
+
+
+
+
+
+
+
+ destroy
+ restart
+ restart
+
+ $(which qemu-system-$(uname -m))
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/cluster/libvirt-coreos/network_kubernetes_global.xml b/cluster/libvirt-coreos/network_kubernetes_global.xml
new file mode 100644
index 00000000000..b22cb262fc0
--- /dev/null
+++ b/cluster/libvirt-coreos/network_kubernetes_global.xml
@@ -0,0 +1,11 @@
+
+ kubernetes_global
+
+
+
+
+
+
+
+
+
diff --git a/cluster/libvirt-coreos/network_kubernetes_pods.xml b/cluster/libvirt-coreos/network_kubernetes_pods.xml
new file mode 100644
index 00000000000..ed95113da34
--- /dev/null
+++ b/cluster/libvirt-coreos/network_kubernetes_pods.xml
@@ -0,0 +1,6 @@
+
+ kubernetes_pods
+
+
+
+
diff --git a/cluster/libvirt-coreos/user_data.yml b/cluster/libvirt-coreos/user_data.yml
new file mode 100644
index 00000000000..16d960dbfbe
--- /dev/null
+++ b/cluster/libvirt-coreos/user_data.yml
@@ -0,0 +1,111 @@
+#cloud-config
+
+hostname: ${name}
+
+ssh_authorized_keys:
+${ssh_keys}
+
+write_files:
+ - path: /etc/systemd/journald.conf
+ permissions: 0644
+ content: |
+ [Journal]
+ SystemMaxUse=50M
+ RuntimeMaxUse=50M
+
+coreos:
+ etcd:
+ name: ${name}
+ addr: 192.168.10.$(($i+1)):4001
+ bind-addr: 0.0.0.0
+ peer-addr: 192.168.10.$(($i+1)):7001
+ # peers: {etcd_peers}
+ discovery: ${discovery}
+ units:
+ - name: static.network
+ command: start
+ content: |
+ [Match]
+ # Name=eth0
+ MACAddress=52:54:00:00:00:${i}
+
+ [Network]
+ Address=192.168.10.$(($i+1))/24
+ DNS=192.168.10.254
+ Gateway=192.168.10.254
+ - name: cbr0.netdev
+ command: start
+ content: |
+ [NetDev]
+ Kind=bridge
+ Name=cbr0
+ - name: cbr0.network
+ command: start
+ content: |
+ [Match]
+ Name=cbr0
+
+ [Network]
+ Address=10.10.$(($i+1)).1/24
+
+ [Route]
+ Destination=10.10.0.0/16
+ - name: cbr0-interface.network
+ command: start
+ content: |
+ [Match]
+ # Name=eth1
+ MACAddress=52:54:00:00:01:${i}
+
+ [Network]
+ Bridge=cbr0
+ - name: nat.service
+ command: start
+ content: |
+ [Unit]
+ Description=NAT non container traffic
+
+ [Service]
+ ExecStart=/usr/sbin/iptables -w -t nat -A POSTROUTING -o eth0 -j MASQUERADE ! -d 10.10.0.0/16
+ RemainAfterExit=yes
+ Type=oneshot
+ - name: etcd.service
+ command: start
+ - name: docker.service
+ command: start
+ drop-ins:
+ - name: 50-opts.conf
+ content: |
+ [Service]
+ Environment=DOCKER_OPTS='--bridge=cbr0 --iptables=false'
+ - name: docker-tcp.socket
+ command: start
+ enable: yes
+ content: |
+ [Unit]
+ Description=Docker Socket for the API
+
+ [Socket]
+ ListenStream=2375
+ BindIPv6Only=both
+ Service=docker.service
+
+ [Install]
+ WantedBy=sockets.target
+ - name: opt-kubernetes.mount
+ command: start
+ content: |
+ [Unit]
+ ConditionVirtualization=|vm
+
+ [Mount]
+ What=kubernetes
+ Where=/opt/kubernetes
+ Options=ro,trans=virtio,version=9p2000.L
+ Type=9p
+ update:
+ group: ${COREOS_CHANNEL:-alpha}
+ reboot-strategy: off
+
+$( [[ ${type} =~ "master" ]] && render-template "$ROOT/user_data_master.yml" )
+$( [[ ${type} =~ "minion" ]] && render-template "$ROOT/user_data_minion.yml" )
diff --git a/cluster/libvirt-coreos/user_data_master.yml b/cluster/libvirt-coreos/user_data_master.yml
new file mode 100644
index 00000000000..5f1b444198e
--- /dev/null
+++ b/cluster/libvirt-coreos/user_data_master.yml
@@ -0,0 +1,63 @@
+#cloud-config
+
+coreos:
+ units:
+ - name: kube-apiserver.service
+ command: start
+ content: |
+ [Unit]
+ After=opt-kubernetes.mount etcd.service
+ ConditionFileIsExecutable=/opt/kubernetes/bin/kube-apiserver
+ Description=Kubernetes API Server
+ Documentation=https://github.com/GoogleCloudPlatform/kubernetes
+ Requires=opt-kubernetes.mount etcd.service
+
+ [Service]
+ ExecStart=/opt/kubernetes/bin/kube-apiserver \
+ --address=0.0.0.0 \
+ --port=8080 \
+ --etcd_servers=http://127.0.0.1:4001 \
+ --kubelet_port=10250 \
+ --portal_net=10.10.254.0/24
+ Restart=always
+ RestartSec=2
+
+ [Install]
+ WantedBy=multi-user.target
+ - name: kube-controller-manager.service
+ command: start
+ content: |
+ [Unit]
+ After=opt-kubernetes.mount kube-apiserver.service
+ ConditionFileIsExecutable=/opt/kubernetes/bin/kube-controller-manager
+ Description=Kubernetes Controller Manager
+ Documentation=https://github.com/GoogleCloudPlatform/kubernetes
+ Requires=opt-kubernetes.mount kube-apiserver.service
+
+ [Service]
+ ExecStart=/opt/kubernetes/bin/kube-controller-manager \
+ --master=127.0.0.1:8080 \
+ --machines=${machines}
+ Restart=always
+ RestartSec=2
+
+ [Install]
+ WantedBy=multi-user.target
+ - name: kube-scheduler.service
+ command: start
+ content: |
+ [Unit]
+ After=opt-kubernetes.mount kube-apiserver.service
+ ConditionFileIsExecutable=/opt/kubernetes/bin/kube-scheduler
+ Description=Kubernetes Scheduler
+ Documentation=https://github.com/GoogleCloudPlatform/kubernetes
+ Requires=opt-kubernetes.mount kube-apiserver.service
+
+ [Service]
+ ExecStart=/opt/kubernetes/bin/kube-scheduler \
+ --master=127.0.0.1:8080
+ Restart=always
+ RestartSec=2
+
+ [Install]
+ WantedBy=multi-user.target
diff --git a/cluster/libvirt-coreos/user_data_minion.yml b/cluster/libvirt-coreos/user_data_minion.yml
new file mode 100644
index 00000000000..139bba85134
--- /dev/null
+++ b/cluster/libvirt-coreos/user_data_minion.yml
@@ -0,0 +1,43 @@
+#cloud-config
+
+coreos:
+ units:
+ - name: kubelet.service
+ command: start
+ content: |
+ [Unit]
+ After=opt-kubernetes.mount etcd.service docker.socket
+ ConditionFileIsExecutable=/opt/kubernetes/bin/kubelet
+ Description=Kubernetes Kubelet
+ Documentation=https://github.com/GoogleCloudPlatform/kubernetes
+ Requires=opt-kubernetes.mount etcd.service docker.socket
+
+ [Service]
+ ExecStart=/opt/kubernetes/bin/kubelet \
+ --address=0.0.0.0 \
+ --hostname_override=192.168.10.$(($i+1)) \
+ --etcd_servers=http://127.0.0.1:4001
+ Restart=always
+ RestartSec=2
+
+ [Install]
+ WantedBy=multi-user.target
+ - name: kube-proxy.service
+ command: start
+ content: |
+ [Unit]
+ After=opt-kubernetes.mount etcd.service
+ ConditionFileIsExecutable=/opt/kubernetes/bin/kube-proxy
+ Description=Kubernetes Proxy
+ Documentation=https://github.com/GoogleCloudPlatform/kubernetes
+ Requires=opt-kubernetes.mount etcd.service
+
+ [Service]
+ ExecStart=/opt/kubernetes/bin/kube-proxy \
+ --etcd_servers=http://127.0.0.1:4001 \
+ --master=http://192.168.10.1:7080
+ Restart=always
+ RestartSec=2
+
+ [Install]
+ WantedBy=multi-user.target
diff --git a/cluster/libvirt-coreos/util.sh b/cluster/libvirt-coreos/util.sh
new file mode 100644
index 00000000000..4c4a6f12a5f
--- /dev/null
+++ b/cluster/libvirt-coreos/util.sh
@@ -0,0 +1,256 @@
+#!/bin/bash
+
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# A library of helper functions that each provider hosting Kubernetes must implement to use cluster/kube-*.sh scripts.
+
+readonly KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
+readonly ROOT=$(dirname "${BASH_SOURCE}")
+source $ROOT/${KUBE_CONFIG_FILE:-"config-default.sh"}
+
+export LIBVIRT_DEFAULT_URI=qemu:///system
+
+readonly POOL=kubernetes
+readonly POOL_PATH="$(cd $ROOT && pwd)/libvirt_storage_pool"
+
+# join
+# Concatenates the list elements with the delimiter passed as first parameter
+#
+# Ex: join , a b c
+# -> a,b,c
+function join {
+ local IFS="$1"
+ shift
+ echo "$*"
+}
+
+# Must ensure that the following ENV vars are set
+function detect-master {
+ KUBE_MASTER_IP=192.168.10.1
+ KUBE_MASTER=kubernetes-master
+ export KUBERNETES_MASTER=http://$KUBE_MASTER_IP:8080
+ echo "KUBE_MASTER_IP: $KUBE_MASTER_IP"
+ echo "KUBE_MASTER: $KUBE_MASTER"
+}
+
+# Get minion IP addresses and store in KUBE_MINION_IP_ADDRESSES[]
+function detect-minions {
+ for (( i = 0 ; i < $NUM_MINIONS ; i++ )); do
+ KUBE_MINION_IP_ADDRESSES[$i]=192.168.10.$(($i+2))
+ done
+ echo "KUBE_MINION_IP_ADDRESSES=[${KUBE_MINION_IP_ADDRESSES[@]}]"
+}
+
+# Verify prereqs on host machine
+function verify-prereqs {
+ if ! which virsh >/dev/null; then
+ echo "Can't find virsh in PATH, please fix and retry." >&2
+ exit 1
+ fi
+ if ! virsh nodeinfo >/dev/null; then
+ exit 1
+ fi
+  if [[ "$(</sys/kernel/mm/ksm/run)" -ne "1" ]]; then
+      echo "KSM is not enabled" >&2
+ echo "Enabling it would reduce the memory footprint of large clusters" >&2
+ if [[ -t 0 ]]; then
+ read -t 5 -n 1 -p "Do you want to enable KSM (requires root password) (y/n)? " answer
+ echo ""
+ if [[ "$answer" == 'y' ]]; then
+ su -c 'echo 1 > /sys/kernel/mm/ksm/run'
+ fi
+ else
+ echo "You can enable it with (as root):" >&2
+ echo "" >&2
+ echo " echo 1 > /sys/kernel/mm/ksm/run" >&2
+ echo "" >&2
+ fi
+ fi
+}
+
+# Destroy the libvirt storage pool and all the images inside
+#
+# If 'keep_base_image' is passed as first parameter,
+# the base image is kept, as well as the storage pool.
+# All the other images are deleted.
+function destroy-pool {
+ virsh pool-info $POOL >/dev/null 2>&1 || return
+
+ rm -rf "$POOL_PATH"/kubernetes/*
+ rm -rf "$POOL_PATH"/kubernetes_config*/*
+ local vol
+ virsh vol-list $POOL | awk 'NR>2 && !/^$/ && $1 ~ /^kubernetes/ {print $1}' | \
+ while read vol; do
+ virsh vol-delete $vol --pool $POOL
+ done
+
+ [[ "$1" == 'keep_base_image' ]] && return
+
+ set +e
+ virsh vol-delete coreos_base.img --pool $POOL
+ virsh pool-destroy $POOL
+ rmdir "$POOL_PATH"
+ set -e
+}
+
+# Creates the libvirt storage pool and populate it with
+# - the CoreOS base image
+# - the kubernetes binaries
+function initialize-pool {
+ mkdir -p "$POOL_PATH"
+ if ! virsh pool-info $POOL >/dev/null 2>&1; then
+ virsh pool-create-as $POOL dir --target "$POOL_PATH"
+ fi
+
+ wget -N -P "$ROOT" http://${COREOS_CHANNEL:-alpha}.release.core-os.net/amd64-usr/current/coreos_production_qemu_image.img.bz2
+ if [ "$ROOT/coreos_production_qemu_image.img.bz2" -nt "$POOL_PATH/coreos_base.img" ]; then
+ bunzip2 -f -k "$ROOT/coreos_production_qemu_image.img.bz2"
+ virsh vol-delete coreos_base.img --pool $POOL 2> /dev/null || true
+ mv "$ROOT/coreos_production_qemu_image.img" "$POOL_PATH/coreos_base.img"
+ fi
+ # if ! virsh vol-list $POOL | grep -q coreos_base.img; then
+ # virsh vol-create-as $POOL coreos_base.img 10G --format qcow2
+ # virsh vol-upload coreos_base.img "$ROOT/coreos_production_qemu_image.img" --pool $POOL
+ # fi
+
+ mkdir -p "$POOL_PATH/kubernetes"
+ kube-push
+ virsh pool-refresh $POOL
+}
+
+function destroy-network {
+ set +e
+ virsh net-destroy kubernetes_global
+ virsh net-destroy kubernetes_pods
+ set -e
+}
+
+function initialize-network {
+ virsh net-create "$ROOT/network_kubernetes_global.xml"
+ virsh net-create "$ROOT/network_kubernetes_pods.xml"
+}
+
+function render-template {
+ eval "echo \"$(cat $1)\""
+}
+
+# Instantiate a kubernetes cluster
+function kube-up {
+ detect-master
+ detect-minions
+ initialize-pool keep_base_image
+ initialize-network
+
+ readonly ssh_keys="$(cat ~/.ssh/id_*.pub | sed 's/^/ - /')"
+ readonly kubernetes_dir="$POOL_PATH/kubernetes"
+ readonly discovery=$(curl -s https://discovery.etcd.io/new)
+
+ readonly machines=$(join , "${KUBE_MINION_IP_ADDRESSES[@]}")
+
+ local i
+ for (( i = 0 ; i <= $NUM_MINIONS ; i++ )); do
+ if [[ $i -eq 0 ]]; then
+ type=master
+ else
+ type=minion-$(printf "%02d" $i)
+ fi
+ name=kubernetes_$type
+ image=$name.img
+ config=kubernetes_config_$type
+
+ virsh vol-create-as $POOL $image 10G --format qcow2 --backing-vol coreos_base.img --backing-vol-format qcow2
+
+ mkdir -p "$POOL_PATH/$config/openstack/latest"
+ render-template "$ROOT/user_data.yml" > "$POOL_PATH/$config/openstack/latest/user_data"
+ virsh pool-refresh $POOL
+
+ domain_xml=$(mktemp)
+ render-template $ROOT/coreos.xml > $domain_xml
+ virsh create $domain_xml
+ rm $domain_xml
+ done
+}
+
+# Delete a kubernetes cluster
+function kube-down {
+ virsh list | awk 'NR>2 && !/^$/ && $2 ~ /^kubernetes/ {print $2}' | \
+ while read dom; do
+ virsh destroy $dom
+ done
+ destroy-pool keep_base_image
+ destroy-network
+}
+
+function find-release-tars {
+ SERVER_BINARY_TAR="${KUBE_ROOT}/server/kubernetes-server-linux-amd64.tar.gz"
+ if [[ ! -f "$SERVER_BINARY_TAR" ]]; then
+ SERVER_BINARY_TAR="${KUBE_ROOT}/_output/release-tars/kubernetes-server-linux-amd64.tar.gz"
+ fi
+ if [[ ! -f "$SERVER_BINARY_TAR" ]]; then
+ echo "!!! Cannot find kubernetes-server-linux-amd64.tar.gz"
+ exit 1
+ fi
+}
+
+# The kubernetes binaries are pushed to a host directory which is exposed to the VM
+function upload-server-tars {
+ tar -x -C "$POOL_PATH/kubernetes" -f "$SERVER_BINARY_TAR" kubernetes
+ rm -rf "$POOL_PATH/kubernetes/bin"
+ mv "$POOL_PATH/kubernetes/kubernetes/server/bin" "$POOL_PATH/kubernetes/bin"
+ rmdir "$POOL_PATH/kubernetes/kubernetes/server" "$POOL_PATH/kubernetes/kubernetes"
+}
+
+# Update a kubernetes cluster with latest source
+function kube-push {
+ find-release-tars
+ upload-server-tars
+}
+
+# Execute prior to running tests to build a release if required for env
+function test-build-release {
+ echo "TODO"
+}
+
+# Execute prior to running tests to initialize required structure
+function test-setup {
+ echo "TODO"
+}
+
+# Execute after running tests to perform any required clean-up
+function test-teardown {
+ echo "TODO"
+}
+
+# Set the {KUBE_USER} and {KUBE_PASSWORD} environment values required to interact with provider
+function get-password {
+ export KUBE_USER=core
+ echo "TODO get-password"
+}
+
+function setup-monitoring-firewall {
+ echo "TODO" 1>&2
+}
+
+function teardown-monitoring-firewall {
+ echo "TODO" 1>&2
+}
+
+function setup-logging-firewall {
+ echo "TODO: setup logging"
+}
+
+function teardown-logging-firewall {
+ echo "TODO: teardown logging"
+}
diff --git a/cluster/validate-cluster.sh b/cluster/validate-cluster.sh
index bc45b77d025..e9dee7a48f6 100755
--- a/cluster/validate-cluster.sh
+++ b/cluster/validate-cluster.sh
@@ -55,7 +55,7 @@ echo "Found ${found} nodes."
cat -n "${MINIONS_FILE}"
# On vSphere, use minion IPs as their names
-if [[ "${KUBERNETES_PROVIDER}" == "vsphere" ]] || [[ "${KUBERNETES_PROVIDER}" == "vagrant" ]]; then
+if [[ "${KUBERNETES_PROVIDER}" == "vsphere" ]] || [[ "${KUBERNETES_PROVIDER}" == "vagrant" ]] || [[ "${KUBERNETES_PROVIDER}" == "libvirt-coreos" ]]; then
MINION_NAMES=("${KUBE_MINION_IP_ADDRESSES[@]}")
fi
diff --git a/docs/getting-started-guides/README.md b/docs/getting-started-guides/README.md
index aae2fe5b480..e5b1a4d151a 100644
--- a/docs/getting-started-guides/README.md
+++ b/docs/getting-started-guides/README.md
@@ -17,7 +17,8 @@ Bare-metal | custom | Ubuntu | [docs](../../docs/getting-started-guide
Local | | | [docs](../../docs/getting-started-guides/locally.md) | Inactive |
Ovirt | | | [docs](../../docs/getting-started-guides/ovirt.md) | Inactive |
Rackspace | CoreOS | CoreOS | [docs](../../docs/getting-started-guides/rackspace.md) | Inactive |
-Bare-metal | custom | CentOS | [docs](../../docs/getting-started-guides/centos/centos_manual_config.md) | Community(@coolsvap) | Uses K8s v0.9.1
+Bare-metal | custom | CentOS | [docs](../../docs/getting-started-guides/centos/centos_manual_config.md) | Community(@coolsvap) | Uses K8s v0.9.1
+libvirt/KVM | CoreOS | CoreOS | [docs](../../docs/getting-started-guides/libvirt-coreos.md) | Community (@lhuard1A) |
Definition of columns:
- **IaaS Provider** is who/what provides the virtual or physical machines (nodes) that Kubernetes runs on.
- **OS** is the base operating system of the nodes.
diff --git a/docs/getting-started-guides/libvirt-coreos.md b/docs/getting-started-guides/libvirt-coreos.md
new file mode 100644
index 00000000000..8effbff7bdd
--- /dev/null
+++ b/docs/getting-started-guides/libvirt-coreos.md
@@ -0,0 +1,235 @@
+## Getting started with libvirt CoreOS
+
+### Highlights
+
+* Super-fast cluster boot-up (few seconds instead of several minutes for vagrant)
+* Reduced disk usage thanks to [COW](https://en.wikibooks.org/wiki/QEMU/Images#Copy_on_write)
+* Reduced memory footprint thanks to [KSM](https://www.kernel.org/doc/Documentation/vm/ksm.txt)
+
+### Prerequisites
+
+1. Install [qemu](http://wiki.qemu.org/Main_Page)
+2. Install [libvirt](http://libvirt.org/)
+3. [Grant libvirt access to your user¹](https://libvirt.org/aclpolkit.html)
+4. Check that your $HOME is accessible to the qemu user²
+
+#### ¹ Depending on your distribution, libvirt access may be denied by default or may require a password at each access.
+
+You can test it with the following command:
+```
+virsh -c qemu:///system pool-list
+```
+
+If you have access error messages, please read https://libvirt.org/acl.html and https://libvirt.org/aclpolkit.html .
+
+In short, if your libvirt has been compiled with Polkit support (ex: Arch, Fedora 21), you can create `/etc/polkit-1/rules.d/50-org.libvirt.unix.manage.rules` with the following content to grant full access to libvirt to `$USER`
+
+```
+polkit.addRule(function(action, subject) {
+  if (action.id == "org.libvirt.unix.manage" &&
+      subject.user == "$USER") {
+    polkit.log("action=" + action);
+    polkit.log("subject=" + subject);
+    return polkit.Result.YES;
+  }
+});
+```
+
+(Replace `$USER` with your login name)
+
+If your libvirt has not been compiled with Polkit (ex: Ubuntu 14.04.1 LTS), check the permissions on the libvirt unix socket:
+
+```
+ls -l /var/run/libvirt/libvirt-sock
+srwxrwx--- 1 root libvirtd 0 févr. 12 16:03 /var/run/libvirt/libvirt-sock
+
+usermod -a -G libvirtd $USER
+# $USER needs to logout/login to have the new group be taken into account
+```
+
+(Replace `$USER` with your login name)
+
+#### ² Qemu will run with a specific user. It must have access to the VMs drives
+
+All the disk drive resources needed by the VM (CoreOS disk image, kubernetes binaries, cloud-init files, etc.) are put inside `./cluster/libvirt-coreos/libvirt_storage_pool`.
+
+As we’re using the `qemu:///system` instance of libvirt, qemu will run with a specific `user:group` distinct from your user. It is configured in `/etc/libvirt/qemu.conf`. That qemu user must have access to that libvirt storage pool.
+
+If your `$HOME` is world readable, everything is fine. If your $HOME is private, `cluster/kube-up.sh` will fail with an error message like:
+
+```
+error: Cannot access storage file '$HOME/.../kubernetes/cluster/libvirt-coreos/libvirt_storage_pool/kubernetes_master.img' (as uid:99, gid:78): Permission denied
+```
+
+In order to fix that issue, you have several possibilities:
+* set `POOL_PATH` inside `cluster/libvirt-coreos/config-default.sh` to a directory:
+ * backed by a filesystem with a lot of free disk space
+ * writable by your user;
+ * accessible by the qemu user.
+* Grant the qemu user access to the storage pool.
+
+On Arch:
+
+```
+setfacl -m g:kvm:--x ~
+```
+
+### Setup
+
+By default, the libvirt-coreos setup will create a single kubernetes master and 3 kubernetes minions. Because the VM drives use Copy-on-Write and because of memory ballooning and KSM, there is a lot of resource over-allocation.
+
+To start your local cluster, open a shell and run:
+
+```shell
+cd kubernetes
+
+export KUBERNETES_PROVIDER=libvirt-coreos
+cluster/kube-up.sh
+```
+
+The `KUBERNETES_PROVIDER` environment variable tells all of the various cluster management scripts which variant to use. If you forget to set this, the assumption is you are running on Google Compute Engine.
+
+The `NUM_MINIONS` environment variable may be set to specify the number of minions to start. If it is not set, the number of minions defaults to 3.
+
+You can check that your machines are there and running with:
+
+```
+virsh -c qemu:///system list
+ Id Name State
+----------------------------------------------------
+ 15 kubernetes_master running
+ 16 kubernetes_minion-01 running
+ 17 kubernetes_minion-02 running
+ 18 kubernetes_minion-03 running
+ ```
+
+You can check that the kubernetes cluster is working with:
+
+```
+$ ./cluster/kubectl.sh get minions
+NAME LABELS STATUS
+192.168.10.2 Ready
+192.168.10.3 Ready
+192.168.10.4 Ready
+```
+
+The VMs are running [CoreOS](https://coreos.com/).
+Your ssh keys have already been pushed to the VM. (It looks for ~/.ssh/id_*.pub)
+The user to use to connect to the VM is `core`.
+The IP to connect to the master is 192.168.10.1.
+The IPs to connect to the minions are 192.168.10.2 and onwards.
+
+Connect to `kubernetes_master`:
+```
+ssh core@192.168.10.1
+```
+
+Connect to `kubernetes_minion-01`:
+```
+ssh core@192.168.10.2
+```
+
+### Interacting with your Kubernetes cluster with the `kube-*` scripts.
+
+All of the following commands assume you have set `KUBERNETES_PROVIDER` appropriately:
+
+```
+export KUBERNETES_PROVIDER=libvirt-coreos
+```
+
+Bring up a libvirt-CoreOS cluster of 5 minions
+
+```
+NUM_MINIONS=5 cluster/kube-up.sh
+```
+
+Destroy the libvirt-CoreOS cluster
+
+```
+cluster/kube-down.sh
+```
+
+Update the libvirt-CoreOS cluster with a new Kubernetes release:
+
+```
+cluster/kube-push.sh
+```
+
+Interact with the cluster
+
+```
+cluster/kubectl.sh
+```
+
+### Troubleshooting
+
+#### !!! Cannot find kubernetes-server-linux-amd64.tar.gz
+
+Build the release tarballs:
+
+```
+make release
+```
+
+#### Can't find virsh in PATH, please fix and retry.
+
+Install libvirt
+
+On Arch:
+
+```
+pacman -S qemu libvirt
+```
+
+On Ubuntu 14.04.1:
+
+```
+aptitude install qemu-system-x86 libvirt-bin
+```
+
+On Fedora 21:
+
+```
+yum install qemu libvirt
+```
+
+#### error: Failed to connect socket to '/var/run/libvirt/libvirt-sock': No such file or directory
+
+Start the libvirt daemon
+
+On Arch:
+
+```
+systemctl start libvirtd
+```
+
+On Ubuntu 14.04.1:
+
+```
+service libvirt-bin start
+```
+
+#### error: Failed to connect socket to '/var/run/libvirt/libvirt-sock': Permission denied
+
+Fix libvirt access permission (Remember to adapt `$USER`)
+
+On Arch and Fedora 21:
+
+```
+cat > /etc/polkit-1/rules.d/50-org.libvirt.unix.manage.rules <