diff --git a/cluster/gce/config-default.sh b/cluster/gce/config-default.sh
index 8fd2fa8422f..3fb21a0f062 100755
--- a/cluster/gce/config-default.sh
+++ b/cluster/gce/config-default.sh
@@ -113,7 +113,7 @@ ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota
KUBE_UP_AUTOMATIC_CLEANUP=${KUBE_UP_AUTOMATIC_CLEANUP:-false}
# OpenContrail networking plugin specific settings
-NETWORK_PROVIDER="${NETWORK_PROVIDER:-none}" # opencontrail
+NETWORK_PROVIDER="${NETWORK_PROVIDER:-none}" # opencontrail, flannel
OPENCONTRAIL_TAG="${OPENCONTRAIL_TAG:-R2.20}"
OPENCONTRAIL_KUBERNETES_TAG="${OPENCONTRAIL_KUBERNETES_TAG:-master}"
OPENCONTRAIL_PUBLIC_SUBNET="${OPENCONTRAIL_PUBLIC_SUBNET:-10.1.0.0/16}"
diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh
index 8eca5118d70..d0d4153ace3 100755
--- a/cluster/gce/config-test.sh
+++ b/cluster/gce/config-test.sh
@@ -125,7 +125,7 @@ KUBE_UP_AUTOMATIC_CLEANUP=${KUBE_UP_AUTOMATIC_CLEANUP:-false}
TEST_CLUSTER="${TEST_CLUSTER:-true}"
# OpenContrail networking plugin specific settings
-NETWORK_PROVIDER="${NETWORK_PROVIDER:-none}" # opencontrail
+NETWORK_PROVIDER="${NETWORK_PROVIDER:-none}" # opencontrail, flannel
OPENCONTRAIL_TAG="${OPENCONTRAIL_TAG:-R2.20}"
OPENCONTRAIL_KUBERNETES_TAG="${OPENCONTRAIL_KUBERNETES_TAG:-master}"
OPENCONTRAIL_PUBLIC_SUBNET="${OPENCONTRAIL_PUBLIC_SUBNET:-10.1.0.0/16}"
diff --git a/cluster/saltbase/salt/flannel-server/flannel-server.manifest b/cluster/saltbase/salt/flannel-server/flannel-server.manifest
new file mode 100644
index 00000000000..906d1354c13
--- /dev/null
+++ b/cluster/saltbase/salt/flannel-server/flannel-server.manifest
@@ -0,0 +1,99 @@
+{
+ "kind": "Pod",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "flannel-server",
+ "namespace": "kube-system",
+ "labels": {
+ "app": "flannel-server",
+ "version": "v0.1"
+ }
+ },
+ "spec": {
+ "volumes": [
+ {
+ "name": "varlog",
+ "hostPath": {
+ "path": "/var/log"
+ }
+ },
+ {
+ "name": "etcdstorage",
+ "emptyDir": {}
+ },
+ {
+ "name": "networkconfig",
+ "hostPath": {
+ "path": "/etc/kubernetes/network.json"
+ }
+ }
+ ],
+ "containers": [
+ {
+ "name": "flannel-server-helper",
+ "image": "gcr.io/google_containers/flannel-server-helper:0.1",
+ "args": [
+ "--network-config=/etc/kubernetes/network.json",
+ "--etcd-prefix=/kubernetes.io/network",
+ "--etcd-server=http://127.0.0.1:4001"
+ ],
+ "volumeMounts": [
+ {
+ "name": "networkconfig",
+ "mountPath": "/etc/kubernetes/network.json"
+ }
+ ],
+ "imagePullPolicy": "Always"
+ },
+ {
+ "name": "flannel-container",
+ "image": "quay.io/coreos/flannel:0.5.5",
+ "command": [
+ "/bin/sh",
+ "-c",
+ "/opt/bin/flanneld -listen 0.0.0.0:10253 -etcd-endpoints http://127.0.0.1:4001 -etcd-prefix /kubernetes.io/network 1>>/var/log/flannel_server.log 2>&1"
+ ],
+ "ports": [
+ {
+ "hostPort": 10253,
+ "containerPort": 10253
+ }
+ ],
+ "resources": {
+ "limits": {
+ "cpu": "100m"
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "varlog",
+ "mountPath": "/var/log"
+ }
+ ]
+ },
+ {
+ "name": "etcd-container",
+ "image": "gcr.io/google_containers/etcd:2.2.1",
+ "command": [
+ "/bin/sh",
+ "-c",
        "/opt/bin/etcd --listen-peer-urls http://127.0.0.1:4002 --addr http://127.0.0.1:4001 --bind-addr 127.0.0.1:4001 --data-dir /var/etcd/data 1>>/var/log/etcd_flannel.log 2>&1"
+ ],
+ "resources": {
+ "limits": {
+ "cpu": "100m",
+ "memory": "50Mi"
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "etcdstorage",
+ "mountPath": "/var/etcd/data"
+ }
+ ]
+ }
+ ],
+ "hostNetwork": true
+ }
+}
+
diff --git a/cluster/saltbase/salt/flannel-server/init.sls b/cluster/saltbase/salt/flannel-server/init.sls
new file mode 100644
index 00000000000..a5b1d2e66c7
--- /dev/null
+++ b/cluster/saltbase/salt/flannel-server/init.sls
@@ -0,0 +1,24 @@
+touch /var/log/flannel.log:
+ cmd.run:
+ - creates: /var/log/flannel.log
+
+touch /var/log/etcd_flannel.log:
+ cmd.run:
+ - creates: /var/log/etcd_flannel.log
+
+/etc/kubernetes/network.json:
+ file.managed:
+ - source: salt://flannel-server/network.json
+ - makedirs: True
+ - user: root
+ - group: root
+    - mode: 644
+
+/etc/kubernetes/manifests/flannel-server.manifest:
+ file.managed:
+ - source: salt://flannel-server/flannel-server.manifest
+ - user: root
+ - group: root
+ - mode: 644
+ - makedirs: true
+ - dir_mode: 755
diff --git a/cluster/saltbase/salt/flannel-server/network.json b/cluster/saltbase/salt/flannel-server/network.json
new file mode 100644
index 00000000000..b0a6bd4560a
--- /dev/null
+++ b/cluster/saltbase/salt/flannel-server/network.json
@@ -0,0 +1,8 @@
+{
+ "Network": "172.16.0.0/12",
+ "SubnetLen": 24,
+ "Backend": {
+ "Type": "vxlan",
+ "VNI": 1
+ }
+}
diff --git a/cluster/saltbase/salt/flannel/default b/cluster/saltbase/salt/flannel/default
new file mode 100644
index 00000000000..a4940061070
--- /dev/null
+++ b/cluster/saltbase/salt/flannel/default
@@ -0,0 +1,6 @@
+{% if grains.api_servers is defined -%}
+ {% set daemon_args = "-remote " + grains.api_servers + ":10253" -%}
+{% else -%}
+ {% set daemon_args = "-remote 127.0.0.1:10253" -%}
+{% endif -%}
+DAEMON_ARGS="{{daemon_args}}"
diff --git a/cluster/saltbase/salt/flannel/init.sls b/cluster/saltbase/salt/flannel/init.sls
new file mode 100644
index 00000000000..ee746d241ae
--- /dev/null
+++ b/cluster/saltbase/salt/flannel/init.sls
@@ -0,0 +1,44 @@
+# TODO: Run flannel daemon in a static pod once we've moved the overlay network
+# setup into a network plugin.
+flannel-tar:
+ archive:
+ - extracted
+ - user: root
+ - name: /usr/local/src
+ - makedirs: True
+ - source: https://github.com/coreos/flannel/releases/download/v0.5.5/flannel-0.5.5-linux-amd64.tar.gz
+ - tar_options: v
+ - source_hash: md5=972c717254775bef528f040af804f2cc
+ - archive_format: tar
+    - if_missing: /usr/local/src/flannel-0.5.5/
+
+flannel-symlink:
+ file.symlink:
+ - name: /usr/local/bin/flanneld
+ - target: /usr/local/src/flannel-0.5.5/flanneld
+ - force: true
+ - watch:
+ - archive: flannel-tar
+
+/etc/default/flannel:
+ file.managed:
+ - source: salt://flannel/default
+ - template: jinja
+ - user: root
+ - group: root
+ - mode: 644
+
+/etc/init.d/flannel:
+ file.managed:
+ - source: salt://flannel/initd
+ - user: root
+ - group: root
+ - mode: 755
+
+flannel:
+ service.running:
+ - enable: True
+ - watch:
+ - file: /usr/local/bin/flanneld
+ - file: /etc/init.d/flannel
+ - file: /etc/default/flannel
diff --git a/cluster/saltbase/salt/flannel/initd b/cluster/saltbase/salt/flannel/initd
new file mode 100644
index 00000000000..3e3a98eaa76
--- /dev/null
+++ b/cluster/saltbase/salt/flannel/initd
@@ -0,0 +1,126 @@
+#!/bin/bash
+#
+### BEGIN INIT INFO
+# Provides: flanneld
+# Required-Start: $local_fs $network $syslog
+# Required-Stop:
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: Flannel daemon
+# Description:
+# Flannel daemon.
+### END INIT INFO
+
+
+# PATH should only include /usr/* if it runs after the mountnfs.sh script
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+DESC="Flannel overlay network daemon"
+NAME=flannel
+DAEMON=/usr/local/bin/flanneld
+DAEMON_ARGS=""
+DAEMON_LOG_FILE=/var/log/$NAME.log
+PIDFILE=/var/run/$NAME.pid
+SCRIPTNAME=/etc/init.d/$NAME
+DAEMON_USER=root
+
+# Exit if the package is not installed
+[ -x "$DAEMON" ] || exit 0
+
+# Read configuration variable file if it is present
+[ -r /etc/default/$NAME ] && . /etc/default/$NAME
+
+# Define LSB log_* functions.
+# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
+# and status_of_proc is working.
+. /lib/lsb/init-functions
+
+#
+# Function that starts the daemon/service
+#
+do_start()
+{
+ # Avoid a potential race at boot time when both monit and init.d start
+ # the same service
+ PIDS=$(pidof $DAEMON)
+ for PID in ${PIDS}; do
+ kill -9 $PID
+ done
+
+ # Return
+ # 0 if daemon has been started
+ # 1 if daemon was already running
+ # 2 if daemon could not be started
+ start-stop-daemon --start --quiet --background --no-close \
+ --make-pidfile --pidfile $PIDFILE \
+ --exec $DAEMON -c $DAEMON_USER --test > /dev/null \
+ || return 1
+ start-stop-daemon --start --quiet --background --no-close \
+ --make-pidfile --pidfile $PIDFILE \
+ --exec $DAEMON -c $DAEMON_USER -- \
+ $DAEMON_ARGS >> $DAEMON_LOG_FILE 2>&1 \
+ || return 2
+}
+
+#
+# Function that stops the daemon/service
+#
+do_stop()
+{
+ # Return
+ # 0 if daemon has been stopped
+ # 1 if daemon was already stopped
+ # 2 if daemon could not be stopped
+ # other if a failure occurred
+ start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --name $NAME
+ RETVAL="$?"
+ [ "$RETVAL" = 2 ] && return 2
+ # Many daemons don't delete their pidfiles when they exit.
+ rm -f $PIDFILE
+ return "$RETVAL"
+}
+
+
+case "$1" in
+ start)
+ log_daemon_msg "Starting $DESC" "$NAME"
+ do_start
+ case "$?" in
+ 0|1) log_end_msg 0 || exit 0 ;;
+ 2) log_end_msg 1 || exit 1 ;;
+ esac
+ ;;
+ stop)
+ log_daemon_msg "Stopping $DESC" "$NAME"
+ do_stop
+ case "$?" in
+ 0|1) log_end_msg 0 ;;
+ 2) exit 1 ;;
+ esac
+ ;;
+ status)
+ status_of_proc -p $PIDFILE "$DAEMON" "$NAME" && exit 0 || exit $?
+ ;;
+
+ restart|force-reload)
+ log_daemon_msg "Restarting $DESC" "$NAME"
+ do_stop
+ case "$?" in
+ 0|1)
+ do_start
+ case "$?" in
+ 0) log_end_msg 0 ;;
+ 1) log_end_msg 1 ;; # Old process is still running
+ *) log_end_msg 1 ;; # Failed to start
+ esac
+ ;;
+ *)
+ # Failed to stop
+ log_end_msg 1
+ ;;
+ esac
+ ;;
+ *)
+ echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
+ exit 3
+ ;;
+esac
diff --git a/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest b/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest
index 23af5ebad58..d3df6419226 100644
--- a/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest
+++ b/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest
@@ -10,7 +10,11 @@
{% if pillar['cluster_cidr'] is defined and pillar['cluster_cidr'] != "" -%}
{% set cluster_cidr = "--cluster-cidr=" + pillar['cluster_cidr'] -%}
{% endif -%}
-{% if pillar['allocate_node_cidrs'] is defined -%}
+{# When we're using flannel it is responsible for cidr allocation. #}
+{# This is expected to be a short-term compromise. #}
+{% if pillar.get('network_provider', '').lower() == 'flannel' -%}
+ {% set allocate_node_cidrs = "--allocate-node-cidrs=false" -%}
+{% elif pillar['allocate_node_cidrs'] is defined -%}
{% set allocate_node_cidrs = "--allocate-node-cidrs=" + pillar['allocate_node_cidrs'] -%}
{% endif -%}
{% if pillar['terminated_pod_gc_threshold'] is defined -%}
diff --git a/cluster/saltbase/salt/kubelet/default b/cluster/saltbase/salt/kubelet/default
index 35242a71693..c642e4acc9f 100644
--- a/cluster/saltbase/salt/kubelet/default
+++ b/cluster/saltbase/salt/kubelet/default
@@ -85,6 +85,11 @@
{% set configure_cbr0 = "--configure-cbr0=" + pillar['allocate_node_cidrs'] -%}
{% endif -%}
+{% set experimental_flannel_overlay = "" -%}
+{% if pillar.get('network_provider', '').lower() == 'flannel' %}
+ {% set experimental_flannel_overlay = "--experimental-flannel-overlay=true" %}
+{% endif -%}
+
# Run containers under the root cgroup and create a system container.
{% set system_container = "" -%}
{% set cgroup_root = "" -%}
@@ -117,4 +122,4 @@
{% endif -%}
# test_args has to be kept at the end, so they'll overwrite any prior configuration
-DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{config}} {{manifest_url}} --allow-privileged={{pillar['allow_privileged']}} {{pillar['log_level']}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{configure_cbr0}} {{cgroup_root}} {{system_container}} {{pod_cidr}} {{ master_kubelet_args }} {{cpu_cfs_quota}} {{network_plugin}} {{test_args}}"
+DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{config}} {{manifest_url}} --allow-privileged={{pillar['allow_privileged']}} {{pillar['log_level']}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{configure_cbr0}} {{cgroup_root}} {{system_container}} {{pod_cidr}} {{ master_kubelet_args }} {{cpu_cfs_quota}} {{network_plugin}} {{experimental_flannel_overlay}} {{test_args}}"
diff --git a/cluster/saltbase/salt/top.sls b/cluster/saltbase/salt/top.sls
index c80c527e372..5789f42ed5a 100644
--- a/cluster/saltbase/salt/top.sls
+++ b/cluster/saltbase/salt/top.sls
@@ -13,6 +13,9 @@ base:
'roles:kubernetes-pool':
- match: grain
- docker
+{% if pillar.get('network_provider', '').lower() == 'flannel' %}
+ - flannel
+{% endif %}
- helpers
- cadvisor
- kube-client-tools
@@ -40,6 +43,10 @@ base:
- match: grain
- generate-cert
- etcd
+{% if pillar.get('network_provider', '').lower() == 'flannel' %}
+ - flannel-server
+ - flannel
+{% endif %}
- kube-apiserver
- kube-controller-manager
- kube-scheduler
diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go
index 448b7ed0d1f..4cad923808a 100644
--- a/cmd/kube-controller-manager/app/controllermanager.go
+++ b/cmd/kube-controller-manager/app/controllermanager.go
@@ -300,6 +300,8 @@ func (s *CMServer) Run(_ []string) error {
routeController := routecontroller.New(routes, kubeClient, s.ClusterName, &s.ClusterCIDR)
routeController.Run(s.NodeSyncPeriod)
}
+ } else {
+ glog.Infof("allocate-node-cidrs set to %v, node controller not creating routes", s.AllocateNodeCIDRs)
}
resourcequotacontroller.NewResourceQuotaController(kubeClient).Run(s.ResourceQuotaSyncPeriod)
diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go
index 64845f16581..1196159ce4e 100644
--- a/cmd/kubelet/app/server.go
+++ b/cmd/kubelet/app/server.go
@@ -67,7 +67,10 @@ import (
"k8s.io/kubernetes/pkg/cloudprovider"
)
-const defaultRootDir = "/var/lib/kubelet"
+const (
+ defaultRootDir = "/var/lib/kubelet"
+ experimentalFlannelOverlay = false
+)
// KubeletServer encapsulates all of the parameters necessary for starting up
// a kubelet. These can either be set via command line or directly.
@@ -154,7 +157,8 @@ type KubeletServer struct {
KubeAPIBurst int
// Pull images one at a time.
- SerializeImagePulls bool
+ SerializeImagePulls bool
+ ExperimentalFlannelOverlay bool
}
// bootstrapping interface for kubelet, targets the initialization protocol
@@ -227,6 +231,7 @@ func NewKubeletServer() *KubeletServer {
ReconcileCIDR: true,
KubeAPIQPS: 5.0,
KubeAPIBurst: 10,
+ ExperimentalFlannelOverlay: experimentalFlannelOverlay,
}
}
@@ -341,6 +346,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
fs.Float32Var(&s.KubeAPIQPS, "kube-api-qps", s.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver")
fs.IntVar(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver")
fs.BoolVar(&s.SerializeImagePulls, "serialize-image-pulls", s.SerializeImagePulls, "Pull images one at a time. We recommend *not* changing the default value on nodes that run docker daemon with version < 1.9 or an Aufs storage backend. Issue #10959 has more details. [default=true]")
+ fs.BoolVar(&s.ExperimentalFlannelOverlay, "experimental-flannel-overlay", s.ExperimentalFlannelOverlay, "Experimental support for starting the kubelet with the default overlay network (flannel). Assumes flanneld is already running in client mode. [default=false]")
}
// UnsecuredKubeletConfig returns a KubeletConfig suitable for being run, or an error if the server setup
@@ -478,6 +484,8 @@ func (s *KubeletServer) UnsecuredKubeletConfig() (*KubeletConfig, error) {
TLSOptions: tlsOptions,
Writer: writer,
VolumePlugins: ProbeVolumePlugins(),
+
+ ExperimentalFlannelOverlay: s.ExperimentalFlannelOverlay,
}, nil
}
@@ -949,6 +957,8 @@ type KubeletConfig struct {
TLSOptions *kubelet.TLSOptions
Writer io.Writer
VolumePlugins []volume.VolumePlugin
+
+ ExperimentalFlannelOverlay bool
}
func CreateAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.PodConfig, err error) {
@@ -1031,6 +1041,7 @@ func CreateAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.Pod
kc.OOMAdjuster,
kc.SerializeImagePulls,
kc.ContainerManager,
+ kc.ExperimentalFlannelOverlay,
)
if err != nil {
diff --git a/docs/admin/kubelet.md b/docs/admin/kubelet.md
index 44946ad848b..4597f35020f 100644
--- a/docs/admin/kubelet.md
+++ b/docs/admin/kubelet.md
@@ -85,6 +85,7 @@ kubelet
--enable-server[=true]: Enable the Kubelet's server
--event-burst=10: Maximum size of a bursty event records, temporarily allows event records to burst to this number, while still not exceeding event-qps. Only used if --event-qps > 0
--event-qps=5: If > 0, limit event creations per second to this value. If 0, unlimited.
+ --experimental-flannel-overlay[=false]: Experimental support for starting the kubelet with the default overlay network (flannel). Assumes flanneld is already running in client mode. [default=false]
--file-check-frequency=20s: Duration between checking config files for new data
--google-json-key="": The Google Cloud Platform Service Account JSON Key to use for authentication.
--healthz-bind-address=127.0.0.1: The IP address for the healthz server to serve on, defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces)
@@ -139,7 +140,7 @@ kubelet
--tls-private-key-file="": File containing x509 private key matching --tls-cert-file.
```
-###### Auto generated by spf13/cobra on 21-Nov-2015
+###### Auto generated by spf13/cobra on 24-Nov-2015
diff --git a/docs/proposals/flannel-integration.md b/docs/proposals/flannel-integration.md
new file mode 100644
index 00000000000..417cab1d364
--- /dev/null
+++ b/docs/proposals/flannel-integration.md
@@ -0,0 +1,165 @@
+
+
+
+
+
+
+
+
+
+
+