Merge pull request #13986 from derekwaynecarr/move_to_flannel

Auto commit by PR queue bot
Committed by k8s-merge-robot on 2015-09-16 09:36:09 -07:00
commit 3dcb75c599
9 changed files with 161 additions and 116 deletions

View File

@@ -1,3 +1,9 @@
# Early configurations of Kubernetes ran etcd on the host, and as part of a migration step we began to delete the host etcd.
# It is possible, though, that the host runs a separate etcd used to configure other services such as Flannel.
# In that case, we do not want Salt to remove or stop the host service.
# Note: it is imperative that the host-installed etcd does not conflict with the Kubernetes-managed etcd.
{% if grains['keep_host_etcd'] is not defined %}
delete_etc_etcd_dir:
file.absent:
- name: /etc/etcd
@@ -6,20 +12,6 @@ delete_etcd_conf:
file.absent:
- name: /etc/etcd/etcd.conf
touch /var/log/etcd.log:
cmd.run:
- creates: /var/log/etcd.log
/var/etcd:
file.directory:
- user: root
- group: root
- dir_mode: 700
- recurse:
- user
- group
- mode
delete_etcd_default:
file.absent:
- name: /etc/default/etcd
@@ -34,6 +26,28 @@ delete_etcd_initd:
file.absent:
- name: /etc/init.d/etcd
#stop legacy etcd_service
stop_etcd-service:
service.dead:
- name: etcd
- enable: None
{% endif %}
touch /var/log/etcd.log:
cmd.run:
- creates: /var/log/etcd.log
/var/etcd:
file.directory:
- user: root
- group: root
- dir_mode: 700
- recurse:
- user
- group
- mode
/etc/kubernetes/manifests/etcd.manifest:
file.managed:
- source: salt://etcd/etcd.manifest
@@ -43,9 +57,3 @@ delete_etcd_initd:
- mode: 644
- makedirs: true
- dir_mode: 755
#stop legacy etcd_service
stop_etcd-service:
service.dead:
- name: etcd
- enable: None
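The comments at the top of this state describe the new keep_host_etcd grain: when the grain is set, Salt skips the host-etcd cleanup and only manages the Kubernetes etcd static pod manifest. Further down in this change, provision-master.sh sets the grain in its generated Salt minion configuration; a minimal sketch of declaring it directly on a node (the drop-in path is illustrative, not part of this change):

# Mark this node so the host-etcd cleanup states above are skipped.
cat <<EOF >/etc/salt/minion.d/keep-host-etcd.conf
grains:
  keep_host_etcd: true
EOF
systemctl restart salt-minion  # reload the minion config so the grain is visible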

View File

@@ -74,7 +74,7 @@ ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}"
# TODO Enable selinux when Fedora 21 repositories get an updated docker package
# see https://bugzilla.redhat.com/show_bug.cgi?id=1216151
#EXTRA_DOCKER_OPTS="-b=cbr0 --selinux-enabled --insecure-registry 10.0.0.0/8"
EXTRA_DOCKER_OPTS="-b=cbr0 --insecure-registry 10.0.0.0/8"
EXTRA_DOCKER_OPTS="--insecure-registry 10.0.0.0/8"
# Flag to tell the kubelet to enable CFS quota support
ENABLE_CPU_CFS_QUOTA="${KUBE_ENABLE_CPU_CFS_QUOTA:-true}"
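Dropping -b=cbr0 here reflects the move away from a hand-built cbr0 bridge; with flannel, the Docker daemon is expected to take its bridge address and MTU from the subnet flannel allocates to the node. The PR does not show that wiring in this file, but a common pattern (a sketch, assuming flanneld's default subnet file at /run/flannel/subnet.env) looks like:

# Sketch: derive Docker bridge options from the flannel-allocated subnet.
if [[ -f /run/flannel/subnet.env ]]; then
  source /run/flannel/subnet.env   # defines FLANNEL_SUBNET and FLANNEL_MTU
  EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS} --bip=${FLANNEL_SUBNET} --mtu=${FLANNEL_MTU}"
fi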

View File

@@ -79,8 +79,8 @@ done
echo "127.0.0.1 localhost" >> /etc/hosts # enables cmds like 'kubectl get pods' on master.
echo "$MASTER_IP $MASTER_NAME" >> /etc/hosts
# Configure the openvswitch network
provision-network
# Configure the master network
provision-network-master
# Update salt configuration
mkdir -p /etc/salt/minion.d
@@ -108,6 +108,7 @@ grains:
runtime_config: '$(echo "$RUNTIME_CONFIG" | sed -e "s/'/''/g")'
docker_opts: '$(echo "$DOCKER_OPTS" | sed -e "s/'/''/g")'
master_extra_sans: '$(echo "$MASTER_EXTRA_SANS" | sed -e "s/'/''/g")'
keep_host_etcd: true
EOF
mkdir -p /srv/salt-overlay/pillar

View File

@@ -107,7 +107,7 @@ for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
done
# Configure network
provision-network
provision-network-minion
# Placeholder for any other manifests that may be per-node.
mkdir -p /etc/kubernetes/manifests

View File

@@ -0,0 +1,80 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# provision-network-master configures flannel on the master
function provision-network-master {
echo "Provisioning network on master"
FLANNEL_ETCD_URL="http://${MASTER_IP}:4379"
# Install etcd for flannel data
if ! which etcd >/dev/null 2>&1; then
yum install -y etcd
# Modify etcd configuration for flannel data
cat <<EOF >/etc/etcd/etcd.conf
ETCD_NAME=flannel
ETCD_DATA_DIR="/var/lib/etcd/flannel.etcd"
ETCD_LISTEN_PEER_URLS="http://${MASTER_IP}:4380"
ETCD_LISTEN_CLIENT_URLS="http://${MASTER_IP}:4379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://${MASTER_IP}:4380"
ETCD_INITIAL_CLUSTER="flannel=http://${MASTER_IP}:4380"
ETCD_ADVERTISE_CLIENT_URLS="${FLANNEL_ETCD_URL}"
EOF
# Enable and start etcd
systemctl enable etcd
systemctl start etcd
fi
# Install flannel for overlay
if ! which flanneld >/dev/null 2>&1; then
yum install -y flannel
cat <<EOF >/etc/flannel-config.json
{
"Network": "${CONTAINER_SUBNET}",
"SubnetLen": 24,
"Backend": {
"Type": "udp",
"Port": 8285
}
}
EOF
# Import default configuration into etcd for master setup
etcdctl -C ${FLANNEL_ETCD_URL} set /coreos.com/network/config < /etc/flannel-config.json
# Configure local daemon to speak to master
NETWORK_CONF_PATH=/etc/sysconfig/network-scripts/
if_to_edit=$( find ${NETWORK_CONF_PATH}ifcfg-* | xargs grep -l VAGRANT-BEGIN )
NETWORK_IF_NAME=`echo ${if_to_edit} | awk -F- '{ print $3 }'`
cat <<EOF >/etc/sysconfig/flanneld
FLANNEL_ETCD="${FLANNEL_ETCD_URL}"
FLANNEL_ETCD_KEY="/coreos.com/network"
FLANNEL_OPTIONS="-iface=${NETWORK_IF_NAME}"
EOF
# Start flannel
systemctl enable flanneld
systemctl start flanneld
fi
echo "Network configuration verified"
}
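Since this script stores the flannel network configuration in a dedicated etcd listening on the non-standard client port 4379, a quick sanity check after provisioning is to read the key back (a sketch, using the same etcdctl v2-style client invoked above):

# Should echo the JSON written from /etc/flannel-config.json
etcdctl -C "http://${MASTER_IP}:4379" get /coreos.com/network/config
# Once flanneld is running, node leases appear under the same prefix
etcdctl -C "http://${MASTER_IP}:4379" ls /coreos.com/network/subnets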

View File

@@ -0,0 +1,45 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# provision-network-minion configures flannel on the minion
function provision-network-minion {
echo "Provisioning network on minion"
FLANNEL_ETCD_URL="http://${MASTER_IP}:4379"
# Install flannel for overlay
if ! which flanneld >/dev/null 2>&1; then
yum install -y flannel
# Configure local daemon to speak to master
NETWORK_CONF_PATH=/etc/sysconfig/network-scripts/
if_to_edit=$( find ${NETWORK_CONF_PATH}ifcfg-* | xargs grep -l VAGRANT-BEGIN )
NETWORK_IF_NAME=`echo ${if_to_edit} | awk -F- '{ print $3 }'`
cat <<EOF >/etc/sysconfig/flanneld
FLANNEL_ETCD="${FLANNEL_ETCD_URL}"
FLANNEL_ETCD_KEY="/coreos.com/network"
FLANNEL_OPTIONS="-iface=${NETWORK_IF_NAME}"
EOF
# Start flannel
systemctl enable flanneld
systemctl start flanneld
fi
echo "Network configuration verified"
}
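On a minion, the main thing to verify is that flanneld started and acquired a subnet lease from the master's etcd; a quick check might look like this sketch, assuming flanneld's default runtime files:

systemctl status flanneld --no-pager   # should report active (running)
cat /run/flannel/subnet.env            # FLANNEL_SUBNET / FLANNEL_MTU for this node
ip addr show flannel0                  # the udp backend creates a flannel0 tunnel device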

View File

@@ -1,89 +0,0 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DOCKER_BRIDGE=cbr0
OVS_SWITCH=obr0
DOCKER_OVS_TUN=tun0
TUNNEL_BASE=gre
NETWORK_CONF_PATH=/etc/sysconfig/network-scripts/
# provision network configures the ovs network for pods
function provision-network {
echo "Verifying network configuration"
# Only do this operation if the bridge is not defined
ifconfig | grep -q ${DOCKER_BRIDGE} || {
echo "It looks like the required network bridge has not yet been created"
CONTAINER_SUBNETS=(${MASTER_CONTAINER_SUBNET} ${MINION_CONTAINER_SUBNETS[@]})
CONTAINER_IPS=(${MASTER_IP} ${MINION_IPS[@]})
# Install openvswitch
echo "Installing, enabling prerequisites"
yum install -y openvswitch bridge-utils
systemctl enable openvswitch
systemctl start openvswitch
# create new docker bridge
echo "Create a new docker bridge"
ip link set dev ${DOCKER_BRIDGE} down || true
brctl delbr ${DOCKER_BRIDGE} || true
brctl addbr ${DOCKER_BRIDGE}
ip link set dev ${DOCKER_BRIDGE} up
ifconfig ${DOCKER_BRIDGE} ${CONTAINER_ADDR} netmask ${CONTAINER_NETMASK} up
# add ovs bridge
echo "Add ovs bridge"
ovs-vsctl del-br ${OVS_SWITCH} || true
ovs-vsctl add-br ${OVS_SWITCH} -- set Bridge ${OVS_SWITCH} fail-mode=secure
ovs-vsctl set bridge ${OVS_SWITCH} protocols=OpenFlow13
ovs-vsctl del-port ${OVS_SWITCH} ${TUNNEL_BASE}0 || true
ovs-vsctl add-port ${OVS_SWITCH} ${TUNNEL_BASE}0 -- set Interface ${TUNNEL_BASE}0 type=${TUNNEL_BASE} options:remote_ip="flow" options:key="flow" ofport_request=10
# add tun device
echo "Add tun device"
ovs-vsctl del-port ${OVS_SWITCH} ${DOCKER_OVS_TUN} || true
ovs-vsctl add-port ${OVS_SWITCH} ${DOCKER_OVS_TUN} -- set Interface ${DOCKER_OVS_TUN} type=internal ofport_request=9
brctl addif ${DOCKER_BRIDGE} ${DOCKER_OVS_TUN}
ip link set ${DOCKER_OVS_TUN} up
# add oflow rules, because we do not want to use stp
echo "Add oflow rules"
ovs-ofctl -O OpenFlow13 del-flows ${OVS_SWITCH}
# now loop through all other minions and create persistent gre tunnels
echo "Creating persistent gre tunnels"
NODE_INDEX=0
for remote_ip in "${CONTAINER_IPS[@]}"
do
if [ "${remote_ip}" == "${NODE_IP}" ]; then
ovs-ofctl -O OpenFlow13 add-flow ${OVS_SWITCH} "table=0,ip,in_port=10,nw_dst=${CONTAINER_SUBNETS[${NODE_INDEX}]},actions=output:9"
ovs-ofctl -O OpenFlow13 add-flow ${OVS_SWITCH} "table=0,arp,in_port=10,nw_dst=${CONTAINER_SUBNETS[${NODE_INDEX}]},actions=output:9"
else
ovs-ofctl -O OpenFlow13 add-flow ${OVS_SWITCH} "table=0,in_port=9,ip,nw_dst=${CONTAINER_SUBNETS[${NODE_INDEX}]},actions=set_field:${remote_ip}->tun_dst,output:10"
ovs-ofctl -O OpenFlow13 add-flow ${OVS_SWITCH} "table=0,in_port=9,arp,nw_dst=${CONTAINER_SUBNETS[${NODE_INDEX}]},actions=set_field:${remote_ip}->tun_dst,output:10"
fi
((NODE_INDEX++)) || true
done
echo "Created persistent gre tunnels"
# add ip route rules such that all pod traffic flows through docker bridge and consequently to the gre tunnels
echo "Add ip route rules such that all pod traffic flows through docker bridge"
ip route add ${CONTAINER_SUBNET} dev ${DOCKER_BRIDGE} scope link src ${CONTAINER_ADDR}
}
echo "Network configuration verified"
}

View File

@@ -154,7 +154,7 @@ function create-provision-scripts {
echo "KUBE_PROXY_TOKEN='${KUBE_PROXY_TOKEN:-}'"
echo "MASTER_EXTRA_SANS='${MASTER_EXTRA_SANS:-}'"
echo "ENABLE_CPU_CFS_QUOTA='${ENABLE_CPU_CFS_QUOTA}'"
awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-network.sh"
awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-network-master.sh"
awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-master.sh"
) > "${KUBE_TEMP}/master-start.sh"
@@ -179,7 +179,7 @@ function create-provision-scripts {
echo "KUBELET_TOKEN='${KUBELET_TOKEN:-}'"
echo "KUBE_PROXY_TOKEN='${KUBE_PROXY_TOKEN:-}'"
echo "MASTER_EXTRA_SANS='${MASTER_EXTRA_SANS:-}'"
awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-network.sh"
awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-network-minion.sh"
awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-minion.sh"
) > "${KUBE_TEMP}/minion-start-${i}.sh"
done
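create-provision-scripts assembles each node's start script by echoing a block of variable assignments and then concatenating the relevant provisioning scripts with their comment-only lines stripped, which is why the network script can be swapped per role here. A minimal sketch of the same pattern (file names are illustrative):

# Sketch: emit settings, then append two library scripts with comment lines removed.
(
  echo '#!/bin/bash'
  echo "MASTER_IP='10.245.1.2'"
  awk '!/^#/' "lib-network.sh"
  awk '!/^#/' "lib-node.sh"
) > "node-start.sh"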

View File

@@ -134,7 +134,7 @@ Here are all the solutions mentioned above in table form.
IaaS Provider | Config. Mgmt | OS | Networking | Docs | Conforms | Support Level
-------------------- | ------------ | ------ | ---------- | --------------------------------------------- | ---------| ----------------------------
GKE | | | GCE | [docs](https://cloud.google.com/container-engine) | [✓][3] | Commercial
Vagrant | Saltstack | Fedora | OVS | [docs](vagrant.md) | [✓][2] | Project
Vagrant | Saltstack | Fedora | flannel | [docs](vagrant.md) | [✓][2] | Project
GCE | Saltstack | Debian | GCE | [docs](gce.md) | [✓][1] | Project
Azure | CoreOS | CoreOS | Weave | [docs](coreos/azure/README.md) | | Community ([@errordeveloper](https://github.com/errordeveloper), [@squillace](https://github.com/squillace), [@chanezon](https://github.com/chanezon), [@crossorigin](https://github.com/crossorigin))
Docker Single Node | custom | N/A | local | [docs](docker.md) | | Project ([@brendandburns](https://github.com/brendandburns))