From 6b8c332cd3b7d566ad9f3285429cb1d8fa2ebf4c Mon Sep 17 00:00:00 2001
From: Zach Loafman
Date: Mon, 9 Feb 2015 13:40:11 -0800
Subject: [PATCH] Revert "Fix vagrant networking"

---
 cluster/saltbase/salt/kubelet/init.sls |   3 -
 cluster/saltbase/salt/top.sls          |   3 -
 cluster/vagrant/config-default.sh      |   9 +-
 cluster/vagrant/provision-network.sh   | 111 ++++++++++++++-----------
 cluster/vagrant/util.sh                |  26 +-----
 hack/e2e-suite/services.sh             |  13 +--
 6 files changed, 77 insertions(+), 88 deletions(-)

diff --git a/cluster/saltbase/salt/kubelet/init.sls b/cluster/saltbase/salt/kubelet/init.sls
index b53717307c0..3e85b63bdef 100644
--- a/cluster/saltbase/salt/kubelet/init.sls
+++ b/cluster/saltbase/salt/kubelet/init.sls
@@ -70,7 +70,4 @@ kubelet:
     - file: /etc/init.d/kubelet
 {% endif %}
     - file: /var/lib/kubelet/kubernetes_auth
-{% if grains.network_mode is defined and grains.network_mode == 'openvswitch' %}
-    - sls: sdn
-{% endif %}
diff --git a/cluster/saltbase/salt/top.sls b/cluster/saltbase/salt/top.sls
index 35421f3a219..3a34d349072 100644
--- a/cluster/saltbase/salt/top.sls
+++ b/cluster/saltbase/salt/top.sls
@@ -40,9 +40,6 @@ base:
     - kube-addons
 {% if grains['cloud'] is defined and grains['cloud'] == 'azure' %}
     - openvpn
-{% else %}
-    - docker
-    - sdn
 {% endif %}

   'roles:kubernetes-pool-vsphere':
diff --git a/cluster/vagrant/config-default.sh b/cluster/vagrant/config-default.sh
index f831bf32c74..bde0db27b23 100755
--- a/cluster/vagrant/config-default.sh
+++ b/cluster/vagrant/config-default.sh
@@ -29,15 +29,12 @@ export MASTER_NAME="${INSTANCE_PREFIX}-master"
 # Map out the IPs, names and container subnets of each minion
 export MINION_IP_BASE="10.245.1."
 MINION_CONTAINER_SUBNET_BASE="10.246"
-MASTER_CONTAINER_NETMASK="255.255.255.0"
-MASTER_CONTAINER_ADDR="${MINION_CONTAINER_SUBNET_BASE}.0.1"
-MASTER_CONTAINER_SUBNET="${MINION_CONTAINER_SUBNET_BASE}.0.1/24"
 CONTAINER_SUBNET="${MINION_CONTAINER_SUBNET_BASE}.0.0/16"
 for ((i=0; i < NUM_MINIONS; i++)) do
   MINION_IPS[$i]="${MINION_IP_BASE}$((i+3))"
   MINION_NAMES[$i]="${INSTANCE_PREFIX}-minion-$((i+1))"
-  MINION_CONTAINER_SUBNETS[$i]="${MINION_CONTAINER_SUBNET_BASE}.$((i+1)).1/24"
-  MINION_CONTAINER_ADDRS[$i]="${MINION_CONTAINER_SUBNET_BASE}.$((i+1)).1"
+  MINION_CONTAINER_SUBNETS[$i]="${MINION_CONTAINER_SUBNET_BASE}.${i}.1/24"
+  MINION_CONTAINER_ADDRS[$i]="${MINION_CONTAINER_SUBNET_BASE}.${i}.1"
   MINION_CONTAINER_NETMASKS[$i]="255.255.255.0"
   VAGRANT_MINION_NAMES[$i]="minion-$((i+1))"
 done
@@ -72,4 +69,4 @@ DNS_REPLICAS=1

 # Optional: Enable setting flags for kube-apiserver to turn on behavior in active-dev
 RUNTIME_CONFIG=""
-#RUNTIME_CONFIG="api/v1beta3"
+#RUNTIME_CONFIG="api/v1beta3"
\ No newline at end of file
diff --git a/cluster/vagrant/provision-network.sh b/cluster/vagrant/provision-network.sh
index d18eb0085db..6a24936cf16 100755
--- a/cluster/vagrant/provision-network.sh
+++ b/cluster/vagrant/provision-network.sh
@@ -16,8 +16,7 @@
 DOCKER_BRIDGE=kbr0
 OVS_SWITCH=obr0
-DOCKER_OVS_TUN=tun0
-TUNNEL_BASE=gre
+GRE_TUNNEL_BASE=gre
 NETWORK_CONF_PATH=/etc/sysconfig/network-scripts/
 POST_NETWORK_SCRIPT_DIR=/kubernetes-vagrant
 POST_NETWORK_SCRIPT=${POST_NETWORK_SCRIPT_DIR}/network_closure.sh
@@ -25,6 +24,55 @@ POST_NETWORK_SCRIPT=${POST_NETWORK_SCRIPT_DIR}/network_closure.sh
 # ensure location of POST_NETWORK_SCRIPT exists
 mkdir -p $POST_NETWORK_SCRIPT_DIR

+# add docker bridge ifcfg file
+cat <<EOF > ${NETWORK_CONF_PATH}ifcfg-${DOCKER_BRIDGE}
+# Generated by yours truly
+DEVICE=${DOCKER_BRIDGE}
+ONBOOT=yes
+TYPE=Bridge
+BOOTPROTO=static
+IPADDR=${MINION_CONTAINER_ADDR}
+NETMASK=${MINION_CONTAINER_NETMASK}
+STP=yes
+EOF
+
+# add the ovs bridge ifcfg file
+cat <<EOF > ${NETWORK_CONF_PATH}ifcfg-${OVS_SWITCH}
+DEVICE=${OVS_SWITCH}
+ONBOOT=yes
+DEVICETYPE=ovs
+TYPE=OVSBridge
+BOOTPROTO=static
+HOTPLUG=no
+BRIDGE=${DOCKER_BRIDGE}
+EOF
+
+# now loop through all other minions and create persistent gre tunnels
+GRE_NUM=0
+for remote_ip in "${MINION_IPS[@]}"
+do
+  if [ "${remote_ip}" == "${MINION_IP}" ]; then
+    continue
+  fi
+  ((GRE_NUM++)) || echo
+  GRE_TUNNEL=${GRE_TUNNEL_BASE}${GRE_NUM}
+  # ovs-vsctl add-port ${OVS_SWITCH} ${GRE_TUNNEL} -- set interface ${GRE_TUNNEL} type=gre options:remote_ip=${remote_ip}
+  cat <<EOF > ${NETWORK_CONF_PATH}ifcfg-${GRE_TUNNEL}
+DEVICE=${GRE_TUNNEL}
+ONBOOT=yes
+DEVICETYPE=ovs
+TYPE=OVSTunnel
+OVS_BRIDGE=${OVS_SWITCH}
+OVS_TUNNEL_TYPE=gre
+OVS_TUNNEL_OPTIONS="options:remote_ip=${remote_ip}"
+EOF
+done
+
+# add ip route rules such that all pod traffic flows through docker bridge and consequently to the gre tunnels
+cat <<EOF > ${NETWORK_CONF_PATH}route-${DOCKER_BRIDGE}
+${CONTAINER_SUBNET} dev ${DOCKER_BRIDGE} scope link src ${MINION_CONTAINER_ADDR}
+EOF
+
 # generate the post-configure script to be called by salt as cmd.wait
 cat <<EOF > ${POST_NETWORK_SCRIPT}
 #!/bin/bash

 set -e
@@ -33,58 +81,27 @@ set -e
 # Only do this operation once, otherwise, we get docker.service files output on disk, and the command line arguments get applied multiple times
 grep -q kbr0 /etc/sysconfig/docker || {
-  CONTAINER_SUBNETS=(${MASTER_CONTAINER_SUBNET} ${MINION_CONTAINER_SUBNETS[@]})
-  CONTAINER_IPS=(${MASTER_IP} ${MINION_IPS[@]})
-
   # Stop docker before making these updates
   systemctl stop docker

-  # create new docker bridge
-  ip link set dev ${DOCKER_BRIDGE} down || true
-  brctl delbr ${DOCKER_BRIDGE} || true
-  brctl addbr ${DOCKER_BRIDGE}
+  # NAT interface fails to revive on network restart, so OR-gate to true
+  systemctl restart network.service || true
+
+  # set docker bridge up, and set stp on the ovs bridge
   ip link set dev ${DOCKER_BRIDGE} up
-  ifconfig ${DOCKER_BRIDGE} ${CONTAINER_ADDR} netmask ${CONTAINER_NETMASK} up
-
-  # add ovs bridge
-  ovs-vsctl del-br ${OVS_SWITCH} || true
-  ovs-vsctl add-br ${OVS_SWITCH} -- set Bridge ${OVS_SWITCH} fail-mode=secure
-  ovs-vsctl set bridge ${OVS_SWITCH} protocols=OpenFlow13
-  ovs-vsctl del-port ${OVS_SWITCH} ${TUNNEL_BASE}0 || true
-  ovs-vsctl add-port ${OVS_SWITCH} ${TUNNEL_BASE}0 -- set Interface ${TUNNEL_BASE}0 type=${TUNNEL_BASE} options:remote_ip="flow" options:key="flow" ofport_request=10
-
-  # add tun device
-  ovs-vsctl del-port ${OVS_SWITCH} ${DOCKER_OVS_TUN} || true
-  ovs-vsctl add-port ${OVS_SWITCH} ${DOCKER_OVS_TUN} -- set Interface ${DOCKER_OVS_TUN} type=internal ofport_request=9
-  brctl addif ${DOCKER_BRIDGE} ${DOCKER_OVS_TUN}
-  ip link set ${DOCKER_OVS_TUN} up
-
-
-  # add oflow rules, because we do not want to use stp
-  ovs-ofctl -O OpenFlow13 del-flows ${OVS_SWITCH}
-
-  # now loop through all other minions and create persistent gre tunnels
-  NODE_INDEX=0
-  for remote_ip in "\${CONTAINER_IPS[@]}"
-  do
-    if [ "\${remote_ip}" == "${NODE_IP}" ]; then
-      ovs-ofctl -O OpenFlow13 add-flow ${OVS_SWITCH} "table=0,ip,in_port=10,nw_dst=\${CONTAINER_SUBNETS[\${NODE_INDEX}]},actions=output:9"
-      ovs-ofctl -O OpenFlow13 add-flow ${OVS_SWITCH} "table=0,arp,in_port=10,nw_dst=\${CONTAINER_SUBNETS[\${NODE_INDEX}]},actions=output:9"
-    else
-      ovs-ofctl -O OpenFlow13 add-flow ${OVS_SWITCH} "table=0,in_port=9,ip,nw_dst=\${CONTAINER_SUBNETS[\${NODE_INDEX}]},actions=set_field:\${remote_ip}->tun_dst,output:10"
-      ovs-ofctl -O OpenFlow13 add-flow ${OVS_SWITCH} "table=0,in_port=9,arp,nw_dst=\${CONTAINER_SUBNETS[\${NODE_INDEX}]},actions=set_field:\${remote_ip}->tun_dst,output:10"
-    fi
-    ((NODE_INDEX++)) || true
-  done
-
-  # add ip route rules such that all pod traffic flows through docker bridge and consequently to the gre tunnels
-  ip route add ${CONTAINER_SUBNET} dev ${DOCKER_BRIDGE} scope link src ${CONTAINER_ADDR}
-
+  ovs-vsctl set Bridge ${OVS_SWITCH} stp_enable=true

   # modify the docker service file such that it uses the kube docker bridge and not its own
-  echo "OPTIONS='-b=kbr0 --selinux-enabled ${DOCKER_OPTS}'" >/etc/sysconfig/docker
+  #echo "OPTIONS=-b=kbr0 --iptables=false --selinux-enabled" > /etc/sysconfig/docker
+  echo "OPTIONS='-b=kbr0 --iptables=false --selinux-enabled ${DOCKER_OPTS}'" >/etc/sysconfig/docker
   systemctl daemon-reload
-  systemctl start docker
+  systemctl restart docker.service
+
+  # setup iptables masquerade rules so the pods can reach the internet
+  iptables -t nat -A POSTROUTING -s ${CONTAINER_SUBNET} ! -d ${CONTAINER_SUBNET} -j MASQUERADE
+
+  # persist please
+  iptables-save >& /etc/sysconfig/iptables
 }
 EOF
diff --git a/cluster/vagrant/util.sh b/cluster/vagrant/util.sh
index 33285e6d202..ac707ca9a78 100644
--- a/cluster/vagrant/util.sh
+++ b/cluster/vagrant/util.sh
@@ -69,13 +69,6 @@ function create-provision-scripts {
     echo "MASTER_IP='${MASTER_IP}'"
     echo "MINION_NAMES=(${MINION_NAMES[@]})"
     echo "MINION_IPS=(${MINION_IPS[@]})"
-    echo "NODE_IP='${MASTER_IP}'"
-    echo "CONTAINER_SUBNET='${CONTAINER_SUBNET}'"
-    echo "CONTAINER_NETMASK='${MASTER_CONTAINER_NETMASK}'"
-    echo "MASTER_CONTAINER_SUBNET='${MASTER_CONTAINER_SUBNET}'"
-    echo "CONTAINER_ADDR='${MASTER_CONTAINER_ADDR}'"
-    echo "MINION_CONTAINER_NETMASKS='${MINION_CONTAINER_NETMASKS[@]}'"
-    echo "MINION_CONTAINER_SUBNETS=(${MINION_CONTAINER_SUBNETS[@]})"
     echo "PORTAL_NET='${PORTAL_NET}'"
     echo "MASTER_USER='${MASTER_USER}'"
     echo "MASTER_PASSWD='${MASTER_PASSWD}'"
@@ -87,7 +80,6 @@ function create-provision-scripts {
     echo "DNS_DOMAIN='${DNS_DOMAIN:-}'"
     echo "RUNTIME_CONFIG='${RUNTIME_CONFIG:-}'"
     grep -v "^#" "${KUBE_ROOT}/cluster/vagrant/provision-master.sh"
-    grep -v "^#" "${KUBE_ROOT}/cluster/vagrant/provision-network.sh"
   ) > "${KUBE_TEMP}/master-start.sh"

   for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
@@ -99,11 +91,8 @@ function create-provision-scripts {
       echo "MINION_IPS=(${MINION_IPS[@]})"
       echo "MINION_IP='${MINION_IPS[$i]}'"
       echo "MINION_ID='$i'"
-      echo "NODE_IP='${MINION_IPS[$i]}'"
-      echo "MASTER_CONTAINER_SUBNET='${MASTER_CONTAINER_SUBNET}'"
-      echo "CONTAINER_ADDR='${MINION_CONTAINER_ADDRS[$i]}'"
-      echo "CONTAINER_NETMASK='${MINION_CONTAINER_NETMASKS[$i]}'"
-      echo "MINION_CONTAINER_SUBNETS=(${MINION_CONTAINER_SUBNETS[@]})"
+      echo "MINION_CONTAINER_ADDR='${MINION_CONTAINER_ADDRS[$i]}'"
+      echo "MINION_CONTAINER_NETMASK='${MINION_CONTAINER_NETMASKS[$i]}'"
       echo "CONTAINER_SUBNET='${CONTAINER_SUBNET}'"
       echo "DOCKER_OPTS='${EXTRA_DOCKER_OPTS-}'"
       grep -v "^#" "${KUBE_ROOT}/cluster/vagrant/provision-minion.sh"
@@ -268,10 +257,6 @@ function find-vagrant-name-by-ip {
 # Find the vagrant machine name based on the host name of the minion
 function find-vagrant-name-by-minion-name {
   local ip="$1"
-  if [[ "$ip" == "${INSTANCE_PREFIX}-master" ]]; then
-    echo "master"
-    return $?
-  fi
   local ip_pattern="${INSTANCE_PREFIX}-minion-(.*)"

   [[ $ip =~ $ip_pattern ]] || {
@@ -295,7 +280,7 @@ function ssh-to-node {
     return 1
   }

-  vagrant ssh "${machine}" -c "${cmd}"
+  vagrant ssh "${machine}" -c "${cmd}" | grep -v "Connection to.*closed"
 }

 # Restart the kube-proxy on a node ($1)
@@ -303,11 +288,6 @@ function restart-kube-proxy {
   ssh-to-node "$1" "sudo systemctl restart kube-proxy"
 }

-# Restart the apiserver
-function restart-apiserver {
-  ssh-to-node "${master}" "sudo systemctl restart kube-apiserver"
-}
-
 function setup-monitoring-firewall {
   echo "TODO" 1>&2
 }
diff --git a/hack/e2e-suite/services.sh b/hack/e2e-suite/services.sh
index 73e049582fc..a8b4ec9d031 100755
--- a/hack/e2e-suite/services.sh
+++ b/hack/e2e-suite/services.sh
@@ -33,6 +33,11 @@ source "${KUBE_VERSION_ROOT}/cluster/${KUBERNETES_PROVIDER}/util.sh"

 prepare-e2e

+if [[ "$KUBERNETES_PROVIDER" == "vagrant" ]]; then
+  echo "WARNING: Skipping services.sh for ${KUBERNETES_PROVIDER}. See https://github.com/GoogleCloudPlatform/kubernetes/issues/3655"
+  exit 0
+fi
+
 function error() {
   echo "$@" >&2
   exit 1
@@ -261,7 +266,7 @@ function verify_from_container() {
     for i in $(seq -s' ' 1 $4); do
       ok=false
       for j in $(seq -s' ' 1 10); do
-        if wget -q -T 5 -O - http://$2:$3; then
+        if wget -q -T 1 -O - http://$2:$3; then
          echo
          ok=true
          break
@@ -415,11 +420,7 @@
 #
 echo "Test 6: Restart the master, make sure portals come back."
 echo "Restarting the master"
-if [[ "$KUBERNETES_PROVIDER" == "vagrant" ]]; then
-  restart-apiserver "${master}"
-else
-  ssh-to-node "${master}" "sudo /etc/init.d/kube-apiserver restart"
-fi
+ssh-to-node "${master}" "sudo /etc/init.d/kube-apiserver restart"
 sleep 5
 echo "Verifying the portals from the host"
 wait_for_service_up "${svc3_name}" "${svc3_ip}" "${svc3_port}" \
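
Note: the reverted provision-network.sh above wires each minion's pod network statically instead of via OpenFlow rules: a Linux bridge (kbr0) that docker attaches to through -b=kbr0, an OVS bridge (obr0) with STP enabled enslaved to it, one GRE port per peer minion (persisted as ifcfg-gre* files), plus a pod-subnet route and an iptables MASQUERADE rule for egress. For anyone debugging a minion by hand, here is a rough, untested sketch of the same wiring done imperatively; the address values and the gre1 port name are illustrative (minion-1 under the reverted config-default.sh, with minion-2 as the peer), not taken from the patch:

    #!/bin/bash
    # Illustrative values; REMOTE_IP is the host IP of a peer minion.
    DOCKER_BRIDGE=kbr0
    OVS_SWITCH=obr0
    MINION_CONTAINER_ADDR=10.246.0.1
    CONTAINER_SUBNET=10.246.0.0/16
    REMOTE_IP=10.245.1.4

    # Linux bridge that docker is pointed at via -b=kbr0
    brctl addbr ${DOCKER_BRIDGE}
    ip addr add ${MINION_CONTAINER_ADDR}/24 dev ${DOCKER_BRIDGE}
    ip link set dev ${DOCKER_BRIDGE} up

    # OVS bridge with STP, enslaved to the docker bridge, one GRE port per peer
    ovs-vsctl add-br ${OVS_SWITCH}
    ovs-vsctl set Bridge ${OVS_SWITCH} stp_enable=true
    ovs-vsctl add-port ${OVS_SWITCH} gre1 -- set interface gre1 type=gre options:remote_ip=${REMOTE_IP}
    brctl addif ${DOCKER_BRIDGE} ${OVS_SWITCH}

    # Route the pod subnet over the bridge; masquerade traffic leaving it
    ip route add ${CONTAINER_SUBNET} dev ${DOCKER_BRIDGE} scope link src ${MINION_CONTAINER_ADDR}
    iptables -t nat -A POSTROUTING -s ${CONTAINER_SUBNET} ! -d ${CONTAINER_SUBNET} -j MASQUERADE

STP on the obr0/kbr0 loop is what replaces the deleted per-subnet OpenFlow rules: every minion's bridge sees every GRE tunnel, and spanning tree prunes the resulting loops.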