diff --git a/cluster/saltbase/salt/top.sls b/cluster/saltbase/salt/top.sls index e96c6390cea..0f1db45d913 100644 --- a/cluster/saltbase/salt/top.sls +++ b/cluster/saltbase/salt/top.sls @@ -80,7 +80,3 @@ base: {% if pillar.get('network_provider', '').lower() == 'opencontrail' %} - opencontrail-networking-master {% endif %} - - 'roles:kubernetes-pool-vsphere': - - match: grain - - static-routes diff --git a/cluster/vsphere/config-default.sh b/cluster/vsphere/config-default.sh index a1b8ac2107d..d841d0febf6 100755 --- a/cluster/vsphere/config-default.sh +++ b/cluster/vsphere/config-default.sh @@ -27,7 +27,8 @@ MASTER_MEMORY_MB=1024 MASTER_CPU=1 NODE_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_NODES}})) -NODE_IP_RANGES=($(eval echo "10.244.{1..${NUM_NODES}}.0/24")) +NODE_IP_RANGES="10.244.0.0/16" +MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}" NODE_MEMORY_MB=2048 NODE_CPU=1 diff --git a/cluster/vsphere/config-test.sh b/cluster/vsphere/config-test.sh index 0a7013fa5d2..fb4c493c11c 100755 --- a/cluster/vsphere/config-test.sh +++ b/cluster/vsphere/config-test.sh @@ -27,7 +27,8 @@ MASTER_MEMORY_MB=1024 MASTER_CPU=1 NODE_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_NODES}})) -NODE_IP_RANGES=($(eval echo "10.244.{1..${NUM_NODES}}.0/24")) +NODE_IP_RANGES="10.244.0.0/16" +MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}" NODE_MEMORY_MB=1024 NODE_CPU=1 diff --git a/cluster/vsphere/templates/create-dynamic-salt-files.sh b/cluster/vsphere/templates/create-dynamic-salt-files.sh index 6273756f799..1dcaa071ff7 100755 --- a/cluster/vsphere/templates/create-dynamic-salt-files.sh +++ b/cluster/vsphere/templates/create-dynamic-salt-files.sh @@ -121,7 +121,8 @@ dns_replicas: ${DNS_REPLICAS:-1} dns_server: $DNS_SERVER_IP dns_domain: $DNS_DOMAIN e2e_storage_test_environment: "${E2E_STORAGE_TEST_ENVIRONMENT:-false}" - +cluster_cidr: "$NODE_IP_RANGES" +allocate_node_cidrs: "${ALLOCATE_NODE_CIDRS:-true}" EOF mkdir -p /srv/salt-overlay/salt/nginx diff --git 
a/cluster/vsphere/templates/salt-master.sh b/cluster/vsphere/templates/salt-master.sh index 831c0f820fa..81ab17bcb5c 100755 --- a/cluster/vsphere/templates/salt-master.sh +++ b/cluster/vsphere/templates/salt-master.sh @@ -25,6 +25,7 @@ cat </etc/salt/minion.d/grains.conf grains: roles: - kubernetes-master + cbr-cidr: $MASTER_IP_RANGE cloud: vsphere EOF diff --git a/cluster/vsphere/templates/salt-minion.sh b/cluster/vsphere/templates/salt-minion.sh index e6e74d459e0..57265f0eecc 100755 --- a/cluster/vsphere/templates/salt-minion.sh +++ b/cluster/vsphere/templates/salt-minion.sh @@ -41,7 +41,6 @@ grains: roles: - kubernetes-pool - kubernetes-pool-vsphere - cbr-cidr: $NODE_IP_RANGE cloud: vsphere EOF diff --git a/cluster/vsphere/util.sh b/cluster/vsphere/util.sh index 0bc488cb351..5fb52a7ec23 100755 --- a/cluster/vsphere/util.sh +++ b/cluster/vsphere/util.sh @@ -262,9 +262,6 @@ function kube-check { done } - - - # # verify if salt master is up. check 30 times and then echo out bad output and return 0 # @@ -306,6 +303,41 @@ function remote-pgrep { done } +# identify the pod routes and route them together. +# +# Assumptions: +# All packages have been installed and kubelet has started running. +# +function setup-pod-routes { + # wait till the kubelet sets up the bridge. + echo "Setting up routes" + for (( i=0; i<${#NODE_NAMES[@]}; i++)); do + printf "check if cbr0 bridge is ready on ${NODE_NAMES[$i]}\n" + kube-check ${KUBE_NODE_IP_ADDRESSES[$i]} 'sudo ifconfig cbr0 | grep -oP "inet addr:\K\S+"' + done + + + # identify the subnet assigned to the node by the kubernetes controller manager. + KUBE_NODE_BRIDGE_NETWORK=() + for (( i=0; i<${#NODE_NAMES[@]}; i++)); do + printf " finding network of cbr0 bridge on node ${NODE_NAMES[$i]}\n" + network=$(kube-ssh ${KUBE_NODE_IP_ADDRESSES[$i]} 'sudo ip route show | grep -E "dev cbr0" | cut -d " " -f1') + KUBE_NODE_BRIDGE_NETWORK+=("${network}") + done + + + # make the pods visible to each other. 
+ local j + for (( i=0; i<${#NODE_NAMES[@]}; i++)); do + printf "setting up routes for ${NODE_NAMES[$i]}" + for (( j=0; j<${#NODE_NAMES[@]}; j++)); do + if [[ $i != $j ]]; then + kube-ssh ${KUBE_NODE_IP_ADDRESSES[$i]} "sudo route add -net ${KUBE_NODE_BRIDGE_NETWORK[$j]} gw ${KUBE_NODE_IP_ADDRESSES[$j]}" + fi + done + done +} + # Instantiate a kubernetes cluster # # Assumed vars: @@ -331,8 +363,10 @@ function kube-up { grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/hostname.sh" echo "cd /home/kube/cache/kubernetes-install" echo "readonly MASTER_NAME='${MASTER_NAME}'" + echo "readonly MASTER_IP_RANGE='${MASTER_IP_RANGE}'" echo "readonly INSTANCE_PREFIX='${INSTANCE_PREFIX}'" echo "readonly NODE_INSTANCE_PREFIX='${INSTANCE_PREFIX}-node'" + echo "readonly NODE_IP_RANGES='${NODE_IP_RANGES}'" echo "readonly SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'" echo "readonly ENABLE_NODE_LOGGING='${ENABLE_NODE_LOGGING:-false}'" echo "readonly LOGGING_DESTINATION='${LOGGING_DESTINATION:-}'" @@ -365,7 +399,7 @@ function kube-up { grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/hostname.sh" echo "KUBE_MASTER=${KUBE_MASTER}" echo "KUBE_MASTER_IP=${KUBE_MASTER_IP}" - echo "NODE_IP_RANGE=${NODE_IP_RANGES[$i]}" + echo "NODE_IP_RANGE=$NODE_IP_RANGES" grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/salt-minion.sh" ) > "${KUBE_TEMP}/node-start-${i}.sh" @@ -427,8 +461,10 @@ function kube-up { done printf " OK\n" done - echo "Kubernetes cluster created." + setup-pod-routes + + echo "Kubernetes cluster created." # TODO use token instead of basic auth export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt" export KUBE_KEY="/tmp/$RANDOM-kubecfg.key" @@ -444,6 +480,7 @@ function kube-up { create-kubeconfig ) + printf "\n" echo echo "Sanity checking cluster..." @@ -451,7 +488,6 @@ function kube-up { sleep 5 # Basic sanity checking - local i for (( i=0; i<${#NODE_NAMES[@]}; i++)); do # Make sure docker is installed kube-ssh "${KUBE_NODE_IP_ADDRESSES[$i]}" which docker > /dev/null || {