From 0b50244c72561dc181ef3242eda5a31129822956 Mon Sep 17 00:00:00 2001 From: Ryan Richard Date: Thu, 28 Aug 2014 20:56:20 -0500 Subject: [PATCH 1/3] workaround lack of jinja2.7 support --- cluster/saltbase/salt/apiserver/default | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/cluster/saltbase/salt/apiserver/default b/cluster/saltbase/salt/apiserver/default index 30b359bf499..43fad7b141d 100644 --- a/cluster/saltbase/salt/apiserver/default +++ b/cluster/saltbase/salt/apiserver/default @@ -28,7 +28,7 @@ MACHINES="{{ salt['mine.get']('roles:kubernetes-pool', 'grains.items', expr_form='grain').values()|join(',', attribute='hostnamef') }}" {% set machines = "-machines $MACHINES" %} {% endif %} -{% if grains.cloud == 'rackspace' or grains.cloud == 'vsphere' %} +{% if grains.cloud == 'vsphere' %} # Collect IPs of minions as machines list. # # Use a bash array to build the value we need. Jinja 2.7 does support a 'map' @@ -41,6 +41,15 @@ {% set machines = "-machines=$(echo ${MACHINE_IPS[@]} | xargs -n1 echo | paste -sd,)" %} {% set minion_regexp = "" %} {% endif %} +{%- if grains.cloud == 'rackspace' %} + {%- set ip_addrs = [] %} + {%- for addrs in salt['mine.get']('roles:kubernetes-pool', 'grains.items', expr_form='grain').values() %} + {%- do ip_addrs.append(addrs.ip_interfaces.eth2[0]) %} + {%- endfor %} + MACHINES="{{ip_addrs|join(',')}}" + {%- set machines = "-machines=$MACHINES" %} + {%- set minion_regexp = "" %} +{% endif %} {% endif %} DAEMON_ARGS="{{daemon_args}} {{address}} {{machines}} {{etcd_servers}} {{ minion_regexp }} {{ cloud_provider }}" From 0fed66a6674b15c9049996543434dc268e0bfc83 Mon Sep 17 00:00:00 2001 From: Ryan Richard Date: Fri, 29 Aug 2014 17:40:25 -0500 Subject: [PATCH 2/3] add sanity checking steps and fix KUBE_MASTER_IP --- cluster/rackspace/kube-up.sh | 3 ++- cluster/rackspace/util.sh | 35 +++++++++-------------------------- 2 files changed, 11 insertions(+), 27 deletions(-) diff --git 
a/cluster/rackspace/kube-up.sh b/cluster/rackspace/kube-up.sh index 10f7af28313..f3436059783 100755 --- a/cluster/rackspace/kube-up.sh +++ b/cluster/rackspace/kube-up.sh @@ -31,6 +31,7 @@ echo "Starting cluster using provider: $KUBERNETES_PROVIDER" verify-prereqs kube-up -source $(dirname $0)/validate-cluster.sh +# skipping validation for now since machines show up as private IPs +# source $(dirname $0)/validate-cluster.sh echo "Done" diff --git a/cluster/rackspace/util.sh b/cluster/rackspace/util.sh index a0f34203cee..3440db0a5b0 100644 --- a/cluster/rackspace/util.sh +++ b/cluster/rackspace/util.sh @@ -71,7 +71,7 @@ rax-boot-master() { ) > ${KUBE_TEMP}/masterStart.sh # Copy cloud-config to KUBE_TEMP and work some sed magic - sed -e "s/KUBE_MASTER/$MASTER_NAME/" \ + sed -e "s/KUBE_MASTER/$MASTER_NAME/g" \ -e "s/MASTER_HTPASSWD/$HTPASSWD/" \ $(dirname $0)/cloud-config/master-cloud-config.yaml > $KUBE_TEMP/master-cloud-config.yaml @@ -197,8 +197,8 @@ kube-up() { rax-boot-master # a bit of a hack to wait until master is has an IP from the extra network - echo "cluster/rackspace/util.sh: sleeping 30 seconds" - sleep 30 + echo "cluster/rackspace/util.sh: sleeping 35 seconds" + sleep 35 detect-master-nova-net $NOVA_NETWORK_LABEL rax-boot-minions @@ -213,7 +213,7 @@ kube-up() { exit 2 fi - detect-master > /dev/null + detect-master echo "Waiting for cluster initialization." echo @@ -223,11 +223,11 @@ kube-up() { echo #This will fail until apiserver salt is updated - #until $(curl --insecure --user ${user}:${passwd} --max-time 5 \ - # --fail --output /dev/null --silent https://${KUBE_MASTER_IP}/api/v1beta1/pods); do - # printf "." - # sleep 2 - #done + until $(curl --insecure --user ${user}:${passwd} --max-time 5 \ + --fail --output /dev/null --silent https://${KUBE_MASTER_IP}/api/v1beta1/pods); do + printf "." + sleep 2 + done echo "Kubernetes cluster created." echo "Sanity checking cluster..." 
@@ -238,25 +238,8 @@ kube-up() { set +e sleep 45 - #detect-minions > /dev/null detect-minions - - #This will fail until apiserver salt is updated - # Basic sanity checking - #for (( i=0; i<${#KUBE_MINION_IP_ADDRESSES[@]}; i++)); do - # - # # Make sure the kubelet is running - # if [ "$(curl --insecure --user ${user}:${passwd} https://${KUBE_MASTER_IP}/proxy/minion/${KUBE_MINION_IP_ADDRESSES[$i]}/healthz)" != "ok" ]; then - # echo "Kubelet failed to install on ${KUBE_MINION_IP_ADDRESSES[$i]} your cluster is unlikely to work correctly" - # echo "Please run ./cluster/kube-down.sh and re-create the cluster. (sorry!)" - # exit 1 - # else - # echo "Kubelet is successfully installed on ${MINION_NAMES[$i]}" - # - # fi - # - #done echo "All minions may not be online yet, this is okay." echo echo "Kubernetes cluster is running. Access the master at:" From 58b9b2fb88c5f951c96b27048d273e20d4d077cf Mon Sep 17 00:00:00 2001 From: Ryan Richard Date: Fri, 29 Aug 2014 17:41:39 -0500 Subject: [PATCH 3/3] switch kubelets to connect to etcd on cloud network Previously we were connecting across the public interfaces. This will connect across eth2 and send communication over a private network. 
--- .../cloud-config/master-cloud-config.yaml | 1 + cluster/rackspace/templates/salt-minion.sh | 18 ++++++++---------- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/cluster/rackspace/cloud-config/master-cloud-config.yaml b/cluster/rackspace/cloud-config/master-cloud-config.yaml index 8f1b7334c0f..cec8bf2e13a 100644 --- a/cluster/rackspace/cloud-config/master-cloud-config.yaml +++ b/cluster/rackspace/cloud-config/master-cloud-config.yaml @@ -6,6 +6,7 @@ write_files: roles: - kubernetes-master cloud: rackspace + etcd_servers: KUBE_MASTER path: /etc/salt/minion.d/grains.conf - content: | auto_accept: True diff --git a/cluster/rackspace/templates/salt-minion.sh b/cluster/rackspace/templates/salt-minion.sh index ad5e4a5d9f2..78daf184c9f 100644 --- a/cluster/rackspace/templates/salt-minion.sh +++ b/cluster/rackspace/templates/salt-minion.sh @@ -16,12 +16,10 @@ # Prepopulate the name of the Master mkdir -p /etc/salt/minion.d -echo "master: $MASTER_NAME" > /etc/salt/minion.d/master.conf - +echo master: $MASTER_NAME > /etc/salt/minion.d/master.conf # Turn on debugging for salt-minion # echo "DAEMON_ARGS=\"\$DAEMON_ARGS --log-file-level=debug\"" > /etc/default/salt-minion - -MINION_IP=$(ip -f inet a sh dev eth2 | grep -i inet | awk '{print $2}' | cut -d / -f 1) +MINION_IP=$(ip -f inet a sh dev eth2 | awk -F '[ \t/]+' '/inet/ { print $3 }' ) # Our minions will have a pool role to distinguish them from the master. 
cat </etc/salt/minion.d/grains.conf grains: @@ -29,22 +27,22 @@ grains: - kubernetes-pool cbr-cidr: $MINION_IP_RANGE minion_ip: $MINION_IP + etcd_servers: $MASTER_NAME EOF - #Move all of this to salt apt-get update apt-get install bridge-utils -y brctl addbr cbr0 -ip link set dev cbr0 up +ip l set dev cbr0 up #for loop to add routes of other minions -for (( i=1; i<=${NUM_MINIONS[@]}; i++)); do - ip r a 10.240.$i.0/24 dev cbr0 +for i in `seq 1 $NUM_MINIONS` +do ip r a 10.240.$i.0/24 dev cbr0 done -ip link add vxlan42 type vxlan id 42 group 239.0.0.42 dev eth2 +ip l a vxlan42 type vxlan id 42 group 239.0.0.42 dev eth2 brctl addif cbr0 vxlan42 # Install Salt # # We specify -X to avoid a race condition that can cause minion failure to # install. See https://github.com/saltstack/salt-bootstrap/issues/270 curl -L http://bootstrap.saltstack.com | sh -s -- -X -ip link set vxlan42 up +ip l set vxlan42 up \ No newline at end of file