Merge pull request #1120 from doublerr/rackspace/fix_salt_apiserver

Rackspace: fix apiserver salt config
Joe Beda 2014-09-02 21:31:05 -07:00
commit cbedf9f470
5 changed files with 30 additions and 38 deletions

--- File 1 of 5 ---

@@ -6,6 +6,7 @@ write_files:
        roles:
          - kubernetes-master
        cloud: rackspace
+       etcd_servers: KUBE_MASTER
      path: /etc/salt/minion.d/grains.conf
  - content: |
      auto_accept: True
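The added etcd_servers grain carries a KUBE_MASTER placeholder rather than a real address; the sed pass in cluster/rackspace/util.sh (fourth file below, where the same substitution gains a /g flag) fills it in at boot time. A minimal sketch of that substitution, with an illustrative master name and output path:

    # Sketch only: the MASTER_NAME value and paths are illustrative.
    MASTER_NAME=kubernetes-master
    sed -e "s/KUBE_MASTER/$MASTER_NAME/g" \
        master-cloud-config.yaml > /tmp/master-cloud-config.yaml
    grep etcd_servers /tmp/master-cloud-config.yaml
    # expected (indentation aside): etcd_servers: kubernetes-master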

--- File 2 of 5 ---

@@ -31,6 +31,7 @@ echo "Starting cluster using provider: $KUBERNETES_PROVIDER"
 verify-prereqs
 kube-up
-source $(dirname $0)/validate-cluster.sh
+# skipping validation for now since machines show up as private IPs
+# source $(dirname $0)/validate-cluster.sh
 echo "Done"

--- File 3 of 5 ---

@@ -16,12 +16,10 @@
 # Prepopulate the name of the Master
 mkdir -p /etc/salt/minion.d
-echo "master: $MASTER_NAME" > /etc/salt/minion.d/master.conf
+echo master: $MASTER_NAME > /etc/salt/minion.d/master.conf
 # Turn on debugging for salt-minion
 # echo "DAEMON_ARGS=\"\$DAEMON_ARGS --log-file-level=debug\"" > /etc/default/salt-minion
-MINION_IP=$(ip -f inet a sh dev eth2 | awk -F '[ \t/]+' '/inet/ { print $3 }' )
+MINION_IP=$(ip -f inet a sh dev eth2 | grep -i inet | awk '{print $2}' | cut -d / -f 1)
 # Our minions will have a pool role to distinguish them from the master.
 cat <<EOF >/etc/salt/minion.d/grains.conf
 grains:
@@ -29,22 +27,22 @@ grains:
   - kubernetes-pool
   cbr-cidr: $MINION_IP_RANGE
   minion_ip: $MINION_IP
+  etcd_servers: $MASTER_NAME
 EOF
 #Move all of this to salt
 apt-get update
 apt-get install bridge-utils -y
 brctl addbr cbr0
-ip link set dev cbr0 up
+ip l set dev cbr0 up
 #for loop to add routes of other minions
-for (( i=1; i<=${NUM_MINIONS[@]}; i++)); do
-  ip r a 10.240.$i.0/24 dev cbr0
+for i in `seq 1 $NUM_MINIONS`
+do ip r a 10.240.$i.0/24 dev cbr0
 done
-ip link add vxlan42 type vxlan id 42 group 239.0.0.42 dev eth2
+ip l a vxlan42 type vxlan id 42 group 239.0.0.42 dev eth2
 brctl addif cbr0 vxlan42
 # Install Salt
 #
 # We specify -X to avoid a race condition that can cause minion failure to
 # install. See https://github.com/saltstack/salt-bootstrap/issues/270
 curl -L http://bootstrap.saltstack.com | sh -s -- -X
-ip link set vxlan42 up
+ip l set vxlan42 up
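This minion script wires each node into a shared segment: it creates the cbr0 bridge, adds a 10.240.<i>.0/24 route per minion, and attaches a VXLAN interface (VNI 42, multicast group 239.0.0.42) carried over eth2. A few read-only commands to verify the result on a booted minion (a sketch, not part of the PR):

    brctl show cbr0          # vxlan42 should be listed as a port of the bridge
    ip -d link show vxlan42  # should report: vxlan id 42 group 239.0.0.42 dev eth2
    ip route show dev cbr0   # one 10.240.<i>.0/24 route per minion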

--- File 4 of 5 ---

@@ -71,7 +71,7 @@ rax-boot-master() {
   ) > ${KUBE_TEMP}/masterStart.sh
   # Copy cloud-config to KUBE_TEMP and work some sed magic
-  sed -e "s/KUBE_MASTER/$MASTER_NAME/" \
+  sed -e "s/KUBE_MASTER/$MASTER_NAME/g" \
     -e "s/MASTER_HTPASSWD/$HTPASSWD/" \
     $(dirname $0)/cloud-config/master-cloud-config.yaml > $KUBE_TEMP/master-cloud-config.yaml
@@ -197,8 +197,8 @@ kube-up() {
   rax-boot-master
   # a bit of a hack to wait until the master has an IP from the extra network
-  echo "cluster/rackspace/util.sh: sleeping 30 seconds"
-  sleep 30
+  echo "cluster/rackspace/util.sh: sleeping 35 seconds"
+  sleep 35
   detect-master-nova-net $NOVA_NETWORK_LABEL
   rax-boot-minions
@@ -213,7 +213,7 @@ kube-up() {
     exit 2
   fi
-  detect-master > /dev/null
+  detect-master
   echo "Waiting for cluster initialization."
   echo
@@ -223,11 +223,11 @@ kube-up() {
   echo
   #This will fail until apiserver salt is updated
-  #until $(curl --insecure --user ${user}:${passwd} --max-time 5 \
-  #  --fail --output /dev/null --silent https://${KUBE_MASTER_IP}/api/v1beta1/pods); do
-  #  printf "."
-  #  sleep 2
-  #done
+  until $(curl --insecure --user ${user}:${passwd} --max-time 5 \
+    --fail --output /dev/null --silent https://${KUBE_MASTER_IP}/api/v1beta1/pods); do
+    printf "."
+    sleep 2
+  done
   echo "Kubernetes cluster created."
   echo "Sanity checking cluster..."
@@ -238,25 +238,8 @@ kube-up() {
   set +e
   sleep 45
-  #detect-minions > /dev/null
   detect-minions
-  #This will fail until apiserver salt is updated
-  # Basic sanity checking
-  #for (( i=0; i<${#KUBE_MINION_IP_ADDRESSES[@]}; i++)); do
-  #
-  #  # Make sure the kubelet is running
-  #  if [ "$(curl --insecure --user ${user}:${passwd} https://${KUBE_MASTER_IP}/proxy/minion/${KUBE_MINION_IP_ADDRESSES[$i]}/healthz)" != "ok" ]; then
-  #    echo "Kubelet failed to install on ${KUBE_MINION_IP_ADDRESSES[$i]} your cluster is unlikely to work correctly"
-  #    echo "Please run ./cluster/kube-down.sh and re-create the cluster. (sorry!)"
-  #    exit 1
-  #  else
-  #    echo "Kubelet is successfully installed on ${MINION_NAMES[$i]}"
-  #
-  #  fi
-  #
-  #done
   echo "All minions may not be online yet, this is okay."
   echo
   echo "Kubernetes cluster is running. Access the master at:"

--- File 5 of 5 ---

@@ -28,7 +28,7 @@
 MACHINES="{{ salt['mine.get']('roles:kubernetes-pool', 'grains.items', expr_form='grain').values()|join(',', attribute='hostnamef') }}"
 {% set machines = "-machines $MACHINES" %}
 {% endif %}
-{% if grains.cloud == 'rackspace' or grains.cloud == 'vsphere' %}
+{% if grains.cloud == 'vsphere' %}
 # Collect IPs of minions as machines list.
 #
 # Use a bash array to build the value we need. Jinja 2.7 does support a 'map'
@@ -41,6 +41,15 @@
 {% set machines = "-machines=$(echo ${MACHINE_IPS[@]} | xargs -n1 echo | paste -sd,)" %}
 {% set minion_regexp = "" %}
 {% endif %}
+{%- if grains.cloud == 'rackspace' %}
+{%- set ip_addrs = [] %}
+{%- for addrs in salt['mine.get']('roles:kubernetes-pool', 'grains.items', expr_form='grain').values() %}
+{%- do ip_addrs.append(addrs.ip_interfaces.eth2[0]) %}
+{%- endfor %}
+MACHINES="{{ip_addrs|join(',')}}"
+{%- set machines = "-machines=$MACHINES" %}
+{%- set minion_regexp = "" %}
+{% endif %}
 {% endif %}
 DAEMON_ARGS="{{daemon_args}} {{address}} {{machines}} {{etcd_servers}} {{ minion_regexp }} {{ cloud_provider }}"
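The new Rackspace branch resolves the machines list at template-render time: it asks the Salt mine for every minion carrying the kubernetes-pool role and takes the first address on each minion's eth2 interface from the ip_interfaces grain, instead of collecting IPs with the runtime bash array still used for vsphere. For two minions whose eth2 addresses are, say, 10.240.1.2 and 10.240.2.2 (illustrative values), the template renders roughly to:

    MACHINES="10.240.1.2,10.240.2.2"
    # and DAEMON_ARGS then receives: -machines=$MACHINES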