Merge pull request #10275 from ddysher/ubuntu-flannel

Ubuntu cluster: run flannel on master
Merged by Alex Robinson on 2015-06-29 16:30:31 -07:00 · commit af533b2b2a
7 changed files with 208 additions and 79 deletions


@@ -21,8 +21,8 @@
set -e

function cleanup {
  # cleanup work
  rm -rf flannel* kubernetes* etcd* binaries
}

trap cleanup SIGHUP SIGINT SIGTERM
@@ -33,9 +33,10 @@ mkdir -p binaries/minion
echo "Download flannel release ..."
FLANNEL_VERSION="0.4.0"
if [ ! -f flannel.tar.gz ] ; then
  curl -L https://github.com/coreos/flannel/releases/download/v${FLANNEL_VERSION}/flannel-${FLANNEL_VERSION}-linux-amd64.tar.gz -o flannel.tar.gz
  tar xzf flannel.tar.gz
fi
cp flannel-${FLANNEL_VERSION}/flanneld binaries/master
cp flannel-${FLANNEL_VERSION}/flanneld binaries/minion
# etcd
@@ -43,8 +44,8 @@ echo "Download etcd release ..."
ETCD_VERSION="v2.0.9"
ETCD="etcd-${ETCD_VERSION}-linux-amd64"
if [ ! -f etcd.tar.gz ] ; then
  curl -L https://github.com/coreos/etcd/releases/download/${ETCD_VERSION}/${ETCD}.tar.gz -o etcd.tar.gz
  tar xzf etcd.tar.gz
fi
cp $ETCD/etcd $ETCD/etcdctl binaries/master
cp $ETCD/etcd $ETCD/etcdctl binaries/minion
@@ -53,8 +54,8 @@ cp $ETCD/etcd $ETCD/etcdctl binaries/minion
echo "Download kubernetes release ..."
K8S_VERSION="v0.18.0"
if [ ! -f kubernetes.tar.gz ] ; then
  curl -L https://github.com/GoogleCloudPlatform/kubernetes/releases/download/${K8S_VERSION}/kubernetes.tar.gz -o kubernetes.tar.gz
  tar xzf kubernetes.tar.gz
fi
pushd kubernetes/server
tar xzf kubernetes-server-linux-amd64.tar.gz
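As a quick sanity check after a run, the staging layout implied by the cp lines above should look roughly like this (a sketch; the kubernetes server binaries are copied by the part of the script not shown in this hunk):

  ls binaries/master    # etcd, etcdctl, flanneld, plus the master daemons
  ls binaries/minion    # etcd, etcdctl, flanneld, plus the minion daemons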


@@ -17,9 +17,9 @@
## Contains configuration values for the Ubuntu cluster

# Define all your cluster nodes; the MASTER node comes first,
# separated by blank spaces, like <user_1@ip_1> <user_2@ip_2> <user_3@ip_3>
export nodes="vcap@10.10.103.250 vcap@10.10.103.162 vcap@10.10.103.223"

# Define the role of each node: a(master), i(minion), or ai(both master and minion); the order must match $nodes
export roles=("ai" "i" "i")
# Define minion numbers
export NUM_MINIONS=${NUM_MINIONS:-3}
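As a usage sketch (user names and IPs illustrative), a cluster with one dedicated master and two minions would be declared as:

  export nodes="vcap@10.0.0.1 vcap@10.0.0.2 vcap@10.0.0.3"
  export roles=("a" "i" "i")
  export NUM_MINIONS=${NUM_MINIONS:-2}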


@@ -22,11 +22,10 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "config-default.sh"

if [ "${ENABLE_CLUSTER_DNS}" == true ]; then
  echo "Deploying DNS on kubernetes"
  sed -e "s/{{ pillar\['dns_replicas'\] }}/${DNS_REPLICAS}/g;s/{{ pillar\['dns_domain'\] }}/${DNS_DOMAIN}/g" skydns-rc.yaml.template > skydns-rc.yaml
  sed -e "s/{{ pillar\['dns_server'\] }}/${DNS_SERVER_IP}/g" skydns-svc.yaml.template > skydns-svc.yaml
  # use kubectl to create skydns rc and service
  "${KUBE_ROOT}/cluster/kubectl.sh" create -f skydns-rc.yaml
  "${KUBE_ROOT}/cluster/kubectl.sh" create -f skydns-svc.yaml
fi
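The sed calls fill the Salt-style pillar placeholders in the shipped templates; for example (value illustrative), with DNS_REPLICAS=1 the template line

  replicas: {{ pillar['dns_replicas'] }}

comes out in the generated skydns-rc.yaml as

  replicas: 1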


@@ -0,0 +1,29 @@
description "Flannel service"
author "@chenxingyu"

# respawn

# start in conjunction with etcd
start on started etcd
stop on stopping etcd

pre-start script
  FLANNEL=/opt/bin/$UPSTART_JOB
  if [ -f /etc/default/$UPSTART_JOB ]; then
    . /etc/default/$UPSTART_JOB
  fi
  if [ -f $FLANNEL ]; then
    exit 0
  fi
  exit 22
end script

script
  # modify these in /etc/default/$UPSTART_JOB (/etc/default/flanneld)
  FLANNEL=/opt/bin/$UPSTART_JOB
  FLANNEL_OPTS=""
  if [ -f /etc/default/$UPSTART_JOB ]; then
    . /etc/default/$UPSTART_JOB
  fi
  exec "$FLANNEL" $FLANNEL_OPTS
end script
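Since the job is chained to etcd's lifecycle, flanneld is not started directly; a usage sketch, assuming the file is installed as /etc/init/flanneld.conf so that $UPSTART_JOB expands to flanneld:

  sudo service etcd start        # flanneld follows via "start on started etcd"
  sudo service flanneld status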


@@ -0,0 +1,99 @@
#!/bin/sh
set -e
### BEGIN INIT INFO
# Provides: flannel
# Required-Start: $etcd
# Required-Stop:
# Should-Start:
# Should-Stop:
# Default-Start:
# Default-Stop:
# Short-Description: Start flannel networking service
# Description:
# https://github.com/coreos/flannel
### END INIT INFO
export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin:/opt/bin
BASE=$(basename $0)
# modify these in /etc/default/$BASE (/etc/default/flanneld)
FLANNEL=/opt/bin/$BASE
# This is the pid file created by start-stop-daemon (--make-pidfile below)
FLANNEL_PIDFILE=/var/run/$BASE.pid
FLANNEL_LOGFILE=/var/log/$BASE.log
FLANNEL_OPTS=""
FLANNEL_DESC="Flannel"
# Get lsb functions
. /lib/lsb/init-functions
if [ -f /etc/default/$BASE ]; then
  . /etc/default/$BASE
fi

# see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it)
if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then
  log_failure_msg "$FLANNEL_DESC is managed via upstart, try using service $BASE $1"
  exit 1
fi

# Check flanneld is present
if [ ! -x $FLANNEL ]; then
  log_failure_msg "$FLANNEL not present or not executable"
  exit 1
fi

fail_unless_root() {
  if [ "$(id -u)" != '0' ]; then
    log_failure_msg "$FLANNEL_DESC must be run as root"
    exit 1
  fi
}
FLANNEL_START="start-stop-daemon \
  --start \
  --background \
  --quiet \
  --exec $FLANNEL \
  --make-pidfile --pidfile $FLANNEL_PIDFILE \
  -- $FLANNEL_OPTS \
  >> $FLANNEL_LOGFILE 2>&1"

FLANNEL_STOP="start-stop-daemon \
  --stop \
  --pidfile $FLANNEL_PIDFILE"
case "$1" in
start)
fail_unless_root
log_begin_msg "Starting $FLANNEL_DESC: $BASE"
$KUBE_APISERVER_START
log_end_msg $?
;;
stop)
fail_unless_root
log_begin_msg "Stopping $FLANNEL_DESC: $BASE"
$KUBE_APISERVER_STOP
log_end_msg $?
;;
restart | force-reload)
fail_unless_root
log_begin_msg "Stopping $FLANNEL_DESC: $BASE"
$KUBE_APISERVER_STOP
$KUBE_APISERVER_START
log_end_msg $?
;;
status)
status_of_proc -p "$FLANNEL_DESC" "$FLANNEL" "$FLANNEL_DESC"
;;
*)
echo "Usage: $0 {start|stop|restart|status}"
exit 1
;;
esac
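On systems without upstart, the script is driven the usual SysV way; a sketch, with extra daemon flags supplied through FLANNEL_OPTS in /etc/default/flanneld:

  sudo /etc/init.d/flanneld start
  sudo /etc/init.d/flanneld status
  tail /var/log/flanneld.log     # the start command appends stdout/stderr here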


@@ -25,11 +25,11 @@ source ~/kube/config-default.sh
attempt=0
while true; do
  /opt/bin/etcdctl get /coreos.com/network/config
  if [[ "$?" == 0 ]]; then
    break
  else
    # is this timeout long enough?
    if (( attempt > 600 )); then
      echo "timeout for waiting network config" > ~/kube/err.log
      exit 2
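The key being polled here is flannel's cluster-wide network config, which the deploy scripts are expected to have seeded into etcd on the master beforehand; for reference (CIDR illustrative), the seeding looks something like:

  /opt/bin/etcdctl set /coreos.com/network/config '{"Network": "172.16.0.0/16"}'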
@@ -49,5 +49,5 @@ sudo brctl delbr docker0
source /run/flannel/subnet.env
echo DOCKER_OPTS=\"-H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock \
  --bip=${FLANNEL_SUBNET} --mtu=${FLANNEL_MTU}\" > /etc/default/docker
sudo service docker restart
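FLANNEL_SUBNET and FLANNEL_MTU come from the subnet.env file flanneld writes for its acquired lease, so the generated /etc/default/docker ends up along these lines (values illustrative):

  DOCKER_OPTS="-H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock --bip=172.16.52.1/24 --mtu=1472"

which pins docker0 to the flannel-assigned subnet before the restart.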


@@ -40,36 +40,36 @@ function setClusterInfo() {
  ii=0
  for i in $nodes
  do
    name="infra"$ii
    nodeIP=${i#*@}

    item="$name=http://$nodeIP:2380"
    if [ "$ii" == 0 ]; then
      CLUSTER=$item
    else
      CLUSTER="$CLUSTER,$item"
    fi
    mm[$nodeIP]=$name

    if [ "${roles[${ii}]}" == "ai" ]; then
      MASTER_IP=$nodeIP
      MASTER=$i
      MINION_IPS="$nodeIP"
    elif [ "${roles[${ii}]}" == "a" ]; then
      MASTER_IP=$nodeIP
      MASTER=$i
    elif [ "${roles[${ii}]}" == "i" ]; then
      if [ -z "${MINION_IPS}" ]; then
        MINION_IPS="$nodeIP"
      else
        MINION_IPS="$MINION_IPS,$nodeIP"
      fi
    else
      echo "unsupported role for ${i}. please check"
      exit 1
    fi

    ((ii=ii+1))
  done
}
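To make the bookkeeping concrete, with the default config-default.sh values (nodes="vcap@10.10.103.250 vcap@10.10.103.162 vcap@10.10.103.223" and roles=("ai" "i" "i")) the function yields:

  CLUSTER=infra0=http://10.10.103.250:2380,infra1=http://10.10.103.162:2380,infra2=http://10.10.103.223:2380
  MASTER=vcap@10.10.103.250
  MASTER_IP=10.10.103.250
  MINION_IPS=10.10.103.250,10.10.103.162,10.10.103.223

Note that the "ai" master now shows up in MINION_IPS as well, which is what lets flannel and the other minion services run on it.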
@@ -93,7 +93,7 @@ function verify-prereqs {
  if [[ "${rc}" -eq 1 ]]; then
    # Try adding one of the default identities, with or without passphrase.
    ssh-add || true
  fi
  # Expect at least one identity to be available.
  if ! ssh-add -L 1> /dev/null 2> /dev/null; then
    echo "Could not find or add an SSH identity."
@@ -122,7 +122,7 @@ function verify-cluster {
  for i in ${nodes}
  do
    if [ "${roles[${ii}]}" == "a" ]; then
      verify-master
    elif [ "${roles[${ii}]}" == "i" ]; then
      verify-minion $i
    elif [ "${roles[${ii}]}" == "ai" ]; then
@@ -153,7 +153,7 @@ function verify-master(){
  validated="0"
  local daemon
  for daemon in "${required_daemon[@]}"; do
    ssh "$MASTER" "pgrep -f ${daemon}" >/dev/null 2>&1 || {
      printf "."
      validated="1"
      sleep 2
@@ -172,7 +172,7 @@ function verify-minion(){
  validated="0"
  local daemon
  for daemon in "${required_daemon[@]}"; do
    ssh "$1" "pgrep -f $daemon" >/dev/null 2>&1 || {
      printf "."
      validated="1"
      sleep 2
@@ -287,7 +287,7 @@ function detect-minions {
  KUBE_MINION_IP_ADDRESSES=()
  setClusterInfo

  ii=0
  for i in ${nodes}
  do
@@ -320,18 +320,18 @@ function kube-up {
  for i in ${nodes}
  do
    {
      if [ "${roles[${ii}]}" == "a" ]; then
        provision-master
      elif [ "${roles[${ii}]}" == "i" ]; then
        provision-minion $i
      elif [ "${roles[${ii}]}" == "ai" ]; then
        provision-masterandminion
      else
        echo "unsupported role for ${i}. please check"
        exit 1
      fi
    }
    ((ii=ii+1))
  done
@@ -353,7 +353,7 @@ function kube-up {
function provision-master() {
  # copy the binaries and scripts to the ~/kube directory on the master
  echo "Deploying master on machine ${MASTER_IP}"
  echo
  ssh $SSH_OPTS $MASTER "mkdir -p ~/kube/default"
  scp -r $SSH_OPTS ubuntu/config-default.sh ubuntu/util.sh ubuntu/master/* ubuntu/binaries/master/ "${MASTER}:~/kube"
@@ -364,23 +364,24 @@ function provision-master() {
  create-kube-apiserver-opts "${SERVICE_CLUSTER_IP_RANGE}"; \
  create-kube-controller-manager-opts "${MINION_IPS}"; \
  create-kube-scheduler-opts; \
  create-flanneld-opts; \
  sudo -p '[sudo] password to copy files and start master: ' cp ~/kube/default/* /etc/default/ && sudo cp ~/kube/init_conf/* /etc/init/ && sudo cp ~/kube/init_scripts/* /etc/init.d/ \
    && sudo mkdir -p /opt/bin/ && sudo cp ~/kube/master/* /opt/bin/; \
  sudo service etcd start;"
}

function provision-minion() {
  # copy the binaries and scripts to the ~/kube directory on the minion
  echo "Deploying minion on machine ${1#*@}"
  echo
  ssh $SSH_OPTS $1 "mkdir -p ~/kube/default"
  scp -r $SSH_OPTS ubuntu/config-default.sh ubuntu/util.sh ubuntu/reconfDocker.sh ubuntu/minion/* ubuntu/binaries/minion "${1}:~/kube"

  # remote login to the minion and use sudo to configure the k8s minion
  ssh $SSH_OPTS -t $1 "source ~/kube/util.sh; \
    setClusterInfo; \
    create-etcd-opts "${mm[${1#*@}]}" "${1#*@}" "${CLUSTER}"; \
    create-kubelet-opts "${1#*@}" "${MASTER_IP}" "${DNS_SERVER_IP}" "${DNS_DOMAIN}"; \
    create-kube-proxy-opts "${MASTER_IP}"; \
    create-flanneld-opts; \
    sudo -p '[sudo] password to copy files and start minion: ' cp ~/kube/default/* /etc/default/ && sudo cp ~/kube/init_conf/* /etc/init/ && sudo cp ~/kube/init_scripts/* /etc/init.d/ \
@@ -392,10 +393,10 @@ function provision-minion() {
function provision-masterandminion() {
  # copy the binaries and scripts to the ~/kube directory on the master
  echo "Deploying master and minion on machine ${MASTER_IP}"
  echo
  ssh $SSH_OPTS $MASTER "mkdir -p ~/kube/default"
  scp -r $SSH_OPTS ubuntu/config-default.sh ubuntu/util.sh ubuntu/master/* ubuntu/reconfDocker.sh ubuntu/minion/* ubuntu/binaries/master/ ubuntu/binaries/minion "${MASTER}:~/kube"

  # remote login to the node and use sudo to configure k8s
  ssh $SSH_OPTS -t $MASTER "source ~/kube/util.sh; \
    setClusterInfo; \
@@ -403,7 +404,7 @@ function provision-masterandminion() {
    create-kube-apiserver-opts "${SERVICE_CLUSTER_IP_RANGE}"; \
    create-kube-controller-manager-opts "${MINION_IPS}"; \
    create-kube-scheduler-opts; \
    create-kubelet-opts "${MASTER_IP}" "${MASTER_IP}" "${DNS_SERVER_IP}" "${DNS_DOMAIN}"; \
    create-kube-proxy-opts "${MASTER_IP}"; \
    create-flanneld-opts; \
    sudo -p '[sudo] password to copy files and start node: ' cp ~/kube/default/* /etc/default/ && sudo cp ~/kube/init_conf/* /etc/init/ && sudo cp ~/kube/init_scripts/* /etc/init.d/ \
@@ -418,10 +419,10 @@ function kube-down {
  source "${KUBE_ROOT}/cluster/ubuntu/${KUBE_CONFIG_FILE-"config-default.sh"}"

  for i in ${nodes}; do
    {
      echo "Cleaning on node ${i#*@}"
      ssh -t $i 'pgrep etcd && sudo -p "[sudo] password for cleaning etcd data: " service etcd stop && sudo rm -rf /infra*'
    }
  done
  wait
}
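For context, these functions are invoked through the generic cluster scripts; the documented flow for the ubuntu provider at the time was along these lines (a sketch, not part of this diff):

  cd kubernetes/cluster
  export KUBERNETES_PROVIDER=ubuntu
  ./kube-up.sh      # drives this provider's verify-prereqs and kube-up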