deprecate etcd on node

This commit is contained in:
He Simei 2015-08-27 21:53:09 +08:00
parent e6e69e31ec
commit a4fc22c849
9 changed files with 89 additions and 209 deletions

View File

@ -51,7 +51,6 @@ if [ ! -f etcd.tar.gz ] ; then
tar xzf etcd.tar.gz tar xzf etcd.tar.gz
fi fi
cp $ETCD/etcd $ETCD/etcdctl binaries/master cp $ETCD/etcd $ETCD/etcdctl binaries/master
cp $ETCD/etcd $ETCD/etcdctl binaries/minion
# k8s # k8s
echo "Download kubernetes release ..." echo "Download kubernetes release ..."

View File

@ -1,31 +0,0 @@
description "Etcd service"
author "@jainvipin"
start on (net-device-up
and local-filesystems
and runlevel [2345])
respawn
pre-start script
# see also https://github.com/jainvipin/kubernetes-ubuntu-start
ETCD=/opt/bin/$UPSTART_JOB
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
if [ -f $ETCD ]; then
exit 0
fi
echo "$ETCD binary not found, exiting"
exit 22
end script
script
# modify these in /etc/default/$UPSTART_JOB (/etc/default/docker)
ETCD=/opt/bin/$UPSTART_JOB
ETCD_OPTS=""
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
exec "$ETCD" $ETCD_OPTS
end script

View File

@ -3,10 +3,9 @@ author "@chenxingyu"
respawn respawn
# start in conjunction with etcd start on (net-device-up
start on started etcd and local-filesystems
stop on stopping etcd and runlevel [2345])
pre-start script pre-start script
FLANNEL=/opt/bin/$UPSTART_JOB FLANNEL=/opt/bin/$UPSTART_JOB
if [ -f /etc/default/$UPSTART_JOB ]; then if [ -f /etc/default/$UPSTART_JOB ]; then

View File

@ -3,9 +3,9 @@ author "@jainvipin"
respawn respawn
# start in conjunction with etcd # start in conjunction with flanneld
start on started etcd start on started flanneld
stop on stopping etcd stop on stopping flanneld
limit nofile 65536 65536 limit nofile 65536 65536

View File

@ -3,9 +3,9 @@ author "@jainvipin"
respawn respawn
# start in conjunction with etcd # start in conjunction with flanneld
start on started etcd start on started flanneld
stop on stopping etcd stop on stopping flanneld
pre-start script pre-start script
# see also https://github.com/jainvipin/kubernetes-ubuntu-start # see also https://github.com/jainvipin/kubernetes-ubuntu-start

View File

@ -1,100 +0,0 @@
#!/bin/sh
set -e
### BEGIN INIT INFO
# Provides: etcd
# Required-Start: $docker
# Required-Stop:
# Should-Start:
# Should-Stop:
# Default-Start:
# Default-Stop:
# Short-Description: Start distributed key/value pair service
# Description:
# http://www.github.com/coreos/etcd
### END INIT INFO
export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin:/opt/bin:
BASE=$(basename $0)
# modify these in /etc/default/$BASE (/etc/default/etcd)
ETCD=/opt/bin/$BASE
# This is the pid file managed by etcd itself
ETCD_PIDFILE=/var/run/$BASE.pid
ETCD_LOGFILE=/var/log/$BASE.log
ETCD_OPTS=""
ETCD_DESC="Etcd"
# Get lsb functions
. /lib/lsb/init-functions
if [ -f /etc/default/$BASE ]; then
. /etc/default/$BASE
fi
# see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it)
if false && [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then
log_failure_msg "$ETCD_DESC is managed via upstart, try using service $BASE $1"
exit 1
fi
# Check etcd is present
if [ ! -x $ETCD ]; then
log_failure_msg "$ETCD not present or not executable"
exit 1
fi
fail_unless_root() {
if [ "$(id -u)" != '0' ]; then
log_failure_msg "$ETCD_DESC must be run as root"
exit 1
fi
}
ETCD_START="start-stop-daemon \
--start \
--background \
--quiet \
--exec $ETCD \
--make-pidfile \
--pidfile $ETCD_PIDFILE \
-- $ETCD_OPTS \
>> $ETCD_LOGFILE 2>&1"
ETCD_STOP="start-stop-daemon \
--stop \
--pidfile $ETCD_PIDFILE"
case "$1" in
start)
fail_unless_root
log_begin_msg "Starting $ETCD_DESC: $BASE"
$ETCD_START
log_end_msg $?
;;
stop)
fail_unless_root
log_begin_msg "Stopping $ETCD_DESC: $BASE"
$ETCD_STOP
log_end_msg $?
;;
restart | force-reload)
fail_unless_root
log_begin_msg "Restarting $ETCD_DESC: $BASE"
$ETCD_STOP
$ETCD_START
log_end_msg $?
;;
status)
status_of_proc -p "$ETCD_PIDFILE" "$ETCD" "$ETCD_DESC"
;;
*)
echo "Usage: $0 {start|stop|restart|status}"
exit 1
;;
esac

View File

@ -24,7 +24,7 @@ KUBELET=/opt/bin/$BASE
KUBELET_PIDFILE=/var/run/$BASE.pid KUBELET_PIDFILE=/var/run/$BASE.pid
KUBELET_LOGFILE=/var/log/$BASE.log KUBELET_LOGFILE=/var/log/$BASE.log
KUBELET_OPTS="" KUBELET_OPTS=""
KUBELET_DESC="Kube-Apiserver" KUBELET_DESC="Kubelet"
# Get lsb functions # Get lsb functions
. /lib/lsb/init-functions . /lib/lsb/init-functions

View File

@ -21,33 +21,48 @@ if [ "$(id -u)" != "0" ]; then
exit 1 exit 1
fi fi
source ~/kube/config-default.sh
attempt=0 function config_etcd {
while true; do
/opt/bin/etcdctl get /coreos.com/network/config source ~/kube/config-default.sh
if [[ "$?" == 0 ]]; then
break attempt=0
else while true; do
# enough timeout?? /opt/bin/etcdctl get /coreos.com/network/config
if (( attempt > 600 )); then if [[ "$?" == 0 ]]; then
echo "timeout for waiting network config" > ~/kube/err.log break
exit 2 else
# enough timeout??
if (( attempt > 600 )); then
echo "timeout for waiting network config" > ~/kube/err.log
exit 2
fi
/opt/bin/etcdctl mk /coreos.com/network/config "{\"Network\":\"${FLANNEL_NET}\"}"
attempt=$((attempt+1))
sleep 3
fi fi
done
}
/opt/bin/etcdctl mk /coreos.com/network/config "{\"Network\":\"${FLANNEL_NET}\"}" function restart_docker {
attempt=$((attempt+1)) #wait some secs for /run/flannel/subnet.env ready
sleep 3 sleep 15
fi sudo ip link set dev docker0 down
done sudo brctl delbr docker0
#wait some secs for /run/flannel/subnet.env ready source /run/flannel/subnet.env
sleep 15
sudo ip link set dev docker0 down
sudo brctl delbr docker0
source /run/flannel/subnet.env echo DOCKER_OPTS=\"${DOCKER_OPTS} -H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock \
--bip=${FLANNEL_SUBNET} --mtu=${FLANNEL_MTU}\" > /etc/default/docker
sudo service docker restart
}
echo DOCKER_OPTS=\"${DOCKER_OPTS} -H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock \ if [[ $1 == "i" ]]; then
--bip=${FLANNEL_SUBNET} --mtu=${FLANNEL_MTU}\" > /etc/default/docker restart_docker
sudo service docker restart elif [[ $1 == "ai" ]]; then
config_etcd
restart_docker
elif [[ $1 == "a" ]]; then
config_etcd
fi

View File

@ -21,7 +21,6 @@ SSH_OPTS="-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oLogLevel=E
# use an array to record name and ip # use an array to record name and ip
declare -A mm declare -A mm
CLUSTER=""
MASTER="" MASTER=""
MASTER_IP="" MASTER_IP=""
MINION_IPS="" MINION_IPS=""
@ -44,28 +43,18 @@ function setClusterInfo() {
MINION_IPS="" MINION_IPS=""
ii=0 ii=0
for i in $nodes for i in $nodes; do
do
name="infra"$ii
nodeIP=${i#*@} nodeIP=${i#*@}
item="$name=http://$nodeIP:2380" if [[ "${roles[${ii}]}" == "ai" ]]; then
if [ "$ii" == 0 ]; then
CLUSTER=$item
else
CLUSTER="$CLUSTER,$item"
fi
mm[$nodeIP]=$name
if [ "${roles[${ii}]}" == "ai" ]; then
MASTER_IP=$nodeIP MASTER_IP=$nodeIP
MASTER=$i MASTER=$i
MINION_IPS="$nodeIP" MINION_IPS="$nodeIP"
elif [ "${roles[${ii}]}" == "a" ]; then elif [[ "${roles[${ii}]}" == "a" ]]; then
MASTER_IP=$nodeIP MASTER_IP=$nodeIP
MASTER=$i MASTER=$i
elif [ "${roles[${ii}]}" == "i" ]; then elif [[ "${roles[${ii}]}" == "i" ]]; then
if [ -z "${MINION_IPS}" ];then if [[ -z "${MINION_IPS}" ]];then
MINION_IPS="$nodeIP" MINION_IPS="$nodeIP"
else else
MINION_IPS="$MINION_IPS,$nodeIP" MINION_IPS="$MINION_IPS,$nodeIP"
@ -191,12 +180,9 @@ function verify-minion(){
function create-etcd-opts(){ function create-etcd-opts(){
cat <<EOF > ~/kube/default/etcd cat <<EOF > ~/kube/default/etcd
ETCD_OPTS="-name $1 \ ETCD_OPTS="-name infra
-initial-advertise-peer-urls http://$2:2380 \ -listen-client-urls http://0.0.0.0:4001 \
-listen-peer-urls http://$2:2380 \ -advertise-client-urls http://127.0.0.1:4001"
-initial-cluster-token etcd-cluster-1 \
-initial-cluster $3 \
-initial-cluster-state new"
EOF EOF
} }
@ -256,7 +242,7 @@ EOF
function create-flanneld-opts(){ function create-flanneld-opts(){
cat <<EOF > ~/kube/default/flanneld cat <<EOF > ~/kube/default/flanneld
FLANNEL_OPTS="" FLANNEL_OPTS="--etcd-endpoints=http://${1}:4001"
EOF EOF
} }
@ -324,10 +310,10 @@ function kube-up() {
{ {
if [ "${roles[${ii}]}" == "a" ]; then if [ "${roles[${ii}]}" == "a" ]; then
provision-master provision-master
elif [ "${roles[${ii}]}" == "i" ]; then
provision-minion $i
elif [ "${roles[${ii}]}" == "ai" ]; then elif [ "${roles[${ii}]}" == "ai" ]; then
provision-masterandminion provision-masterandminion
elif [ "${roles[${ii}]}" == "i" ]; then
provision-minion $i
else else
echo "unsupported role for ${i}. please check" echo "unsupported role for ${i}. please check"
exit 1 exit 1
@ -356,21 +342,22 @@ function provision-master() {
echo "Deploying master on machine ${MASTER_IP}" echo "Deploying master on machine ${MASTER_IP}"
echo echo
ssh $SSH_OPTS $MASTER "mkdir -p ~/kube/default" ssh $SSH_OPTS $MASTER "mkdir -p ~/kube/default"
scp -r $SSH_OPTS saltbase/salt/generate-cert/make-ca-cert.sh ubuntu/config-default.sh ubuntu/util.sh ubuntu/master/* ubuntu/binaries/master/ "${MASTER}:~/kube" scp -r $SSH_OPTS saltbase/salt/generate-cert/make-ca-cert.sh ubuntu/reconfDocker.sh ubuntu/config-default.sh ubuntu/util.sh ubuntu/master/* ubuntu/binaries/master/ "${MASTER}:~/kube"
# remote login to MASTER and use sudo to configure k8s master # remote login to MASTER and use sudo to configure k8s master
ssh $SSH_OPTS -t $MASTER "source ~/kube/util.sh; \ ssh $SSH_OPTS -t $MASTER "source ~/kube/util.sh; \
setClusterInfo; \ setClusterInfo; \
create-etcd-opts "${mm[${MASTER_IP}]}" "${MASTER_IP}" "${CLUSTER}"; \ create-etcd-opts; \
create-kube-apiserver-opts "${SERVICE_CLUSTER_IP_RANGE}" "${ADMISSION_CONTROL}" "${SERVICE_NODE_PORT_RANGE}"; \ create-kube-apiserver-opts "${SERVICE_CLUSTER_IP_RANGE}" "${ADMISSION_CONTROL}" "${SERVICE_NODE_PORT_RANGE}"; \
create-kube-controller-manager-opts "${MINION_IPS}"; \ create-kube-controller-manager-opts "${MINION_IPS}"; \
create-kube-scheduler-opts; \ create-kube-scheduler-opts; \
create-flanneld-opts; \ create-flanneld-opts "127.0.0.1"; \
sudo -p '[sudo] password to copy files and start master: ' cp ~/kube/default/* /etc/default/ && sudo cp ~/kube/init_conf/* /etc/init/ && sudo cp ~/kube/init_scripts/* /etc/init.d/ ;\ sudo -p '[sudo] password to copy files and start master: ' cp ~/kube/default/* /etc/default/ && sudo cp ~/kube/init_conf/* /etc/init/ && sudo cp ~/kube/init_scripts/* /etc/init.d/ ;\
sudo groupadd -f -r kube-cert; \ sudo groupadd -f -r kube-cert; \
sudo ~/kube/make-ca-cert.sh ${MASTER_IP} IP:${MASTER_IP},IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local; \ sudo ~/kube/make-ca-cert.sh ${MASTER_IP} IP:${MASTER_IP},IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local; \
sudo mkdir -p /opt/bin/ && sudo cp ~/kube/master/* /opt/bin/; \ sudo mkdir -p /opt/bin/ && sudo cp ~/kube/master/* /opt/bin/; \
sudo service etcd start;" sudo service etcd start; \
sudo FLANNEL_NET=${FLANNEL_NET} -b ~/kube/reconfDocker.sh "a";"
} }
function provision-minion() { function provision-minion() {
@ -383,14 +370,13 @@ function provision-minion() {
# remote login to MASTER and use sudo to configure k8s master # remote login to MASTER and use sudo to configure k8s master
ssh $SSH_OPTS -t $1 "source ~/kube/util.sh; \ ssh $SSH_OPTS -t $1 "source ~/kube/util.sh; \
setClusterInfo; \ setClusterInfo; \
create-etcd-opts "${mm[${1#*@}]}" "${1#*@}" "${CLUSTER}"; \ create-kubelet-opts "${1#*@}" "${MASTER_IP}" "${DNS_SERVER_IP}" "${DNS_DOMAIN}"; \
create-kubelet-opts "${1#*@}" "${MASTER_IP}" "${DNS_SERVER_IP}" "${DNS_DOMAIN}";
create-kube-proxy-opts "${MASTER_IP}"; \ create-kube-proxy-opts "${MASTER_IP}"; \
create-flanneld-opts; \ create-flanneld-opts "${MASTER_IP}"; \
sudo -p '[sudo] password to copy files and start minion: ' cp ~/kube/default/* /etc/default/ && sudo cp ~/kube/init_conf/* /etc/init/ && sudo cp ~/kube/init_scripts/* /etc/init.d/ \ sudo -p '[sudo] password to copy files and start minion: ' cp ~/kube/default/* /etc/default/ && sudo cp ~/kube/init_conf/* /etc/init/ && sudo cp ~/kube/init_scripts/* /etc/init.d/ \
&& sudo mkdir -p /opt/bin/ && sudo cp ~/kube/minion/* /opt/bin; \ && sudo mkdir -p /opt/bin/ && sudo cp ~/kube/minion/* /opt/bin; \
sudo service etcd start; \ sudo service flanneld start; \
sudo FLANNEL_NET=${FLANNEL_NET} -b ~/kube/reconfDocker.sh" sudo -b ~/kube/reconfDocker.sh "i";"
} }
function provision-masterandminion() { function provision-masterandminion() {
@ -398,24 +384,25 @@ function provision-masterandminion() {
echo "Deploying master and minion on machine ${MASTER_IP}" echo "Deploying master and minion on machine ${MASTER_IP}"
echo echo
ssh $SSH_OPTS $MASTER "mkdir -p ~/kube/default" ssh $SSH_OPTS $MASTER "mkdir -p ~/kube/default"
scp -r $SSH_OPTS saltbase/salt/generate-cert/make-ca-cert.sh ubuntu/config-default.sh ubuntu/util.sh ubuntu/master/* ubuntu/reconfDocker.sh ubuntu/minion/* ubuntu/binaries/master/ ubuntu/binaries/minion "${MASTER}:~/kube" # scp order matters
scp -r $SSH_OPTS saltbase/salt/generate-cert/make-ca-cert.sh ubuntu/config-default.sh ubuntu/util.sh ubuntu/minion/* ubuntu/master/* ubuntu/reconfDocker.sh ubuntu/binaries/master/ ubuntu/binaries/minion "${MASTER}:~/kube"
# remote login to the node and use sudo to configure k8s # remote login to the node and use sudo to configure k8s
ssh $SSH_OPTS -t $MASTER "source ~/kube/util.sh; \ ssh $SSH_OPTS -t $MASTER "source ~/kube/util.sh; \
setClusterInfo; \ setClusterInfo; \
create-etcd-opts "${mm[${MASTER_IP}]}" "${MASTER_IP}" "${CLUSTER}"; \ create-etcd-opts; \
create-kube-apiserver-opts "${SERVICE_CLUSTER_IP_RANGE}" "${ADMISSION_CONTROL}" "${SERVICE_NODE_PORT_RANGE}"; \ create-kube-apiserver-opts "${SERVICE_CLUSTER_IP_RANGE}" "${ADMISSION_CONTROL}" "${SERVICE_NODE_PORT_RANGE}"; \
create-kube-controller-manager-opts "${MINION_IPS}"; \ create-kube-controller-manager-opts "${MINION_IPS}"; \
create-kube-scheduler-opts; \ create-kube-scheduler-opts; \
create-kubelet-opts "${MASTER_IP}" "${MASTER_IP}" "${DNS_SERVER_IP}" "${DNS_DOMAIN}"; create-kubelet-opts "${MASTER_IP}" "${MASTER_IP}" "${DNS_SERVER_IP}" "${DNS_DOMAIN}";
create-kube-proxy-opts "${MASTER_IP}";\ create-kube-proxy-opts "${MASTER_IP}";\
create-flanneld-opts; \ create-flanneld-opts "127.0.0.1"; \
sudo -p '[sudo] password to copy files and start node: ' cp ~/kube/default/* /etc/default/ && sudo cp ~/kube/init_conf/* /etc/init/ && sudo cp ~/kube/init_scripts/* /etc/init.d/ ; \ sudo -p '[sudo] password to copy files and start node: ' cp ~/kube/default/* /etc/default/ && sudo cp ~/kube/init_conf/* /etc/init/ && sudo cp ~/kube/init_scripts/* /etc/init.d/ ; \
sudo groupadd -f -r kube-cert; \ sudo groupadd -f -r kube-cert; \
sudo ~/kube/make-ca-cert.sh ${MASTER_IP} IP:${MASTER_IP},IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local; \ sudo ~/kube/make-ca-cert.sh ${MASTER_IP} IP:${MASTER_IP},IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local; \
sudo mkdir -p /opt/bin/ && sudo cp ~/kube/master/* /opt/bin/ && sudo cp ~/kube/minion/* /opt/bin/; \ sudo mkdir -p /opt/bin/ && sudo cp ~/kube/master/* /opt/bin/ && sudo cp ~/kube/minion/* /opt/bin/; \
sudo service etcd start; \ sudo service etcd start; \
sudo FLANNEL_NET=${FLANNEL_NET} -b ~/kube/reconfDocker.sh" sudo FLANNEL_NET=${FLANNEL_NET} -b ~/kube/reconfDocker.sh "ai";"
} }
# Delete a kubernetes cluster # Delete a kubernetes cluster
@ -423,15 +410,26 @@ function kube-down {
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/ubuntu/${KUBE_CONFIG_FILE-"config-default.sh"}" source "${KUBE_ROOT}/cluster/ubuntu/${KUBE_CONFIG_FILE-"config-default.sh"}"
ii=0
for i in ${nodes}; do for i in ${nodes}; do
{ {
echo "Cleaning on node ${i#*@}" echo "Cleaning on node ${i#*@}"
ssh -t $i 'pgrep etcd && sudo -p "[sudo] password for cleaning etcd data: " service etcd stop && sudo rm -rf /infra*' if [[ "${roles[${ii}]}" == "ai" || "${roles[${ii}]}" == "a" ]]; then
# Delete the files in order to generate a clean environment, so you can change each node's role at next deployment. ssh -t $i 'pgrep etcd && sudo -p "[sudo] password for cleaning etcd data: " service etcd stop && sudo rm -rf /infra*;
ssh -t $i 'sudo rm -f /opt/bin/kube* /etc/init/kube* /etc/init.d/kube* /etc/default/kube*; sudo rm -rf ~/kube /var/lib/kubelet' sudo rm -rf /opt/bin/etcd* /etc/init/etcd.conf /etc/init.d/etcd /etc/default/etcd'
elif [[ "${roles[${ii}]}" == "i" ]]; then
ssh -t $i 'pgrep flanneld && sudo -p "[sudo] password for stopping flanneld: " service flanneld stop'
else
echo "unsupported role for ${i}"
fi
# Delete the files in order to generate a clean environment, so you can change each node's role at next deployment.
ssh -t $i 'sudo rm -f /opt/bin/kube* /opt/bin/flanneld;
sudo rm -rf /etc/init/kube* /etc/init/flanneld.conf /etc/init.d/kube* /etc/init.d/flanneld;
sudo rm -rf /etc/default/kube* /etc/default/flanneld;
sudo rm -rf ~/kube /var/lib/kubelet'
} }
((ii=ii+1))
done done
wait
} }
# Update a kubernetes cluster with latest source # Update a kubernetes cluster with latest source