keep kubeproxy hostname consistent with kubelet

This commit is contained in:
jiangyaoguo 2015-11-05 11:41:07 +08:00
parent 70d2a02959
commit a739fc44c4
8 changed files with 71 additions and 56 deletions

View File

@ -93,7 +93,8 @@ coreos:
--master=https://${MASTER_IP} \ --master=https://${MASTER_IP} \
--kubeconfig=/var/lib/kube-proxy/kubeconfig \ --kubeconfig=/var/lib/kube-proxy/kubeconfig \
--v=2 \ --v=2 \
--logtostderr=true --logtostderr=true \
--hostname-override=${HOSTNAME_OVERRIDE}
Restart=always Restart=always
RestartSec=10 RestartSec=10

View File

@ -48,9 +48,9 @@ EOF
KUBE_PROXY_OPTS=" \${KUBE_LOGTOSTDERR} \\ KUBE_PROXY_OPTS=" \${KUBE_LOGTOSTDERR} \\
\${KUBE_LOG_LEVEL} \\ \${KUBE_LOG_LEVEL} \\
\${NODE_ADDRESS} \\ \${NODE_ADDRESS} \\
\${NODE_PORT} \\ \${NODE_PORT} \\
\${NODE_HOSTNAME} \\ \${NODE_HOSTNAME} \\
\${KUBELET_API_SERVER} \\ \${KUBELET_API_SERVER} \\
\${KUBE_ALLOW_PRIV} \\ \${KUBE_ALLOW_PRIV} \\
\${KUBELET_ARGS}" \${KUBELET_ARGS}"

View File

@ -16,6 +16,7 @@
MASTER_ADDRESS=${1:-"8.8.8.18"} MASTER_ADDRESS=${1:-"8.8.8.18"}
NODE_ADDRESS=${2:-"8.8.8.20"}
cat <<EOF >/opt/kubernetes/cfg/kube-proxy cat <<EOF >/opt/kubernetes/cfg/kube-proxy
# --logtostderr=true: log to standard error instead of files # --logtostderr=true: log to standard error instead of files
@ -24,12 +25,16 @@ KUBE_LOGTOSTDERR="--logtostderr=true"
# --v=0: log level for V logs # --v=0: log level for V logs
KUBE_LOG_LEVEL="--v=4" KUBE_LOG_LEVEL="--v=4"
# --hostname-override="": If non-empty, will use this string as identification instead of the actual hostname.
NODE_HOSTNAME="--hostname-override=${NODE_ADDRESS}"
# --master="": The address of the Kubernetes API server (overrides any value in kubeconfig) # --master="": The address of the Kubernetes API server (overrides any value in kubeconfig)
KUBE_MASTER="--master=http://${MASTER_ADDRESS}:8080" KUBE_MASTER="--master=http://${MASTER_ADDRESS}:8080"
EOF EOF
KUBE_PROXY_OPTS=" \${KUBE_LOGTOSTDERR} \\ KUBE_PROXY_OPTS=" \${KUBE_LOGTOSTDERR} \\
\${KUBE_LOG_LEVEL} \\ \${KUBE_LOG_LEVEL} \\
\${NODE_HOSTNAME} \\
\${KUBE_MASTER}" \${KUBE_MASTER}"
cat <<EOF >/usr/lib/systemd/system/kube-proxy.service cat <<EOF >/usr/lib/systemd/system/kube-proxy.service

View File

@ -9,4 +9,5 @@ kill timeout 60 # wait 60s between SIGTERM and SIGKILL.
exec /usr/local/bin/proxy \ exec /usr/local/bin/proxy \
--master=%(kubeapi_server)s \ --master=%(kubeapi_server)s \
--logtostderr=true --logtostderr=true \
--hostname-override=%(kubelet_bind_addr)s

View File

@ -37,7 +37,8 @@ coreos:
[Service] [Service]
ExecStart=/opt/kubernetes/bin/kube-proxy \ ExecStart=/opt/kubernetes/bin/kube-proxy \
--master=http://${MASTER_IP}:8080 --master=http://${MASTER_IP}:8080 \
--hostname-override=${NODE_IPS[$i]}
Restart=always Restart=always
RestartSec=2 RestartSec=2

View File

@ -179,6 +179,7 @@ coreos:
--bind-address=$private_ipv4 \ --bind-address=$private_ipv4 \
--kubeconfig=/var/lib/kube-proxy/kubeconfig \ --kubeconfig=/var/lib/kube-proxy/kubeconfig \
--logtostderr=true \ --logtostderr=true \
--hostname-override=$private_ipv4 \
--master=${FIRST_APISERVER_URL} --master=${FIRST_APISERVER_URL}
Restart=always Restart=always
RestartSec=5 RestartSec=5

View File

@ -34,12 +34,12 @@ function test-build-release() {
# From user input set the necessary k8s and etcd configuration information # From user input set the necessary k8s and etcd configuration information
function setClusterInfo() { function setClusterInfo() {
# Initialize NODE_IPS in setClusterInfo function # Initialize NODE_IPS in setClusterInfo function
# NODE_IPS is defined as a global variable, and is concatenated with other nodeIP # NODE_IPS is defined as a global variable, and is concatenated with other nodeIP
# When setClusterInfo is called for many times, this could cause potential problems # When setClusterInfo is called for many times, this could cause potential problems
# Such as, you will have NODE_IPS=192.168.0.2,192.168.0.3,192.168.0.2,192.168.0.3, # Such as, you will have NODE_IPS=192.168.0.2,192.168.0.3,192.168.0.2,192.168.0.3,
# which is obviously wrong. # which is obviously wrong.
NODE_IPS="" NODE_IPS=""
local ii=0 local ii=0
for i in $nodes; do for i in $nodes; do
nodeIP=${i#*@} nodeIP=${i#*@}
@ -246,7 +246,8 @@ EOF
function create-kube-proxy-opts() { function create-kube-proxy-opts() {
cat <<EOF > ~/kube/default/kube-proxy cat <<EOF > ~/kube/default/kube-proxy
KUBE_PROXY_OPTS="\ KUBE_PROXY_OPTS="\
--master=http://${1}:8080 \ --hostname-override=${1} \
--master=http://${2}:8080 \
--logtostderr=true" --logtostderr=true"
EOF EOF
@ -267,7 +268,7 @@ EOF
function detect-master() { function detect-master() {
source "${KUBE_ROOT}/cluster/ubuntu/${KUBE_CONFIG_FILE:-config-default.sh}" source "${KUBE_ROOT}/cluster/ubuntu/${KUBE_CONFIG_FILE:-config-default.sh}"
setClusterInfo setClusterInfo
export KUBE_MASTER="${MASTER}" export KUBE_MASTER="${MASTER}"
export KUBE_MASTER_IP="${MASTER_IP}" export KUBE_MASTER_IP="${MASTER_IP}"
echo "Using master ${MASTER_IP}" echo "Using master ${MASTER_IP}"
} }
@ -348,7 +349,7 @@ function kube-up() {
} }
function provision-master() { function provision-master() {
echo -e "\nDeploying master on machine ${MASTER_IP}" echo -e "\nDeploying master on machine ${MASTER_IP}"
ssh $SSH_OPTS "$MASTER" "mkdir -p ~/kube/default" ssh $SSH_OPTS "$MASTER" "mkdir -p ~/kube/default"
@ -371,7 +372,7 @@ function provision-master() {
DNS:kubernetes.default.svc DNS:kubernetes.default.svc
DNS:kubernetes.default.svc.cluster.local DNS:kubernetes.default.svc.cluster.local
) )
EXTRA_SANS=$(echo "${EXTRA_SANS[@]}" | tr ' ' ,) EXTRA_SANS=$(echo "${EXTRA_SANS[@]}" | tr ' ' ,)
# remote login to MASTER and configure k8s master # remote login to MASTER and configure k8s master
@ -388,10 +389,10 @@ function provision-master() {
create-kube-scheduler-opts create-kube-scheduler-opts
create-flanneld-opts '127.0.0.1' create-flanneld-opts '127.0.0.1'
sudo -E -p '[sudo] password to start master: ' -- /bin/bash -c ' sudo -E -p '[sudo] password to start master: ' -- /bin/bash -c '
cp ~/kube/default/* /etc/default/ cp ~/kube/default/* /etc/default/
cp ~/kube/init_conf/* /etc/init/ cp ~/kube/init_conf/* /etc/init/
cp ~/kube/init_scripts/* /etc/init.d/ cp ~/kube/init_scripts/* /etc/init.d/
groupadd -f -r kube-cert groupadd -f -r kube-cert
${PROXY_SETTING} ~/kube/make-ca-cert.sh \"${MASTER_IP}\" \"${EXTRA_SANS}\" ${PROXY_SETTING} ~/kube/make-ca-cert.sh \"${MASTER_IP}\" \"${EXTRA_SANS}\"
mkdir -p /opt/bin/ mkdir -p /opt/bin/
@ -402,10 +403,10 @@ function provision-master() {
echo "Deploying master on machine ${MASTER_IP} failed" echo "Deploying master on machine ${MASTER_IP} failed"
exit 1 exit 1
} }
} }
function provision-node() { function provision-node() {
echo -e "\nDeploying node on machine ${1#*@}" echo -e "\nDeploying node on machine ${1#*@}"
ssh $SSH_OPTS $1 "mkdir -p ~/kube/default" ssh $SSH_OPTS $1 "mkdir -p ~/kube/default"
@ -422,21 +423,23 @@ function provision-node() {
# remote login to node and configure k8s node # remote login to node and configure k8s node
ssh $SSH_OPTS -t "$1" " ssh $SSH_OPTS -t "$1" "
source ~/kube/util.sh source ~/kube/util.sh
setClusterInfo setClusterInfo
create-kubelet-opts \ create-kubelet-opts \
'${1#*@}' \ '${1#*@}' \
'${MASTER_IP}' \ '${MASTER_IP}' \
'${DNS_SERVER_IP}' \ '${DNS_SERVER_IP}' \
'${DNS_DOMAIN}' '${DNS_DOMAIN}'
create-kube-proxy-opts '${MASTER_IP}' create-kube-proxy-opts \
'${1#*@}' \
'${MASTER_IP}'
create-flanneld-opts '${MASTER_IP}' create-flanneld-opts '${MASTER_IP}'
sudo -E -p '[sudo] password to start node: ' -- /bin/bash -c ' sudo -E -p '[sudo] password to start node: ' -- /bin/bash -c '
cp ~/kube/default/* /etc/default/ cp ~/kube/default/* /etc/default/
cp ~/kube/init_conf/* /etc/init/ cp ~/kube/init_conf/* /etc/init/
cp ~/kube/init_scripts/* /etc/init.d/ cp ~/kube/init_scripts/* /etc/init.d/
mkdir -p /opt/bin/ mkdir -p /opt/bin/
cp ~/kube/minion/* /opt/bin cp ~/kube/minion/* /opt/bin
service flanneld start service flanneld start
~/kube/reconfDocker.sh i ~/kube/reconfDocker.sh i
@ -447,7 +450,7 @@ function provision-node() {
} }
function provision-masterandnode() { function provision-masterandnode() {
echo -e "\nDeploying master and node on machine ${MASTER_IP}" echo -e "\nDeploying master and node on machine ${MASTER_IP}"
ssh $SSH_OPTS $MASTER "mkdir -p ~/kube/default" ssh $SSH_OPTS $MASTER "mkdir -p ~/kube/default"
@ -464,7 +467,7 @@ function provision-masterandnode() {
ubuntu/binaries/master/ \ ubuntu/binaries/master/ \
ubuntu/binaries/minion \ ubuntu/binaries/minion \
"${MASTER}:~/kube" "${MASTER}:~/kube"
EXTRA_SANS=( EXTRA_SANS=(
IP:${MASTER_IP} IP:${MASTER_IP}
IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1 IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1
@ -473,13 +476,13 @@ function provision-masterandnode() {
DNS:kubernetes.default.svc DNS:kubernetes.default.svc
DNS:kubernetes.default.svc.cluster.local DNS:kubernetes.default.svc.cluster.local
) )
EXTRA_SANS=$(echo "${EXTRA_SANS[@]}" | tr ' ' ,) EXTRA_SANS=$(echo "${EXTRA_SANS[@]}" | tr ' ' ,)
# remote login to the master/node and configure k8s # remote login to the master/node and configure k8s
ssh $SSH_OPTS -t "$MASTER" " ssh $SSH_OPTS -t "$MASTER" "
source ~/kube/util.sh source ~/kube/util.sh
setClusterInfo setClusterInfo
create-etcd-opts '${MASTER_IP}' create-etcd-opts '${MASTER_IP}'
create-kube-apiserver-opts \ create-kube-apiserver-opts \
@ -493,17 +496,19 @@ function provision-masterandnode() {
'${MASTER_IP}' \ '${MASTER_IP}' \
'${DNS_SERVER_IP}' \ '${DNS_SERVER_IP}' \
'${DNS_DOMAIN}' '${DNS_DOMAIN}'
create-kube-proxy-opts '${MASTER_IP}' create-kube-proxy-opts \
'${MASTER_IP}' \
'${MASTER_IP}'
create-flanneld-opts '127.0.0.1' create-flanneld-opts '127.0.0.1'
sudo -E -p '[sudo] password to start master: ' -- /bin/bash -c ' sudo -E -p '[sudo] password to start master: ' -- /bin/bash -c '
cp ~/kube/default/* /etc/default/ cp ~/kube/default/* /etc/default/
cp ~/kube/init_conf/* /etc/init/ cp ~/kube/init_conf/* /etc/init/
cp ~/kube/init_scripts/* /etc/init.d/ cp ~/kube/init_scripts/* /etc/init.d/
groupadd -f -r kube-cert groupadd -f -r kube-cert
${PROXY_SETTING} ~/kube/make-ca-cert.sh \"${MASTER_IP}\" \"${EXTRA_SANS}\" ${PROXY_SETTING} ~/kube/make-ca-cert.sh \"${MASTER_IP}\" \"${EXTRA_SANS}\"
mkdir -p /opt/bin/ mkdir -p /opt/bin/
cp ~/kube/master/* /opt/bin/ cp ~/kube/master/* /opt/bin/
cp ~/kube/minion/* /opt/bin/ cp ~/kube/minion/* /opt/bin/
@ -512,7 +517,7 @@ function provision-masterandnode() {
'" || { '" || {
echo "Deploying master and node on machine ${MASTER_IP} failed" echo "Deploying master and node on machine ${MASTER_IP} failed"
exit 1 exit 1
} }
} }
# check whether kubelet has torn down all of the pods # check whether kubelet has torn down all of the pods
@ -531,14 +536,14 @@ function check-pods-torn-down() {
# Delete a kubernetes cluster # Delete a kubernetes cluster
function kube-down() { function kube-down() {
export KUBECTL_PATH="${KUBE_ROOT}/cluster/ubuntu/binaries/kubectl" export KUBECTL_PATH="${KUBE_ROOT}/cluster/ubuntu/binaries/kubectl"
source "${KUBE_ROOT}/cluster/ubuntu/${KUBE_CONFIG_FILE:-config-default.sh}" source "${KUBE_ROOT}/cluster/ubuntu/${KUBE_CONFIG_FILE:-config-default.sh}"
source "${KUBE_ROOT}/cluster/common.sh" source "${KUBE_ROOT}/cluster/common.sh"
tear_down_alive_resources tear_down_alive_resources
check-pods-torn-down check-pods-torn-down
local ii=0 local ii=0
for i in ${nodes}; do for i in ${nodes}; do
if [[ "${roles[${ii}]}" == "ai" || "${roles[${ii}]}" == "a" ]]; then if [[ "${roles[${ii}]}" == "ai" || "${roles[${ii}]}" == "a" ]]; then
@ -553,7 +558,7 @@ function kube-down() {
/etc/init/etcd.conf \ /etc/init/etcd.conf \
/etc/init.d/etcd \ /etc/init.d/etcd \
/etc/default/etcd /etc/default/etcd
rm -rf /infra* rm -rf /infra*
rm -rf /srv/kubernetes rm -rf /srv/kubernetes
' '
@ -562,20 +567,20 @@ function kube-down() {
if [[ "${roles[${ii}]}" == "ai" ]]; then if [[ "${roles[${ii}]}" == "ai" ]]; then
ssh $SSH_OPTS -t "$i" "sudo rm -rf /var/lib/kubelet" ssh $SSH_OPTS -t "$i" "sudo rm -rf /var/lib/kubelet"
fi fi
elif [[ "${roles[${ii}]}" == "i" ]]; then elif [[ "${roles[${ii}]}" == "i" ]]; then
echo "Cleaning on node ${i#*@}" echo "Cleaning on node ${i#*@}"
ssh $SSH_OPTS -t "$i" " ssh $SSH_OPTS -t "$i" "
pgrep flanneld && \ pgrep flanneld && \
sudo -p '[sudo] password to stop node: ' -- /bin/bash -c ' sudo -p '[sudo] password to stop node: ' -- /bin/bash -c '
service flanneld stop service flanneld stop
rm -rf /var/lib/kubelet rm -rf /var/lib/kubelet
' '
" || echo "Cleaning on node ${i#*@} failed" " || echo "Cleaning on node ${i#*@} failed"
else else
echo "unsupported role for ${i}" echo "unsupported role for ${i}"
fi fi
ssh $SSH_OPTS -t "$i" "sudo -- /bin/bash -c ' ssh $SSH_OPTS -t "$i" "sudo -- /bin/bash -c '
rm -f \ rm -f \
/opt/bin/kube* \ /opt/bin/kube* \
@ -586,7 +591,7 @@ function kube-down() {
/etc/init.d/flanneld \ /etc/init.d/flanneld \
/etc/default/kube* \ /etc/default/kube* \
/etc/default/flanneld /etc/default/flanneld
rm -rf ~/kube rm -rf ~/kube
rm -f /run/flannel/subnet.env rm -f /run/flannel/subnet.env
'" || echo "cleaning legacy files on ${i#*@} failed" '" || echo "cleaning legacy files on ${i#*@} failed"
@ -599,16 +604,16 @@ function kube-down() {
function prepare-push() { function prepare-push() {
# Use local binaries for kube-push # Use local binaries for kube-push
if [[ -z "${KUBE_VERSION}" ]]; then if [[ -z "${KUBE_VERSION}" ]]; then
echo "Use local binaries for kube-push" echo "Use local binaries for kube-push"
if [[ ! -d "${KUBE_ROOT}/cluster/ubuntu/binaries" ]]; then if [[ ! -d "${KUBE_ROOT}/cluster/ubuntu/binaries" ]]; then
echo "No local binaries. Please check" echo "No local binaries. Please check"
exit 1 exit 1
else else
echo "Please make sure all the required local binaries are prepared ahead" echo "Please make sure all the required local binaries are prepared ahead"
sleep 3 sleep 3
fi fi
else else
# Run download-release.sh to get the required release # Run download-release.sh to get the required release
export KUBE_VERSION export KUBE_VERSION
"${KUBE_ROOT}/cluster/ubuntu/download-release.sh" "${KUBE_ROOT}/cluster/ubuntu/download-release.sh"
fi fi
@ -617,13 +622,13 @@ function prepare-push() {
# Update a kubernetes master with expected release # Update a kubernetes master with expected release
function push-master() { function push-master() {
source "${KUBE_ROOT}/cluster/ubuntu/${KUBE_CONFIG_FILE:-config-default.sh}" source "${KUBE_ROOT}/cluster/ubuntu/${KUBE_CONFIG_FILE:-config-default.sh}"
if [[ ! -f "${KUBE_ROOT}/cluster/ubuntu/binaries/master/kube-apiserver" ]]; then if [[ ! -f "${KUBE_ROOT}/cluster/ubuntu/binaries/master/kube-apiserver" ]]; then
echo "There is no required release of kubernetes, please check first" echo "There is no required release of kubernetes, please check first"
exit 1 exit 1
fi fi
export KUBECTL_PATH="${KUBE_ROOT}/cluster/ubuntu/binaries/kubectl" export KUBECTL_PATH="${KUBE_ROOT}/cluster/ubuntu/binaries/kubectl"
setClusterInfo setClusterInfo
local ii=0 local ii=0
@ -651,8 +656,8 @@ function push-master() {
rm -f /run/flannel/subnet.env rm -f /run/flannel/subnet.env
rm -rf ~/kube rm -rf ~/kube
'" || echo "Cleaning master ${i#*@} failed" '" || echo "Cleaning master ${i#*@} failed"
fi fi
if [[ "${roles[${ii}]}" == "a" ]]; then if [[ "${roles[${ii}]}" == "a" ]]; then
provision-master provision-master
elif [[ "${roles[${ii}]}" == "ai" ]]; then elif [[ "${roles[${ii}]}" == "ai" ]]; then
@ -679,9 +684,9 @@ function push-node() {
fi fi
export KUBECTL_PATH="${KUBE_ROOT}/cluster/ubuntu/binaries/kubectl" export KUBECTL_PATH="${KUBE_ROOT}/cluster/ubuntu/binaries/kubectl"
setClusterInfo setClusterInfo
local node_ip=${1} local node_ip=${1}
local ii=0 local ii=0
local existing=false local existing=false
@ -726,12 +731,12 @@ function push-node() {
echo "node ${node_ip} does not exist" echo "node ${node_ip} does not exist"
else else
verify-cluster verify-cluster
fi fi
} }
# Update a kubernetes cluster with expected source # Update a kubernetes cluster with expected source
function kube-push() { function kube-push() {
prepare-push prepare-push
source "${KUBE_ROOT}/cluster/ubuntu/${KUBE_CONFIG_FILE:-config-default.sh}" source "${KUBE_ROOT}/cluster/ubuntu/${KUBE_CONFIG_FILE:-config-default.sh}"
@ -739,9 +744,9 @@ function kube-push() {
echo "There is no required release of kubernetes, please check first" echo "There is no required release of kubernetes, please check first"
exit 1 exit 1
fi fi
export KUBECTL_PATH="${KUBE_ROOT}/cluster/ubuntu/binaries/kubectl" export KUBECTL_PATH="${KUBE_ROOT}/cluster/ubuntu/binaries/kubectl"
#stop all the kube's process & etcd #stop all the kube's process & etcd
local ii=0 local ii=0
for i in ${nodes}; do for i in ${nodes}; do
if [[ "${roles[${ii}]}" == "ai" || "${roles[${ii}]}" == "a" ]]; then if [[ "${roles[${ii}]}" == "ai" || "${roles[${ii}]}" == "a" ]]; then

View File

@ -306,6 +306,7 @@ function start_kubeproxy {
PROXY_LOG=/tmp/kube-proxy.log PROXY_LOG=/tmp/kube-proxy.log
sudo -E "${GO_OUT}/kube-proxy" \ sudo -E "${GO_OUT}/kube-proxy" \
--v=${LOG_LEVEL} \ --v=${LOG_LEVEL} \
--hostname-override="127.0.0.1" \
--master="http://${API_HOST}:${API_PORT}" >"${PROXY_LOG}" 2>&1 & --master="http://${API_HOST}:${API_PORT}" >"${PROXY_LOG}" 2>&1 &
PROXY_PID=$! PROXY_PID=$!