Merge pull request #34002 from xiaoping378/fix-broken-cluster/centos4

Automatic merge from submit-queue

fix broken cluster/centos and enhance the style

Replaces PR #33818.
Kubernetes Submit Queue authored on 2016-12-09 17:50:30 -08:00; committed by GitHub.
commit c2aaac9178
13 changed files with 191 additions and 28 deletions

View File

@@ -65,8 +65,8 @@ function unpack-releases() {
# flannel
if [[ -f ${RELEASES_DIR}/flannel.tar.gz ]] ; then
tar xzf ${RELEASES_DIR}/flannel.tar.gz -C ${RELEASES_DIR}
cp ${RELEASES_DIR}/flannel-${FLANNEL_VERSION}/flanneld ${BINARY_DIR}/master/bin
cp ${RELEASES_DIR}/flannel-${FLANNEL_VERSION}/flanneld ${BINARY_DIR}/node/bin
cp ${RELEASES_DIR}/flanneld ${BINARY_DIR}/master/bin
cp ${RELEASES_DIR}/flanneld ${BINARY_DIR}/node/bin
fi
# etcd
@@ -95,8 +95,8 @@ function unpack-releases() {
fi
# docker
if [[ -f ${RELEASES_DIR}/docker.tar.gz ]]; then
tar xzf ${RELEASES_DIR}/docker.tar.gz -C ${RELEASES_DIR}
if [[ -f ${RELEASES_DIR}/docker.tgz ]]; then
tar xzf ${RELEASES_DIR}/docker.tgz -C ${RELEASES_DIR}
cp ${RELEASES_DIR}/docker/docker* ${BINARY_DIR}/node/bin
fi
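The copy paths change because flannel v0.6.x ships flanneld at the root of its release tarball rather than under a flannel-${FLANNEL_VERSION}/ directory, and Docker's static build now comes as a .tgz with its binaries under docker/. A minimal sketch, not part of the diff, for spot-checking the layouts the new cp lines assume:

# Sketch: list the downloaded archives and confirm the expected entry paths.
RELEASES_DIR=${RELEASES_DIR:-/tmp/downloads}
tar tzf "${RELEASES_DIR}/flannel.tar.gz" | grep -Eq '(^|/)flanneld$' \
  && echo "flanneld entry found" \
  || echo "unexpected flannel layout; check FLANNEL_VERSION" >&2
tar tzf "${RELEASES_DIR}/docker.tgz" | grep -q 'docker/docker' \
  && echo "docker binaries found under docker/" \
  || echo "unexpected docker layout; check DOCKER_VERSION" >&2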

View File

@@ -20,22 +20,22 @@
RELEASES_DIR=${RELEASES_DIR:-/tmp/downloads}
# Define docker version to use.
DOCKER_VERSION=${DOCKER_VERSION:-"1.12.0"}
DOCKER_VERSION=${DOCKER_VERSION:-"1.12.1"}
# Define flannel version to use.
FLANNEL_VERSION=${FLANNEL_VERSION:-"0.5.5"}
FLANNEL_VERSION=${FLANNEL_VERSION:-"0.6.1"}
# Define etcd version to use.
ETCD_VERSION=${ETCD_VERSION:-"2.2.1"}
ETCD_VERSION=${ETCD_VERSION:-"3.0.9"}
# Define k8s version to use.
K8S_VERSION=${K8S_VERSION:-"1.3.5"}
K8S_VERSION=${K8S_VERSION:-"1.3.7"}
DOCKER_DOWNLOAD_URL=\
"https://get.docker.com/builds/Linux/x86_64/docker-${DOCKER_VERSION}.tgz"
FLANNEL_DOWNLOAD_URL=\
"https://github.com/coreos/flannel/releases/download/v${FLANNEL_VERSION}/flannel-${FLANNEL_VERSION}-linux-amd64.tar.gz"
"https://github.com/coreos/flannel/releases/download/v${FLANNEL_VERSION}/flannel-v${FLANNEL_VERSION}-linux-amd64.tar.gz"
ETCD_DOWNLOAD_URL=\
"https://github.com/coreos/etcd/releases/download/v${ETCD_VERSION}/etcd-v${ETCD_VERSION}-linux-amd64.tar.gz"

View File

@@ -37,12 +37,22 @@ export ETCD_SERVERS=${ETCD_SERVERS:-"http://$MASTER_IP:2379"}
# according to rfc 1918 ref: https://tools.ietf.org/html/rfc1918 choose a private ip range here.
export SERVICE_CLUSTER_IP_RANGE=${SERVICE_CLUSTER_IP_RANGE:-"192.168.3.0/24"}
# Optional: Install cluster DNS.
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
# DNS_SERVER_IP must be an IP in SERVICE_CLUSTER_IP_RANGE
DNS_SERVER_IP=${DNS_SERVER_IP:-"192.168.3.100"}
DNS_DOMAIN=${DNS_DOMAIN:-"cluster.local"}
DNS_REPLICAS=${DNS_REPLICAS:-1}
# Optional: Install Kubernetes UI
ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"
# define the IP range used for flannel overlay network, should not conflict with above SERVICE_CLUSTER_IP_RANGE
export FLANNEL_NET=${FLANNEL_NET:-"172.16.0.0/16"}
# Admission Controllers to invoke prior to persisting objects in cluster
# If we included ResourceQuota, we should keep it at the end of the list to prevent incrementing quota usage prematurely.
export ADMISSION_CONTROL=NamespaceLifecycle,NamespaceExists,LimitRanger,ServiceAccount,SecurityContextDeny,DefaultStorageClass,ResourceQuota
export ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota
# Extra options to set on the Docker command line.
# This is useful for setting --insecure-registry for local registries.
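As the new comment says, DNS_SERVER_IP has to fall inside SERVICE_CLUSTER_IP_RANGE or the kube-dns service cannot be created at that address. A minimal sanity check for the /24 defaults, sketch only:

# Sketch: with a /24 service range, a prefix comparison is sufficient.
SERVICE_CLUSTER_IP_RANGE=${SERVICE_CLUSTER_IP_RANGE:-"192.168.3.0/24"}
DNS_SERVER_IP=${DNS_SERVER_IP:-"192.168.3.100"}
prefix=${SERVICE_CLUSTER_IP_RANGE%.*}        # "192.168.3.0/24" -> "192.168.3"
if [[ "${DNS_SERVER_IP}" == ${prefix}.* ]]; then
  echo "DNS_SERVER_IP ${DNS_SERVER_IP} is inside ${SERVICE_CLUSTER_IP_RANGE}"
else
  echo "DNS_SERVER_IP must be an IP inside SERVICE_CLUSTER_IP_RANGE" >&2
fi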

cluster/centos/deployAddons.sh (new executable file, 73 lines)
View File

@@ -0,0 +1,73 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# deploy the add-on services after the cluster is available
set -e
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "config-default.sh"
KUBECTL="${KUBE_ROOT}/cluster/kubectl.sh"
export KUBECTL_PATH="${KUBE_ROOT}/cluster/centos/binaries/kubectl"
export KUBE_CONFIG_FILE=${KUBE_CONFIG_FILE:-${KUBE_ROOT}/cluster/centos/config-default.sh}
function deploy_dns {
echo "Deploying DNS on Kubernetes"
sed -e "s/\\\$DNS_REPLICAS/${DNS_REPLICAS}/g;s/\\\$DNS_DOMAIN/${DNS_DOMAIN}/g;" "${KUBE_ROOT}/cluster/addons/dns/skydns-rc.yaml.sed" > skydns-rc.yaml
sed -e "s/\\\$DNS_SERVER_IP/${DNS_SERVER_IP}/g" "${KUBE_ROOT}/cluster/addons/dns/skydns-svc.yaml.sed" > skydns-svc.yaml
KUBEDNS=`eval "${KUBECTL} get services --namespace=kube-system | grep kube-dns | cat"`
if [ ! "$KUBEDNS" ]; then
# use kubectl to create skydns rc and service
${KUBECTL} --namespace=kube-system create -f skydns-rc.yaml
${KUBECTL} --namespace=kube-system create -f skydns-svc.yaml
echo "Kube-dns rc and service is successfully deployed."
else
echo "Kube-dns rc and service is already deployed. Skipping."
fi
echo
}
function deploy_dashboard {
if ${KUBECTL} get rc -l k8s-app=kubernetes-dashboard --namespace=kube-system | grep kubernetes-dashboard-v &> /dev/null; then
echo "Kubernetes Dashboard replicationController already exists"
else
echo "Creating Kubernetes Dashboard replicationController"
${KUBECTL} create -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-controller.yaml
fi
if ${KUBECTL} get service/kubernetes-dashboard --namespace=kube-system &> /dev/null; then
echo "Kubernetes Dashboard service already exists"
else
echo "Creating Kubernetes Dashboard service"
${KUBECTL} create -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-service.yaml
fi
echo
}
if [ "${ENABLE_CLUSTER_DNS}" == true ]; then
deploy_dns
fi
if [ "${ENABLE_CLUSTER_UI}" == true ]; then
deploy_dashboard
fi
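The new deployAddons.sh stamps out the skydns manifests from the bundled addon templates and creates the dashboard controller and service once the cluster is reachable. A hedged usage sketch, assuming it is run by hand from cluster/centos/ after kube-up.sh, with MASTER_IP taken from config-default.sh:

cd cluster/centos
export KUBE_ENABLE_CLUSTER_DNS=true KUBE_ENABLE_CLUSTER_UI=true
bash deployAddons.sh
# Verify with the kubectl binary unpacked by build.sh (insecure port 8080 assumed):
./binaries/kubectl -s "http://${MASTER_IP}:8080" --namespace=kube-system get rc,svc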

View File

@@ -27,7 +27,7 @@ KUBE_LOGTOSTDERR="--logtostderr=true"
# --v=0: log level for V logs
KUBE_LOG_LEVEL="--v=4"
# --etcd-servers=[]: List of etcd servers to watch (http://ip:port),
# comma separated. Mutually exclusive with -etcd-config
KUBE_ETCD_SERVERS="--etcd-servers=${ETCD_SERVERS}"
@@ -40,21 +40,21 @@ KUBE_API_PORT="--insecure-port=8080"
# --kubelet-port=10250: Kubelet port
NODE_PORT="--kubelet-port=10250"
# --advertise-address=<nil>: The IP address on which to advertise
# the apiserver to members of the cluster.
KUBE_ADVERTISE_ADDR="--advertise-address=${MASTER_ADDRESS}"
# --allow-privileged=false: If true, allow privileged containers.
KUBE_ALLOW_PRIV="--allow-privileged=false"
# --service-cluster-ip-range=<nil>: A CIDR notation IP range from which to assign service cluster IPs.
# This must not overlap with any IP ranges assigned to nodes for pods.
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}"
# --admission-control="AlwaysAdmit": Ordered list of plug-ins
# to do admission control of resources into cluster.
# Comma-delimited list of:
# LimitRanger, AlwaysDeny, SecurityContextDeny, NamespaceExists,
# NamespaceLifecycle, NamespaceAutoProvision,
# AlwaysAdmit, ServiceAccount, ResourceQuota, DefaultStorageClass
KUBE_ADMISSION_CONTROL="--admission-control=${ADMISSION_CONTROL}"
@@ -105,4 +105,4 @@ EOF
systemctl daemon-reload
systemctl enable kube-apiserver
systemctl start kube-apiserver
systemctl restart kube-apiserver
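Switching the last step from systemctl start to systemctl restart, here and in the other service scripts below, makes re-provisioning idempotent: an already-running unit is bounced onto the regenerated configuration instead of the start being a silent no-op. The tail of each script now behaves roughly like this sketch:

systemctl daemon-reload                  # re-read the rewritten unit file
systemctl enable kube-apiserver          # stays enabled across reboots
systemctl restart kube-apiserver         # (re)start with the new flags
systemctl --no-pager -l status kube-apiserver | head -n 5   # quick health peek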

View File

@@ -53,4 +53,4 @@ EOF
systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl start kube-controller-manager
systemctl restart kube-controller-manager

View File

@@ -35,7 +35,7 @@ ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
#
#[cluster]
#ETCD_INITIAL_ADVERTISE_PEER_URLS="http://localhost:2380,http://localhost:7001"
# if you use different ETCD_NAME (e.g. test),
# set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
#ETCD_INITIAL_CLUSTER="default=http://localhost:2380,default=http://localhost:7001"
#ETCD_INITIAL_CLUSTER_STATE="new"
@@ -76,4 +76,4 @@ EOF
systemctl daemon-reload
systemctl enable etcd
systemctl start etcd
systemctl restart etcd

View File

@@ -0,0 +1,65 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ETCD_SERVERS=${1:-"http://8.8.8.18:4001"}
FLANNEL_NET=${2:-"172.16.0.0/16"}
cat <<EOF >/opt/kubernetes/cfg/flannel
FLANNEL_ETCD="-etcd-endpoints=${ETCD_SERVERS}"
FLANNEL_ETCD_KEY="-etcd-prefix=/coreos.com/network"
EOF
cat <<EOF >/usr/lib/systemd/system/flannel.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/flannel
ExecStart=/opt/kubernetes/bin/flanneld --ip-masq \${FLANNEL_ETCD} \${FLANNEL_ETCD_KEY}
Type=notify
[Install]
WantedBy=multi-user.target
EOF
# Store FLANNEL_NET to etcd.
attempt=0
while true; do
/opt/kubernetes/bin/etcdctl --no-sync -C ${ETCD_SERVERS} \
get /coreos.com/network/config >/dev/null 2>&1
if [[ "$?" == 0 ]]; then
break
else
if (( attempt > 600 )); then
echo "timeout for waiting network config" > ~/kube/err.log
exit 2
fi
/opt/kubernetes/bin/etcdctl --no-sync -C ${ETCD_SERVERS} \
mk /coreos.com/network/config "{\"Network\":\"${FLANNEL_NET}\"}" >/dev/null 2>&1
attempt=$((attempt+1))
sleep 3
fi
done
wait
systemctl enable flannel
systemctl daemon-reload
systemctl restart flannel
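The retry loop only writes /coreos.com/network/config when it cannot read it, so the first provisioner to reach etcd wins and later nodes simply confirm the value. A short sketch, not from the commit, for inspecting the result once flanneld is running (ETCD_SERVERS as passed to the script; file and interface names are flannel defaults and may differ):

/opt/kubernetes/bin/etcdctl --no-sync -C "${ETCD_SERVERS}" get /coreos.com/network/config
# -> {"Network":"172.16.0.0/16"}
cat /run/flannel/subnet.env               # FLANNEL_SUBNET / FLANNEL_MTU written by flanneld
ip -4 addr show flannel0 2>/dev/null || ip -4 addr show flannel.1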

View File

@@ -55,4 +55,4 @@ EOF
systemctl daemon-reload
systemctl enable kube-scheduler
systemctl start kube-scheduler
systemctl restart kube-scheduler

View File

@@ -20,7 +20,7 @@ DOCKER_OPTS=${1:-""}
DOCKER_CONFIG=/opt/kubernetes/cfg/docker
cat <<EOF >$DOCKER_CONFIG
DOCKER_OPTS="-H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock -s devicemapper --selinux-enabled=false ${DOCKER_OPTS}"
DOCKER_OPTS="-H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock -s overlay --selinux-enabled=false ${DOCKER_OPTS}"
EOF
cat <<EOF >/usr/lib/systemd/system/docker.service
@@ -45,4 +45,4 @@ EOF
systemctl daemon-reload
systemctl enable docker
systemctl start docker
systemctl restart docker
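Moving Docker's storage driver from devicemapper to overlay avoids the loopback devicemapper setup, but it assumes the kernel provides the overlay filesystem. A small sketch, not part of the diff, to confirm that before relying on the new default:

modprobe overlay 2>/dev/null || true
grep -qw overlay /proc/filesystems \
  && echo "overlay supported by this kernel" \
  || echo "overlay missing; keep -s devicemapper in DOCKER_OPTS" >&2
docker info 2>/dev/null | grep -i 'storage driver'   # what the restarted daemon picked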

View File

@@ -17,6 +17,8 @@
MASTER_ADDRESS=${1:-"8.8.8.18"}
NODE_ADDRESS=${2:-"8.8.8.20"}
DNS_SERVER_IP=${3:-"192.168.3.100"}
DNS_DOMAIN=${4:-"cluster.local"}
cat <<EOF >/opt/kubernetes/cfg/kubelet
@@ -35,13 +37,17 @@ NODE_PORT="--port=10250"
# --hostname-override="": If non-empty, will use this string as identification instead of the actual hostname.
NODE_HOSTNAME="--hostname-override=${NODE_ADDRESS}"
# --api-servers=[]: List of Kubernetes API servers for publishing events,
# and reading pods and services. (ip:port), comma separated.
KUBELET_API_SERVER="--api-servers=${MASTER_ADDRESS}:8080"
# --allow-privileged=false: If true, allow containers to request privileged mode. [default=false]
KUBE_ALLOW_PRIV="--allow-privileged=false"
# DNS info
KUBELET__DNS_IP="--cluster-dns=${DNS_SERVER_IP}"
KUBELET_DNS_DOMAIN="--cluster-domain=${DNS_DOMAIN}"
# Add your own!
KUBELET_ARGS=""
EOF
@@ -53,6 +59,8 @@ KUBE_PROXY_OPTS=" \${KUBE_LOGTOSTDERR} \\
\${NODE_HOSTNAME} \\
\${KUBELET_API_SERVER} \\
\${KUBE_ALLOW_PRIV} \\
\${KUBELET__DNS_IP} \\
\${KUBELET_DNS_DOMAIN} \\
\${KUBELET_ARGS}"
cat <<EOF >/usr/lib/systemd/system/kubelet.service
@@ -73,4 +81,4 @@ EOF
systemctl daemon-reload
systemctl enable kubelet
systemctl start kubelet
systemctl restart kubelet
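kubelet.sh now takes the DNS service IP and domain as its third and fourth arguments and turns them into --cluster-dns/--cluster-domain, so pods resolve names through the new kube-dns service. An illustrative invocation matching the defaults above, not documented in the commit itself:

# Sketch: master 8.8.8.18, node 8.8.8.20, plus the DNS defaults from config-default.sh.
bash kubelet.sh 8.8.8.18 8.8.8.20 192.168.3.100 cluster.local
grep -E 'cluster-(dns|domain)' /opt/kubernetes/cfg/kubelet   # confirm the generated flags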

View File

@@ -53,4 +53,4 @@ EOF
systemctl daemon-reload
systemctl enable kube-proxy
systemctl start kube-proxy
systemctl restart kube-proxy

View File

@@ -225,12 +225,15 @@ function provision-master() {
# scp -r ${SSH_OPTS} master config-default.sh copy-files.sh util.sh "${MASTER}:${KUBE_TEMP}"
kube-scp ${MASTER} "${ROOT}/../saltbase/salt/generate-cert/make-ca-cert.sh ${ROOT}/binaries/master ${ROOT}/master ${ROOT}/config-default.sh ${ROOT}/util.sh" "${KUBE_TEMP}"
kube-ssh "${MASTER}" " \
rm -rf /opt/kubernetes/bin; \
sudo cp -r ${KUBE_TEMP}/master/bin /opt/kubernetes; \
sudo chmod -R +x /opt/kubernetes/bin; \
sudo ln -s /opt/kubernetes/bin/* /usr/local/bin/; \
sudo bash ${KUBE_TEMP}/make-ca-cert.sh ${master_ip} IP:${master_ip},IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local; \
sudo bash ${KUBE_TEMP}/master/scripts/etcd.sh; \
sudo bash ${KUBE_TEMP}/master/scripts/apiserver.sh ${master_ip} ${ETCD_SERVERS} ${SERVICE_CLUSTER_IP_RANGE} ${ADMISSION_CONTROL}; \
sudo bash ${KUBE_TEMP}/master/scripts/controller-manager.sh ${master_ip}; \
sudo bash ${KUBE_TEMP}/master/scripts/flannel.sh ${ETCD_SERVERS} ${FLANNEL_NET}; \
sudo bash ${KUBE_TEMP}/master/scripts/scheduler.sh ${master_ip}"
}
@@ -249,15 +252,19 @@ function provision-node() {
local master_ip=${MASTER#*@}
local node=$1
local node_ip=${node#*@}
local dns_ip=${DNS_SERVER_IP#*@}
local dns_domain=${DNS_DOMAIN#*@}
ensure-setup-dir ${node}
kube-scp ${node} "${ROOT}/binaries/node ${ROOT}/node ${ROOT}/config-default.sh ${ROOT}/util.sh" ${KUBE_TEMP}
kube-ssh "${node}" " \
rm -rf /opt/kubernetes/bin; \
sudo cp -r ${KUBE_TEMP}/node/bin /opt/kubernetes; \
sudo chmod -R +x /opt/kubernetes/bin; \
sudo ln -s /opt/kubernetes/bin/* /usr/local/bin/; \
sudo bash ${KUBE_TEMP}/node/scripts/flannel.sh ${ETCD_SERVERS} ${FLANNEL_NET}; \
sudo bash ${KUBE_TEMP}/node/scripts/docker.sh \"${DOCKER_OPTS}\"; \
sudo bash ${KUBE_TEMP}/node/scripts/kubelet.sh ${master_ip} ${node_ip}; \
sudo bash ${KUBE_TEMP}/node/scripts/kubelet.sh ${master_ip} ${node_ip} ${dns_ip} ${dns_domain}; \
sudo bash ${KUBE_TEMP}/node/scripts/proxy.sh ${master_ip}"
}
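With provision-node forwarding DNS_SERVER_IP and DNS_DOMAIN into kubelet.sh, a full bring-up with this provider looks roughly as follows; the build.sh target name and the manual add-on step are assumptions rather than something this commit documents:

export KUBERNETES_PROVIDER=centos
cluster/centos/build.sh all                   # fetch and unpack docker, flannel, etcd, k8s binaries
cluster/kube-up.sh                            # provision-master, then provision-node per node
(cd cluster/centos && bash deployAddons.sh)   # kube-dns and dashboard add-ons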