Register the kubelet on the master node with an apiserver. This option is separated from the apiserver running locally on the master node so that it can be optionally enabled or disabled as needed.

Also, fix the healthchecking configuration for the master components, which was previously only working by coincidence: if a kubelet doesn't register with a master, it never bothers to figure out what its local address is, so it ends up constructing a URL like http://:8080/healthz for the HTTP probe. This happens to work on the master because all of the pods use host networking and explicitly bind to 127.0.0.1. Once the kubelet is registered with the master and determines the local node address, it healthchecks an address where the pod isn't listening, and the kubelet periodically restarts each master component when the liveness probe fails.
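For context, here is a rough sketch of the kind of liveness probe stanza the message is describing; the port, path, and timing values are illustrative and not copied from this commit's manifests.

# Hypothetical pod spec fragment (illustrative only). With hostNetwork and a
# component bound to 127.0.0.1, the probe must name that address explicitly;
# otherwise a registered kubelet probes the node address and the check fails.
cat <<EOF
livenessProbe:
  httpGet:
    host: 127.0.0.1   # without this, an unregistered kubelet builds "http://:8080/healthz"
    path: /healthz
    port: 8080
  initialDelaySeconds: 15
  timeoutSeconds: 15
EOF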
@@ -25,6 +25,7 @@ MASTER_DISK_TYPE=pd-ssd
 MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20GB}
 MINION_DISK_TYPE=pd-standard
 MINION_DISK_SIZE=${MINION_DISK_SIZE:-100GB}
+REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-true}

 OS_DISTRIBUTION=${KUBE_OS_DISTRIBUTION:-debian}
 MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-container-vm-v20150715}

@@ -25,6 +25,7 @@ MASTER_DISK_TYPE=pd-ssd
 MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20GB}
 MINION_DISK_TYPE=pd-standard
 MINION_DISK_SIZE=${MINION_DISK_SIZE:-100GB}
+REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-false}
 KUBE_APISERVER_REQUEST_TIMEOUT=300

 OS_DISTRIBUTION=${KUBE_OS_DISTRIBUTION:-debian}

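The new REGISTER_MASTER_KUBELET knob takes its default from REGISTER_MASTER, so it can be flipped from the calling environment at cluster bring-up. A hedged usage sketch (the exact invocation below is illustrative, not taken from this change):

# Illustrative only: bring up a GCE cluster without registering the master's kubelet.
# REGISTER_MASTER feeds the REGISTER_MASTER_KUBELET default added above.
REGISTER_MASTER=false KUBERNETES_PROVIDER=gce ./cluster/kube-up.sh
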
@@ -358,13 +358,35 @@ function create-salt-master-auth() {
   fi
 }
 
+# This should happen only on cluster initialization. After the first boot
+# and on upgrade, the kubeconfig file exists on the master-pd and should
+# never be touched again.
+#
+# - Uses KUBELET_CA_CERT (falling back to CA_CERT), KUBELET_CERT, and
+#   KUBELET_KEY to generate a kubeconfig file for the kubelet to securely
+#   connect to the apiserver.
+function create-salt-master-kubelet-auth() {
+  # Only configure the kubelet on the master if the required variables are
+  # set in the environment.
+  if [[ ! -z "${KUBELET_APISERVER:-}" ]] && [[ ! -z "${KUBELET_CERT:-}" ]] && [[ ! -z "${KUBELET_KEY:-}" ]]; then
+    create-salt-kubelet-auth
+  fi
+}
+
 # This should happen both on cluster initialization and node upgrades.
 #
-# - Uses CA_CERT, KUBELET_CERT, and KUBELET_KEY to generate a kubeconfig file
-#   for the kubelet to securely connect to the apiserver.
+# - Uses KUBELET_CA_CERT (falling back to CA_CERT), KUBELET_CERT, and
+#   KUBELET_KEY to generate a kubeconfig file for the kubelet to securely
+#   connect to the apiserver.
+
 function create-salt-kubelet-auth() {
   local -r kubelet_kubeconfig_file="/srv/salt-overlay/salt/kubelet/kubeconfig"
   if [ ! -e "${kubelet_kubeconfig_file}" ]; then
+    # If there isn't a CA certificate set specifically for the kubelet, use
+    # the cluster CA certificate.
+    if [[ -z "${KUBELET_CA_CERT:-}" ]]; then
+      KUBELET_CA_CERT="${CA_CERT}"
+    fi
     mkdir -p /srv/salt-overlay/salt/kubelet
     (umask 077;
     cat > "${kubelet_kubeconfig_file}" <<EOF

@@ -378,7 +400,7 @@ users:
 clusters:
 - name: local
   cluster:
-    certificate-authority-data: ${CA_CERT}
+    certificate-authority-data: ${KUBELET_CA_CERT}
 contexts:
 - context:
     cluster: local

@@ -493,7 +515,6 @@ function salt-master-role() {
 grains:
   roles:
     - kubernetes-master
-  cbr-cidr: ${MASTER_IP_RANGE}
   cloud: gce
 EOF
   if ! [[ -z "${PROJECT_ID:-}" ]] && ! [[ -z "${TOKEN_URL:-}" ]] && ! [[ -z "${NODE_NETWORK:-}" ]] ; then

@@ -508,6 +529,21 @@ EOF
   cloud_config: /etc/gce.conf
   advertise_address: '${EXTERNAL_IP}'
   proxy_ssh_user: '${PROXY_SSH_USER}'
 EOF
   fi
+
+  # If the kubelet on the master is enabled, give it the same CIDR range
+  # as a generic node.
+  if [[ ! -z "${KUBELET_APISERVER:-}" ]] && [[ ! -z "${KUBELET_CERT:-}" ]] && [[ ! -z "${KUBELET_KEY:-}" ]]; then
+    cat <<EOF >>/etc/salt/minion.d/grains.conf
+  kubelet_api_servers: '${KUBELET_APISERVER}'
+  cbr-cidr: 10.123.45.0/30
+EOF
+  else
+    # If the kubelet is running disconnected from a master, give it a fixed
+    # CIDR range.
+    cat <<EOF >>/etc/salt/minion.d/grains.conf
+  cbr-cidr: ${MASTER_IP_RANGE}
+EOF
+  fi
 }

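To make the effect of the branch above concrete, here is a hedged sketch of roughly what /etc/salt/minion.d/grains.conf ends up containing on a master whose kubelet registers; the apiserver name and exact layout are illustrative rather than captured from a real VM.

# Illustrative only: grains.conf for a master with a registered kubelet, assuming
# KUBELET_APISERVER resolves to the hypothetical name "kubernetes-master".
cat <<EOF
grains:
  roles:
    - kubernetes-master
  cloud: gce
  kubelet_api_servers: 'kubernetes-master'
  cbr-cidr: 10.123.45.0/30
EOF
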
@@ -519,6 +555,7 @@ grains:
     - kubernetes-pool
   cbr-cidr: 10.123.45.0/30
   cloud: gce
+  api_servers: '${KUBERNETES_MASTER_NAME}'
 EOF
 }

@@ -536,12 +573,6 @@ EOF
   fi
 }
 
-function salt-set-apiserver() {
-  cat <<EOF >>/etc/salt/minion.d/grains.conf
-  api_servers: '${KUBERNETES_MASTER_NAME}'
-EOF
-}
-
 function configure-salt() {
   fix-apt-sources
   mkdir -p /etc/salt/minion.d

@@ -554,7 +585,6 @@ function configure-salt() {
   else
     salt-node-role
     salt-docker-opts
-    salt-set-apiserver
   fi
   install-salt
   stop-salt-minion

@@ -577,6 +607,7 @@ if [[ -z "${is_push}" ]]; then
   create-salt-pillar
   if [[ "${KUBERNETES_MASTER}" == "true" ]]; then
     create-salt-master-auth
+    create-salt-master-kubelet-auth
   else
     create-salt-kubelet-auth
     create-salt-kubeproxy-auth

@@ -59,8 +59,11 @@ RKT_VERSION: $(yaml-quote ${RKT_VERSION})
 CA_CERT: $(yaml-quote ${CA_CERT_BASE64})
 MASTER_CERT: $(yaml-quote ${MASTER_CERT_BASE64:-})
 MASTER_KEY: $(yaml-quote ${MASTER_KEY_BASE64:-})
+KUBELET_CERT: $(yaml-quote ${KUBELET_CERT_BASE64:-})
+KUBELET_KEY: $(yaml-quote ${KUBELET_KEY_BASE64:-})
 KUBECFG_CERT: $(yaml-quote ${KUBECFG_CERT_BASE64:-})
 KUBECFG_KEY: $(yaml-quote ${KUBECFG_KEY_BASE64:-})
+KUBELET_APISERVER: $(yaml-quote ${KUBELET_APISERVER:-})
 EOF
   else
     cat >>$file <<EOF

@@ -49,6 +49,8 @@ KUBE_PROXY_TOKEN: $(yaml-quote ${KUBE_PROXY_TOKEN:-})
 ADMISSION_CONTROL: $(yaml-quote ${ADMISSION_CONTROL:-})
 MASTER_IP_RANGE: $(yaml-quote ${MASTER_IP_RANGE})
 CA_CERT: $(yaml-quote ${CA_CERT_BASE64:-})
+KUBELET_CERT: $(yaml-quote ${KUBELET_CERT_BASE64:-})
+KUBELET_KEY: $(yaml-quote ${KUBELET_KEY_BASE64:-})
 EOF
   if [ -n "${KUBE_APISERVER_REQUEST_TIMEOUT:-}" ]; then
     cat >>$file <<EOF

@@ -66,6 +68,7 @@ MASTER_CERT: $(yaml-quote ${MASTER_CERT_BASE64:-})
 MASTER_KEY: $(yaml-quote ${MASTER_KEY_BASE64:-})
 KUBECFG_CERT: $(yaml-quote ${KUBECFG_CERT_BASE64:-})
 KUBECFG_KEY: $(yaml-quote ${KUBECFG_KEY_BASE64:-})
+KUBELET_APISERVER: $(yaml-quote ${KUBELET_APISERVER:-})
 EOF
     if [ -n "${APISERVER_TEST_ARGS:-}" ]; then
       cat >>$file <<EOF

@@ -93,8 +96,6 @@ EOF
 KUBERNETES_MASTER: "false"
 ZONE: $(yaml-quote ${ZONE})
 EXTRA_DOCKER_OPTS: $(yaml-quote ${EXTRA_DOCKER_OPTS:-})
-KUBELET_CERT: $(yaml-quote ${KUBELET_CERT_BASE64:-})
-KUBELET_KEY: $(yaml-quote ${KUBELET_KEY_BASE64:-})
 EOF
     if [ -n "${KUBELET_TEST_ARGS:-}" ]; then
       cat >>$file <<EOF

@@ -487,6 +487,11 @@ function yaml-quote {
 }
 
 function write-master-env {
+  # If the user requested that the master be part of the cluster, set the
+  # environment variable to program the master kubelet to register itself.
+  if [[ "${REGISTER_MASTER_KUBELET:-}" == "true" ]]; then
+    KUBELET_APISERVER="${MASTER_NAME}"
+  fi
   build-kube-env true "${KUBE_TEMP}/master-kube-env.yaml"
 }

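Taken together, write-master-env only sets KUBELET_APISERVER when registration was requested, and create-salt-master-kubelet-auth only writes the master kubelet's kubeconfig when that variable, plus the kubelet cert and key, made it into the master's environment. One hedged way to confirm the end result after bring-up (node names are deployment-specific; kubernetes-master is only the conventional default):

# Illustrative check only: with REGISTER_MASTER_KUBELET=true the master VM should now
# appear in the node list alongside the minions.
cluster/kubectl.sh get nodes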