Static cert distribution for GCE.

To make cert validation work, stop using the
FQDN for the master name on the node VMs.
Robert Bailey 2015-05-11 11:43:44 -07:00
parent 6c3365c418
commit 9ab41db7ea
4 changed files with 171 additions and 35 deletions

View File

@@ -245,14 +245,34 @@ admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")'
EOF
}
# This should only happen on cluster initialization. Uses
# KUBE_PASSWORD and KUBE_USER to generate basic_auth.csv. Uses
# KUBE_BEARER_TOKEN, KUBELET_TOKEN, and KUBE_PROXY_TOKEN to generate
# known_tokens.csv (KNOWN_TOKENS_FILE). After the first boot and
# on upgrade, this file exists on the master-pd and should never
# be touched again (except perhaps an additional service account,
# see NB below.)
# This should only happen on cluster initialization.
#
# - Uses KUBE_PASSWORD and KUBE_USER to generate basic_auth.csv.
# - Uses KUBE_BEARER_TOKEN, KUBELET_TOKEN, and KUBE_PROXY_TOKEN to generate
# known_tokens.csv (KNOWN_TOKENS_FILE).
# - Uses CA_CERT, MASTER_CERT, and MASTER_KEY to populate the SSL credentials
# for the apiserver.
# - Optionally uses KUBECFG_CERT and KUBECFG_KEY to store a copy of the client
# cert credentials.
#
# After the first boot and on upgrade, these files exist on the master-pd
# and should never be touched again (except perhaps an additional service
# account, see NB below.)
function create-salt-master-auth() {
if [[ ! -e /srv/kubernetes/ca.crt ]]; then
if [[ ! -z "${CA_CERT:-}" ]] && [[ ! -z "${MASTER_CERT:-}" ]] && [[ ! -z "${MASTER_KEY:-}" ]]; then
mkdir -p /srv/kubernetes
(umask 077;
echo "${CA_CERT}" | base64 -d > /srv/kubernetes/ca.crt;
echo "${MASTER_CERT}" | base64 -d > /srv/kubernetes/server.cert;
echo "${MASTER_KEY}" | base64 -d > /srv/kubernetes/server.key;
# Kubecfg cert/key are optional and included for backwards compatibility.
# TODO(roberthbailey): Remove these two lines once GKE no longer requires
# fetching client certs from the master VM.
echo "${KUBECFG_CERT:-}" | base64 -d > /srv/kubernetes/kubecfg.crt;
echo "${KUBECFG_KEY:-}" | base64 -d > /srv/kubernetes/kubecfg.key)
fi
fi
if [ ! -e "${BASIC_AUTH_FILE}" ]; then
mkdir -p /srv/salt-overlay/salt/kube-apiserver
(umask 077;
@@ -278,12 +298,46 @@ function create-salt-master-auth() {
fi
}
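
Everything in the cert branch above runs inside a umask 077 subshell, so the decoded key material is created mode 600. A minimal sketch for spot-checking the result on a master VM (not part of this commit; the paths match the function above and the openssl call is purely illustrative):

# Spot-check the credentials written by create-salt-master-auth (illustrative only).
set -o errexit -o nounset -o pipefail

for f in ca.crt server.cert server.key; do
  # Redirections inside the umask 077 subshell create these files as mode 600.
  stat -c '%a %n' "/srv/kubernetes/${f}"
done

# The apiserver's serving cert should chain to the CA that nodes are given.
openssl verify -CAfile /srv/kubernetes/ca.crt /srv/kubernetes/server.cert
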
# TODO(roberthbailey): Remove the insecure kubeconfig configuration files
# once the certs are being plumbed through for GKE.
function create-salt-node-auth() {
if [[ ! -e /srv/kubernetes/ca.crt ]]; then
if [[ ! -z "${CA_CERT:-}" ]] && [[ ! -z "${KUBELET_CERT:-}" ]] && [[ ! -z "${KUBELET_KEY:-}" ]]; then
mkdir -p /srv/kubernetes
(umask 077;
echo "${CA_CERT}" | base64 -d > /srv/kubernetes/ca.crt;
echo "${KUBELET_CERT}" | base64 -d > /srv/kubernetes/kubelet.crt;
echo "${KUBELET_KEY}" | base64 -d > /srv/kubernetes/kubelet.key)
fi
fi
kubelet_kubeconfig_file="/srv/salt-overlay/salt/kubelet/kubeconfig"
if [ ! -e "${kubelet_kubeconfig_file}" ]; then
mkdir -p /srv/salt-overlay/salt/kubelet
(umask 077;
cat > "${kubelet_kubeconfig_file}" <<EOF
if [[ ! -z "${CA_CERT:-}" ]] && [[ ! -z "${KUBELET_CERT:-}" ]] && [[ ! -z "${KUBELET_KEY:-}" ]]; then
(umask 077;
cat > "${kubelet_kubeconfig_file}" <<EOF
apiVersion: v1
kind: Config
users:
- name: kubelet
user:
client-certificate-data: ${KUBELET_CERT}
client-key-data: ${KUBELET_KEY}
clusters:
- name: local
cluster:
certificate-authority-data: ${CA_CERT}
contexts:
- context:
cluster: local
user: kubelet
name: service-account-context
current-context: service-account-context
EOF
)
else
(umask 077;
cat > "${kubelet_kubeconfig_file}" <<EOF
apiVersion: v1
kind: Config
users:
@@ -302,17 +356,36 @@ contexts:
current-context: service-account-context
EOF
)
fi
fi
kube_proxy_kubeconfig_file="/srv/salt-overlay/salt/kube-proxy/kubeconfig"
if [ ! -e "${kube_proxy_kubeconfig_file}" ]; then
mkdir -p /srv/salt-overlay/salt/kube-proxy
# Make a kubeconfig file with the token.
# TODO(etune): put apiserver certs into secret too, and reference from authfile,
# so that "Insecure" is not needed.
(umask 077;
cat > "${kube_proxy_kubeconfig_file}" <<EOF
if [[ ! -z "${CA_CERT}" ]]; then
(umask 077;
cat > "${kube_proxy_kubeconfig_file}" <<EOF
apiVersion: v1
kind: Config
users:
- name: kube-proxy
user:
token: ${KUBE_PROXY_TOKEN}
clusters:
- name: local
cluster:
certificate-authority-data: ${CA_CERT}
contexts:
- context:
cluster: local
user: kube-proxy
name: service-account-context
current-context: service-account-context
EOF
)
else
(umask 077;
cat > "${kube_proxy_kubeconfig_file}" <<EOF
apiVersion: v1
kind: Config
users:
@@ -331,6 +404,7 @@ contexts:
current-context: service-account-context
EOF
)
fi
fi
}
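
Both branches above hinge on a single condition: the cert-based kubelet kubeconfig is written only when the CA cert and the kubelet's client cert and key are all present; otherwise the insecure variant kept for GKE is written instead (the kube-proxy case checks only CA_CERT). A small sketch making that condition explicit, using a hypothetical helper that is not part of this commit:

# Hypothetical helper capturing the branch condition used for the kubelet
# kubeconfig above; the kube-proxy branch checks only CA_CERT.
have_node_certs() {
  [[ -n "${CA_CERT:-}" && -n "${KUBELET_CERT:-}" && -n "${KUBELET_KEY:-}" ]]
}

if have_node_certs; then
  echo "writing cert-based kubeconfig (certificate-authority-data + client cert/key)"
else
  echo "writing insecure kubeconfig (GKE compatibility path, see TODO above)"
fi
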
@@ -430,14 +504,8 @@ EOF
}
function salt-set-apiserver() {
local kube_master_fqdn
until kube_master_fqdn=$(getent hosts ${KUBERNETES_MASTER_NAME} | awk '{ print $2 }'); do
echo 'Waiting for DNS resolution of ${KUBERNETES_MASTER_NAME}...'
sleep 3
done
cat <<EOF >>/etc/salt/minion.d/grains.conf
api_servers: '${kube_master_fqdn}'
api_servers: '${KUBERNETES_MASTER_NAME}'
EOF
}
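
Because the grain now carries ${KUBERNETES_MASTER_NAME} directly (on GCE this is no longer an FQDN, per the commit message), cert validation can be exercised against that address as-is. An illustrative check that is not part of this commit; it relies only on curl's CA and SAN verification, and the endpoint may answer 401 without credentials, which is fine because the TLS handshake completes before any HTTP auth:

# Illustrative TLS check against the master name the grain now points at.
master="${KUBERNETES_MASTER_NAME}"

# Without --fail, curl exits 0 for any HTTP response but non-zero for TLS
# failures (unknown CA, SAN mismatch), so this exercises only the cert
# validation this commit is meant to make work.
if curl --cacert /srv/kubernetes/ca.crt --silent --output /dev/null \
    --max-time 5 "https://${master}/healthz"; then
  echo "TLS cert validates for ${master}"
else
  echo "TLS validation failed for ${master}" >&2
fi
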

View File

@@ -51,6 +51,11 @@ KUBE_PROXY_TOKEN: $(yaml-quote ${KUBE_PROXY_TOKEN:-})
ADMISSION_CONTROL: $(yaml-quote ${ADMISSION_CONTROL:-})
MASTER_IP_RANGE: $(yaml-quote ${MASTER_IP_RANGE})
KUBERNETES_CONTAINER_RUNTIME: $(yaml-quote ${CONTAINER_RUNTIME})
CA_CERT: $(yaml-quote ${CA_CERT_BASE64})
MASTER_CERT: $(yaml-quote ${MASTER_CERT_BASE64:-})
MASTER_KEY: $(yaml-quote ${MASTER_KEY_BASE64:-})
KUBECFG_CERT: $(yaml-quote ${KUBECFG_CERT_BASE64:-})
KUBECFG_KEY: $(yaml-quote ${KUBECFG_KEY_BASE64:-})
EOF
else
cat >>$file <<EOF
@@ -79,6 +84,9 @@ EXTRA_DOCKER_OPTS=$(yaml-quote ${EXTRA_DOCKER_OPTS})
ENABLE_DOCKER_REGISTRY_CACHE=$(yaml-quote ${ENABLE_DOCKER_REGISTRY_CACHE:-false})
PROJECT_ID=$(yaml-quote ${PROJECT})
KUBERNETES_CONTAINER_RUNTIME=$(yaml-quote ${CONTAINER_RUNTIME})
CA_CERT: $(yaml-quote ${CA_CERT_BASE64})
KUBELET_CERT: $(yaml-quote ${KUBELET_CERT_BASE64:-})
KUBELET_KEY: $(yaml-quote ${KUBELET_KEY_BASE64:-})
EOF
fi
}

View File

@@ -45,6 +45,7 @@ KUBELET_TOKEN: $(yaml-quote ${KUBELET_TOKEN:-})
KUBE_PROXY_TOKEN: $(yaml-quote ${KUBE_PROXY_TOKEN:-})
ADMISSION_CONTROL: $(yaml-quote ${ADMISSION_CONTROL:-})
MASTER_IP_RANGE: $(yaml-quote ${MASTER_IP_RANGE})
CA_CERT: $(yaml-quote ${CA_CERT_BASE64})
EOF
if [[ "${master}" == "true" ]]; then
@@ -53,6 +54,10 @@ EOF
KUBE_USER: $(yaml-quote ${KUBE_USER})
KUBE_PASSWORD: $(yaml-quote ${KUBE_PASSWORD})
KUBE_BEARER_TOKEN: $(yaml-quote ${KUBE_BEARER_TOKEN})
MASTER_CERT: $(yaml-quote ${MASTER_CERT_BASE64:-})
MASTER_KEY: $(yaml-quote ${MASTER_KEY_BASE64:-})
KUBECFG_CERT: $(yaml-quote ${KUBECFG_CERT_BASE64:-})
KUBECFG_KEY: $(yaml-quote ${KUBECFG_KEY_BASE64:-})
EOF
else
# Node-only env vars.
@@ -61,6 +66,8 @@ KUBERNETES_MASTER_NAME: $(yaml-quote ${MASTER_NAME})
ZONE: $(yaml-quote ${ZONE})
EXTRA_DOCKER_OPTS: $(yaml-quote ${EXTRA_DOCKER_OPTS})
ENABLE_DOCKER_REGISTRY_CACHE: $(yaml-quote ${ENABLE_DOCKER_REGISTRY_CACHE:-false})
KUBELET_CERT: $(yaml-quote ${KUBELET_CERT_BASE64:-})
KUBELET_KEY: $(yaml-quote ${KUBELET_KEY_BASE64:-})
EOF
fi
}

View File

@@ -479,6 +479,64 @@ function write-node-env {
build-kube-env false "${KUBE_TEMP}/node-kube-env.yaml"
}
# Create certificate pairs for the cluster.
# $1: The public IP for the master.
#
# These are used for static cert distribution (e.g. static clustering) at
# cluster creation time. This will be obsoleted once we implement dynamic
# clustering.
#
# The following certificate pairs are created:
#
# - ca (the cluster's certificate authority)
# - server
# - kubelet
# - kubecfg (for kubectl)
#
# TODO(roberthbailey): Replace easyrsa with a simple Go program to generate
# the certs that we need.
#
# Assumed vars
# KUBE_TEMP
#
# Vars set:
# CERT_DIR
# CA_CERT_BASE64
# MASTER_CERT_BASE64
# MASTER_KEY_BASE64
# KUBELET_CERT_BASE64
# KUBELET_KEY_BASE64
# KUBECFG_CERT_BASE64
# KUBECFG_KEY_BASE64
function create-certs {
local cert_ip
cert_ip="${1}"
# Note: This was heavily cribbed from make-ca-cert.sh
(cd "${KUBE_TEMP}"
curl -L -O https://storage.googleapis.com/kubernetes-release/easy-rsa/easy-rsa.tar.gz > /dev/null 2>&1
tar xzf easy-rsa.tar.gz > /dev/null 2>&1
cd easy-rsa-master/easyrsa3
./easyrsa init-pki > /dev/null 2>&1
./easyrsa --batch "--req-cn=${cert_ip}@$(date +%s)" build-ca nopass > /dev/null 2>&1
./easyrsa --subject-alt-name=IP:"${cert_ip}" build-server-full "${MASTER_NAME}" nopass > /dev/null 2>&1
./easyrsa build-client-full kubelet nopass > /dev/null 2>&1
./easyrsa build-client-full kubecfg nopass > /dev/null 2>&1) || {
# If there was an error in the subshell, just die.
# TODO(roberthbailey): add better error handling here
echo "=== Failed to generate certificates: Aborting ==="
exit 2
}
CERT_DIR="${KUBE_TEMP}/easy-rsa-master/easyrsa3"
CA_CERT_BASE64=$(cat "${CERT_DIR}/pki/ca.crt" | base64)
MASTER_CERT_BASE64=$(cat "${CERT_DIR}/pki/issued/${MASTER_NAME}.crt" | base64)
MASTER_KEY_BASE64=$(cat "${CERT_DIR}/pki/private/${MASTER_NAME}.key" | base64)
KUBELET_CERT_BASE64=$(cat "${CERT_DIR}/pki/issued/kubelet.crt" | base64)
KUBELET_KEY_BASE64=$(cat "${CERT_DIR}/pki/private/kubelet.key" | base64)
KUBECFG_CERT_BASE64=$(cat "${CERT_DIR}/pki/issued/kubecfg.crt" | base64)
KUBECFG_KEY_BASE64=$(cat "${CERT_DIR}/pki/private/kubecfg.key" | base64)
}
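
To convince yourself the easyrsa output matches what the distribution code expects, two checks are enough: the server cert must carry the master IP as a subjectAltName (that is what lets nodes and kubectl validate by address instead of FQDN), and the kubelet/kubecfg client certs must chain to the new CA. Illustrative commands only, assuming create-certs has already run so CERT_DIR and MASTER_NAME are set:

# The IP SAN is the point of static cert distribution: clients validate the
# apiserver by address, so the master IP must appear here.
openssl x509 -in "${CERT_DIR}/pki/issued/${MASTER_NAME}.crt" -noout -text \
  | grep -A1 'Subject Alternative Name'

# The client certs handed to the kubelet and to kubectl should verify
# against the freshly generated CA.
openssl verify -CAfile "${CERT_DIR}/pki/ca.crt" \
  "${CERT_DIR}/pki/issued/kubelet.crt" \
  "${CERT_DIR}/pki/issued/kubecfg.crt"
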
# Instantiate a kubernetes cluster
#
# Assumed vars
@@ -549,7 +607,9 @@ function kube-up {
--project "${PROJECT}" \
--region "${REGION}" -q --format yaml | awk '/^address:/ { print $2 }')
create-master-instance $MASTER_RESERVED_IP &
create-certs "${MASTER_RESERVED_IP}"
create-master-instance "${MASTER_RESERVED_IP}" &
# Create a single firewall rule for all minions.
create-firewall-rule "${MINION_TAG}-all" "${CLUSTER_IP_RANGE}" "${MINION_TAG}" &
@@ -594,7 +654,8 @@ function kube-up {
echo " up."
echo
until curl --insecure -H "Authorization: Bearer ${KUBE_BEARER_TOKEN}" \
until curl --cacert "${CERT_DIR}/pki/ca.crt" \
-H "Authorization: Bearer ${KUBE_BEARER_TOKEN}" \
--max-time 5 --fail --output /dev/null --silent \
"https://${KUBE_MASTER_IP}/api/v1beta3/pods"; do
printf "."
@@ -603,20 +664,12 @@ function kube-up {
echo "Kubernetes cluster created."
# TODO use token instead of basic auth
export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt"
export KUBE_KEY="/tmp/$RANDOM-kubecfg.key"
export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt"
export KUBE_CERT="${CERT_DIR}/pki/issued/kubecfg.crt"
export KUBE_KEY="${CERT_DIR}/pki/private/kubecfg.key"
export CA_CERT="${CERT_DIR}/pki/ca.crt"
export CONTEXT="${PROJECT}_${INSTANCE_PREFIX}"
# TODO: generate ADMIN (and KUBELET) tokens and put those in the master's
# config file. Distribute the same way the htpasswd is done.
(
umask 077
gcloud compute ssh --project "${PROJECT}" --zone "$ZONE" "${MASTER_NAME}" --command "sudo cat /srv/kubernetes/kubecfg.crt" >"${KUBE_CERT}" 2>/dev/null
gcloud compute ssh --project "${PROJECT}" --zone "$ZONE" "${MASTER_NAME}" --command "sudo cat /srv/kubernetes/kubecfg.key" >"${KUBE_KEY}" 2>/dev/null
gcloud compute ssh --project "${PROJECT}" --zone "$ZONE" "${MASTER_NAME}" --command "sudo cat /srv/kubernetes/ca.crt" >"${CA_CERT}" 2>/dev/null
create-kubeconfig
)
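
With KUBE_CERT, KUBE_KEY, and CA_CERT now pointing at locally generated files instead of copies pulled off the master over ssh, create-kubeconfig can build the local kubectl context without any remote round trips. The real implementation lives elsewhere in the tree; as a rough, assumed sketch of the equivalent kubectl invocations using the variables exported above:

# Assumed sketch only; create-kubeconfig itself is defined elsewhere.
kubectl config set-cluster "${CONTEXT}" \
  --server="https://${KUBE_MASTER_IP}" \
  --certificate-authority="${CA_CERT}" --embed-certs=true
kubectl config set-credentials "${CONTEXT}" \
  --client-certificate="${KUBE_CERT}" --client-key="${KUBE_KEY}" --embed-certs=true
kubectl config set-context "${CONTEXT}" --cluster="${CONTEXT}" --user="${CONTEXT}"
kubectl config use-context "${CONTEXT}"
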