Merge pull request #7303 from erictune/kube_env3

kube-proxy uses token to access port 443 of apiserver
CJ Cullen 2015-04-27 14:33:53 -07:00
commit 39c5bf363b
7 changed files with 111 additions and 13 deletions
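
This PR moves kube-proxy off the apiserver's plain-HTTP port 7080 and onto HTTPS (port 443), authenticating with a bearer token that the cluster turn-up scripts generate and ship to each node in a kubeconfig file. A rough sketch of the resulting auth flow is below; the master address is illustrative, and --insecure mirrors the insecure-skip-tls-verify setting in the generated kubeconfig (see the TODO about distributing the apiserver cert):

# Read the token out of the kubeconfig that Salt places on each node,
# then call the secure port the way kube-proxy now does.
MASTER_IP=10.240.0.2                                        # illustrative address
TOKEN=$(awk '/token:/ {print $2}' /var/lib/kube-proxy/kubeconfig)
curl --insecure -H "Authorization: Bearer ${TOKEN}" "https://${MASTER_IP}:443/api"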


@@ -40,19 +40,43 @@ mkdir -p /srv/salt-overlay/salt/nginx
 echo $MASTER_HTPASSWD > /srv/salt-overlay/salt/nginx/htpasswd
 # Generate and distribute a shared secret (bearer token) to
-# apiserver and kubelet so that kubelet can authenticate to
-# apiserver to send events.
+# apiserver and the nodes so that kubelet and kube-proxy can
+# authenticate to apiserver.
 # This works on CoreOS, so it should work on a lot of distros.
 kubelet_token=$(cat /dev/urandom | base64 | tr -d "=+/" | dd bs=32 count=1 2> /dev/null)
+kube_proxy_token=$(cat /dev/urandom | base64 | tr -d "=+/" | dd bs=32 count=1 2> /dev/null)
 # Make a list of tokens and usernames to be pushed to the apiserver
 mkdir -p /srv/salt-overlay/salt/kube-apiserver
 known_tokens_file="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv"
-(umask u=rw,go= ; echo "$kubelet_token,kubelet,kubelet" > $known_tokens_file)
+(umask u=rw,go= ; echo "$kubelet_token,kubelet,kubelet" > $known_tokens_file ;
+echo "$kube_proxy_token,kube_proxy,kube_proxy" >> $known_tokens_file)
 mkdir -p /srv/salt-overlay/salt/kubelet
 kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth"
 (umask u=rw,go= ; echo "{\"BearerToken\": \"$kubelet_token\", \"Insecure\": true }" > $kubelet_auth_file)
+mkdir -p /srv/salt-overlay/salt/kube-proxy
+kube_proxy_kubeconfig_file="/srv/salt-overlay/salt/kube-proxy/kubeconfig"
+cat > "${kube_proxy_kubeconfig_file}" <<EOF
+apiVersion: v1
+kind: Config
+users:
+- name: kube-proxy
+  user:
+    token: ${kube_proxy_token}
+clusters:
+- name: local
+  cluster:
+    insecure-skip-tls-verify: true
+contexts:
+- context:
+    cluster: local
+    user: kube-proxy
+  name: service-account-context
+current-context: service-account-context
+EOF
 # Generate tokens for other "service accounts". Append to known_tokens.
 #
 # NB: If this list ever changes, this script actually has to
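
For context on the file written above: each known_tokens.csv row is token,user,uid, and the apiserver uses it as its static list of accepted bearer tokens, so any client presenting one of these tokens is authenticated as the corresponding user. A minimal sketch of what the snippet produces (token values differ on every run):

# Same pipeline as above: base64 the random bytes, strip "=+/" so the token is
# plain alphanumeric, and keep the first 32 characters.
kubelet_token=$(cat /dev/urandom | base64 | tr -d "=+/" | dd bs=32 count=1 2> /dev/null)
kube_proxy_token=$(cat /dev/urandom | base64 | tr -d "=+/" | dd bs=32 count=1 2> /dev/null)
printf '%s\n' "$kubelet_token,kubelet,kubelet" "$kube_proxy_token,kube_proxy,kube_proxy"
# -> <32-char token>,kubelet,kubelet
#    <32-char token>,kube_proxy,kube_proxy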


@@ -238,7 +238,7 @@ EOF
 }
 # This should only happen on cluster initialization. Uses
-# KUBE_BEARER_TOKEN, KUBELET_TOKEN, and /dev/urandom to generate
+# KUBE_BEARER_TOKEN, KUBELET_TOKEN, and KUBE_PROXY_TOKEN to generate
 # known_tokens.csv (KNOWN_TOKENS_FILE). After the first boot and
 # on upgrade, this file exists on the master-pd and should never
 # be touched again (except perhaps an additional service account,
@@ -248,13 +248,40 @@ function create-salt-auth() {
   mkdir -p /srv/salt-overlay/salt/kube-apiserver
   (umask 077;
     echo "${KUBE_BEARER_TOKEN},admin,admin" > "${KNOWN_TOKENS_FILE}";
-    echo "${KUBELET_TOKEN},kubelet,kubelet" >> "${KNOWN_TOKENS_FILE}")
+    echo "${KUBELET_TOKEN},kubelet,kubelet" >> "${KNOWN_TOKENS_FILE}";
+    echo "${KUBE_PROXY_TOKEN},kube_proxy,kube_proxy" >> "${KNOWN_TOKENS_FILE}")
   mkdir -p /srv/salt-overlay/salt/kubelet
   kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth"
   (umask 077;
     echo "{\"BearerToken\": \"${KUBELET_TOKEN}\", \"Insecure\": true }" > "${kubelet_auth_file}")
+  mkdir -p /srv/salt-overlay/salt/kube-proxy
+  kube_proxy_kubeconfig_file="/srv/salt-overlay/salt/kube-proxy/kubeconfig"
+  # Make a kubeconfig file with the token.
+  # TODO(etune): put apiserver certs into secret too, and reference from authfile,
+  # so that "Insecure" is not needed.
+  (umask 077;
+  cat > "${kube_proxy_kubeconfig_file}" <<EOF
+apiVersion: v1
+kind: Config
+users:
+- name: kube-proxy
+  user:
+    token: ${KUBE_PROXY_TOKEN}
+clusters:
+- name: local
+  cluster:
+    insecure-skip-tls-verify: true
+contexts:
+- context:
+    cluster: local
+    user: kube-proxy
+  name: service-account-context
+current-context: service-account-context
+EOF
+)
   # Generate tokens for other "service accounts". Append to known_tokens.
   #
   # NB: If this list ever changes, this script actually has to
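
The TODO(etune) above is the reason insecure-skip-tls-verify is still set: the scripts distribute a token but not the apiserver's serving certificate, so kube-proxy cannot verify the endpoint it talks to. Below is a sketch of the shape the file could take once a CA bundle is also pushed to the nodes; the /var/lib/kube-proxy/ca.crt path is an assumption, nothing in this PR creates it:

# Hypothetical variant of create-salt-auth's kubeconfig, pinning a CA certificate
# instead of skipping TLS verification.
KUBE_PROXY_TOKEN="replace-with-generated-token"
kube_proxy_kubeconfig_file="/srv/salt-overlay/salt/kube-proxy/kubeconfig"
(umask 077;
cat > "${kube_proxy_kubeconfig_file}" <<EOF
apiVersion: v1
kind: Config
users:
- name: kube-proxy
  user:
    token: ${KUBE_PROXY_TOKEN}
clusters:
- name: local
  cluster:
    certificate-authority: /var/lib/kube-proxy/ca.crt
contexts:
- context:
    cluster: local
    user: kube-proxy
  name: service-account-context
current-context: service-account-context
EOF
)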


@@ -473,6 +473,7 @@ DNS_SERVER_IP: $(yaml-quote ${DNS_SERVER_IP:-})
 DNS_DOMAIN: $(yaml-quote ${DNS_DOMAIN:-})
 KUBE_BEARER_TOKEN: $(yaml-quote ${KUBE_BEARER_TOKEN})
 KUBELET_TOKEN: $(yaml-quote ${KUBELET_TOKEN:-})
+KUBE_PROXY_TOKEN: $(yaml-quote ${KUBE_PROXY_TOKEN:-})
 ADMISSION_CONTROL: $(yaml-quote ${ADMISSION_CONTROL:-})
 MASTER_IP_RANGE: $(yaml-quote ${MASTER_IP_RANGE})
 EOF
@@ -587,6 +588,7 @@ function kube-up {
   # computer) can forget it later. This should disappear with
   # https://github.com/GoogleCloudPlatform/kubernetes/issues/3168
   KUBELET_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
+  KUBE_PROXY_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
   # Reserve the master's IP so that it can later be transferred to another VM
   # without disrupting the kubelets. IPs are associated with regions, not zones,
@@ -824,7 +826,7 @@ function kube-push {
   # node-kube-env. This isn't important until the node-ip-range issue
   # is solved (because that's blocking automatic dynamic nodes from
   # working). The node-kube-env has to be composed with the KUBELET_TOKEN
-  # Ideally we would have
+  # and KUBE_PROXY_TOKEN. Ideally we would have
   # https://github.com/GoogleCloudPlatform/kubernetes/issues/3168
   # implemented before then, though, so avoiding this mess until then.


@@ -2,11 +2,18 @@
 {% if grains['os_family'] == 'RedHat' -%}
   {% set daemon_args = "" -%}
 {% endif -%}
-{% if grains.api_servers is defined -%}
-  {% set api_servers = "--master=http://" + grains.api_servers + ":7080" -%}
-{% else -%}
-  {% set ips = salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() -%}
+{# TODO(azure-maintainer): add support for distributing kubeconfig with token to kube-proxy #}
+{# so it can use https #}
+{% if grains['cloud'] is defined and grains['cloud'] == 'azure' -%}
   {% set api_servers = "--master=http://" + ips[0][0] + ":7080" -%}
+  {% set kubeconfig = "" -%}
+{% else -%}
+  {% set kubeconfig = "--kubeconfig=/var/lib/kube-proxy/kubeconfig" -%}
+  {% if grains.api_servers is defined -%}
+    {% set api_servers = "--master=https://" + grains.api_servers -%}
+  {% else -%}
+    {% set ips = salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() -%}
+    {% set api_servers = "--master=https://" + ips[0][0] -%}
+  {% endif -%}
 {% endif -%}
-DAEMON_ARGS="{{daemon_args}} {{api_servers}} {{pillar['log_level']}}"
+DAEMON_ARGS="{{daemon_args}} {{api_servers}} {{kubeconfig}} {{pillar['log_level']}}"


@@ -55,3 +55,12 @@ kube-proxy:
 {% if grains['os_family'] != 'RedHat' %}
       - file: /etc/init.d/kube-proxy
 {% endif %}
+      - file: /var/lib/kube-proxy/kubeconfig
+
+/var/lib/kube-proxy/kubeconfig:
+  file.managed:
+    - source: salt://kube-proxy/kubeconfig
+    - user: root
+    - group: root
+    - mode: 400
+    - makedirs: true


@@ -137,15 +137,44 @@ EOF
 known_tokens_file="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv"
 if [[ ! -f "${known_tokens_file}" ]]; then
   kubelet_token=$(cat /dev/urandom | base64 | tr -d "=+/" | dd bs=32 count=1 2> /dev/null)
+  kube_proxy_token=$(cat /dev/urandom | base64 | tr -d "=+/" | dd bs=32 count=1 2> /dev/null)
   mkdir -p /srv/salt-overlay/salt/kube-apiserver
   known_tokens_file="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv"
-  (umask u=rw,go= ; echo "$kubelet_token,kubelet,kubelet" > $known_tokens_file)
+  (umask u=rw,go= ;
+  echo "$kubelet_token,kubelet,kubelet" > $known_tokens_file;
+  echo "$kube_proxy_token,kube_proxy,kube_proxy" >> $known_tokens_file)
   mkdir -p /srv/salt-overlay/salt/kubelet
   kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth"
   (umask u=rw,go= ; echo "{\"BearerToken\": \"$kubelet_token\", \"Insecure\": true }" > $kubelet_auth_file)
+  mkdir -p /srv/salt-overlay/salt/kube-proxy
+  kube_proxy_kubeconfig_file="/srv/salt-overlay/salt/kube-proxy/kubeconfig"
+  # Make a kubeconfig file with the token.
+  # TODO(etune): put apiserver certs into secret too, and reference from authfile,
+  # so that "Insecure" is not needed.
+  (umask 077;
+  cat > "${kube_proxy_kubeconfig_file}" <<EOF
+apiVersion: v1
+kind: Config
+users:
+- name: kube-proxy
+  user:
+    token: ${kube_proxy_token}
+clusters:
+- name: local
+  cluster:
+    insecure-skip-tls-verify: true
+contexts:
+- context:
+    cluster: local
+    user: kube-proxy
+  name: service-account-context
+current-context: service-account-context
+EOF
+)
   # Generate tokens for other "service accounts". Append to known_tokens.
   #
   # NB: If this list ever changes, this script actually has to