diff --git a/cluster/aws/templates/create-dynamic-salt-files.sh b/cluster/aws/templates/create-dynamic-salt-files.sh index 33050b3591c..c06846ab9f4 100644 --- a/cluster/aws/templates/create-dynamic-salt-files.sh +++ b/cluster/aws/templates/create-dynamic-salt-files.sh @@ -40,19 +40,43 @@ mkdir -p /srv/salt-overlay/salt/nginx echo $MASTER_HTPASSWD > /srv/salt-overlay/salt/nginx/htpasswd # Generate and distribute a shared secret (bearer token) to -# apiserver and kubelet so that kubelet can authenticate to -# apiserver to send events. +# apiserver and the nodes so that kubelet and kube-proxy can +# authenticate to apiserver. # This works on CoreOS, so it should work on a lot of distros. kubelet_token=$(cat /dev/urandom | base64 | tr -d "=+/" | dd bs=32 count=1 2> /dev/null) +kube_proxy_token=$(cat /dev/urandom | base64 | tr -d "=+/" | dd bs=32 count=1 2> /dev/null) +# Make a list of tokens and usernames to be pushed to the apiserver mkdir -p /srv/salt-overlay/salt/kube-apiserver known_tokens_file="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv" -(umask u=rw,go= ; echo "$kubelet_token,kubelet,kubelet" > $known_tokens_file) +(umask u=rw,go= ; echo "$kubelet_token,kubelet,kubelet" > $known_tokens_file ; +echo "$kube_proxy_token,kube_proxy,kube_proxy" >> $known_tokens_file) mkdir -p /srv/salt-overlay/salt/kubelet kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth" (umask u=rw,go= ; echo "{\"BearerToken\": \"$kubelet_token\", \"Insecure\": true }" > $kubelet_auth_file) +mkdir -p /srv/salt-overlay/salt/kube-proxy +kube_proxy_kubeconfig_file="/srv/salt-overlay/salt/kube-proxy/kubeconfig" +cat > "${kube_proxy_kubeconfig_file}" < "${KNOWN_TOKENS_FILE}"; - echo "${KUBELET_TOKEN},kubelet,kubelet" >> "${KNOWN_TOKENS_FILE}") + echo "${KUBELET_TOKEN},kubelet,kubelet" >> "${KNOWN_TOKENS_FILE}"; + echo "${KUBE_PROXY_TOKEN},kube_proxy,kube_proxy" >> "${KNOWN_TOKENS_FILE}") mkdir -p /srv/salt-overlay/salt/kubelet 
kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth" (umask 077; echo "{\"BearerToken\": \"${KUBELET_TOKEN}\", \"Insecure\": true }" > "${kubelet_auth_file}") + mkdir -p /srv/salt-overlay/salt/kube-proxy + kube_proxy_kubeconfig_file="/srv/salt-overlay/salt/kube-proxy/kubeconfig" + # Make a kubeconfig file with the token. + # TODO(etune): put apiserver certs into secret too, and reference from authfile, + # so that "Insecure" is not needed. + (umask 077; + cat > "${kube_proxy_kubeconfig_file}" </dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) + KUBE_PROXY_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) # Reserve the master's IP so that it can later be transferred to another VM # without disrupting the kubelets. IPs are associated with regions, not zones, @@ -824,7 +826,7 @@ function kube-push { # node-kube-env. This isn't important until the node-ip-range issue # is solved (because that's blocking automatic dynamic nodes from # working). The node-kube-env has to be composed with the KUBELET_TOKEN - # Ideally we would have + # and KUBE_PROXY_TOKEN. Ideally we would have # https://github.com/GoogleCloudPlatform/kubernetes/issues/3168 # implemented before then, though, so avoiding this mess until then. 
diff --git a/cluster/saltbase/salt/kube-proxy/default b/cluster/saltbase/salt/kube-proxy/default index 292c1829071..b19a6fc4071 100644 --- a/cluster/saltbase/salt/kube-proxy/default +++ b/cluster/saltbase/salt/kube-proxy/default @@ -2,11 +2,19 @@ {% if grains['os_family'] == 'RedHat' -%} {% set daemon_args = "" -%} {% endif -%} -{% if grains.api_servers is defined -%} - {% set api_servers = "--master=http://" + grains.api_servers + ":7080" -%} -{% else -%} - {% set ips = salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() -%} +{# TODO(azure-maintainer): add support for distributing kubeconfig with token to kube-proxy #} +{# so it can use https #} +{% if grains['cloud'] is defined and grains['cloud'] == 'azure' -%} +  {% set ips = salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() -%} {% set api_servers = "--master=http://" + ips[0][0] + ":7080" -%} + {% set kubeconfig = "" -%} +{% else -%} + {% set kubeconfig = "--kubeconfig=/var/lib/kube-proxy/kubeconfig" -%} + {% if grains.api_servers is defined -%} + {% set api_servers = "--master=https://" + grains.api_servers -%} + {% else -%} + {% set ips = salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() -%} + {% set api_servers = "--master=https://" + ips[0][0] -%} + {% endif -%} {% endif -%} - -DAEMON_ARGS="{{daemon_args}} {{api_servers}} {{pillar['log_level']}}" +DAEMON_ARGS="{{daemon_args}} {{api_servers}} {{kubeconfig}} {{pillar['log_level']}}" diff --git a/cluster/saltbase/salt/kube-proxy/init.sls b/cluster/saltbase/salt/kube-proxy/init.sls index e920a7c5fdf..3d54452b2d2 100644 --- a/cluster/saltbase/salt/kube-proxy/init.sls +++ b/cluster/saltbase/salt/kube-proxy/init.sls @@ -55,3 +55,12 @@ kube-proxy: {% if grains['os_family'] != 'RedHat' %} - file: /etc/init.d/kube-proxy {% endif %} + - file: /var/lib/kube-proxy/kubeconfig + +/var/lib/kube-proxy/kubeconfig: + file.managed: + - source: salt://kube-proxy/kubeconfig + - user: root + - group: root + - mode: 400 + - makedirs: true diff --git 
a/cluster/saltbase/salt/kube-proxy/kubeconfig b/cluster/saltbase/salt/kube-proxy/kubeconfig new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cluster/vagrant/provision-master.sh b/cluster/vagrant/provision-master.sh index 64e15bf51df..7459d8fbf36 100755 --- a/cluster/vagrant/provision-master.sh +++ b/cluster/vagrant/provision-master.sh @@ -137,15 +137,44 @@ EOF known_tokens_file="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv" if [[ ! -f "${known_tokens_file}" ]]; then kubelet_token=$(cat /dev/urandom | base64 | tr -d "=+/" | dd bs=32 count=1 2> /dev/null) + kube_proxy_token=$(cat /dev/urandom | base64 | tr -d "=+/" | dd bs=32 count=1 2> /dev/null) mkdir -p /srv/salt-overlay/salt/kube-apiserver known_tokens_file="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv" -(umask u=rw,go= ; echo "$kubelet_token,kubelet,kubelet" > $known_tokens_file) +(umask u=rw,go= ; + echo "$kubelet_token,kubelet,kubelet" > $known_tokens_file; + echo "$kube_proxy_token,kube_proxy,kube_proxy" >> $known_tokens_file) mkdir -p /srv/salt-overlay/salt/kubelet kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth" (umask u=rw,go= ; echo "{\"BearerToken\": \"$kubelet_token\", \"Insecure\": true }" > $kubelet_auth_file) + mkdir -p /srv/salt-overlay/salt/kube-proxy + kube_proxy_kubeconfig_file="/srv/salt-overlay/salt/kube-proxy/kubeconfig" + # Make a kubeconfig file with the token.
 + # TODO(etune): put apiserver certs into secret too, and reference from authfile, + # so that "Insecure" is not needed. + (umask 077; + cat > "${kube_proxy_kubeconfig_file}" <