From bb179b6a4cf771d4b9a15d300e2e252b4e225e39 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Thu, 25 Jun 2015 15:09:09 -0400 Subject: [PATCH 1/8] Use a token for the scheduler, controller-manager, proxy and kubelet Before we used the kubecfg certificate for everything. Mint one token for each service and push it around where it belongs. --- .../roles/kubernetes-addons/tasks/main.yml | 7 +--- .../templates/kube-addons.service.j2 | 2 +- .../roles/kubernetes/defaults/main.yml | 2 + .../files/kube-gen-token.sh | 3 +- .../roles/kubernetes/tasks/gen_tokens.yml | 30 ++++++++++++++ .../ansible/roles/kubernetes/tasks/main.yml | 4 +- .../roles/kubernetes/tasks/place_certs.yml | 26 ------------ .../roles/kubernetes/tasks/place_secrets.yml | 40 +++++++++++++++++++ .../tasks/{certs.yml => secrets.yml} | 22 +++++++--- contrib/ansible/roles/master/tasks/main.yml | 40 ++++++++++++------- .../roles/master/templates/apiserver.j2 | 2 +- .../controller-manager.kubeconfig.j2 | 17 ++++---- .../master/templates/scheduler.kubeconfig.j2 | 17 ++++---- contrib/ansible/roles/node/tasks/main.yml | 24 +++++++---- .../node/templates/kubelet.kubeconfig.j2 | 11 +++-- .../roles/node/templates/proxy.kubeconfig.j2 | 25 ++++++------ 16 files changed, 171 insertions(+), 101 deletions(-) rename contrib/ansible/roles/{kubernetes-addons => kubernetes}/files/kube-gen-token.sh (91%) create mode 100644 contrib/ansible/roles/kubernetes/tasks/gen_tokens.yml delete mode 100644 contrib/ansible/roles/kubernetes/tasks/place_certs.yml create mode 100644 contrib/ansible/roles/kubernetes/tasks/place_secrets.yml rename contrib/ansible/roles/kubernetes/tasks/{certs.yml => secrets.yml} (74%) diff --git a/contrib/ansible/roles/kubernetes-addons/tasks/main.yml b/contrib/ansible/roles/kubernetes-addons/tasks/main.yml index 3da6617954c..67802394f35 100644 --- a/contrib/ansible/roles/kubernetes-addons/tasks/main.yml +++ b/contrib/ansible/roles/kubernetes-addons/tasks/main.yml @@ -33,13 +33,10 @@ - name: HACK | copy local kube-addon-update.sh copy: src=kube-addon-update.sh dest={{ kube_script_dir }}/kube-addon-update.sh mode=0755 -- name: Copy script to create known_tokens.csv - copy: src=kube-gen-token.sh dest={{ kube_script_dir }}/kube-gen-token.sh mode=0755 - -- name: Run kube-gen-token script to create {{ kube_config_dir }}/known_tokens.csv +- name: Run kube-gen-token script to create {{ kube_token_dir }}/known_tokens.csv command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item }}" environment: - TOKEN_DIR: "{{ kube_config_dir }}" + TOKEN_DIR: "{{ kube_token_dir }}" with_items: - "system:dns" register: gentoken diff --git a/contrib/ansible/roles/kubernetes-addons/templates/kube-addons.service.j2 b/contrib/ansible/roles/kubernetes-addons/templates/kube-addons.service.j2 index 38db35c8e5a..0bd58b057cd 100644 --- a/contrib/ansible/roles/kubernetes-addons/templates/kube-addons.service.j2 +++ b/contrib/ansible/roles/kubernetes-addons/templates/kube-addons.service.j2 @@ -3,7 +3,7 @@ Description=Kubernetes Addon Object Manager Documentation=https://github.com/GoogleCloudPlatform/kubernetes [Service] -Environment="TOKEN_DIR={{ kube_config_dir }}" +Environment="TOKEN_DIR={{ kube_token_dir }}" Environment="KUBECTL_BIN=/usr/bin/kubectl" Environment="KUBERNETES_MASTER_NAME={{ groups['masters'][0] }}" ExecStart={{ kube_script_dir }}/kube-addons.sh diff --git a/contrib/ansible/roles/kubernetes/defaults/main.yml b/contrib/ansible/roles/kubernetes/defaults/main.yml index 805b069b476..3a4eedc71cd 100644 --- a/contrib/ansible/roles/kubernetes/defaults/main.yml 
+++ b/contrib/ansible/roles/kubernetes/defaults/main.yml @@ -14,6 +14,8 @@ kube_config_dir: /etc/kubernetes # This is where all the cert scripts and certs will be located kube_cert_dir: "{{ kube_config_dir }}/certs" +# This is where all of the bearer tokens will be stored +kube_token_dir: "{{ kube_config_dir }}/tokens" # This is the group that the cert creation scripts chgrp the # cert files to. Not really changable... diff --git a/contrib/ansible/roles/kubernetes-addons/files/kube-gen-token.sh b/contrib/ansible/roles/kubernetes/files/kube-gen-token.sh similarity index 91% rename from contrib/ansible/roles/kubernetes-addons/files/kube-gen-token.sh rename to contrib/ansible/roles/kubernetes/files/kube-gen-token.sh index baa950c0129..fa6a5ddc752 100644 --- a/contrib/ansible/roles/kubernetes-addons/files/kube-gen-token.sh +++ b/contrib/ansible/roles/kubernetes/files/kube-gen-token.sh @@ -21,10 +21,11 @@ create_accounts=($@) touch "${token_file}" for account in "${create_accounts[@]}"; do - if grep "${account}" "${token_file}" ; then + if grep ",${account}," "${token_file}" ; then continue fi token=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) echo "${token},${account},${account}" >> "${token_file}" + echo "${token}" > "${token_dir}/${account}.token" echo "Added ${account}" done diff --git a/contrib/ansible/roles/kubernetes/tasks/gen_tokens.yml b/contrib/ansible/roles/kubernetes/tasks/gen_tokens.yml new file mode 100644 index 00000000000..fc13e74db66 --- /dev/null +++ b/contrib/ansible/roles/kubernetes/tasks/gen_tokens.yml @@ -0,0 +1,30 @@ +--- +- name: Copy the token gen script + copy: + src=kube-gen-token.sh + dest={{ kube_script_dir }} + mode=u+x + +- name: Generate tokens for master components + command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item }}" + environment: + TOKEN_DIR: "{{ kube_token_dir }}" + with_items: + - "system:controller_manager" + - "system:scheduler" + register: gentoken + changed_when: "'Added' in gentoken.stdout" + notify: + - restart daemons + +- name: Generate tokens for node components + command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item }}" + environment: + TOKEN_DIR: "{{ kube_token_dir }}" + with_items: + - "system:kubelet" + - "system:proxy" + register: gentoken + changed_when: "'Added' in gentoken.stdout" + notify: + - restart daemons diff --git a/contrib/ansible/roles/kubernetes/tasks/main.yml b/contrib/ansible/roles/kubernetes/tasks/main.yml index f1008991833..ce7699f2ac5 100644 --- a/contrib/ansible/roles/kubernetes/tasks/main.yml +++ b/contrib/ansible/roles/kubernetes/tasks/main.yml @@ -18,6 +18,6 @@ notify: - restart daemons -- include: certs.yml +- include: secrets.yml tags: - certs + secrets diff --git a/contrib/ansible/roles/kubernetes/tasks/place_certs.yml b/contrib/ansible/roles/kubernetes/tasks/place_certs.yml deleted file mode 100644 index 2271fd1115a..00000000000 --- a/contrib/ansible/roles/kubernetes/tasks/place_certs.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -- name: place ssh public key on other nodes so apiserver can push certs - authorized_key: user=root key="{{ item }}" state=present - with_file: - - '/tmp/id_rsa.pub' - changed_when: false - -- name: Copy certificates directly from the apiserver to nodes - synchronize: - src={{ kube_cert_dir }}/{{ item }} - dest={{ kube_cert_dir }}/{{ item }} - rsync_timeout=30 - set_remote_user=no - delegate_to: "{{ groups['masters'][0] }}" - with_items: - - "ca.crt" - - "kubecfg.crt" - - "kubecfg.key" - notify: - - restart daemons - -- name: 
remove ssh public key so apiserver can not push stuff - authorized_key: user=root key="{{ item }}" state=absent - with_file: - - '/tmp/id_rsa.pub' - changed_when: false diff --git a/contrib/ansible/roles/kubernetes/tasks/place_secrets.yml b/contrib/ansible/roles/kubernetes/tasks/place_secrets.yml new file mode 100644 index 00000000000..e832b968b00 --- /dev/null +++ b/contrib/ansible/roles/kubernetes/tasks/place_secrets.yml @@ -0,0 +1,40 @@ +--- +- name: place ssh public key so apiserver can push certs + authorized_key: user=root key="{{ item }}" state=present + with_file: + - '/tmp/id_rsa.pub' + changed_when: false + +- name: Copy certificates directly from the apiserver to nodes + synchronize: src={{ kube_cert_dir }}/{{ item }} dest={{ kube_cert_dir }}/{{ item }} + delegate_to: "{{ groups['masters'][0] }}" + with_items: + - "ca.crt" + notify: + - restart daemons + +- name: Copy master tokens to the masters + synchronize: src={{ kube_token_dir }}/{{ item }} dest={{ kube_token_dir }}/{{ item }} + delegate_to: "{{ groups['masters'][0] }}" + with_items: + - "system:controller_manager.token" + - "system:scheduler.token" + notify: + - restart daemons + when: inventory_hostname in groups['masters'] + +- name: Copy node tokens to the nodes + synchronize: src={{ kube_token_dir }}/{{ item }} dest={{ kube_token_dir }}/{{ item }} + delegate_to: "{{ groups['masters'][0] }}" + with_items: + - "system:kubelet.token" + - "system:proxy.token" + notify: + - restart daemons + when: inventory_hostname in groups['nodes'] + +- name: remove ssh public key so apiserver can not push stuff + authorized_key: user=root key="{{ item }}" state=absent + with_file: + - '/tmp/id_rsa.pub' + changed_when: false diff --git a/contrib/ansible/roles/kubernetes/tasks/certs.yml b/contrib/ansible/roles/kubernetes/tasks/secrets.yml similarity index 74% rename from contrib/ansible/roles/kubernetes/tasks/certs.yml rename to contrib/ansible/roles/kubernetes/tasks/secrets.yml index 338e975f61d..b52f4a179f1 100644 --- a/contrib/ansible/roles/kubernetes/tasks/certs.yml +++ b/contrib/ansible/roles/kubernetes/tasks/secrets.yml @@ -18,14 +18,27 @@ mode=o-rwx group={{ kube_cert_group }} -- name: Install rsync to push certs around +- name: make sure the tokens directory exits + file: + path={{ kube_token_dir }} + state=directory + mode=o-rwx + group={{ kube_cert_group }} + +- include: gen_certs.yml + when: inventory_hostname == groups['masters'][0] + +- include: gen_tokens.yml + when: inventory_hostname == groups['masters'][0] + +- name: Install rsync to push secrets around action: "{{ ansible_pkg_mgr }}" args: name: rsync state: latest when: not is_atomic -- name: Generating RSA key for cert node to push to others +- name: Generating RSA key for master node to push to others user: name=root generate_ssh_key=yes run_once: true delegate_to: "{{ groups['masters'][0] }}" @@ -40,10 +53,7 @@ delegate_to: "{{ groups['masters'][0] }}" changed_when: false -- include: gen_certs.yml - when: inventory_hostname == groups['masters'][0] - -- include: place_certs.yml +- include: place_secrets.yml - name: Delete the downloaded pub key local_action: file path=/tmp/id_rsa.pub state=absent diff --git a/contrib/ansible/roles/master/tasks/main.yml b/contrib/ansible/roles/master/tasks/main.yml index 3bbd9e204e2..a1b4511fcde 100644 --- a/contrib/ansible/roles/master/tasks/main.yml +++ b/contrib/ansible/roles/master/tasks/main.yml @@ -11,39 +11,49 @@ - restart apiserver - name: Ensure that a token auth file exists (addons may populate it) - file: 
path={{kube_config_dir }}/known_tokens.csv state=touch + file: path={{kube_token_dir }}/known_tokens.csv state=touch changed_when: false +- name: add cap_net_bind_service to kube-apiserver + capabilities: path=/usr/bin/kube-apiserver capability=cap_net_bind_service=ep state=present + when: not is_atomic + +- name: Enable apiserver + service: name=kube-apiserver enabled=yes state=started + - name: write the config file for the controller-manager template: src=controller-manager.j2 dest={{ kube_config_dir }}/controller-manager notify: - restart controller-manager -- name: write the config file for the scheduler - template: src=scheduler.j2 dest={{ kube_config_dir }}/scheduler - notify: - - restart scheduler - -- name: add cap_net_bind_service to kube-apiserver - capabilities: path=/usr/bin/kube-apiserver capability=cap_net_bind_service=ep state=present - when: not is_atomic +- name: Get the controller-manager token value + slurp: + src: "{{ kube_token_dir }}/system:controller_manager.token" + register: controller_manager_token - name: write the kubecfg (auth) file for controller-manager template: src=controller-manager.kubeconfig.j2 dest={{ kube_config_dir }}/controller-manager.kubeconfig notify: - restart controller-manager +- name: Enable controller-manager + service: name=kube-controller-manager enabled=yes state=started + +- name: write the config file for the scheduler + template: src=scheduler.j2 dest={{ kube_config_dir }}/scheduler + notify: + - restart scheduler + +- name: Get the scheduler token value + slurp: + src: "{{ kube_token_dir }}/system:scheduler.token" + register: scheduler_token + - name: write the kubecfg (auth) file for scheduler template: src=scheduler.kubeconfig.j2 dest={{ kube_config_dir }}/scheduler.kubeconfig notify: - restart scheduler -- name: Enable apiserver - service: name=kube-apiserver enabled=yes state=started - -- name: Enable controller-manager - service: name=kube-controller-manager enabled=yes state=started - - name: Enable scheduler service: name=kube-scheduler enabled=yes state=started diff --git a/contrib/ansible/roles/master/templates/apiserver.j2 b/contrib/ansible/roles/master/templates/apiserver.j2 index c0787286da7..03252ba31b6 100644 --- a/contrib/ansible/roles/master/templates/apiserver.j2 +++ b/contrib/ansible/roles/master/templates/apiserver.j2 @@ -23,4 +23,4 @@ KUBE_ETCD_SERVERS="--etcd_servers={% for node in groups['etcd'] %}http://{{ node KUBE_ADMISSION_CONTROL="--admission_control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota" # Add your own! 
-KUBE_API_ARGS="--tls_cert_file={{ kube_cert_dir }}/server.cert --tls_private_key_file={{ kube_cert_dir }}/server.key --client_ca_file={{ kube_cert_dir }}/ca.crt --token_auth_file={{ kube_config_dir }}/known_tokens.csv" +KUBE_API_ARGS="--tls_cert_file={{ kube_cert_dir }}/server.cert --tls_private_key_file={{ kube_cert_dir }}/server.key --client_ca_file={{ kube_cert_dir }}/ca.crt --token_auth_file={{ kube_token_dir }}/known_tokens.csv" diff --git a/contrib/ansible/roles/master/templates/controller-manager.kubeconfig.j2 b/contrib/ansible/roles/master/templates/controller-manager.kubeconfig.j2 index adf812936b1..d36522091c6 100644 --- a/contrib/ansible/roles/master/templates/controller-manager.kubeconfig.j2 +++ b/contrib/ansible/roles/master/templates/controller-manager.kubeconfig.j2 @@ -1,19 +1,18 @@ apiVersion: v1 +kind: Config +current-context: controller-manager-to-{{ cluster_name }} +preferences: {} clusters: - cluster: certificate-authority: {{ kube_cert_dir }}/ca.crt - server: http://{{ groups['masters'][0] }}:443 + server: https://{{ groups['masters'][0] }}:443 name: {{ cluster_name }} contexts: - context: cluster: {{ cluster_name }} - user: kubelet - name: kubelet-to-{{ cluster_name }} -current-context: kubelet-to-{{ cluster_name }} -kind: Config -preferences: {} + user: controller-manager + name: controller-manager-to-{{ cluster_name }} users: -- name: kubelet +- name: controller-manager user: - client-certificate: {{ kube_cert_dir }}/kubecfg.crt - client-key: {{ kube_cert_dir }}/kubecfg.key + token: {{ controller_manager_token.content|b64decode }} diff --git a/contrib/ansible/roles/master/templates/scheduler.kubeconfig.j2 b/contrib/ansible/roles/master/templates/scheduler.kubeconfig.j2 index adf812936b1..d8031f761cb 100644 --- a/contrib/ansible/roles/master/templates/scheduler.kubeconfig.j2 +++ b/contrib/ansible/roles/master/templates/scheduler.kubeconfig.j2 @@ -1,19 +1,18 @@ apiVersion: v1 +kind: Config +current-context: scheduler-to-{{ cluster_name }} +preferences: {} clusters: - cluster: certificate-authority: {{ kube_cert_dir }}/ca.crt - server: http://{{ groups['masters'][0] }}:443 + server: https://{{ groups['masters'][0] }}:443 name: {{ cluster_name }} contexts: - context: cluster: {{ cluster_name }} - user: kubelet - name: kubelet-to-{{ cluster_name }} -current-context: kubelet-to-{{ cluster_name }} -kind: Config -preferences: {} + user: scheduler + name: scheduler-to-{{ cluster_name }} users: -- name: kubelet +- name: scheduler user: - client-certificate: {{ kube_cert_dir }}/kubecfg.crt - client-key: {{ kube_cert_dir }}/kubecfg.key + token: {{ scheduler_token.content|b64decode }} diff --git a/contrib/ansible/roles/node/tasks/main.yml b/contrib/ansible/roles/node/tasks/main.yml index 08c815878a6..f23bf1787fd 100644 --- a/contrib/ansible/roles/node/tasks/main.yml +++ b/contrib/ansible/roles/node/tasks/main.yml @@ -19,24 +19,34 @@ notify: - restart kubelet -- name: write the config files for proxy - template: src=proxy.j2 dest={{ kube_config_dir }}/proxy - notify: - - restart proxy +- name: Get the kubelet token value + slurp: + src: "{{ kube_token_dir }}/system:kubelet.token" + register: kubelet_token - name: write the kubecfg (auth) file for kubelet template: src=kubelet.kubeconfig.j2 dest={{ kube_config_dir }}/kubelet.kubeconfig notify: - restart kubelet +- name: Enable kubelet + service: name=kubelet enabled=yes state=started + +- name: write the config files for proxy + template: src=proxy.j2 dest={{ kube_config_dir }}/proxy + notify: + - restart proxy + +- name: Get the 
proxy token value + slurp: + src: "{{ kube_token_dir }}/system:proxy.token" + register: proxy_token + - name: write the kubecfg (auth) file for kube-proxy template: src=proxy.kubeconfig.j2 dest={{ kube_config_dir }}/proxy.kubeconfig notify: - restart proxy -- name: Enable kubelet - service: name=kubelet enabled=yes state=started - - name: Enable proxy service: name=kube-proxy enabled=yes state=started diff --git a/contrib/ansible/roles/node/templates/kubelet.kubeconfig.j2 b/contrib/ansible/roles/node/templates/kubelet.kubeconfig.j2 index adf812936b1..1c15a436542 100644 --- a/contrib/ansible/roles/node/templates/kubelet.kubeconfig.j2 +++ b/contrib/ansible/roles/node/templates/kubelet.kubeconfig.j2 @@ -1,19 +1,18 @@ apiVersion: v1 +kind: Config +current-context: kubelet-to-{{ cluster_name }} +preferences: {} clusters: - cluster: certificate-authority: {{ kube_cert_dir }}/ca.crt - server: http://{{ groups['masters'][0] }}:443 + server: https://{{ groups['masters'][0] }}:443 name: {{ cluster_name }} contexts: - context: cluster: {{ cluster_name }} user: kubelet name: kubelet-to-{{ cluster_name }} -current-context: kubelet-to-{{ cluster_name }} -kind: Config -preferences: {} users: - name: kubelet user: - client-certificate: {{ kube_cert_dir }}/kubecfg.crt - client-key: {{ kube_cert_dir }}/kubecfg.key + token: {{ kubelet_token.content|b64decode }} diff --git a/contrib/ansible/roles/node/templates/proxy.kubeconfig.j2 b/contrib/ansible/roles/node/templates/proxy.kubeconfig.j2 index adf812936b1..35018bea3f0 100644 --- a/contrib/ansible/roles/node/templates/proxy.kubeconfig.j2 +++ b/contrib/ansible/roles/node/templates/proxy.kubeconfig.j2 @@ -1,19 +1,18 @@ apiVersion: v1 -clusters: -- cluster: - certificate-authority: {{ kube_cert_dir }}/ca.crt - server: http://{{ groups['masters'][0] }}:443 - name: {{ cluster_name }} +kind: Config +current-context: proxy-to-{{ cluster_name }} +preferences: {} contexts: - context: cluster: {{ cluster_name }} - user: kubelet - name: kubelet-to-{{ cluster_name }} -current-context: kubelet-to-{{ cluster_name }} -kind: Config -preferences: {} + user: proxy + name: proxy-to-{{ cluster_name }} +clusters: +- cluster: + certificate-authority: {{ kube_cert_dir }}/ca.crt + server: https://{{ groups['masters'][0] }}:443 + name: {{ cluster_name }} users: -- name: kubelet +- name: proxy user: - client-certificate: {{ kube_cert_dir }}/kubecfg.crt - client-key: {{ kube_cert_dir }}/kubecfg.key + token: {{ proxy_token.content|b64decode }} From c6f284183947e70a81a4914639129ac5096fe0cf Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Thu, 25 Jun 2015 18:35:41 -0400 Subject: [PATCH 2/8] Place a different token for every node/daemon combination We can now revoke one token at a time! 
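
As an illustration (the hostnames below are made up, and {{ kube_token_dir }}
defaults to /etc/kubernetes/tokens), a cluster with node1 and node2 in the
[nodes] inventory group now gets one token file per node/daemon pair on the
first master:

    system:kubelet-node1.token
    system:proxy-node1.token
    system:kubelet-node2.token
    system:proxy-node2.token

Each of these also lands as its own row in known_tokens.csv, so removing a
single row (and restarting the apiserver) should cut off just that one daemon
on that one node without disturbing the rest.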
--- .../roles/kubernetes/tasks/gen_tokens.yml | 8 +++---- .../roles/kubernetes/tasks/place_secrets.yml | 10 -------- contrib/ansible/roles/node/tasks/main.yml | 24 +++++++++++-------- .../node/templates/kubelet.kubeconfig.j2 | 2 +- .../roles/node/templates/proxy.kubeconfig.j2 | 2 +- 5 files changed, 20 insertions(+), 26 deletions(-) diff --git a/contrib/ansible/roles/kubernetes/tasks/gen_tokens.yml b/contrib/ansible/roles/kubernetes/tasks/gen_tokens.yml index fc13e74db66..2920b46ef8c 100644 --- a/contrib/ansible/roles/kubernetes/tasks/gen_tokens.yml +++ b/contrib/ansible/roles/kubernetes/tasks/gen_tokens.yml @@ -18,12 +18,12 @@ - restart daemons - name: Generate tokens for node components - command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item }}" + command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}" environment: TOKEN_DIR: "{{ kube_token_dir }}" - with_items: - - "system:kubelet" - - "system:proxy" + with_nested: + - [ 'system:kubelet', 'system:proxy' ] + - "{{ groups['nodes'] }}" register: gentoken changed_when: "'Added' in gentoken.stdout" notify: diff --git a/contrib/ansible/roles/kubernetes/tasks/place_secrets.yml b/contrib/ansible/roles/kubernetes/tasks/place_secrets.yml index e832b968b00..61a53c3d8d5 100644 --- a/contrib/ansible/roles/kubernetes/tasks/place_secrets.yml +++ b/contrib/ansible/roles/kubernetes/tasks/place_secrets.yml @@ -23,16 +23,6 @@ - restart daemons when: inventory_hostname in groups['masters'] -- name: Copy node tokens to the nodes - synchronize: src={{ kube_token_dir }}/{{ item }} dest={{ kube_token_dir }}/{{ item }} - delegate_to: "{{ groups['masters'][0] }}" - with_items: - - "system:kubelet.token" - - "system:proxy.token" - notify: - - restart daemons - when: inventory_hostname in groups['nodes'] - - name: remove ssh public key so apiserver can not push stuff authorized_key: user=root key="{{ item }}" state=absent with_file: diff --git a/contrib/ansible/roles/node/tasks/main.yml b/contrib/ansible/roles/node/tasks/main.yml index f23bf1787fd..1d74a2821cf 100644 --- a/contrib/ansible/roles/node/tasks/main.yml +++ b/contrib/ansible/roles/node/tasks/main.yml @@ -14,16 +14,25 @@ - include: centos.yml when: not is_atomic and ansible_distribution == "CentOS" +- name: Get the node token values + slurp: + src: "{{ kube_token_dir }}/{{ item }}-{{ inventory_hostname }}.token" + with_items: + - "system:kubelet" + - "system:proxy" + register: tokens + delegate_to: "{{ groups['masters'][0] }}" + +- name: Set token facts + set_fact: + kubelet_token: "{{ tokens.results[0].content|b64decode }}" + proxy_token: "{{ tokens.results[1].content|b64decode }}" + - name: write the config files for kubelet template: src=kubelet.j2 dest={{ kube_config_dir }}/kubelet notify: - restart kubelet -- name: Get the kubelet token value - slurp: - src: "{{ kube_token_dir }}/system:kubelet.token" - register: kubelet_token - - name: write the kubecfg (auth) file for kubelet template: src=kubelet.kubeconfig.j2 dest={{ kube_config_dir }}/kubelet.kubeconfig notify: @@ -37,11 +46,6 @@ notify: - restart proxy -- name: Get the proxy token value - slurp: - src: "{{ kube_token_dir }}/system:proxy.token" - register: proxy_token - - name: write the kubecfg (auth) file for kube-proxy template: src=proxy.kubeconfig.j2 dest={{ kube_config_dir }}/proxy.kubeconfig notify: diff --git a/contrib/ansible/roles/node/templates/kubelet.kubeconfig.j2 b/contrib/ansible/roles/node/templates/kubelet.kubeconfig.j2 index 1c15a436542..b9c22fa63f5 100644 --- 
a/contrib/ansible/roles/node/templates/kubelet.kubeconfig.j2 +++ b/contrib/ansible/roles/node/templates/kubelet.kubeconfig.j2 @@ -15,4 +15,4 @@ contexts: users: - name: kubelet user: - token: {{ kubelet_token.content|b64decode }} + token: {{ kubelet_token }} diff --git a/contrib/ansible/roles/node/templates/proxy.kubeconfig.j2 b/contrib/ansible/roles/node/templates/proxy.kubeconfig.j2 index 35018bea3f0..f5d109816a5 100644 --- a/contrib/ansible/roles/node/templates/proxy.kubeconfig.j2 +++ b/contrib/ansible/roles/node/templates/proxy.kubeconfig.j2 @@ -15,4 +15,4 @@ clusters: users: - name: proxy user: - token: {{ proxy_token.content|b64decode }} + token: {{ proxy_token }} From 9f4bfd144fed848f2df9d3b78daafd31915f3234 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Thu, 25 Jun 2015 19:18:29 -0400 Subject: [PATCH 3/8] Per master tokens for the scheduler and controller-manager --- .../roles/kubernetes/tasks/gen_tokens.yml | 8 +++---- .../roles/kubernetes/tasks/place_secrets.yml | 10 -------- contrib/ansible/roles/master/tasks/main.yml | 24 +++++++++++-------- .../controller-manager.kubeconfig.j2 | 2 +- .../master/templates/scheduler.kubeconfig.j2 | 2 +- 5 files changed, 20 insertions(+), 26 deletions(-) diff --git a/contrib/ansible/roles/kubernetes/tasks/gen_tokens.yml b/contrib/ansible/roles/kubernetes/tasks/gen_tokens.yml index 2920b46ef8c..ad11be10a27 100644 --- a/contrib/ansible/roles/kubernetes/tasks/gen_tokens.yml +++ b/contrib/ansible/roles/kubernetes/tasks/gen_tokens.yml @@ -6,12 +6,12 @@ mode=u+x - name: Generate tokens for master components - command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item }}" + command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}" environment: TOKEN_DIR: "{{ kube_token_dir }}" - with_items: - - "system:controller_manager" - - "system:scheduler" + with_nested: + - [ "system:controller_manager", "system:scheduler" ] + - "{{ groups['masters'] }}" register: gentoken changed_when: "'Added' in gentoken.stdout" notify: diff --git a/contrib/ansible/roles/kubernetes/tasks/place_secrets.yml b/contrib/ansible/roles/kubernetes/tasks/place_secrets.yml index 61a53c3d8d5..7cde9821544 100644 --- a/contrib/ansible/roles/kubernetes/tasks/place_secrets.yml +++ b/contrib/ansible/roles/kubernetes/tasks/place_secrets.yml @@ -13,16 +13,6 @@ notify: - restart daemons -- name: Copy master tokens to the masters - synchronize: src={{ kube_token_dir }}/{{ item }} dest={{ kube_token_dir }}/{{ item }} - delegate_to: "{{ groups['masters'][0] }}" - with_items: - - "system:controller_manager.token" - - "system:scheduler.token" - notify: - - restart daemons - when: inventory_hostname in groups['masters'] - - name: remove ssh public key so apiserver can not push stuff authorized_key: user=root key="{{ item }}" state=absent with_file: diff --git a/contrib/ansible/roles/master/tasks/main.yml b/contrib/ansible/roles/master/tasks/main.yml index a1b4511fcde..81ca16aefe0 100644 --- a/contrib/ansible/roles/master/tasks/main.yml +++ b/contrib/ansible/roles/master/tasks/main.yml @@ -21,16 +21,25 @@ - name: Enable apiserver service: name=kube-apiserver enabled=yes state=started +- name: Get the node token values + slurp: + src: "{{ kube_token_dir }}/{{ item }}-{{ inventory_hostname }}.token" + with_items: + - "system:controller_manager" + - "system:scheduler" + register: tokens + delegate_to: "{{ groups['masters'][0] }}" + +- name: Set token facts + set_fact: + controller_manager_token: "{{ tokens.results[0].content|b64decode }}" + scheduler_token: "{{ 
tokens.results[1].content|b64decode }}" + - name: write the config file for the controller-manager template: src=controller-manager.j2 dest={{ kube_config_dir }}/controller-manager notify: - restart controller-manager -- name: Get the controller-manager token value - slurp: - src: "{{ kube_token_dir }}/system:controller_manager.token" - register: controller_manager_token - - name: write the kubecfg (auth) file for controller-manager template: src=controller-manager.kubeconfig.j2 dest={{ kube_config_dir }}/controller-manager.kubeconfig notify: @@ -44,11 +53,6 @@ notify: - restart scheduler -- name: Get the scheduler token value - slurp: - src: "{{ kube_token_dir }}/system:scheduler.token" - register: scheduler_token - - name: write the kubecfg (auth) file for scheduler template: src=scheduler.kubeconfig.j2 dest={{ kube_config_dir }}/scheduler.kubeconfig notify: diff --git a/contrib/ansible/roles/master/templates/controller-manager.kubeconfig.j2 b/contrib/ansible/roles/master/templates/controller-manager.kubeconfig.j2 index d36522091c6..96703b5ed5b 100644 --- a/contrib/ansible/roles/master/templates/controller-manager.kubeconfig.j2 +++ b/contrib/ansible/roles/master/templates/controller-manager.kubeconfig.j2 @@ -15,4 +15,4 @@ contexts: users: - name: controller-manager user: - token: {{ controller_manager_token.content|b64decode }} + token: {{ controller_manager_token }} diff --git a/contrib/ansible/roles/master/templates/scheduler.kubeconfig.j2 b/contrib/ansible/roles/master/templates/scheduler.kubeconfig.j2 index d8031f761cb..300783dd3e2 100644 --- a/contrib/ansible/roles/master/templates/scheduler.kubeconfig.j2 +++ b/contrib/ansible/roles/master/templates/scheduler.kubeconfig.j2 @@ -15,4 +15,4 @@ contexts: users: - name: scheduler user: - token: {{ scheduler_token.content|b64decode }} + token: {{ scheduler_token }} From a127ce7266844b2e469cfe90af50d2a6615953cb Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Thu, 25 Jun 2015 19:44:48 -0400 Subject: [PATCH 4/8] Use slurp to get the ca.crt --- .../roles/kubernetes/tasks/place_secrets.yml | 20 --------- .../roles/kubernetes/tasks/secrets.yml | 42 ++++++------------- 2 files changed, 12 insertions(+), 50 deletions(-) delete mode 100644 contrib/ansible/roles/kubernetes/tasks/place_secrets.yml diff --git a/contrib/ansible/roles/kubernetes/tasks/place_secrets.yml b/contrib/ansible/roles/kubernetes/tasks/place_secrets.yml deleted file mode 100644 index 7cde9821544..00000000000 --- a/contrib/ansible/roles/kubernetes/tasks/place_secrets.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -- name: place ssh public key so apiserver can push certs - authorized_key: user=root key="{{ item }}" state=present - with_file: - - '/tmp/id_rsa.pub' - changed_when: false - -- name: Copy certificates directly from the apiserver to nodes - synchronize: src={{ kube_cert_dir }}/{{ item }} dest={{ kube_cert_dir }}/{{ item }} - delegate_to: "{{ groups['masters'][0] }}" - with_items: - - "ca.crt" - notify: - - restart daemons - -- name: remove ssh public key so apiserver can not push stuff - authorized_key: user=root key="{{ item }}" state=absent - with_file: - - '/tmp/id_rsa.pub' - changed_when: false diff --git a/contrib/ansible/roles/kubernetes/tasks/secrets.yml b/contrib/ansible/roles/kubernetes/tasks/secrets.yml index b52f4a179f1..3778bf89490 100644 --- a/contrib/ansible/roles/kubernetes/tasks/secrets.yml +++ b/contrib/ansible/roles/kubernetes/tasks/secrets.yml @@ -28,35 +28,17 @@ - include: gen_certs.yml when: inventory_hostname == groups['masters'][0] +- name: Read back the 
CA certificate + slurp: + src: "{{ kube_cert_dir }}/ca.crt" + register: ca_cert + run_once: true + delegate_to: "{{ groups['masters'][0] }}" + +- name: Place CA certificate everywhere + copy: content="{{ ca_cert.content|b64decode }}" dest="{{ kube_cert_dir }}/ca.crt" + notify: + - restart daemons + - include: gen_tokens.yml when: inventory_hostname == groups['masters'][0] - -- name: Install rsync to push secrets around - action: "{{ ansible_pkg_mgr }}" - args: - name: rsync - state: latest - when: not is_atomic - -- name: Generating RSA key for master node to push to others - user: name=root generate_ssh_key=yes - run_once: true - delegate_to: "{{ groups['masters'][0] }}" - -- name: Downloading pub key - fetch: - src=/root/.ssh/id_rsa.pub - dest=/tmp/id_rsa.pub - flat=yes - fail_on_missing=true - run_once: true - delegate_to: "{{ groups['masters'][0] }}" - changed_when: false - -- include: place_secrets.yml - -- name: Delete the downloaded pub key - local_action: file path=/tmp/id_rsa.pub state=absent - sudo: false - run_once: true - changed_when: false From 7d7d5d4c4e788328debc4c8f0eb95df3cd39ead2 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Thu, 25 Jun 2015 20:20:53 -0400 Subject: [PATCH 5/8] Add influxdb cluster monitoring --- contrib/ansible/group_vars/all.yml | 3 +++ .../tasks/cluster-monitoring.yml | 15 +++++++++++++++ .../roles/kubernetes-addons/tasks/main.yml | 5 ++++- 3 files changed, 22 insertions(+), 1 deletion(-) create mode 100644 contrib/ansible/roles/kubernetes-addons/tasks/cluster-monitoring.yml diff --git a/contrib/ansible/group_vars/all.yml b/contrib/ansible/group_vars/all.yml index 413c4d22414..9e8b65d5ef4 100644 --- a/contrib/ansible/group_vars/all.yml +++ b/contrib/ansible/group_vars/all.yml @@ -43,6 +43,9 @@ flannel_prefix: 12 # room for 4096 nodes with 254 pods per node. flannel_host_prefix: 24 +# Turn to false to disable cluster monitoring with heapster and influxdb +cluster_monitoring: true + # Turn this varable to 'false' to disable whole DNS configuration. 
dns_setup: true # How many replicas in the Replication Controller diff --git a/contrib/ansible/roles/kubernetes-addons/tasks/cluster-monitoring.yml b/contrib/ansible/roles/kubernetes-addons/tasks/cluster-monitoring.yml new file mode 100644 index 00000000000..71120c7618e --- /dev/null +++ b/contrib/ansible/roles/kubernetes-addons/tasks/cluster-monitoring.yml @@ -0,0 +1,15 @@ +--- +- name: MONITORING | Assures {{ kube_config_dir }}/addons/cluster-monitoring dir exists + file: path={{ kube_config_dir }}/addons/cluster-monitoring state=directory + +- name: MONITORING | Download monitoring files from Kubernetes repo + get_url: + url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/addons/cluster-monitoring/influxdb/{{ item }} + dest="{{ kube_config_dir }}/addons/cluster-monitoring/" + force=yes + with_items: + - grafana-service.yaml + - heapster-controller.yaml + - heapster-service.yaml + - influxdb-grafana-controller.yaml + - influxdb-service.yaml diff --git a/contrib/ansible/roles/kubernetes-addons/tasks/main.yml b/contrib/ansible/roles/kubernetes-addons/tasks/main.yml index 67802394f35..a319594f865 100644 --- a/contrib/ansible/roles/kubernetes-addons/tasks/main.yml +++ b/contrib/ansible/roles/kubernetes-addons/tasks/main.yml @@ -13,7 +13,9 @@ - include: dns.yml when: dns_setup - tags: dns + +- include: cluster-monitoring.yml + when: cluster_monitoring #- name: Get kube-addons script from Kubernetes # get_url: @@ -39,6 +41,7 @@ TOKEN_DIR: "{{ kube_token_dir }}" with_items: - "system:dns" + - "system:monitoring" register: gentoken changed_when: "'Added' in gentoken.stdout" notify: From 3a78104267497f25b783e30d4b782b14b1f78e53 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Thu, 25 Jun 2015 22:47:21 -0400 Subject: [PATCH 6/8] Add cluster logging with fluentd and elastic search --- contrib/ansible/group_vars/all.yml | 3 +++ .../kubernetes-addons/tasks/cluster-logging.yml | 14 ++++++++++++++ .../ansible/roles/kubernetes-addons/tasks/main.yml | 4 ++++ contrib/ansible/roles/kubernetes/defaults/main.yml | 4 ++++ contrib/ansible/roles/node/tasks/main.yml | 10 ++++++++++ contrib/ansible/roles/node/templates/kubelet.j2 | 4 ++-- 6 files changed, 37 insertions(+), 2 deletions(-) create mode 100644 contrib/ansible/roles/kubernetes-addons/tasks/cluster-logging.yml diff --git a/contrib/ansible/group_vars/all.yml b/contrib/ansible/group_vars/all.yml index 9e8b65d5ef4..19faa9b1b2c 100644 --- a/contrib/ansible/group_vars/all.yml +++ b/contrib/ansible/group_vars/all.yml @@ -43,6 +43,9 @@ flannel_prefix: 12 # room for 4096 nodes with 254 pods per node. 
flannel_host_prefix: 24 +# Set to false to disable logging with elasticsearch +cluster_logging: true + # Turn to false to disable cluster monitoring with heapster and influxdb cluster_monitoring: true diff --git a/contrib/ansible/roles/kubernetes-addons/tasks/cluster-logging.yml b/contrib/ansible/roles/kubernetes-addons/tasks/cluster-logging.yml new file mode 100644 index 00000000000..3cc9c0951bc --- /dev/null +++ b/contrib/ansible/roles/kubernetes-addons/tasks/cluster-logging.yml @@ -0,0 +1,14 @@ +--- +- name: LOGGING | Assures {{ kube_config_dir }}/addons/cluster-logging dir exists + file: path={{ kube_config_dir }}/addons/cluster-logging state=directory + +- name: LOGGING | Download logging files from Kubernetes repo + get_url: + url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/addons/fluentd-elasticsearch/{{ item }} + dest="{{ kube_config_dir }}/addons/cluster-logging/" + force=yes + with_items: + - es-controller.yaml + - es-service.yaml + - kibana-controller.yaml + - kibana-service.yaml diff --git a/contrib/ansible/roles/kubernetes-addons/tasks/main.yml b/contrib/ansible/roles/kubernetes-addons/tasks/main.yml index a319594f865..775edd9669a 100644 --- a/contrib/ansible/roles/kubernetes-addons/tasks/main.yml +++ b/contrib/ansible/roles/kubernetes-addons/tasks/main.yml @@ -17,6 +17,9 @@ - include: cluster-monitoring.yml when: cluster_monitoring +- include: cluster-logging.yml + when: cluster_logging + #- name: Get kube-addons script from Kubernetes # get_url: # url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/saltbase/salt/kube-addons/kube-addons.sh @@ -42,6 +45,7 @@ with_items: - "system:dns" - "system:monitoring" + - "system:logging" register: gentoken changed_when: "'Added' in gentoken.stdout" notify: diff --git a/contrib/ansible/roles/kubernetes/defaults/main.yml b/contrib/ansible/roles/kubernetes/defaults/main.yml index 3a4eedc71cd..32965747b90 100644 --- a/contrib/ansible/roles/kubernetes/defaults/main.yml +++ b/contrib/ansible/roles/kubernetes/defaults/main.yml @@ -17,6 +17,10 @@ kube_cert_dir: "{{ kube_config_dir }}/certs" # This is where all of the bearer tokens will be stored kube_token_dir: "{{ kube_config_dir }}/tokens" +# This is where you can drop yaml/json files and the kubelet will run those +# pods on startup +kube_manifest_dir: "{{ kube_config_dir }}/manifests" + # This is the group that the cert creation scripts chgrp the # cert files to. Not really changable... 
kube_cert_group: kube-cert diff --git a/contrib/ansible/roles/node/tasks/main.yml b/contrib/ansible/roles/node/tasks/main.yml index 1d74a2821cf..1c88a864466 100644 --- a/contrib/ansible/roles/node/tasks/main.yml +++ b/contrib/ansible/roles/node/tasks/main.yml @@ -14,6 +14,16 @@ - include: centos.yml when: not is_atomic and ansible_distribution == "CentOS" +- name: Make sure manifest directory exists + file: path={{ kube_manifest_dir }} state=directory + +- name: Install fluentd pod into each node + get_url: + url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/saltbase/salt/fluentd-es/fluentd-es.yaml + dest="{{ kube_manifest_dir }}" + force=yes + when: cluster_logging + - name: Get the node token values slurp: src: "{{ kube_token_dir }}/{{ item }}-{{ inventory_hostname }}.token" diff --git a/contrib/ansible/roles/node/templates/kubelet.j2 b/contrib/ansible/roles/node/templates/kubelet.j2 index 953e5e05fbc..f880f4201ae 100644 --- a/contrib/ansible/roles/node/templates/kubelet.j2 +++ b/contrib/ansible/roles/node/templates/kubelet.j2 @@ -15,7 +15,7 @@ KUBELET_API_SERVER="--api_servers=https://{{ groups['masters'][0]}}:443" # Add your own! {% if dns_setup %} -KUBELET_ARGS="--cluster_dns={{ dns_server }} --cluster_domain={{ dns_domain }} --kubeconfig={{ kube_config_dir}}/kubelet.kubeconfig" +KUBELET_ARGS="--cluster_dns={{ dns_server }} --cluster_domain={{ dns_domain }} --kubeconfig={{ kube_config_dir}}/kubelet.kubeconfig --config={{ kube_manifest_dir }}" {% else %} -KUBELET_ARGS="--kubeconfig={{ kube_config_dir}}/kubelet.kubeconfig" +KUBELET_ARGS="--kubeconfig={{ kube_config_dir}}/kubelet.kubeconfig --config={{ kube_manifest_dir }}" {% endif %} From 3cacc42985cca7080ed547e1a7654c81584b0123 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Fri, 26 Jun 2015 18:06:33 -0400 Subject: [PATCH 7/8] Hide DNS variables away from the user Fewer knobs == more winning. Also rename from kube.local to cluster.local. Some e2e tests really want that. --- contrib/ansible/group_vars/all.yml | 23 ++++++------------- .../roles/kubernetes/defaults/main.yml | 13 +++++++++++ 2 files changed, 20 insertions(+), 16 deletions(-) diff --git a/contrib/ansible/group_vars/all.yml b/contrib/ansible/group_vars/all.yml index 19faa9b1b2c..da6b88e1dcd 100644 --- a/contrib/ansible/group_vars/all.yml +++ b/contrib/ansible/group_vars/all.yml @@ -1,6 +1,7 @@ -# Only used for the location to store flannel info in etcd, but may be used -# for dns purposes and cluster id purposes in the future. -cluster_name: kube.local +# will be used as the Internal dns domain name if DNS is enabled. Services +# will be discoverable under ..svc., e.g. +# myservice.default.svc.cluster.local +cluster_name: cluster.local # Account name of remote user. Ansible will use this user account to ssh into # the managed machines. The user must be able to use sudo without asking @@ -54,16 +55,6 @@ dns_setup: true # How many replicas in the Replication Controller dns_replicas: 1 -# Internal DNS domain name. -# This domain must not be used in your network. Services will be discoverable -# under .., e.g. -# myservice.default.kube.local -dns_domain: kube.local - -# IP address of the DNS server. -# Kubernetes will create a pod with several containers, serving as the DNS -# server and expose it under this IP address. The IP address must be from -# the range specified as kube_service_addresses above. -# And this is the IP address you should use as address of the DNS server -# in your containers. 
-dns_server: 10.254.0.10
+# There are other variables in roles/kubernetes/defaults/main.yml but changing
+# them comes with a much higher risk to your cluster. So proceed over there
+# with caution.
diff --git a/contrib/ansible/roles/kubernetes/defaults/main.yml b/contrib/ansible/roles/kubernetes/defaults/main.yml
index 32965747b90..77d89be2116 100644
--- a/contrib/ansible/roles/kubernetes/defaults/main.yml
+++ b/contrib/ansible/roles/kubernetes/defaults/main.yml
@@ -24,3 +24,16 @@ kube_manifest_dir: "{{ kube_config_dir }}/manifests"
 # This is the group that the cert creation scripts chgrp the
 # cert files to. Not really changable...
 kube_cert_group: kube-cert
+
+# Internal DNS domain name.
+# This domain must not be used in your network. Services will be discoverable
+# under <service>.<namespace>.<dns_domain>, e.g.
+# myservice.default.cluster.local
+dns_domain: "{{ cluster_name }}"
+
+# IP address of the DNS server.
+# Kubernetes will create a pod with several containers, serving as the DNS
+# server and expose it under this IP address. The IP address must be from
+# the range specified as kube_service_addresses. This magic will actually
+# pick the 10th ip address in the kube_service_addresses range and use that.
+dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(10)|ipaddr('address') }}"

From ddf52a854e18f3cd2b12b1afea3ed976b05f084c Mon Sep 17 00:00:00 2001
From: Eric Paris
Date: Tue, 30 Jun 2015 13:03:10 -0400
Subject: [PATCH 8/8] Make sure serviceaccount tokens are created and recognized

---
 contrib/ansible/roles/master/templates/apiserver.j2          | 2 +-
 contrib/ansible/roles/master/templates/controller-manager.j2 | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/contrib/ansible/roles/master/templates/apiserver.j2 b/contrib/ansible/roles/master/templates/apiserver.j2
index 03252ba31b6..bf691cacaf5 100644
--- a/contrib/ansible/roles/master/templates/apiserver.j2
+++ b/contrib/ansible/roles/master/templates/apiserver.j2
@@ -23,4 +23,4 @@ KUBE_ETCD_SERVERS="--etcd_servers={% for node in groups['etcd'] %}http://{{ node
 KUBE_ADMISSION_CONTROL="--admission_control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
 
 # Add your own!
-KUBE_API_ARGS="--tls_cert_file={{ kube_cert_dir }}/server.cert --tls_private_key_file={{ kube_cert_dir }}/server.key --client_ca_file={{ kube_cert_dir }}/ca.crt --token_auth_file={{ kube_token_dir }}/known_tokens.csv"
+KUBE_API_ARGS="--tls_cert_file={{ kube_cert_dir }}/server.cert --tls_private_key_file={{ kube_cert_dir }}/server.key --client_ca_file={{ kube_cert_dir }}/ca.crt --token_auth_file={{ kube_token_dir }}/known_tokens.csv --service_account_key_file={{ kube_cert_dir }}/server.cert"
diff --git a/contrib/ansible/roles/master/templates/controller-manager.j2 b/contrib/ansible/roles/master/templates/controller-manager.j2
index 9d9d259257b..389a60fa0e3 100644
--- a/contrib/ansible/roles/master/templates/controller-manager.j2
+++ b/contrib/ansible/roles/master/templates/controller-manager.j2
@@ -4,4 +4,4 @@
 # defaults from config and apiserver should be adequate
 
 # Add your own!
-KUBE_CONTROLLER_MANAGER_ARGS="--kubeconfig={{ kube_config_dir }}/controller-manager.kubeconfig"
+KUBE_CONTROLLER_MANAGER_ARGS="--kubeconfig={{ kube_config_dir }}/controller-manager.kubeconfig --service_account_private_key_file={{ kube_cert_dir }}/server.key --root_ca_file={{ kube_cert_dir }}/ca.crt"