Merge pull request #10544 from eparis/total-ansible
Ansible improvements to support more addons and better cert/token handling
Commit: f41c0d0202
@@ -1,6 +1,7 @@
# Only used for the location to store flannel info in etcd, but may be used
# for dns purposes and cluster id purposes in the future.
cluster_name: kube.local
# will be used as the Internal dns domain name if DNS is enabled. Services
# will be discoverable under <service-name>.<namespace>.svc.<domainname>, e.g.
# myservice.default.svc.cluster.local
cluster_name: cluster.local

# Account name of remote user. Ansible will use this user account to ssh into
# the managed machines. The user must be able to use sudo without asking
@@ -43,21 +44,17 @@ flannel_prefix: 12
# room for 4096 nodes with 254 pods per node.
flannel_host_prefix: 24

# Set to false to disable logging with elasticsearch
cluster_logging: true

# Turn to false to disable cluster monitoring with heapster and influxdb
cluster_monitoring: true

# Turn this variable to 'false' to disable the whole DNS configuration.
dns_setup: true
# How many replicas in the Replication Controller
dns_replicas: 1

# Internal DNS domain name.
# This domain must not be used in your network. Services will be discoverable
# under <service-name>.<namespace>.<domainname>, e.g.
# myservice.default.kube.local
dns_domain: kube.local

# IP address of the DNS server.
# Kubernetes will create a pod with several containers, serving as the DNS
# server and expose it under this IP address. The IP address must be from
# the range specified as kube_service_addresses above.
# And this is the IP address you should use as the address of the DNS server
# in your containers.
dns_server: 10.254.0.10
# There are other variables in roles/kubernetes/defaults/main.yml but changing
# them comes with a much higher risk to your cluster. So proceed over there
# with caution.

@@ -0,0 +1,14 @@
---
- name: LOGGING | Assures {{ kube_config_dir }}/addons/cluster-logging dir exists
  file: path={{ kube_config_dir }}/addons/cluster-logging state=directory

- name: LOGGING | Download logging files from Kubernetes repo
  get_url:
    url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/addons/fluentd-elasticsearch/{{ item }}
    dest="{{ kube_config_dir }}/addons/cluster-logging/"
    force=yes
  with_items:
    - es-controller.yaml
    - es-service.yaml
    - kibana-controller.yaml
    - kibana-service.yaml

@@ -0,0 +1,15 @@
---
- name: MONITORING | Assures {{ kube_config_dir }}/addons/cluster-monitoring dir exists
  file: path={{ kube_config_dir }}/addons/cluster-monitoring state=directory

- name: MONITORING | Download monitoring files from Kubernetes repo
  get_url:
    url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/addons/cluster-monitoring/influxdb/{{ item }}
    dest="{{ kube_config_dir }}/addons/cluster-monitoring/"
    force=yes
  with_items:
    - grafana-service.yaml
    - heapster-controller.yaml
    - heapster-service.yaml
    - influxdb-grafana-controller.yaml
    - influxdb-service.yaml

@@ -13,7 +13,12 @@

- include: dns.yml
  when: dns_setup
  tags: dns

- include: cluster-monitoring.yml
  when: cluster_monitoring

- include: cluster-logging.yml
  when: cluster_logging

#- name: Get kube-addons script from Kubernetes
#  get_url:
@@ -33,15 +38,14 @@
- name: HACK | copy local kube-addon-update.sh
  copy: src=kube-addon-update.sh dest={{ kube_script_dir }}/kube-addon-update.sh mode=0755

- name: Copy script to create known_tokens.csv
  copy: src=kube-gen-token.sh dest={{ kube_script_dir }}/kube-gen-token.sh mode=0755

- name: Run kube-gen-token script to create {{ kube_config_dir }}/known_tokens.csv
- name: Run kube-gen-token script to create {{ kube_token_dir }}/known_tokens.csv
  command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item }}"
  environment:
    TOKEN_DIR: "{{ kube_config_dir }}"
    TOKEN_DIR: "{{ kube_token_dir }}"
  with_items:
    - "system:dns"
    - "system:monitoring"
    - "system:logging"
  register: gentoken
  changed_when: "'Added' in gentoken.stdout"
  notify:

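Each add-on include above is gated by one of the group_vars toggles from the first hunk, and the DNS tasks additionally carry a tag so they can be run or skipped selectively with ansible-playbook's --tags/--skip-tags options. A minimal sketch of overriding the toggles (hypothetical choice: keep monitoring, drop logging and DNS):

    cluster_logging: false
    cluster_monitoring: true
    dns_setup: false
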
@@ -3,7 +3,7 @@ Description=Kubernetes Addon Object Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
Environment="TOKEN_DIR={{ kube_config_dir }}"
Environment="TOKEN_DIR={{ kube_token_dir }}"
Environment="KUBECTL_BIN=/usr/bin/kubectl"
Environment="KUBERNETES_MASTER_NAME={{ groups['masters'][0] }}"
ExecStart={{ kube_script_dir }}/kube-addons.sh

@@ -14,7 +14,26 @@ kube_config_dir: /etc/kubernetes
# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/certs"

# This is where all of the bearer tokens will be stored
kube_token_dir: "{{ kube_config_dir }}/tokens"

# This is where you can drop yaml/json files and the kubelet will run those
# pods on startup
kube_manifest_dir: "{{ kube_config_dir }}/manifests"

# This is the group that the cert creation scripts chgrp the
# cert files to. Not really changeable...
kube_cert_group: kube-cert

# Internal DNS domain name.
# This domain must not be used in your network. Services will be discoverable
# under <service-name>.<namespace>.<domainname>, e.g.
# myservice.default.cluster.local
dns_domain: "{{ cluster_name }}"

# IP address of the DNS server.
# Kubernetes will create a pod with several containers, serving as the DNS
# server and expose it under this IP address. The IP address must be from
# the range specified as kube_service_addresses. This magic will actually
# pick the 10th ip address in the kube_service_addresses range and use that.
dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(10)|ipaddr('address') }}"

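The dns_server default is the only non-obvious line here: the ipaddr filter (backed by python-netaddr) walks from the service CIDR down to a single host address. A worked sketch, assuming kube_service_addresses is 10.254.0.0/16 (consistent with the 10.254.0.10 value that used to be hard-coded in group_vars):

    # "10.254.0.0/16" | ipaddr('net')      ->  "10.254.0.0/16"   (validated as a network)
    #                 | ipaddr(10)         ->  "10.254.0.10/16"  (10th address in the range)
    #                 | ipaddr('address')  ->  "10.254.0.10"     (prefix length stripped)
    kube_service_addresses: 10.254.0.0/16
    dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(10)|ipaddr('address') }}"   # -> 10.254.0.10
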
@@ -21,10 +21,11 @@ create_accounts=($@)

touch "${token_file}"
for account in "${create_accounts[@]}"; do
  if grep "${account}" "${token_file}" ; then
  if grep ",${account}," "${token_file}" ; then
    continue
  fi
  token=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
  echo "${token},${account},${account}" >> "${token_file}"
  echo "${token}" > "${token_dir}/${account}.token"
  echo "Added ${account}"
done

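For each account the script appends a "<token>,<account>,<account>" row to known_tokens.csv (the token,user,uid layout that the apiserver's --token_auth_file consumes, as wired up further down) and drops the bare token into "<account>.token" under TOKEN_DIR. The comma-anchored grep makes the idempotency check match whole account names rather than substrings. A sketch of reading one token back, assuming the system:dns account generated by the addons play above:

    - name: Read back the system:dns token (illustrative sketch only)
      slurp:
        src: "{{ kube_token_dir }}/system:dns.token"
      register: dns_token
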
@@ -1,52 +0,0 @@
---
- name: Create system kube-cert groups
  group: name={{ kube_cert_group }} state=present system=yes

- name: Create system kube user
  user:
    name=kube
    comment="Kubernetes user"
    shell=/sbin/nologin
    state=present
    system=yes
    groups={{ kube_cert_group }}

- name: make sure the certificate directory exists
  file:
    path={{ kube_cert_dir }}
    state=directory
    mode=o-rwx
    group={{ kube_cert_group }}

- name: Install rsync to push certs around
  action: "{{ ansible_pkg_mgr }}"
  args:
    name: rsync
    state: latest
  when: not is_atomic

- name: Generating RSA key for cert node to push to others
  user: name=root generate_ssh_key=yes
  run_once: true
  delegate_to: "{{ groups['masters'][0] }}"

- name: Downloading pub key
  fetch:
    src=/root/.ssh/id_rsa.pub
    dest=/tmp/id_rsa.pub
    flat=yes
    fail_on_missing=true
  run_once: true
  delegate_to: "{{ groups['masters'][0] }}"
  changed_when: false

- include: gen_certs.yml
  when: inventory_hostname == groups['masters'][0]

- include: place_certs.yml

- name: Delete the downloaded pub key
  local_action: file path=/tmp/id_rsa.pub state=absent
  sudo: false
  run_once: true
  changed_when: false

contrib/ansible/roles/kubernetes/tasks/gen_tokens.yml (new file, 30 lines)
@@ -0,0 +1,30 @@
---
- name: Copy the token gen script
  copy:
    src=kube-gen-token.sh
    dest={{ kube_script_dir }}
    mode=u+x

- name: Generate tokens for master components
  command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
  environment:
    TOKEN_DIR: "{{ kube_token_dir }}"
  with_nested:
    - [ "system:controller_manager", "system:scheduler" ]
    - "{{ groups['masters'] }}"
  register: gentoken
  changed_when: "'Added' in gentoken.stdout"
  notify:
    - restart daemons

- name: Generate tokens for node components
  command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
  environment:
    TOKEN_DIR: "{{ kube_token_dir }}"
  with_nested:
    - [ 'system:kubelet', 'system:proxy' ]
    - "{{ groups['nodes'] }}"
  register: gentoken
  changed_when: "'Added' in gentoken.stdout"
  notify:
    - restart daemons

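with_nested takes the cartesian product of the two lists, so every component/host pair gets its own token. A sketch of the expansion, assuming a hypothetical inventory with one master "kube-master" and two nodes "node1" and "node2":

    # master components:
    #   kube-gen-token.sh system:controller_manager-kube-master
    #   kube-gen-token.sh system:scheduler-kube-master
    # node components:
    #   kube-gen-token.sh system:kubelet-node1
    #   kube-gen-token.sh system:kubelet-node2
    #   kube-gen-token.sh system:proxy-node1
    #   kube-gen-token.sh system:proxy-node2
    #
    # Each run leaves {{ kube_token_dir }}/<account>-<host>.token on the first
    # master, which the master and node plays below read back with slurp.
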
@@ -18,6 +18,6 @@
  notify:
    - restart daemons

- include: certs.yml
- include: secrets.yml
  tags:
    certs
    secrets

@@ -1,26 +0,0 @@
---
- name: place ssh public key on other nodes so apiserver can push certs
  authorized_key: user=root key="{{ item }}" state=present
  with_file:
    - '/tmp/id_rsa.pub'
  changed_when: false

- name: Copy certificates directly from the apiserver to nodes
  synchronize:
    src={{ kube_cert_dir }}/{{ item }}
    dest={{ kube_cert_dir }}/{{ item }}
    rsync_timeout=30
    set_remote_user=no
  delegate_to: "{{ groups['masters'][0] }}"
  with_items:
    - "ca.crt"
    - "kubecfg.crt"
    - "kubecfg.key"
  notify:
    - restart daemons

- name: remove ssh public key so apiserver can not push stuff
  authorized_key: user=root key="{{ item }}" state=absent
  with_file:
    - '/tmp/id_rsa.pub'
  changed_when: false

contrib/ansible/roles/kubernetes/tasks/secrets.yml (new file, 44 lines)
@@ -0,0 +1,44 @@
---
- name: Create system kube-cert groups
  group: name={{ kube_cert_group }} state=present system=yes

- name: Create system kube user
  user:
    name=kube
    comment="Kubernetes user"
    shell=/sbin/nologin
    state=present
    system=yes
    groups={{ kube_cert_group }}

- name: make sure the certificate directory exists
  file:
    path={{ kube_cert_dir }}
    state=directory
    mode=o-rwx
    group={{ kube_cert_group }}

- name: make sure the tokens directory exists
  file:
    path={{ kube_token_dir }}
    state=directory
    mode=o-rwx
    group={{ kube_cert_group }}

- include: gen_certs.yml
  when: inventory_hostname == groups['masters'][0]

- name: Read back the CA certificate
  slurp:
    src: "{{ kube_cert_dir }}/ca.crt"
  register: ca_cert
  run_once: true
  delegate_to: "{{ groups['masters'][0] }}"

- name: Place CA certificate everywhere
  copy: content="{{ ca_cert.content|b64decode }}" dest="{{ kube_cert_dir }}/ca.crt"
  notify:
    - restart daemons

- include: gen_tokens.yml
  when: inventory_hostname == groups['masters'][0]

@@ -11,39 +11,53 @@
    - restart apiserver

- name: Ensure that a token auth file exists (addons may populate it)
  file: path={{kube_config_dir }}/known_tokens.csv state=touch
  file: path={{kube_token_dir }}/known_tokens.csv state=touch
  changed_when: false

- name: add cap_net_bind_service to kube-apiserver
  capabilities: path=/usr/bin/kube-apiserver capability=cap_net_bind_service=ep state=present
  when: not is_atomic

- name: Enable apiserver
  service: name=kube-apiserver enabled=yes state=started

- name: Get the node token values
  slurp:
    src: "{{ kube_token_dir }}/{{ item }}-{{ inventory_hostname }}.token"
  with_items:
    - "system:controller_manager"
    - "system:scheduler"
  register: tokens
  delegate_to: "{{ groups['masters'][0] }}"

- name: Set token facts
  set_fact:
    controller_manager_token: "{{ tokens.results[0].content|b64decode }}"
    scheduler_token: "{{ tokens.results[1].content|b64decode }}"

- name: write the config file for the controller-manager
  template: src=controller-manager.j2 dest={{ kube_config_dir }}/controller-manager
  notify:
    - restart controller-manager

- name: write the config file for the scheduler
  template: src=scheduler.j2 dest={{ kube_config_dir }}/scheduler
  notify:
    - restart scheduler

- name: add cap_net_bind_service to kube-apiserver
  capabilities: path=/usr/bin/kube-apiserver capability=cap_net_bind_service=ep state=present
  when: not is_atomic

- name: write the kubecfg (auth) file for controller-manager
  template: src=controller-manager.kubeconfig.j2 dest={{ kube_config_dir }}/controller-manager.kubeconfig
  notify:
    - restart controller-manager

- name: Enable controller-manager
  service: name=kube-controller-manager enabled=yes state=started

- name: write the config file for the scheduler
  template: src=scheduler.j2 dest={{ kube_config_dir }}/scheduler
  notify:
    - restart scheduler

- name: write the kubecfg (auth) file for scheduler
  template: src=scheduler.kubeconfig.j2 dest={{ kube_config_dir }}/scheduler.kubeconfig
  notify:
    - restart scheduler

- name: Enable apiserver
  service: name=kube-apiserver enabled=yes state=started

- name: Enable controller-manager
  service: name=kube-controller-manager enabled=yes state=started

- name: Enable scheduler
  service: name=kube-scheduler enabled=yes state=started

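The slurp/set_fact pair is what moves the bearer tokens from the first master into template variables: slurp returns the file content base64-encoded, and because with_items results are registered in list order, results[0] always corresponds to system:controller_manager and results[1] to system:scheduler. Rough shape of the registered variable (values are placeholders):

    tokens:
      results:
        - item: "system:controller_manager"
          content: "<base64 of system:controller_manager-<master>.token>"
        - item: "system:scheduler"
          content: "<base64 of system:scheduler-<master>.token>"
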
@@ -23,4 +23,4 @@ KUBE_ETCD_SERVERS="--etcd_servers={% for node in groups['etcd'] %}http://{{ node
KUBE_ADMISSION_CONTROL="--admission_control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"

# Add your own!
KUBE_API_ARGS="--tls_cert_file={{ kube_cert_dir }}/server.cert --tls_private_key_file={{ kube_cert_dir }}/server.key --client_ca_file={{ kube_cert_dir }}/ca.crt --token_auth_file={{ kube_config_dir }}/known_tokens.csv"
KUBE_API_ARGS="--tls_cert_file={{ kube_cert_dir }}/server.cert --tls_private_key_file={{ kube_cert_dir }}/server.key --client_ca_file={{ kube_cert_dir }}/ca.crt --token_auth_file={{ kube_token_dir }}/known_tokens.csv --service_account_key_file={{ kube_cert_dir }}/server.cert"

@@ -4,4 +4,4 @@
# defaults from config and apiserver should be adequate

# Add your own!
KUBE_CONTROLLER_MANAGER_ARGS="--kubeconfig={{ kube_config_dir }}/controller-manager.kubeconfig"
KUBE_CONTROLLER_MANAGER_ARGS="--kubeconfig={{ kube_config_dir }}/controller-manager.kubeconfig --service_account_private_key_file={{ kube_cert_dir }}/server.key --root_ca_file={{ kube_cert_dir }}/ca.crt"

@@ -1,19 +1,18 @@
apiVersion: v1
kind: Config
current-context: controller-manager-to-{{ cluster_name }}
preferences: {}
clusters:
- cluster:
    certificate-authority: {{ kube_cert_dir }}/ca.crt
    server: http://{{ groups['masters'][0] }}:443
    server: https://{{ groups['masters'][0] }}:443
  name: {{ cluster_name }}
contexts:
- context:
    cluster: {{ cluster_name }}
    user: kubelet
  name: kubelet-to-{{ cluster_name }}
current-context: kubelet-to-{{ cluster_name }}
kind: Config
preferences: {}
    user: controller-manager
  name: controller-manager-to-{{ cluster_name }}
users:
- name: kubelet
- name: controller-manager
  user:
    client-certificate: {{ kube_cert_dir }}/kubecfg.crt
    client-key: {{ kube_cert_dir }}/kubecfg.key
    token: {{ controller_manager_token }}

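The net effect of this template change is that the controller-manager now talks to the apiserver's https port and authenticates with its generated bearer token instead of reusing the kubelet identity and client certificate. A rough sketch of the rendered controller-manager.kubeconfig, using example values (cluster_name cluster.local, kube_cert_dir /etc/kubernetes/certs, and a hypothetical master named kube-master.example.com):

    apiVersion: v1
    kind: Config
    current-context: controller-manager-to-cluster.local
    preferences: {}
    clusters:
    - cluster:
        certificate-authority: /etc/kubernetes/certs/ca.crt
        server: https://kube-master.example.com:443
      name: cluster.local
    contexts:
    - context:
        cluster: cluster.local
        user: controller-manager
      name: controller-manager-to-cluster.local
    users:
    - name: controller-manager
      user:
        token: <token read from system:controller_manager-<master>.token>
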
@@ -1,19 +1,18 @@
apiVersion: v1
kind: Config
current-context: scheduler-to-{{ cluster_name }}
preferences: {}
clusters:
- cluster:
    certificate-authority: {{ kube_cert_dir }}/ca.crt
    server: http://{{ groups['masters'][0] }}:443
    server: https://{{ groups['masters'][0] }}:443
  name: {{ cluster_name }}
contexts:
- context:
    cluster: {{ cluster_name }}
    user: kubelet
  name: kubelet-to-{{ cluster_name }}
current-context: kubelet-to-{{ cluster_name }}
kind: Config
preferences: {}
    user: scheduler
  name: scheduler-to-{{ cluster_name }}
users:
- name: kubelet
- name: scheduler
  user:
    client-certificate: {{ kube_cert_dir }}/kubecfg.crt
    client-key: {{ kube_cert_dir }}/kubecfg.key
    token: {{ scheduler_token }}

@@ -14,29 +14,53 @@
- include: centos.yml
  when: not is_atomic and ansible_distribution == "CentOS"

- name: Make sure manifest directory exists
  file: path={{ kube_manifest_dir }} state=directory

- name: Install fluentd pod into each node
  get_url:
    url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/saltbase/salt/fluentd-es/fluentd-es.yaml
    dest="{{ kube_manifest_dir }}"
    force=yes
  when: cluster_logging

- name: Get the node token values
  slurp:
    src: "{{ kube_token_dir }}/{{ item }}-{{ inventory_hostname }}.token"
  with_items:
    - "system:kubelet"
    - "system:proxy"
  register: tokens
  delegate_to: "{{ groups['masters'][0] }}"

- name: Set token facts
  set_fact:
    kubelet_token: "{{ tokens.results[0].content|b64decode }}"
    proxy_token: "{{ tokens.results[1].content|b64decode }}"

- name: write the config files for kubelet
  template: src=kubelet.j2 dest={{ kube_config_dir }}/kubelet
  notify:
    - restart kubelet

- name: write the config files for proxy
  template: src=proxy.j2 dest={{ kube_config_dir }}/proxy
  notify:
    - restart proxy

- name: write the kubecfg (auth) file for kubelet
  template: src=kubelet.kubeconfig.j2 dest={{ kube_config_dir }}/kubelet.kubeconfig
  notify:
    - restart kubelet

- name: Enable kubelet
  service: name=kubelet enabled=yes state=started

- name: write the config files for proxy
  template: src=proxy.j2 dest={{ kube_config_dir }}/proxy
  notify:
    - restart proxy

- name: write the kubecfg (auth) file for kube-proxy
  template: src=proxy.kubeconfig.j2 dest={{ kube_config_dir }}/proxy.kubeconfig
  notify:
    - restart proxy

- name: Enable kubelet
  service: name=kubelet enabled=yes state=started

- name: Enable proxy
  service: name=kube-proxy enabled=yes state=started

@@ -15,7 +15,7 @@ KUBELET_API_SERVER="--api_servers=https://{{ groups['masters'][0]}}:443"

# Add your own!
{% if dns_setup %}
KUBELET_ARGS="--cluster_dns={{ dns_server }} --cluster_domain={{ dns_domain }} --kubeconfig={{ kube_config_dir}}/kubelet.kubeconfig"
KUBELET_ARGS="--cluster_dns={{ dns_server }} --cluster_domain={{ dns_domain }} --kubeconfig={{ kube_config_dir}}/kubelet.kubeconfig --config={{ kube_manifest_dir }}"
{% else %}
KUBELET_ARGS="--kubeconfig={{ kube_config_dir}}/kubelet.kubeconfig"
KUBELET_ARGS="--kubeconfig={{ kube_config_dir}}/kubelet.kubeconfig --config={{ kube_manifest_dir }}"
{% endif %}

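The new --config flag points the kubelet at kube_manifest_dir, so any manifest dropped there (the fluentd-es.yaml pulled down in the node tasks above, for instance) is run directly by the kubelet as a static pod on startup. A hypothetical minimal manifest of that kind:

    apiVersion: v1
    kind: Pod
    metadata:
      name: example-static-pod
    spec:
      containers:
      - name: pause
        image: gcr.io/google_containers/pause
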
@@ -1,19 +1,18 @@
apiVersion: v1
kind: Config
current-context: kubelet-to-{{ cluster_name }}
preferences: {}
clusters:
- cluster:
    certificate-authority: {{ kube_cert_dir }}/ca.crt
    server: http://{{ groups['masters'][0] }}:443
    server: https://{{ groups['masters'][0] }}:443
  name: {{ cluster_name }}
contexts:
- context:
    cluster: {{ cluster_name }}
    user: kubelet
  name: kubelet-to-{{ cluster_name }}
current-context: kubelet-to-{{ cluster_name }}
kind: Config
preferences: {}
users:
- name: kubelet
  user:
    client-certificate: {{ kube_cert_dir }}/kubecfg.crt
    client-key: {{ kube_cert_dir }}/kubecfg.key
    token: {{ kubelet_token }}

@@ -1,19 +1,18 @@
apiVersion: v1
clusters:
- cluster:
    certificate-authority: {{ kube_cert_dir }}/ca.crt
    server: http://{{ groups['masters'][0] }}:443
  name: {{ cluster_name }}
kind: Config
current-context: proxy-to-{{ cluster_name }}
preferences: {}
contexts:
- context:
    cluster: {{ cluster_name }}
    user: kubelet
  name: kubelet-to-{{ cluster_name }}
current-context: kubelet-to-{{ cluster_name }}
kind: Config
preferences: {}
    user: proxy
  name: proxy-to-{{ cluster_name }}
clusters:
- cluster:
    certificate-authority: {{ kube_cert_dir }}/ca.crt
    server: https://{{ groups['masters'][0] }}:443
  name: {{ cluster_name }}
users:
- name: kubelet
- name: proxy
  user:
    client-certificate: {{ kube_cert_dir }}/kubecfg.crt
    client-key: {{ kube_cert_dir }}/kubecfg.key
    token: {{ proxy_token }}