mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-07-31 07:20:13 +00:00
Merge pull request #6237 from eparis/ansible
example ansible setup repo
This commit is contained in:
commit
3a6c0370df
2
contrib/ansible/.gitignore
vendored
Normal file
2
contrib/ansible/.gitignore
vendored
Normal file
@ -0,0 +1,2 @@
|
||||
myinventory
|
||||
*.swp
|
42
contrib/ansible/README.md
Normal file
42
contrib/ansible/README.md
Normal file
@ -0,0 +1,42 @@
|
||||
# Kubernetes Ansible
|
||||
|
||||
This playbook helps you to set up a Kubernetes cluster on machines where you
|
||||
can't or don't want to use the salt scripts and cluster up/down tools. They
|
||||
can be real hardware, VMs, things in a public cloud, etc.
|
||||
|
||||
## Usage
|
||||
|
||||
* Record the IP address of which machine you want to be your master
|
||||
* Record the IP address of the machine you want to be your etcd server (often same as master)
|
||||
* Record the IP addresses of the machines you want to be your minions. (master can be a minion)
|
||||
|
||||
Stick the system information into the 'inventory' file.
|
||||
|
||||
### Configure your cluster
|
||||
|
||||
You will want to look though all of the options in `group_vars/all.yml` and
|
||||
set the variables to reflect your needs. The options should be described there
|
||||
in full detail.
|
||||
|
||||
### Set up the actual kubernetes cluster
|
||||
|
||||
Now run the setup:
|
||||
|
||||
$ ansible-playbook -i inventory cluster.yml
|
||||
|
||||
In general this will work on very recent Fedora, rawhide or F21. Future work to
|
||||
support RHEL7, CentOS, and possibly other distros should be forthcoming.
|
||||
|
||||
### You can just set up certain parts instead of doing it all
|
||||
|
||||
Only the kubernetes daemons:
|
||||
|
||||
$ ansible-playbook -i inventory kubernetes-services.yml
|
||||
|
||||
Only etcd:
|
||||
|
||||
    $ ansible-playbook -i inventory etcd.yml
|
||||
|
||||
Only flannel:
|
||||
|
||||
$ ansible-playbook -i inventory flannel.yml
|
4
contrib/ansible/cluster.yml
Normal file
4
contrib/ansible/cluster.yml
Normal file
@ -0,0 +1,4 @@
|
||||
# Set up a whole working cluster!
|
||||
- include: etcd.yml
|
||||
- include: kubernetes-services.yml
|
||||
|
6
contrib/ansible/etcd.yml
Normal file
6
contrib/ansible/etcd.yml
Normal file
@ -0,0 +1,6 @@
|
||||
---
|
||||
- hosts: etcd
|
||||
sudo: yes
|
||||
roles:
|
||||
- common
|
||||
- etcd
|
17
contrib/ansible/group_vars/all.yml
Normal file
17
contrib/ansible/group_vars/all.yml
Normal file
@ -0,0 +1,17 @@
|
||||
# Account name of remote user. Ansible will use this user account to ssh into
|
||||
# the managed machines. The user must be able to use sudo without asking
|
||||
# for password unless ansible_sudo_pass is set
|
||||
ansible_ssh_user: root
|
||||
|
||||
# password for the ansible_ssh_user. If this is unset you will need to set up
|
||||
# ssh keys so a password is not needed.
|
||||
#ansible_ssh_pass: password
|
||||
|
||||
# If a password is needed to sudo to root that password must be set here
|
||||
#ansible_sudo_pass: password
|
||||
|
||||
# Kubernetes internal network for services.
|
||||
# Kubernetes services will get fake IP addresses from this range.
|
||||
# This range must not conflict with anything in your infrastructure. These
|
||||
# addresses do not need to be routable and must just be an unused block of space.
|
||||
kube_service_addresses: 10.254.0.0/16
|
10
contrib/ansible/inventory
Normal file
10
contrib/ansible/inventory
Normal file
@ -0,0 +1,10 @@
|
||||
[masters]
|
||||
10.0.0.1
|
||||
|
||||
[etcd]
|
||||
10.0.0.2
|
||||
|
||||
[minions]
|
||||
10.0.0.3
|
||||
10.0.0.4
|
||||
10.0.0.5
|
17
contrib/ansible/kubernetes-services.yml
Normal file
17
contrib/ansible/kubernetes-services.yml
Normal file
@ -0,0 +1,17 @@
|
||||
---
|
||||
- hosts: masters:minions
|
||||
sudo: yes
|
||||
roles:
|
||||
- common
|
||||
|
||||
- hosts: masters
|
||||
sudo: yes
|
||||
roles:
|
||||
- kubernetes
|
||||
- master
|
||||
|
||||
- hosts: minions
|
||||
sudo: yes
|
||||
roles:
|
||||
- kubernetes
|
||||
- minion
|
18
contrib/ansible/roles/common/tasks/main.yml
Normal file
18
contrib/ansible/roles/common/tasks/main.yml
Normal file
@ -0,0 +1,18 @@
|
||||
---
|
||||
- name: Determine if Atomic
|
||||
stat: path=/run/ostree-booted
|
||||
register: s
|
||||
changed_when: false
|
||||
|
||||
- name: Init the is_atomic fact
|
||||
set_fact:
|
||||
is_atomic: false
|
||||
|
||||
- name: Set the is_atomic fact
|
||||
set_fact:
|
||||
is_atomic: true
|
||||
when: s.stat.exists
|
||||
|
||||
# collect information about what packages are installed
|
||||
- include: rpm.yml
|
||||
when: ansible_pkg_mgr == "yum"
|
30
contrib/ansible/roles/common/tasks/rpm.yml
Normal file
30
contrib/ansible/roles/common/tasks/rpm.yml
Normal file
@ -0,0 +1,30 @@
|
||||
---
|
||||
- name: Determine if firewalld installed
|
||||
command: "rpm -q firewalld"
|
||||
register: s
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
|
||||
- name: Init the has_firewalld fact
|
||||
set_fact:
|
||||
has_firewalld: false
|
||||
|
||||
- name: Set the has_firewalld fact
|
||||
set_fact:
|
||||
has_firewalld: true
|
||||
when: s.rc == 0
|
||||
|
||||
- name: Determine if iptables-services installed
|
||||
command: "rpm -q iptables-services"
|
||||
register: s
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
|
||||
- name: Init the has_iptables fact
|
||||
set_fact:
|
||||
has_iptables: false
|
||||
|
||||
- name: Set the has_iptables fact
|
||||
set_fact:
|
||||
has_iptables: true
|
||||
when: s.rc == 0
|
6
contrib/ansible/roles/etcd/handlers/main.yml
Normal file
6
contrib/ansible/roles/etcd/handlers/main.yml
Normal file
@ -0,0 +1,6 @@
|
||||
---
|
||||
- name: restart etcd
|
||||
service: name=etcd state=restarted
|
||||
|
||||
- name: Save iptables rules
|
||||
command: service iptables save
|
16
contrib/ansible/roles/etcd/tasks/firewalld.yml
Normal file
16
contrib/ansible/roles/etcd/tasks/firewalld.yml
Normal file
@ -0,0 +1,16 @@
|
||||
---
|
||||
- name: Open firewalld port for etcd
|
||||
firewalld: port={{ item }}/tcp permanent=false state=enabled
|
||||
# in case this is also a minion where firewalld turned off
|
||||
ignore_errors: yes
|
||||
with_items:
|
||||
- 2379
|
||||
- 2380
|
||||
|
||||
- name: Save firewalld port for etcd
|
||||
firewalld: port={{ item }}/tcp permanent=true state=enabled
|
||||
# in case this is also a minion where firewalld turned off
|
||||
ignore_errors: yes
|
||||
with_items:
|
||||
- 2379
|
||||
- 2380
|
17
contrib/ansible/roles/etcd/tasks/iptables.yml
Normal file
17
contrib/ansible/roles/etcd/tasks/iptables.yml
Normal file
@ -0,0 +1,17 @@
|
||||
---
# Open the etcd client/peer ports via raw iptables on hosts without firewalld.
- name: Get iptables rules
  shell: iptables -L
  register: iptablesrules
  always_run: yes

- name: Enable iptables at boot
  service: name=iptables enabled=yes state=started

- name: Open etcd client port with iptables
  command: /sbin/iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "etcd"
  # The literal must be quoted: a bare `etcd` is evaluated by Jinja as an
  # undefined variable, not as the string "etcd".
  when: "'etcd' not in iptablesrules.stdout"
  notify:
    - Save iptables rules
  with_items:
    - 2379
    - 2380
|
20
contrib/ansible/roles/etcd/tasks/main.yml
Normal file
20
contrib/ansible/roles/etcd/tasks/main.yml
Normal file
@ -0,0 +1,20 @@
|
||||
---
|
||||
- name: Install etcd
|
||||
yum: pkg=etcd state=latest
|
||||
notify:
|
||||
- restart etcd
|
||||
when: not is_atomic
|
||||
|
||||
- name: Write etcd config file
|
||||
template: src=etcd.conf.j2 dest=/etc/etcd/etcd.conf
|
||||
notify:
|
||||
- restart etcd
|
||||
|
||||
- name: Enable etcd
|
||||
service: name=etcd enabled=yes state=started
|
||||
|
||||
- include: firewalld.yml
|
||||
when: has_firewalld
|
||||
|
||||
- include: iptables.yml
|
||||
when: not has_firewalld and has_iptables
|
4
contrib/ansible/roles/etcd/templates/etcd.conf.j2
Normal file
4
contrib/ansible/roles/etcd/templates/etcd.conf.j2
Normal file
@ -0,0 +1,4 @@
|
||||
# etcd2.0
|
||||
ETCD_NAME=default
|
||||
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
|
||||
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
|
4
contrib/ansible/roles/kubernetes/tasks/fedora.yml
Normal file
4
contrib/ansible/roles/kubernetes/tasks/fedora.yml
Normal file
@ -0,0 +1,4 @@
|
||||
---
|
||||
- name: Remove docker window manager on F20
|
||||
yum: pkg=docker state=absent
|
||||
when: not is_atomic and ansible_distribution_major_version == "20"
|
14
contrib/ansible/roles/kubernetes/tasks/main.yml
Normal file
14
contrib/ansible/roles/kubernetes/tasks/main.yml
Normal file
@ -0,0 +1,14 @@
|
||||
---
|
||||
- include: fedora.yml
|
||||
when: ansible_distribution == "Fedora"
|
||||
|
||||
- name: Install kubernetes
|
||||
yum: pkg=kubernetes state=latest
|
||||
notify:
|
||||
- restart daemons
|
||||
when: not is_atomic
|
||||
|
||||
- name: write the global config file
|
||||
template: src=config.j2 dest=/etc/kubernetes/config
|
||||
notify:
|
||||
- restart daemons
|
23
contrib/ansible/roles/kubernetes/templates/config.j2
Normal file
23
contrib/ansible/roles/kubernetes/templates/config.j2
Normal file
@ -0,0 +1,23 @@
|
||||
###
|
||||
# kubernetes system config
|
||||
#
|
||||
# The following values are used to configure various aspects of all
|
||||
# kubernetes services, including
|
||||
#
|
||||
# kube-apiserver.service
|
||||
# kube-controller-manager.service
|
||||
# kube-scheduler.service
|
||||
# kubelet.service
|
||||
# kube-proxy.service
|
||||
|
||||
# logging to stderr means we get it in the systemd journal
|
||||
KUBE_LOGTOSTDERR="--logtostderr=true"
|
||||
|
||||
# journal message level, 0 is debug
|
||||
KUBE_LOG_LEVEL="--v=0"
|
||||
|
||||
# Should this cluster be allowed to run privileged docker containers
|
||||
KUBE_ALLOW_PRIV="--allow_privileged=true"
|
||||
|
||||
# How the replication controller, scheduler, and proxy find the apiserver
|
||||
KUBE_MASTER="--master=http://{{ groups['masters'][0] }}:8080"
|
7
contrib/ansible/roles/master/files/controller-manager
Normal file
7
contrib/ansible/roles/master/files/controller-manager
Normal file
@ -0,0 +1,7 @@
|
||||
###
|
||||
# The following values are used to configure the kubernetes controller-manager
|
||||
|
||||
# defaults from config and apiserver should be adequate
|
||||
|
||||
# Add your own!
|
||||
KUBE_CONTROLLER_MANAGER_ARGS=""
|
7
contrib/ansible/roles/master/files/scheduler
Normal file
7
contrib/ansible/roles/master/files/scheduler
Normal file
@ -0,0 +1,7 @@
|
||||
###
|
||||
# kubernetes scheduler config
|
||||
|
||||
# default config should be adequate
|
||||
|
||||
# Add your own!
|
||||
KUBE_SCHEDULER_ARGS=""
|
19
contrib/ansible/roles/master/handlers/main.yml
Normal file
19
contrib/ansible/roles/master/handlers/main.yml
Normal file
@ -0,0 +1,19 @@
|
||||
---
|
||||
- name: restart daemons
|
||||
command: /bin/true
|
||||
notify:
|
||||
- restart apiserver
|
||||
- restart controller-manager
|
||||
- restart scheduler
|
||||
|
||||
- name: restart apiserver
|
||||
service: name=kube-apiserver state=restarted
|
||||
|
||||
- name: restart controller-manager
|
||||
service: name=kube-controller-manager state=restarted
|
||||
|
||||
- name: restart scheduler
|
||||
service: name=kube-scheduler state=restarted
|
||||
|
||||
- name: restart iptables
|
||||
service: name=iptables state=restarted
|
10
contrib/ansible/roles/master/tasks/firewalld.yml
Normal file
10
contrib/ansible/roles/master/tasks/firewalld.yml
Normal file
@ -0,0 +1,10 @@
|
||||
---
|
||||
- name: Open firewalld port for apiserver
|
||||
firewalld: port=8080/tcp permanent=false state=enabled
|
||||
# in case this is also a minion with firewalld turned off
|
||||
ignore_errors: yes
|
||||
|
||||
- name: Save firewalld port for apiserver
|
||||
firewalld: port=8080/tcp permanent=true state=enabled
|
||||
# in case this is also a minion with firewalld turned off
|
||||
ignore_errors: yes
|
15
contrib/ansible/roles/master/tasks/iptables.yml
Normal file
15
contrib/ansible/roles/master/tasks/iptables.yml
Normal file
@ -0,0 +1,15 @@
|
||||
---
# Open the kube-apiserver port via raw iptables on hosts without firewalld.
- name: Get iptables rules
  shell: iptables -L
  register: iptablesrules
  always_run: yes

- name: Open apiserver port with iptables
  command: /sbin/iptables -I INPUT 1 -p tcp --dport 8080 -j ACCEPT -m comment --comment "kube-apiserver"
  # The literal must be quoted: bare `kube-apiserver` is parsed by Jinja as
  # the subtraction `kube - apiserver` of two undefined variables.
  when: "'kube-apiserver' not in iptablesrules.stdout"
  notify:
    - restart iptables

- name: Save iptables rules
  command: service iptables save
  when: "'kube-apiserver' not in iptablesrules.stdout"
|
53
contrib/ansible/roles/master/tasks/main.yml
Normal file
53
contrib/ansible/roles/master/tasks/main.yml
Normal file
@ -0,0 +1,53 @@
|
||||
---
|
||||
- name: write the config file for the api server
|
||||
template: src=apiserver.j2 dest=/etc/kubernetes/apiserver
|
||||
notify:
|
||||
- restart apiserver
|
||||
|
||||
- name: write the config file for the controller-manager
|
||||
copy: src=controller-manager dest=/etc/kubernetes/controller-manager
|
||||
notify:
|
||||
- restart controller-manager
|
||||
|
||||
- name: write the config file for the scheduler
|
||||
copy: src=scheduler dest=/etc/kubernetes/scheduler
|
||||
notify:
|
||||
- restart scheduler
|
||||
|
||||
- name: Enable apiserver
|
||||
service: name=kube-apiserver enabled=yes state=started
|
||||
|
||||
- name: Enable controller-manager
|
||||
service: name=kube-controller-manager enabled=yes state=started
|
||||
|
||||
- name: Enable scheduler
|
||||
service: name=kube-scheduler enabled=yes state=started
|
||||
|
||||
- name: Copy minion definition json files to master
|
||||
template: src=node.j2 dest=/tmp/node-{{ item }}.json
|
||||
changed_when: false
|
||||
with_items:
|
||||
groups['minions']
|
||||
when: inventory_hostname == groups['masters'][0]
|
||||
|
||||
- name: Load minion definition into master
|
||||
command: /usr/bin/kubectl create -f /tmp/node-{{ item }}.json
|
||||
register: command_result
|
||||
failed_when: command_result.rc != 0 and 'already exists' not in command_result.stderr
|
||||
changed_when: "command_result.rc == 0"
|
||||
with_items:
|
||||
groups['minions']
|
||||
when: inventory_hostname == groups['masters'][0]
|
||||
|
||||
- name: Delete minion definitions from master
|
||||
file: path=/tmp/node-{{ item }}.json state=absent
|
||||
changed_when: false
|
||||
with_items:
|
||||
groups['minions']
|
||||
when: inventory_hostname == groups['masters'][0]
|
||||
|
||||
- include: firewalld.yml
|
||||
when: has_firewalld
|
||||
|
||||
- include: iptables.yml
|
||||
when: not has_firewalld and has_iptables
|
26
contrib/ansible/roles/master/templates/apiserver.j2
Normal file
26
contrib/ansible/roles/master/templates/apiserver.j2
Normal file
@ -0,0 +1,26 @@
|
||||
###
|
||||
# kubernetes system config
|
||||
#
|
||||
# The following values are used to configure the kube-apiserver
|
||||
#
|
||||
|
||||
# The address on the local server to listen to.
|
||||
KUBE_API_ADDRESS="--address=0.0.0.0"
|
||||
|
||||
# The port on the local server to listen on.
|
||||
# KUBE_API_PORT="--port=8080"
|
||||
|
||||
# Port minions listen on
|
||||
# KUBELET_PORT="--kubelet_port=10250"
|
||||
|
||||
# Address range to use for services
|
||||
KUBE_SERVICE_ADDRESSES="--portal_net={{ kube_service_addresses }}"
|
||||
|
||||
# Location of the etcd cluster
|
||||
KUBE_ETCD_SERVERS="--etcd_servers=http://{{ groups['etcd'][0] }}:2379"
|
||||
|
||||
# default admission control policies
|
||||
KUBE_ADMISSION_CONTROL="--admission_control=NamespaceAutoProvision,LimitRanger,ResourceQuota"
|
||||
|
||||
# Add your own!
|
||||
KUBE_API_ARGS=""
|
16
contrib/ansible/roles/master/templates/node.j2
Normal file
16
contrib/ansible/roles/master/templates/node.j2
Normal file
@ -0,0 +1,16 @@
|
||||
{
|
||||
"apiVersion": "v1beta3",
|
||||
"kind": "Node",
|
||||
"metadata": {
|
||||
"name": "{{ item }}"
|
||||
},
|
||||
"spec": {
|
||||
"externalID": "{{ item }}"
|
||||
},
|
||||
"status": {
|
||||
"capacity": {
|
||||
"cpu": "1",
|
||||
"memory": "1"
|
||||
}
|
||||
}
|
||||
}
|
7
contrib/ansible/roles/minion/files/proxy
Normal file
7
contrib/ansible/roles/minion/files/proxy
Normal file
@ -0,0 +1,7 @@
|
||||
###
|
||||
# kubernetes proxy config
|
||||
|
||||
# default config should be adequate
|
||||
|
||||
# Add your own!
|
||||
KUBE_PROXY_ARGS=""
|
15
contrib/ansible/roles/minion/handlers/main.yml
Normal file
15
contrib/ansible/roles/minion/handlers/main.yml
Normal file
@ -0,0 +1,15 @@
|
||||
---
|
||||
- name: restart daemons
|
||||
command: /bin/true
|
||||
notify:
|
||||
- restart kubelet
|
||||
- restart proxy
|
||||
|
||||
- name: restart kubelet
|
||||
service: name=kubelet state=restarted
|
||||
|
||||
- name: restart proxy
|
||||
service: name=kube-proxy state=restarted
|
||||
|
||||
- name: restart iptables
|
||||
service: name=iptables state=restarted
|
10
contrib/ansible/roles/minion/tasks/firewalld.yml
Normal file
10
contrib/ansible/roles/minion/tasks/firewalld.yml
Normal file
@ -0,0 +1,10 @@
|
||||
---
|
||||
# https://bugzilla.redhat.com/show_bug.cgi?id=1033606 and I think others say firewalld+docker == bad
|
||||
- name: disable firewalld
|
||||
service: name=firewalld enabled=no state=stopped
|
||||
|
||||
#- name: Open firewalld port for the kubelet
|
||||
#firewalld: port=10250/tcp permanent=false state=enabled
|
||||
|
||||
#- name: Save firewalld port for the kubelet
|
||||
#firewalld: port=10250/tcp permanent=true state=enabled
|
18
contrib/ansible/roles/minion/tasks/iptables.yml
Normal file
18
contrib/ansible/roles/minion/tasks/iptables.yml
Normal file
@ -0,0 +1,18 @@
|
||||
---
# Open the kubelet port via raw iptables on hosts without firewalld.
- name: Get iptables rules
  shell: iptables -L
  register: iptablesrules
  always_run: yes

- name: Enable iptables at boot
  service: name=iptables enabled=yes state=started

- name: Open kubelet port with iptables
  command: /sbin/iptables -I INPUT 1 -p tcp --dport 10250 -j ACCEPT -m comment --comment "kubelet"
  # The literal must be quoted: a bare `kubelet` is evaluated by Jinja as an
  # undefined variable, not as the string "kubelet".
  when: "'kubelet' not in iptablesrules.stdout"
  notify:
    - restart iptables

- name: Save iptables rules
  command: service iptables save
  when: "'kubelet' not in iptablesrules.stdout"
|
22
contrib/ansible/roles/minion/tasks/main.yml
Normal file
22
contrib/ansible/roles/minion/tasks/main.yml
Normal file
@ -0,0 +1,22 @@
|
||||
---
|
||||
- name: write the config files for kubelet
|
||||
template: src=kubelet.j2 dest=/etc/kubernetes/kubelet
|
||||
notify:
|
||||
- restart kubelet
|
||||
|
||||
- name: write the config files for proxy
|
||||
copy: src=proxy dest=/etc/kubernetes/proxy
|
||||
notify:
|
||||
- restart proxy
|
||||
|
||||
- name: Enable kubelet
|
||||
service: name=kubelet enabled=yes state=started
|
||||
|
||||
- name: Enable proxy
|
||||
service: name=kube-proxy enabled=yes state=started
|
||||
|
||||
- include: firewalld.yml
|
||||
when: has_firewalld
|
||||
|
||||
- include: iptables.yml
|
||||
when: not has_firewalld and has_iptables
|
17
contrib/ansible/roles/minion/templates/kubelet.j2
Normal file
17
contrib/ansible/roles/minion/templates/kubelet.j2
Normal file
@ -0,0 +1,17 @@
|
||||
###
|
||||
# kubernetes kubelet (minion) config
|
||||
|
||||
# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
|
||||
KUBELET_ADDRESS="--address=0.0.0.0"
|
||||
|
||||
# The port for the info server to serve on
|
||||
# KUBELET_PORT="--port=10250"
|
||||
|
||||
# You may leave this blank to use the actual hostname
|
||||
KUBELET_HOSTNAME="--hostname_override={{ inventory_hostname }}"
|
||||
|
||||
# location of the api-server
|
||||
KUBELET_API_SERVER="--api_servers=http://{{ groups['masters'][0]}}:8080"
|
||||
|
||||
# Add your own!
|
||||
KUBELET_ARGS=""
|
Loading…
Reference in New Issue
Block a user