diff --git a/contrib/ansible/.gitignore b/contrib/ansible/.gitignore new file mode 100644 index 00000000000..630444b6cd2 --- /dev/null +++ b/contrib/ansible/.gitignore @@ -0,0 +1,2 @@ +myinventory +*.swp diff --git a/contrib/ansible/README.md b/contrib/ansible/README.md new file mode 100644 index 00000000000..5552f448e4a --- /dev/null +++ b/contrib/ansible/README.md @@ -0,0 +1,42 @@ +# Kubernetes Ansible + +This playbook helps you to set up a Kubernetes cluster on machines where you +can't or don't want to use the salt scripts and cluster up/down tools. They +can be real hardware, VMs, things in a public cloud, etc. + +## Usage + +* Record the IP address of the machine you want to be your master +* Record the IP address of the machine you want to be your etcd server (often same as master) +* Record the IP addresses of the machines you want to be your minions. (master can be a minion) + +Stick the system information into the 'inventory' file. + +### Configure your cluster + +You will want to look through all of the options in `group_vars/all.yml` and +set the variables to reflect your needs. The options should be described there +in full detail. + +### Set up the actual kubernetes cluster + +Now run the setup: + + $ ansible-playbook -i inventory cluster.yml + +In general this will work on very recent Fedora, rawhide or F21. Future work to +support RHEL7, CentOS, and possibly other distros should be forthcoming. + +### You can just set up certain parts instead of doing it all + +Only the kubernetes daemons: + + $ ansible-playbook -i inventory kubernetes-services.yml + +Only etcd: + + $ ansible-playbook -i inventory etcd.yml + +Only flannel: + + $ ansible-playbook -i inventory flannel.yml diff --git a/contrib/ansible/cluster.yml b/contrib/ansible/cluster.yml new file mode 100644 index 00000000000..68f7e6a44e6 --- /dev/null +++ b/contrib/ansible/cluster.yml @@ -0,0 +1,4 @@ +# Set up a whole working cluster! 
+- include: etcd.yml +- include: kubernetes-services.yml + diff --git a/contrib/ansible/etcd.yml b/contrib/ansible/etcd.yml new file mode 100644 index 00000000000..4a4540a889d --- /dev/null +++ b/contrib/ansible/etcd.yml @@ -0,0 +1,6 @@ +--- +- hosts: etcd + sudo: yes + roles: + - common + - etcd diff --git a/contrib/ansible/group_vars/all.yml b/contrib/ansible/group_vars/all.yml new file mode 100644 index 00000000000..ae9d7dc2fb0 --- /dev/null +++ b/contrib/ansible/group_vars/all.yml @@ -0,0 +1,17 @@ +# Account name of remote user. Ansible will use this user account to ssh into +# the managed machines. The user must be able to use sudo without asking +# for password unless ansible_sudo_pass is set +ansible_ssh_user: root + +# password for the ansible_ssh_user. If this is unset you will need to set up +# ssh keys so a password is not needed. +#ansible_ssh_pass: password + +# If a password is needed to sudo to root that password must be set here +#ansible_sudo_pass: password + +# Kubernetes internal network for services. +# Kubernetes services will get fake IP addresses from this range. +# This range must not conflict with anything in your infrastructure. These +# addresses do not need to be routable and must just be an unused block of space. 
+kube_service_addresses: 10.254.0.0/16 diff --git a/contrib/ansible/inventory b/contrib/ansible/inventory new file mode 100644 index 00000000000..07af7a61b30 --- /dev/null +++ b/contrib/ansible/inventory @@ -0,0 +1,10 @@ +[masters] +10.0.0.1 + +[etcd] +10.0.0.2 + +[minions] +10.0.0.3 +10.0.0.4 +10.0.0.5 diff --git a/contrib/ansible/kubernetes-services.yml b/contrib/ansible/kubernetes-services.yml new file mode 100644 index 00000000000..a5e53974f43 --- /dev/null +++ b/contrib/ansible/kubernetes-services.yml @@ -0,0 +1,17 @@ +--- +- hosts: masters:minions + sudo: yes + roles: + - common + +- hosts: masters + sudo: yes + roles: + - kubernetes + - master + +- hosts: minions + sudo: yes + roles: + - kubernetes + - minion diff --git a/contrib/ansible/roles/common/tasks/main.yml b/contrib/ansible/roles/common/tasks/main.yml new file mode 100644 index 00000000000..5404be01616 --- /dev/null +++ b/contrib/ansible/roles/common/tasks/main.yml @@ -0,0 +1,18 @@ +--- +- name: Determine if Atomic + stat: path=/run/ostree-booted + register: s + changed_when: false + +- name: Init the is_atomic fact + set_fact: + is_atomic: false + +- name: Set the is_atomic fact + set_fact: + is_atomic: true + when: s.stat.exists + +# collect information about what packages are installed +- include: rpm.yml + when: ansible_pkg_mgr == "yum" diff --git a/contrib/ansible/roles/common/tasks/rpm.yml b/contrib/ansible/roles/common/tasks/rpm.yml new file mode 100644 index 00000000000..959be43dbdf --- /dev/null +++ b/contrib/ansible/roles/common/tasks/rpm.yml @@ -0,0 +1,30 @@ +--- +- name: Determine if firewalld installed + command: "rpm -q firewalld" + register: s + changed_when: false + failed_when: false + +- name: Init the has_firewalld fact + set_fact: + has_firewalld: false + +- name: Set the has_firewalld fact + set_fact: + has_firewalld: true + when: s.rc == 0 + +- name: Determine if iptables-services installed + command: "rpm -q iptables-services" + register: s + changed_when: false + failed_when: 
false + +- name: Init the has_iptables fact + set_fact: + has_iptables: false + +- name: Set the has_iptables fact + set_fact: + has_iptables: true + when: s.rc == 0 diff --git a/contrib/ansible/roles/etcd/handlers/main.yml b/contrib/ansible/roles/etcd/handlers/main.yml new file mode 100644 index 00000000000..9218108772c --- /dev/null +++ b/contrib/ansible/roles/etcd/handlers/main.yml @@ -0,0 +1,6 @@ +--- +- name: restart etcd + service: name=etcd state=restarted + +- name: Save iptables rules + command: service iptables save diff --git a/contrib/ansible/roles/etcd/tasks/firewalld.yml b/contrib/ansible/roles/etcd/tasks/firewalld.yml new file mode 100644 index 00000000000..8d730fd2e47 --- /dev/null +++ b/contrib/ansible/roles/etcd/tasks/firewalld.yml @@ -0,0 +1,16 @@ +--- +- name: Open firewalld port for etcd + firewalld: port={{ item }}/tcp permanent=false state=enabled + # in case this is also a minion where firewalld turned off + ignore_errors: yes + with_items: + - 2379 + - 2380 + +- name: Save firewalld port for etcd + firewalld: port={{ item }}/tcp permanent=true state=enabled + # in case this is also a minion where firewalld turned off + ignore_errors: yes + with_items: + - 2379 + - 2380 diff --git a/contrib/ansible/roles/etcd/tasks/iptables.yml b/contrib/ansible/roles/etcd/tasks/iptables.yml new file mode 100644 index 00000000000..a1035247d94 --- /dev/null +++ b/contrib/ansible/roles/etcd/tasks/iptables.yml @@ -0,0 +1,17 @@ +--- +- name: Get iptables rules + shell: iptables -L + register: iptablesrules + always_run: yes + +- name: Enable iptables at boot + service: name=iptables enabled=yes state=started + +- name: Open etcd client port with iptables + command: /sbin/iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "etcd" + when: "'etcd' not in iptablesrules.stdout" + notify: + - Save iptables rules + with_items: + - 2379 + - 2380 diff --git a/contrib/ansible/roles/etcd/tasks/main.yml b/contrib/ansible/roles/etcd/tasks/main.yml new 
file mode 100644 index 00000000000..c655f5df1d4 --- /dev/null +++ b/contrib/ansible/roles/etcd/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- name: Install etcd + yum: pkg=etcd state=latest + notify: + - restart etcd + when: not is_atomic + +- name: Write etcd config file + template: src=etcd.conf.j2 dest=/etc/etcd/etcd.conf + notify: + - restart etcd + +- name: Enable etcd + service: name=etcd enabled=yes state=started + +- include: firewalld.yml + when: has_firewalld + +- include: iptables.yml + when: not has_firewalld and has_iptables diff --git a/contrib/ansible/roles/etcd/templates/etcd.conf.j2 b/contrib/ansible/roles/etcd/templates/etcd.conf.j2 new file mode 100644 index 00000000000..1ce5063c903 --- /dev/null +++ b/contrib/ansible/roles/etcd/templates/etcd.conf.j2 @@ -0,0 +1,4 @@ +# etcd2.0 +ETCD_NAME=default +ETCD_DATA_DIR="/var/lib/etcd/default.etcd" +ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379" diff --git a/contrib/ansible/roles/kubernetes/tasks/fedora.yml b/contrib/ansible/roles/kubernetes/tasks/fedora.yml new file mode 100644 index 00000000000..be021d2d9d3 --- /dev/null +++ b/contrib/ansible/roles/kubernetes/tasks/fedora.yml @@ -0,0 +1,4 @@ +--- +- name: Remove docker window manager on F20 + yum: pkg=docker state=absent + when: not is_atomic and ansible_distribution_major_version == "20" diff --git a/contrib/ansible/roles/kubernetes/tasks/main.yml b/contrib/ansible/roles/kubernetes/tasks/main.yml new file mode 100644 index 00000000000..b03c55358b3 --- /dev/null +++ b/contrib/ansible/roles/kubernetes/tasks/main.yml @@ -0,0 +1,14 @@ +--- +- include: fedora.yml + when: ansible_distribution == "Fedora" + +- name: Install kubernetes + yum: pkg=kubernetes state=latest + notify: + - restart daemons + when: not is_atomic + +- name: write the global config file + template: src=config.j2 dest=/etc/kubernetes/config + notify: + - restart daemons diff --git a/contrib/ansible/roles/kubernetes/templates/config.j2 b/contrib/ansible/roles/kubernetes/templates/config.j2 new 
file mode 100644 index 00000000000..ccf38567ec1 --- /dev/null +++ b/contrib/ansible/roles/kubernetes/templates/config.j2 @@ -0,0 +1,23 @@ +### +# kubernetes system config +# +# The following values are used to configure various aspects of all +# kubernetes services, including +# +# kube-apiserver.service +# kube-controller-manager.service +# kube-scheduler.service +# kubelet.service +# kube-proxy.service + +# logging to stderr means we get it in the systemd journal +KUBE_LOGTOSTDERR="--logtostderr=true" + +# journal message level, 0 is debug +KUBE_LOG_LEVEL="--v=0" + +# Should this cluster be allowed to run privileged docker containers +KUBE_ALLOW_PRIV="--allow_privileged=true" + +# How the replication controller, scheduler, and proxy find the apiserver +KUBE_MASTER="--master=http://{{ groups['masters'][0] }}:8080" diff --git a/contrib/ansible/roles/master/files/controller-manager b/contrib/ansible/roles/master/files/controller-manager new file mode 100644 index 00000000000..8d8f4915684 --- /dev/null +++ b/contrib/ansible/roles/master/files/controller-manager @@ -0,0 +1,7 @@ +### +# The following values are used to configure the kubernetes controller-manager + +# defaults from config and apiserver should be adequate + +# Add your own! +KUBE_CONTROLLER_MANAGER_ARGS="" diff --git a/contrib/ansible/roles/master/files/scheduler b/contrib/ansible/roles/master/files/scheduler new file mode 100644 index 00000000000..f6fc507b72c --- /dev/null +++ b/contrib/ansible/roles/master/files/scheduler @@ -0,0 +1,7 @@ +### +# kubernetes scheduler config + +# default config should be adequate + +# Add your own! 
+KUBE_SCHEDULER_ARGS="" diff --git a/contrib/ansible/roles/master/handlers/main.yml b/contrib/ansible/roles/master/handlers/main.yml new file mode 100644 index 00000000000..fc40e9017de --- /dev/null +++ b/contrib/ansible/roles/master/handlers/main.yml @@ -0,0 +1,19 @@ +--- +- name: restart daemons + command: /bin/true + notify: + - restart apiserver + - restart controller-manager + - restart scheduler + +- name: restart apiserver + service: name=kube-apiserver state=restarted + +- name: restart controller-manager + service: name=kube-controller-manager state=restarted + +- name: restart scheduler + service: name=kube-scheduler state=restarted + +- name: restart iptables + service: name=iptables state=restarted diff --git a/contrib/ansible/roles/master/tasks/firewalld.yml b/contrib/ansible/roles/master/tasks/firewalld.yml new file mode 100644 index 00000000000..752094dc997 --- /dev/null +++ b/contrib/ansible/roles/master/tasks/firewalld.yml @@ -0,0 +1,10 @@ +--- +- name: Open firewalld port for apiserver + firewalld: port=8080/tcp permanent=false state=enabled + # in case this is also a minion with firewalld turned off + ignore_errors: yes + +- name: Save firewalld port for apiserver + firewalld: port=8080/tcp permanent=true state=enabled + # in case this is also a minion with firewalld turned off + ignore_errors: yes diff --git a/contrib/ansible/roles/master/tasks/iptables.yml b/contrib/ansible/roles/master/tasks/iptables.yml new file mode 100644 index 00000000000..a9570f108a9 --- /dev/null +++ b/contrib/ansible/roles/master/tasks/iptables.yml @@ -0,0 +1,15 @@ +--- +- name: Get iptables rules + shell: iptables -L + register: iptablesrules + always_run: yes + +- name: Open apiserver port with iptables + command: /sbin/iptables -I INPUT 1 -p tcp --dport 8080 -j ACCEPT -m comment --comment "kube-apiserver" + when: "'kube-apiserver' not in iptablesrules.stdout" + notify: + - restart iptables + +- name: Save iptables rules + command: service iptables save + when: 
"'kube-apiserver' not in iptablesrules.stdout" diff --git a/contrib/ansible/roles/master/tasks/main.yml b/contrib/ansible/roles/master/tasks/main.yml new file mode 100644 index 00000000000..25c0f6ce14f --- /dev/null +++ b/contrib/ansible/roles/master/tasks/main.yml @@ -0,0 +1,53 @@ +--- +- name: write the config file for the api server + template: src=apiserver.j2 dest=/etc/kubernetes/apiserver + notify: + - restart apiserver + +- name: write the config file for the controller-manager + copy: src=controller-manager dest=/etc/kubernetes/controller-manager + notify: + - restart controller-manager + +- name: write the config file for the scheduler + copy: src=scheduler dest=/etc/kubernetes/scheduler + notify: + - restart scheduler + +- name: Enable apiserver + service: name=kube-apiserver enabled=yes state=started + +- name: Enable controller-manager + service: name=kube-controller-manager enabled=yes state=started + +- name: Enable scheduler + service: name=kube-scheduler enabled=yes state=started + +- name: Copy minion definition json files to master + template: src=node.j2 dest=/tmp/node-{{ item }}.json + changed_when: false + with_items: + groups['minions'] + when: inventory_hostname == groups['masters'][0] + +- name: Load minion definition into master + command: /usr/bin/kubectl create -f /tmp/node-{{ item }}.json + register: command_result + failed_when: command_result.rc != 0 and 'already exists' not in command_result.stderr + changed_when: "command_result.rc == 0" + with_items: + groups['minions'] + when: inventory_hostname == groups['masters'][0] + +- name: Delete minion definitions from master + file: path=/tmp/node-{{ item }}.json state=absent + changed_when: false + with_items: + groups['minions'] + when: inventory_hostname == groups['masters'][0] + +- include: firewalld.yml + when: has_firewalld + +- include: iptables.yml + when: not has_firewalld and has_iptables diff --git a/contrib/ansible/roles/master/templates/apiserver.j2 
b/contrib/ansible/roles/master/templates/apiserver.j2 new file mode 100644 index 00000000000..a8a89e826eb --- /dev/null +++ b/contrib/ansible/roles/master/templates/apiserver.j2 @@ -0,0 +1,26 @@ +### +# kubernetes system config +# +# The following values are used to configure the kube-apiserver +# + +# The address on the local server to listen to. +KUBE_API_ADDRESS="--address=0.0.0.0" + +# The port on the local server to listen on. +# KUBE_API_PORT="--port=8080" + +# Port minions listen on +# KUBELET_PORT="--kubelet_port=10250" + +# Address range to use for services +KUBE_SERVICE_ADDRESSES="--portal_net={{ kube_service_addresses }}" + +# Location of the etcd cluster +KUBE_ETCD_SERVERS="--etcd_servers=http://{{ groups['etcd'][0] }}:2379" + +# default admission control policies +KUBE_ADMISSION_CONTROL="--admission_control=NamespaceAutoProvision,LimitRanger,ResourceQuota" + +# Add you own! +KUBE_API_ARGS="" diff --git a/contrib/ansible/roles/master/templates/node.j2 b/contrib/ansible/roles/master/templates/node.j2 new file mode 100644 index 00000000000..f96459f8e09 --- /dev/null +++ b/contrib/ansible/roles/master/templates/node.j2 @@ -0,0 +1,16 @@ +{ + "apiVersion": "v1beta3", + "kind": "Node", + "metadata": { + "name": "{{ item }}" + }, + "spec": { + "externalID": "{{ item }}" + }, + "status": { + "capacity": { + "cpu": "1", + "memory": "1" + } + } +} diff --git a/contrib/ansible/roles/minion/files/proxy b/contrib/ansible/roles/minion/files/proxy new file mode 100644 index 00000000000..034276831ba --- /dev/null +++ b/contrib/ansible/roles/minion/files/proxy @@ -0,0 +1,7 @@ +### +# kubernetes proxy config + +# default config should be adequate + +# Add your own! 
+KUBE_PROXY_ARGS="" diff --git a/contrib/ansible/roles/minion/handlers/main.yml b/contrib/ansible/roles/minion/handlers/main.yml new file mode 100644 index 00000000000..196a3a6e5d9 --- /dev/null +++ b/contrib/ansible/roles/minion/handlers/main.yml @@ -0,0 +1,15 @@ +--- +- name: restart daemons + command: /bin/true + notify: + - restart kubelet + - restart proxy + +- name: restart kubelet + service: name=kubelet state=restarted + +- name: restart proxy + service: name=kube-proxy state=restarted + +- name: restart iptables + service: name=iptables state=restarted diff --git a/contrib/ansible/roles/minion/tasks/firewalld.yml b/contrib/ansible/roles/minion/tasks/firewalld.yml new file mode 100644 index 00000000000..24955a1870e --- /dev/null +++ b/contrib/ansible/roles/minion/tasks/firewalld.yml @@ -0,0 +1,10 @@ +--- +# https://bugzilla.redhat.com/show_bug.cgi?id=1033606 and I think others say firewalld+docker == bad +- name: disable firewalld + service: name=firewalld enabled=no state=stopped + +#- name: Open firewalld port for the kubelet +#firewalld: port=10250/tcp permanent=false state=enabled + +#- name: Save firewalld port for the kubelet +#firewalld: port=10250/tcp permanent=true state=enabled diff --git a/contrib/ansible/roles/minion/tasks/iptables.yml b/contrib/ansible/roles/minion/tasks/iptables.yml new file mode 100644 index 00000000000..5d5ae17f733 --- /dev/null +++ b/contrib/ansible/roles/minion/tasks/iptables.yml @@ -0,0 +1,18 @@ +--- +- name: Get iptables rules + shell: iptables -L + register: iptablesrules + always_run: yes + +- name: Enable iptables at boot + service: name=iptables enabled=yes state=started + +- name: Open kubelet port with iptables + command: /sbin/iptables -I INPUT 1 -p tcp --dport 10250 -j ACCEPT -m comment --comment "kubelet" + when: "'kubelet' not in iptablesrules.stdout" + notify: + - restart iptables + +- name: Save iptables rules + command: service iptables save + when: "'kubelet' not in iptablesrules.stdout" diff --git 
a/contrib/ansible/roles/minion/tasks/main.yml b/contrib/ansible/roles/minion/tasks/main.yml new file mode 100644 index 00000000000..2cafd18639a --- /dev/null +++ b/contrib/ansible/roles/minion/tasks/main.yml @@ -0,0 +1,22 @@ +--- +- name: write the config files for kubelet + template: src=kubelet.j2 dest=/etc/kubernetes/kubelet + notify: + - restart kubelet + +- name: write the config files for proxy + copy: src=proxy dest=/etc/kubernetes/proxy + notify: + - restart proxy + +- name: Enable kubelet + service: name=kubelet enabled=yes state=started + +- name: Enable proxy + service: name=kube-proxy enabled=yes state=started + +- include: firewalld.yml + when: has_firewalld + +- include: iptables.yml + when: not has_firewalld and has_iptables diff --git a/contrib/ansible/roles/minion/templates/kubelet.j2 b/contrib/ansible/roles/minion/templates/kubelet.j2 new file mode 100644 index 00000000000..d1edc84cb56 --- /dev/null +++ b/contrib/ansible/roles/minion/templates/kubelet.j2 @@ -0,0 +1,17 @@ +### +# kubernetes kubelet (minion) config + +# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces) +KUBELET_ADDRESS="--address=0.0.0.0" + +# The port for the info server to serve on +# KUBELET_PORT="--port=10250" + +# You may leave this blank to use the actual hostname +KUBELET_HOSTNAME="--hostname_override={{ inventory_hostname }}" + +# location of the api-server +KUBELET_API_SERVER="--api_servers=http://{{ groups['masters'][0]}}:8080" + +# Add your own! +KUBELET_ARGS=""