From 35c7b16592c87f26351fbcb9af8ff100ce9cdeb7 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Mon, 30 Mar 2015 17:21:08 -0400 Subject: [PATCH 1/8] example ansible setup repo This is a basic ansible repo that will do a couple of things 1) set up an etcd node 2) set up a master running apiserver, scheduler, controller-manager 3) setup any number of nodes Hopefully this can be expanded to do things like set up skydns, set up a private docker repo, set up an overlay network (flannel) etc etc. But right now all it does is set up etcd and configure a master and nodes. --- contrib/ansible/.gitignore | 2 + contrib/ansible/cluster.yml | 4 ++ contrib/ansible/etcd.yml | 6 +++ contrib/ansible/group_vars/all.yml | 3 ++ contrib/ansible/group_vars/masters.yml | 1 + contrib/ansible/inventory | 10 ++++ contrib/ansible/kubernetes-services.yml | 17 +++++++ contrib/ansible/library/rpm_facts.py | 37 ++++++++++++++ contrib/ansible/roles/common/tasks/main.yml | 18 +++++++ contrib/ansible/roles/etcd/handlers/main.yml | 6 +++ .../ansible/roles/etcd/tasks/firewalld.yml | 16 ++++++ contrib/ansible/roles/etcd/tasks/iptables.yml | 17 +++++++ contrib/ansible/roles/etcd/tasks/main.yml | 20 ++++++++ .../ansible/roles/etcd/templates/etcd.conf.j2 | 4 ++ .../ansible/roles/kubernetes/tasks/fedora.yml | 4 ++ .../ansible/roles/kubernetes/tasks/main.yml | 14 ++++++ .../roles/kubernetes/templates/config.j2 | 23 +++++++++ .../roles/master/files/controller-manager | 7 +++ contrib/ansible/roles/master/files/scheduler | 7 +++ .../ansible/roles/master/handlers/main.yml | 19 +++++++ .../ansible/roles/master/tasks/firewalld.yml | 10 ++++ .../ansible/roles/master/tasks/iptables.yml | 15 ++++++ contrib/ansible/roles/master/tasks/main.yml | 50 +++++++++++++++++++ .../roles/master/templates/apiserver.j2 | 26 ++++++++++ .../ansible/roles/master/templates/node.j2 | 3 ++ contrib/ansible/roles/minion/files/proxy | 7 +++ .../ansible/roles/minion/handlers/main.yml | 15 ++++++ .../ansible/roles/minion/tasks/firewalld.yml 
| 10 ++++ .../ansible/roles/minion/tasks/iptables.yml | 18 +++++++ contrib/ansible/roles/minion/tasks/main.yml | 22 ++++++++ .../ansible/roles/minion/templates/kubelet.j2 | 17 +++++++ 31 files changed, 428 insertions(+) create mode 100644 contrib/ansible/.gitignore create mode 100644 contrib/ansible/cluster.yml create mode 100644 contrib/ansible/etcd.yml create mode 100644 contrib/ansible/group_vars/all.yml create mode 100644 contrib/ansible/group_vars/masters.yml create mode 100644 contrib/ansible/inventory create mode 100644 contrib/ansible/kubernetes-services.yml create mode 100644 contrib/ansible/library/rpm_facts.py create mode 100644 contrib/ansible/roles/common/tasks/main.yml create mode 100644 contrib/ansible/roles/etcd/handlers/main.yml create mode 100644 contrib/ansible/roles/etcd/tasks/firewalld.yml create mode 100644 contrib/ansible/roles/etcd/tasks/iptables.yml create mode 100644 contrib/ansible/roles/etcd/tasks/main.yml create mode 100644 contrib/ansible/roles/etcd/templates/etcd.conf.j2 create mode 100644 contrib/ansible/roles/kubernetes/tasks/fedora.yml create mode 100644 contrib/ansible/roles/kubernetes/tasks/main.yml create mode 100644 contrib/ansible/roles/kubernetes/templates/config.j2 create mode 100644 contrib/ansible/roles/master/files/controller-manager create mode 100644 contrib/ansible/roles/master/files/scheduler create mode 100644 contrib/ansible/roles/master/handlers/main.yml create mode 100644 contrib/ansible/roles/master/tasks/firewalld.yml create mode 100644 contrib/ansible/roles/master/tasks/iptables.yml create mode 100644 contrib/ansible/roles/master/tasks/main.yml create mode 100644 contrib/ansible/roles/master/templates/apiserver.j2 create mode 100644 contrib/ansible/roles/master/templates/node.j2 create mode 100644 contrib/ansible/roles/minion/files/proxy create mode 100644 contrib/ansible/roles/minion/handlers/main.yml create mode 100644 contrib/ansible/roles/minion/tasks/firewalld.yml create mode 100644 
contrib/ansible/roles/minion/tasks/iptables.yml create mode 100644 contrib/ansible/roles/minion/tasks/main.yml create mode 100644 contrib/ansible/roles/minion/templates/kubelet.j2 diff --git a/contrib/ansible/.gitignore b/contrib/ansible/.gitignore new file mode 100644 index 00000000000..630444b6cd2 --- /dev/null +++ b/contrib/ansible/.gitignore @@ -0,0 +1,2 @@ +myinventory +*.swp diff --git a/contrib/ansible/cluster.yml b/contrib/ansible/cluster.yml new file mode 100644 index 00000000000..68f7e6a44e6 --- /dev/null +++ b/contrib/ansible/cluster.yml @@ -0,0 +1,4 @@ +# Set up a whole working cluster! +- include: etcd.yml +- include: kubernetes-services.yml + diff --git a/contrib/ansible/etcd.yml b/contrib/ansible/etcd.yml new file mode 100644 index 00000000000..4a4540a889d --- /dev/null +++ b/contrib/ansible/etcd.yml @@ -0,0 +1,6 @@ +--- +- hosts: etcd + sudo: yes + roles: + - common + - etcd diff --git a/contrib/ansible/group_vars/all.yml b/contrib/ansible/group_vars/all.yml new file mode 100644 index 00000000000..ae2fbc8c954 --- /dev/null +++ b/contrib/ansible/group_vars/all.yml @@ -0,0 +1,3 @@ +ansible_ssh_user: root +#ansible_ssh_pass: password +#ansible_sudo_pass: password diff --git a/contrib/ansible/group_vars/masters.yml b/contrib/ansible/group_vars/masters.yml new file mode 100644 index 00000000000..f03cd1d3196 --- /dev/null +++ b/contrib/ansible/group_vars/masters.yml @@ -0,0 +1 @@ +kube_service_addresses: 10.254.0.0/16 # MUST be defined as a range not used in your infrastructure diff --git a/contrib/ansible/inventory b/contrib/ansible/inventory new file mode 100644 index 00000000000..07af7a61b30 --- /dev/null +++ b/contrib/ansible/inventory @@ -0,0 +1,10 @@ +[masters] +10.0.0.1 + +[etcd] +10.0.0.2 + +[minions] +10.0.0.3 +10.0.0.4 +10.0.0.5 diff --git a/contrib/ansible/kubernetes-services.yml b/contrib/ansible/kubernetes-services.yml new file mode 100644 index 00000000000..a5e53974f43 --- /dev/null +++ b/contrib/ansible/kubernetes-services.yml @@ -0,0 +1,17 
@@ +--- +- hosts: masters:minions + sudo: yes + roles: + - common + +- hosts: masters + sudo: yes + roles: + - kubernetes + - master + +- hosts: minions + sudo: yes + roles: + - kubernetes + - minion diff --git a/contrib/ansible/library/rpm_facts.py b/contrib/ansible/library/rpm_facts.py new file mode 100644 index 00000000000..292197e6677 --- /dev/null +++ b/contrib/ansible/library/rpm_facts.py @@ -0,0 +1,37 @@ +#!/usr/bin/python + +import subprocess +import re + +def main(): + module = AnsibleModule( + argument_spec = dict( + ), + ) + + facts = {} + + result = {} + result['rc'] = 0 + result['changed'] = False + result['ansible_facts'] = facts + + args = ("rpm", "-q", "firewalld") + popen = subprocess.Popen(args, stdout=subprocess.PIPE) + rc = popen.wait() + facts['has_firewalld'] = False + if rc == 0: + facts['has_firewalld'] = True + + args = ("rpm", "-q", "iptables-services") + popen = subprocess.Popen(args, stdout=subprocess.PIPE) + rc = popen.wait() + facts['has_iptables'] = False + if rc == 0: + facts['has_iptables'] = True + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/contrib/ansible/roles/common/tasks/main.yml b/contrib/ansible/roles/common/tasks/main.yml new file mode 100644 index 00000000000..081b2fe5458 --- /dev/null +++ b/contrib/ansible/roles/common/tasks/main.yml @@ -0,0 +1,18 @@ +--- +- name: Determine if Atomic + stat: path=/run/ostree-booted + register: s + changed_when: false + +- name: Init the is_atomic fact + set_fact: + is_atomic: false + +- name: Set the is_atomic fact + set_fact: + is_atomic: true + when: s.stat.exists + +- name: Collect fact about what RPM's are installed + rpm_facts: + when: ansible_pkg_mgr == "yum" diff --git a/contrib/ansible/roles/etcd/handlers/main.yml b/contrib/ansible/roles/etcd/handlers/main.yml new file mode 100644 index 00000000000..9218108772c --- /dev/null +++ b/contrib/ansible/roles/etcd/handlers/main.yml @@ -0,0 +1,6 @@ +--- +- name: 
restart etcd + service: name=etcd state=restarted + +- name: Save iptables rules + command: service iptables save diff --git a/contrib/ansible/roles/etcd/tasks/firewalld.yml b/contrib/ansible/roles/etcd/tasks/firewalld.yml new file mode 100644 index 00000000000..0321918b9a7 --- /dev/null +++ b/contrib/ansible/roles/etcd/tasks/firewalld.yml @@ -0,0 +1,16 @@ +--- +- name: Open firewalld port for etcd + firewalld: port={{ item }}/tcp permanent=false state=enabled + # in case this is also a minion where firewalld turned off + ignore_errors: yes + with_items: + - 4001 + - 7001 + +- name: Save firewalld port for etcd + firewalld: port={{ item }}/tcp permanent=true state=enabled + # in case this is also a minion where firewalld turned off + ignore_errors: yes + with_items: + - 4001 + - 7001 diff --git a/contrib/ansible/roles/etcd/tasks/iptables.yml b/contrib/ansible/roles/etcd/tasks/iptables.yml new file mode 100644 index 00000000000..8be5040c538 --- /dev/null +++ b/contrib/ansible/roles/etcd/tasks/iptables.yml @@ -0,0 +1,17 @@ +--- +- name: Get iptables rules + shell: iptables -L + register: iptablesrules + always_run: yes + +- name: Enable iptables at boot + service: name=iptables enabled=yes state=started + +- name: Open etcd client port with iptables + command: /sbin/iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "etcd_client" + when: "'etcd_client' not in iptablesrules.stdout" + notify: + - Save iptables rules + with_items: + - 4001 + - 7001 diff --git a/contrib/ansible/roles/etcd/tasks/main.yml b/contrib/ansible/roles/etcd/tasks/main.yml new file mode 100644 index 00000000000..c655f5df1d4 --- /dev/null +++ b/contrib/ansible/roles/etcd/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- name: Install etcd + yum: pkg=etcd state=latest + notify: + - restart etcd + when: not is_atomic + +- name: Write etcd config file + template: src=etcd.conf.j2 dest=/etc/etcd/etcd.conf + notify: + - restart etcd + +- name: Enable etcd + service: name=etcd enabled=yes 
state=started + +- include: firewalld.yml + when: has_firewalld + +- include: iptables.yml + when: not has_firewalld and has_iptables diff --git a/contrib/ansible/roles/etcd/templates/etcd.conf.j2 b/contrib/ansible/roles/etcd/templates/etcd.conf.j2 new file mode 100644 index 00000000000..33e703f2403 --- /dev/null +++ b/contrib/ansible/roles/etcd/templates/etcd.conf.j2 @@ -0,0 +1,4 @@ +# etcd2.0 +ETCD_NAME=default +ETCD_DATA_DIR="/var/lib/etcd/default.etcd" +ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:4001" diff --git a/contrib/ansible/roles/kubernetes/tasks/fedora.yml b/contrib/ansible/roles/kubernetes/tasks/fedora.yml new file mode 100644 index 00000000000..be021d2d9d3 --- /dev/null +++ b/contrib/ansible/roles/kubernetes/tasks/fedora.yml @@ -0,0 +1,4 @@ +--- +- name: Remove docker window manager on F20 + yum: pkg=docker state=absent + when: not is_atomic and ansible_distribution_major_version == "20" diff --git a/contrib/ansible/roles/kubernetes/tasks/main.yml b/contrib/ansible/roles/kubernetes/tasks/main.yml new file mode 100644 index 00000000000..b03c55358b3 --- /dev/null +++ b/contrib/ansible/roles/kubernetes/tasks/main.yml @@ -0,0 +1,14 @@ +--- +- include: fedora.yml + when: ansible_distribution == "Fedora" + +- name: Install kubernetes + yum: pkg=kubernetes state=latest + notify: + - restart daemons + when: not is_atomic + +- name: write the global config file + template: src=config.j2 dest=/etc/kubernetes/config + notify: + - restart daemons diff --git a/contrib/ansible/roles/kubernetes/templates/config.j2 b/contrib/ansible/roles/kubernetes/templates/config.j2 new file mode 100644 index 00000000000..ccf38567ec1 --- /dev/null +++ b/contrib/ansible/roles/kubernetes/templates/config.j2 @@ -0,0 +1,23 @@ +### +# kubernetes system config +# +# The following values are used to configure various aspects of all +# kubernetes services, including +# +# kube-apiserver.service +# kube-controller-manager.service +# kube-scheduler.service +# kubelet.service +# 
kube-proxy.service + +# logging to stderr means we get it in the systemd journal +KUBE_LOGTOSTDERR="--logtostderr=true" + +# journal message level, 0 is debug +KUBE_LOG_LEVEL="--v=0" + +# Should this cluster be allowed to run privileged docker containers +KUBE_ALLOW_PRIV="--allow_privileged=true" + +# How the replication controller, scheduler, and proxy find the apiserver +KUBE_MASTER="--master=http://{{ groups['masters'][0] }}:8080" diff --git a/contrib/ansible/roles/master/files/controller-manager b/contrib/ansible/roles/master/files/controller-manager new file mode 100644 index 00000000000..8d8f4915684 --- /dev/null +++ b/contrib/ansible/roles/master/files/controller-manager @@ -0,0 +1,7 @@ +### +# The following values are used to configure the kubernetes controller-manager + +# defaults from config and apiserver should be adequate + +# Add your own! +KUBE_CONTROLLER_MANAGER_ARGS="" diff --git a/contrib/ansible/roles/master/files/scheduler b/contrib/ansible/roles/master/files/scheduler new file mode 100644 index 00000000000..f6fc507b72c --- /dev/null +++ b/contrib/ansible/roles/master/files/scheduler @@ -0,0 +1,7 @@ +### +# kubernetes scheduler config + +# default config should be adequate + +# Add your own! 
+KUBE_SCHEDULER_ARGS="" diff --git a/contrib/ansible/roles/master/handlers/main.yml b/contrib/ansible/roles/master/handlers/main.yml new file mode 100644 index 00000000000..fc40e9017de --- /dev/null +++ b/contrib/ansible/roles/master/handlers/main.yml @@ -0,0 +1,19 @@ +--- +- name: restart daemons + command: /bin/true + notify: + - restart apiserver + - restart controller-manager + - restart scheduler + +- name: restart apiserver + service: name=kube-apiserver state=restarted + +- name: restart controller-manager + service: name=kube-controller-manager state=restarted + +- name: restart scheduler + service: name=kube-scheduler state=restarted + +- name: restart iptables + service: name=iptables state=restarted diff --git a/contrib/ansible/roles/master/tasks/firewalld.yml b/contrib/ansible/roles/master/tasks/firewalld.yml new file mode 100644 index 00000000000..752094dc997 --- /dev/null +++ b/contrib/ansible/roles/master/tasks/firewalld.yml @@ -0,0 +1,10 @@ +--- +- name: Open firewalld port for apiserver + firewalld: port=8080/tcp permanent=false state=enabled + # in case this is also a minion with firewalld turned off + ignore_errors: yes + +- name: Save firewalld port for apiserver + firewalld: port=8080/tcp permanent=true state=enabled + # in case this is also a minion with firewalld turned off + ignore_errors: yes diff --git a/contrib/ansible/roles/master/tasks/iptables.yml b/contrib/ansible/roles/master/tasks/iptables.yml new file mode 100644 index 00000000000..a9570f108a9 --- /dev/null +++ b/contrib/ansible/roles/master/tasks/iptables.yml @@ -0,0 +1,15 @@ +--- +- name: Get iptables rules + shell: iptables -L + register: iptablesrules + always_run: yes + +- name: Open apiserver port with iptables + command: /sbin/iptables -I INPUT 1 -p tcp --dport 8080 -j ACCEPT -m comment --comment "kube-apiserver" + when: "'kube-apiserver' not in iptablesrules.stdout" + notify: + - restart iptables + +- name: Save iptables rules + command: service iptables save + when: 
"'kube-apiserver' not in iptablesrules.stdout" diff --git a/contrib/ansible/roles/master/tasks/main.yml b/contrib/ansible/roles/master/tasks/main.yml new file mode 100644 index 00000000000..fc44378ee55 --- /dev/null +++ b/contrib/ansible/roles/master/tasks/main.yml @@ -0,0 +1,50 @@ +--- +- name: write the config file for the api server + template: src=apiserver.j2 dest=/etc/kubernetes/apiserver + notify: + - restart apiserver + +- name: write the config file for the controller-manager + copy: src=controller-manager dest=/etc/kubernetes/controller-manager + notify: + - restart controller-manager + +- name: write the config file for the scheduler + copy: src=scheduler dest=/etc/kubernetes/scheduler + notify: + - restart scheduler + +- name: Enable apiserver + service: name=kube-apiserver enabled=yes state=started + +- name: Enable controller-manager + service: name=kube-controller-manager enabled=yes state=started + +- name: Enable scheduler + service: name=kube-scheduler enabled=yes state=started + +- name: Copy minion definition json files to master + template: src=node.j2 dest=/tmp/node-{{ item }}.yml + changed_when: false + with_items: + groups['minions'] + +- name: Load minion definition into master + command: /usr/bin/kubectl create -f /tmp/node-{{ item }}.yml + register: command_result + failed_when: command_result.rc != 0 and 'already exists' not in command_result.stderr + changed_when: "command_result.rc == 0" + with_items: + groups['minions'] + +- name: Delete minion definitions from master + file: path=/tmp/node-{{ item }}.yml state=absent + changed_when: false + with_items: + groups['minions'] + +- include: firewalld.yml + when: has_firewalld + +- include: iptables.yml + when: not has_firewalld and has_iptables diff --git a/contrib/ansible/roles/master/templates/apiserver.j2 b/contrib/ansible/roles/master/templates/apiserver.j2 new file mode 100644 index 00000000000..6c56a39d15b --- /dev/null +++ b/contrib/ansible/roles/master/templates/apiserver.j2 @@ -0,0 
+1,26 @@ +### +# kubernetes system config +# +# The following values are used to configure the kube-apiserver +# + +# The address on the local server to listen to. +KUBE_API_ADDRESS="--address=0.0.0.0" + +# The port on the local server to listen on. +# KUBE_API_PORT="--port=8080" + +# Port minions listen on +# KUBELET_PORT="--kubelet_port=10250" + +# Address range to use for services +KUBE_SERVICE_ADDRESSES="--portal_net={{ kube_service_addresses }}" + +# Location of the etcd cluster +KUBE_ETCD_SERVERS="--etcd_servers=http://{{ groups['etcd'][0] }}:4001" + +# default admission control policies +KUBE_ADMISSION_CONTROL="--admission_control=NamespaceAutoProvision,LimitRanger,ResourceQuota" + +# Add your own! +KUBE_API_ARGS="" diff --git a/contrib/ansible/roles/master/templates/node.j2 b/contrib/ansible/roles/master/templates/node.j2 new file mode 100644 index 00000000000..590de20f307 --- /dev/null +++ b/contrib/ansible/roles/master/templates/node.j2 @@ -0,0 +1,3 @@ +apiVersion: v1beta1 +id: {{ item }} +kind: Minion diff --git a/contrib/ansible/roles/minion/files/proxy b/contrib/ansible/roles/minion/files/proxy new file mode 100644 index 00000000000..034276831ba --- /dev/null +++ b/contrib/ansible/roles/minion/files/proxy @@ -0,0 +1,7 @@ +### +# kubernetes proxy config + +# default config should be adequate + +# Add your own! 
+KUBE_PROXY_ARGS="" diff --git a/contrib/ansible/roles/minion/handlers/main.yml b/contrib/ansible/roles/minion/handlers/main.yml new file mode 100644 index 00000000000..196a3a6e5d9 --- /dev/null +++ b/contrib/ansible/roles/minion/handlers/main.yml @@ -0,0 +1,15 @@ +--- +- name: restart daemons + command: /bin/true + notify: + - restart kubelet + - restart proxy + +- name: restart kubelet + service: name=kubelet state=restarted + +- name: restart proxy + service: name=kube-proxy state=restarted + +- name: restart iptables + service: name=iptables state=restarted diff --git a/contrib/ansible/roles/minion/tasks/firewalld.yml b/contrib/ansible/roles/minion/tasks/firewalld.yml new file mode 100644 index 00000000000..24955a1870e --- /dev/null +++ b/contrib/ansible/roles/minion/tasks/firewalld.yml @@ -0,0 +1,10 @@ +--- +# https://bugzilla.redhat.com/show_bug.cgi?id=1033606 and I think others say firewalld+docker == bad +- name: disable firewalld + service: name=firewalld enabled=no state=stopped + +#- name: Open firewalld port for the kubelet +#firewalld: port=10250/tcp permanent=false state=enabled + +#- name: Save firewalld port for the kubelet +#firewalld: port=10250/tcp permanent=true state=enabled diff --git a/contrib/ansible/roles/minion/tasks/iptables.yml b/contrib/ansible/roles/minion/tasks/iptables.yml new file mode 100644 index 00000000000..5d5ae17f733 --- /dev/null +++ b/contrib/ansible/roles/minion/tasks/iptables.yml @@ -0,0 +1,18 @@ +--- +- name: Get iptables rules + shell: iptables -L + register: iptablesrules + always_run: yes + +- name: Enable iptables at boot + service: name=iptables enabled=yes state=started + +- name: Open kubelet port with iptables + command: /sbin/iptables -I INPUT 1 -p tcp --dport 10250 -j ACCEPT -m comment --comment "kubelet" + when: "'kubelet' not in iptablesrules.stdout" + notify: + - restart iptables + +- name: Save iptables rules + command: service iptables save + when: "'kubelet' not in iptablesrules.stdout" diff --git 
a/contrib/ansible/roles/minion/tasks/main.yml b/contrib/ansible/roles/minion/tasks/main.yml new file mode 100644 index 00000000000..2cafd18639a --- /dev/null +++ b/contrib/ansible/roles/minion/tasks/main.yml @@ -0,0 +1,22 @@ +--- +- name: write the config files for kubelet + template: src=kubelet.j2 dest=/etc/kubernetes/kubelet + notify: + - restart kubelet + +- name: write the config files for proxy + copy: src=proxy dest=/etc/kubernetes/proxy + notify: + - restart proxy + +- name: Enable kubelet + service: name=kubelet enabled=yes state=started + +- name: Enable proxy + service: name=kube-proxy enabled=yes state=started + +- include: firewalld.yml + when: has_firewalld + +- include: iptables.yml + when: not has_firewalld and has_iptables diff --git a/contrib/ansible/roles/minion/templates/kubelet.j2 b/contrib/ansible/roles/minion/templates/kubelet.j2 new file mode 100644 index 00000000000..d1edc84cb56 --- /dev/null +++ b/contrib/ansible/roles/minion/templates/kubelet.j2 @@ -0,0 +1,17 @@ +### +# kubernetes kubelet (minion) config + +# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces) +KUBELET_ADDRESS="--address=0.0.0.0" + +# The port for the info server to serve on +# KUBELET_PORT="--port=10250" + +# You may leave this blank to use the actual hostname +KUBELET_HOSTNAME="--hostname_override={{ inventory_hostname }}" + +# location of the api-server +KUBELET_API_SERVER="--api_servers=http://{{ groups['masters'][0]}}:8080" + +# Add your own! 
+KUBELET_ARGS="" From c4241fb2ae847487024df613cf1b071cf59772dd Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Fri, 3 Apr 2015 11:51:45 -0400 Subject: [PATCH 2/8] Use v1beta3 node definitions --- contrib/ansible/roles/master/tasks/main.yml | 6 +++--- .../ansible/roles/master/templates/node.j2 | 19 ++++++++++++++++--- 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/contrib/ansible/roles/master/tasks/main.yml b/contrib/ansible/roles/master/tasks/main.yml index fc44378ee55..4273505bb1a 100644 --- a/contrib/ansible/roles/master/tasks/main.yml +++ b/contrib/ansible/roles/master/tasks/main.yml @@ -24,13 +24,13 @@ service: name=kube-scheduler enabled=yes state=started - name: Copy minion definition json files to master - template: src=node.j2 dest=/tmp/node-{{ item }}.yml + template: src=node.j2 dest=/tmp/node-{{ item }}.json changed_when: false with_items: groups['minions'] - name: Load minion definition into master - command: /usr/bin/kubectl create -f /tmp/node-{{ item }}.yml + command: /usr/bin/kubectl create -f /tmp/node-{{ item }}.json register: command_result failed_when: command_result.rc != 0 and 'already exists' not in command_result.stderr changed_when: "command_result.rc == 0" @@ -38,7 +38,7 @@ groups['minions'] - name: Delete minion definitions from master - file: path=/tmp/node-{{ item }}.yml state=absent + file: path=/tmp/node-{{ item }}.json state=absent changed_when: false with_items: groups['minions'] diff --git a/contrib/ansible/roles/master/templates/node.j2 b/contrib/ansible/roles/master/templates/node.j2 index 590de20f307..f96459f8e09 100644 --- a/contrib/ansible/roles/master/templates/node.j2 +++ b/contrib/ansible/roles/master/templates/node.j2 @@ -1,3 +1,16 @@ -apiVersion: v1beta1 -id: {{ item }} -kind: Minion +{ + "apiVersion": "v1beta3", + "kind": "Node", + "metadata": { + "name": "{{ item }}" + }, + "spec": { + "externalID": "{{ item }}" + }, + "status": { + "capacity": { + "cpu": "1", + "memory": "1" + } + } +} From 
0166392cfea4b75b79805d5be242d2eba1edb227 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Fri, 3 Apr 2015 12:00:33 -0400 Subject: [PATCH 3/8] Collect packge info in the repo, not in python There is no need for a python fact collector, just do it in ansible commands instead --- contrib/ansible/library/rpm_facts.py | 37 --------------------- contrib/ansible/roles/common/tasks/main.yml | 4 +-- contrib/ansible/roles/common/tasks/rpm.yml | 30 +++++++++++++++++ 3 files changed, 32 insertions(+), 39 deletions(-) delete mode 100644 contrib/ansible/library/rpm_facts.py create mode 100644 contrib/ansible/roles/common/tasks/rpm.yml diff --git a/contrib/ansible/library/rpm_facts.py b/contrib/ansible/library/rpm_facts.py deleted file mode 100644 index 292197e6677..00000000000 --- a/contrib/ansible/library/rpm_facts.py +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/python - -import subprocess -import re - -def main(): - module = AnsibleModule( - argument_spec = dict( - ), - ) - - facts = {} - - result = {} - result['rc'] = 0 - result['changed'] = False - result['ansible_facts'] = facts - - args = ("rpm", "-q", "firewalld") - popen = subprocess.Popen(args, stdout=subprocess.PIPE) - rc = popen.wait() - facts['has_firewalld'] = False - if rc == 0: - facts['has_firewalld'] = True - - args = ("rpm", "-q", "iptables-services") - popen = subprocess.Popen(args, stdout=subprocess.PIPE) - rc = popen.wait() - facts['has_iptables'] = False - if rc == 0: - facts['has_iptables'] = True - - module.exit_json(**result) - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/contrib/ansible/roles/common/tasks/main.yml b/contrib/ansible/roles/common/tasks/main.yml index 081b2fe5458..5404be01616 100644 --- a/contrib/ansible/roles/common/tasks/main.yml +++ b/contrib/ansible/roles/common/tasks/main.yml @@ -13,6 +13,6 @@ is_atomic: true when: s.stat.exists -- name: Collect fact about what RPM's are installed - rpm_facts: +# collect information about what packages are 
installed +- include: rpm.yml when: ansible_pkg_mgr == "yum" diff --git a/contrib/ansible/roles/common/tasks/rpm.yml b/contrib/ansible/roles/common/tasks/rpm.yml new file mode 100644 index 00000000000..959be43dbdf --- /dev/null +++ b/contrib/ansible/roles/common/tasks/rpm.yml @@ -0,0 +1,30 @@ +--- +- name: Determine if firewalld installed + command: "rpm -q firewalld" + register: s + changed_when: false + failed_when: false + +- name: Init the has_firewalld fact + set_fact: + has_firewalld: false + +- name: Set the has_firewalld fact + set_fact: + has_firewalld: true + when: s.rc == 0 + +- name: Determine if iptables-services installed + command: "rpm -q iptables-services" + register: s + changed_when: false + failed_when: false + +- name: Init the has_iptables fact + set_fact: + has_iptables: false + +- name: Set the has_iptables fact + set_fact: + has_iptables: true + when: s.rc == 0 From b25996dbcda44e98e28a0e75f0ad904048a66ba6 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Fri, 3 Apr 2015 12:10:19 -0400 Subject: [PATCH 4/8] Add comments to configuration variables --- contrib/ansible/group_vars/all.yml | 8 ++++++++ contrib/ansible/group_vars/masters.yml | 6 +++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/contrib/ansible/group_vars/all.yml b/contrib/ansible/group_vars/all.yml index ae2fbc8c954..19047befdd6 100644 --- a/contrib/ansible/group_vars/all.yml +++ b/contrib/ansible/group_vars/all.yml @@ -1,3 +1,11 @@ +# Account name of remote user. Ansible will use this user account to ssh into +# the managed machines. The user must be able to use sudo without asking +# for password unless ansible_sudo_pass is set ansible_ssh_user: root + +# password for the ansible_ssh_user. If this is unset you will need to set up +# ssh keys so a password is not needed. 
#ansible_ssh_pass: password + +# If a password is needed to sudo to root that password must be set here #ansible_sudo_pass: password diff --git a/contrib/ansible/group_vars/masters.yml b/contrib/ansible/group_vars/masters.yml index f03cd1d3196..afc9ff04b42 100644 --- a/contrib/ansible/group_vars/masters.yml +++ b/contrib/ansible/group_vars/masters.yml @@ -1 +1,5 @@ -kube_service_addresses: 10.254.0.0/16 # MUST be defined as a range not used in your infrastructure +# Kubernetes internal network for services. +# Kubernetes services will get fake IP addresses from this range. +# This range must not conflict with anything in your infrastructure. These +# addresses do not need to be routable and must just be an unused block of space. +kube_service_addresses: 10.254.0.0/16 From d3862b5ea2bd710ab036ffa1b47584b9d9ea2e9e Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Fri, 3 Apr 2015 12:39:03 -0400 Subject: [PATCH 5/8] Just put all vars in all.yml The distinction between all/etcd/masters wouldn't make sense for a non-developer --- contrib/ansible/group_vars/all.yml | 6 ++++++ contrib/ansible/group_vars/masters.yml | 5 ----- 2 files changed, 6 insertions(+), 5 deletions(-) delete mode 100644 contrib/ansible/group_vars/masters.yml diff --git a/contrib/ansible/group_vars/all.yml b/contrib/ansible/group_vars/all.yml index 19047befdd6..ae9d7dc2fb0 100644 --- a/contrib/ansible/group_vars/all.yml +++ b/contrib/ansible/group_vars/all.yml @@ -9,3 +9,9 @@ ansible_ssh_user: root # If a password is needed to sudo to root that password must be set here #ansible_sudo_pass: password + +# Kubernetes internal network for services. +# Kubernetes services will get fake IP addresses from this range. +# This range must not conflict with anything in your infrastructure. These +# addresses do not need to be routable and must just be an unused block of space. 
+kube_service_addresses: 10.254.0.0/16 diff --git a/contrib/ansible/group_vars/masters.yml b/contrib/ansible/group_vars/masters.yml deleted file mode 100644 index afc9ff04b42..00000000000 --- a/contrib/ansible/group_vars/masters.yml +++ /dev/null @@ -1,5 +0,0 @@ -# Kubernetes internal network for services. -# Kubernetes services will get fake IP addresses from this range. -# This range must not conflict with anything in your infrastructure. These -# addresses do not need to be routable and must just be an unused block of space. -kube_service_addresses: 10.254.0.0/16 From 4e8a29c2ca282412f1fd9dcb642d4f71c1ba6a0b Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Fri, 3 Apr 2015 12:37:57 -0400 Subject: [PATCH 6/8] Add a README file --- contrib/ansible/README.md | 42 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 contrib/ansible/README.md diff --git a/contrib/ansible/README.md b/contrib/ansible/README.md new file mode 100644 index 00000000000..5552f448e4a --- /dev/null +++ b/contrib/ansible/README.md @@ -0,0 +1,42 @@ +# Kubernetes Ansible + +This playbook helps you to set up a Kubernetes cluster on machines where you +can't or don't want to use the salt scripts and cluster up/down tools. They +can be real hardware, VMs, things in a public cloud, etc. + +## Usage + +* Record the IP address of the machine you want to be your master +* Record the IP address of the machine you want to be your etcd server (often same as master) +* Record the IP addresses of the machines you want to be your minions. (master can be a minion) + +Stick the system information into the 'inventory' file. + +### Configure your cluster + +You will want to look through all of the options in `group_vars/all.yml` and +set the variables to reflect your needs. The options should be described there +in full detail.
+ +### Set up the actual kubernetes cluster + +Now run the setup: + + $ ansible-playbook -i inventory cluster.yml + +In general this will work on very recent Fedora, rawhide or F21. Future work to +support RHEL7, CentOS, and possibly other distros should be forthcoming. + +### You can just set up certain parts instead of doing it all + +Only the kubernetes daemons: + + $ ansible-playbook -i inventory kubernetes-services.yml + +Only etcd: + + $ ansible-playbook -i inventory etcd.yml + +Only flannel: + + $ ansible-playbook -i inventory flannel.yml From a94d8e361e698380726863ea986cb12bd9776831 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Thu, 2 Apr 2015 16:55:14 -0400 Subject: [PATCH 7/8] Only run node creation on the first master True, we can only have one master today, but this gets us ready for when it comes later --- contrib/ansible/roles/master/tasks/main.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/contrib/ansible/roles/master/tasks/main.yml b/contrib/ansible/roles/master/tasks/main.yml index 4273505bb1a..25c0f6ce14f 100644 --- a/contrib/ansible/roles/master/tasks/main.yml +++ b/contrib/ansible/roles/master/tasks/main.yml @@ -28,6 +28,7 @@ changed_when: false with_items: groups['minions'] + when: inventory_hostname == groups['masters'][0] - name: Load minion definition into master command: /usr/bin/kubectl create -f /tmp/node-{{ item }}.json @@ -36,12 +37,14 @@ changed_when: "command_result.rc == 0" with_items: groups['minions'] + when: inventory_hostname == groups['masters'][0] - name: Delete minion definitions from master file: path=/tmp/node-{{ item }}.json state=absent changed_when: false with_items: groups['minions'] + when: inventory_hostname == groups['masters'][0] - include: firewalld.yml when: has_firewalld From 051ab346bd6f1449710c90839b1b741cc18093bb Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Fri, 3 Apr 2015 12:50:18 -0400 Subject: [PATCH 8/8] Use IANA ports for etcd
http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=etcd Use 2379 and 2380, not 4001 and 7001 --- contrib/ansible/roles/etcd/tasks/firewalld.yml | 8 ++++---- contrib/ansible/roles/etcd/tasks/iptables.yml | 8 ++++---- contrib/ansible/roles/etcd/templates/etcd.conf.j2 | 2 +- contrib/ansible/roles/master/templates/apiserver.j2 | 2 +- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/contrib/ansible/roles/etcd/tasks/firewalld.yml b/contrib/ansible/roles/etcd/tasks/firewalld.yml index 0321918b9a7..8d730fd2e47 100644 --- a/contrib/ansible/roles/etcd/tasks/firewalld.yml +++ b/contrib/ansible/roles/etcd/tasks/firewalld.yml @@ -4,13 +4,13 @@ # in case this is also a minion where firewalld turned off ignore_errors: yes with_items: - - 4001 - - 7001 + - 2379 + - 2380 - name: Save firewalld port for etcd firewalld: port={{ item }}/tcp permanent=true state=enabled # in case this is also a minion where firewalld turned off ignore_errors: yes with_items: - - 4001 - - 7001 + - 2379 + - 2380 diff --git a/contrib/ansible/roles/etcd/tasks/iptables.yml b/contrib/ansible/roles/etcd/tasks/iptables.yml index 8be5040c538..a1035247d94 100644 --- a/contrib/ansible/roles/etcd/tasks/iptables.yml +++ b/contrib/ansible/roles/etcd/tasks/iptables.yml @@ -8,10 +8,10 @@ service: name=iptables enabled=yes state=started - name: Open etcd client port with iptables - command: /sbin/iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "etcd_client" - when: etcd_client not in iptablesrules.stdout + command: /sbin/iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "etcd" + when: etcd not in iptablesrules.stdout notify: - Save iptables rules with_items: - - 4001 - - 7001 + - 2379 + - 2380 diff --git a/contrib/ansible/roles/etcd/templates/etcd.conf.j2 b/contrib/ansible/roles/etcd/templates/etcd.conf.j2 index 33e703f2403..1ce5063c903 100644 --- a/contrib/ansible/roles/etcd/templates/etcd.conf.j2 +++ 
b/contrib/ansible/roles/etcd/templates/etcd.conf.j2 @@ -1,4 +1,4 @@ # etcd2.0 ETCD_NAME=default ETCD_DATA_DIR="/var/lib/etcd/default.etcd" -ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:4001" +ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379" diff --git a/contrib/ansible/roles/master/templates/apiserver.j2 b/contrib/ansible/roles/master/templates/apiserver.j2 index 6c56a39d15b..a8a89e826eb 100644 --- a/contrib/ansible/roles/master/templates/apiserver.j2 +++ b/contrib/ansible/roles/master/templates/apiserver.j2 @@ -17,7 +17,7 @@ KUBE_API_ADDRESS="--address=0.0.0.0" KUBE_SERVICE_ADDRESSES="--portal_net={{ kube_service_addresses }}" # Location of the etcd cluster -KUBE_ETCD_SERVERS="--etcd_servers=http://{{ groups['etcd'][0] }}:4001" +KUBE_ETCD_SERVERS="--etcd_servers=http://{{ groups['etcd'][0] }}:2379" # default admission control policies KUBE_ADMISSION_CONTROL="--admission_control=NamespaceAutoProvision,LimitRanger,ResourceQuota"