From 8830e7b84ebf6210cefeda4b6522ad4600897138 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Wed, 17 Jun 2015 15:38:33 -0400 Subject: [PATCH 01/26] Follow upstream default admission controllers --- contrib/ansible/roles/master/templates/apiserver.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/ansible/roles/master/templates/apiserver.j2 b/contrib/ansible/roles/master/templates/apiserver.j2 index c389419c596..875db079f3d 100644 --- a/contrib/ansible/roles/master/templates/apiserver.j2 +++ b/contrib/ansible/roles/master/templates/apiserver.j2 @@ -20,7 +20,7 @@ KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range={{ kube_service_addresses }}" KUBE_ETCD_SERVERS="--etcd_servers=http://{{ groups['etcd'][0] }}:2379" # default admission control policies -KUBE_ADMISSION_CONTROL="--admission_control=NamespaceAutoProvision,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota" +KUBE_ADMISSION_CONTROL="--admission_control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota" # Add your own! 
KUBE_API_ARGS="" From 3af9346596b134b66b27a3dbd486dfec0d02bc19 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Sat, 30 May 2015 10:58:14 -0400 Subject: [PATCH 02/26] etcd requires advertise-client-urls with listen-client-urls otherwise it won't start --- contrib/ansible/roles/etcd/templates/etcd.conf.j2 | 1 + 1 file changed, 1 insertion(+) diff --git a/contrib/ansible/roles/etcd/templates/etcd.conf.j2 b/contrib/ansible/roles/etcd/templates/etcd.conf.j2 index 1ce5063c903..e350d3ae72d 100644 --- a/contrib/ansible/roles/etcd/templates/etcd.conf.j2 +++ b/contrib/ansible/roles/etcd/templates/etcd.conf.j2 @@ -2,3 +2,4 @@ ETCD_NAME=default ETCD_DATA_DIR="/var/lib/etcd/default.etcd" ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379" +ETCD_ADVERTISE_CLIENT_URLS="http://0.0.0.0:2379" From b0b3b0304523e1d18ee7869286794eb36c323725 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Sat, 30 May 2015 10:34:37 -0400 Subject: [PATCH 03/26] Use {{ ansible_pkg_mgr }} instead of yum This should make things work on systems with dnf and even on systems which use apt! 
--- contrib/ansible/roles/etcd/tasks/main.yml | 5 ++++- contrib/ansible/roles/kubernetes/tasks/main.yml | 5 ++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/contrib/ansible/roles/etcd/tasks/main.yml b/contrib/ansible/roles/etcd/tasks/main.yml index c655f5df1d4..67ff4d42c94 100644 --- a/contrib/ansible/roles/etcd/tasks/main.yml +++ b/contrib/ansible/roles/etcd/tasks/main.yml @@ -1,6 +1,9 @@ --- - name: Install etcd - yum: pkg=etcd state=latest + action: "{{ ansible_pkg_mgr }}" + args: + name: etcd + state: latest notify: - restart etcd when: not is_atomic diff --git a/contrib/ansible/roles/kubernetes/tasks/main.yml b/contrib/ansible/roles/kubernetes/tasks/main.yml index b03c55358b3..55ff6a4b7b8 100644 --- a/contrib/ansible/roles/kubernetes/tasks/main.yml +++ b/contrib/ansible/roles/kubernetes/tasks/main.yml @@ -3,7 +3,10 @@ when: ansible_distribution == "Fedora" - name: Install kubernetes - yum: pkg=kubernetes state=latest + action: "{{ ansible_pkg_mgr }}" + args: + name: kubernetes + state: latest notify: - restart daemons when: not is_atomic From 8bb7e14a442ea6019d3154b6e13ff086ff4dd9ec Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Sat, 30 May 2015 10:57:13 -0400 Subject: [PATCH 04/26] Rework cluster.yml and use tags, not files --- contrib/ansible/cluster.yml | 31 ++++++++++++++++++++++--- contrib/ansible/etcd.yml | 6 ----- contrib/ansible/kubernetes-services.yml | 17 -------------- 3 files changed, 28 insertions(+), 26 deletions(-) delete mode 100644 contrib/ansible/etcd.yml delete mode 100644 contrib/ansible/kubernetes-services.yml diff --git a/contrib/ansible/cluster.yml b/contrib/ansible/cluster.yml index 68f7e6a44e6..984c4b090ec 100644 --- a/contrib/ansible/cluster.yml +++ b/contrib/ansible/cluster.yml @@ -1,4 +1,29 @@ -# Set up a whole working cluster! 
-- include: etcd.yml -- include: kubernetes-services.yml +--- +# Install etcd +- hosts: etcd + sudo: yes + roles: + - common + - etcd + tags: + - etcd +# install kube master services +- hosts: masters + sudo: yes + roles: + - common + - kubernetes + - master + tags: + - masters + +# install kubernetes on the nodes +- hosts: minions + sudo: yes + roles: + - common + - kubernetes + - minion + tags: + - minions diff --git a/contrib/ansible/etcd.yml b/contrib/ansible/etcd.yml deleted file mode 100644 index 4a4540a889d..00000000000 --- a/contrib/ansible/etcd.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -- hosts: etcd - sudo: yes - roles: - - common - - etcd diff --git a/contrib/ansible/kubernetes-services.yml b/contrib/ansible/kubernetes-services.yml deleted file mode 100644 index a5e53974f43..00000000000 --- a/contrib/ansible/kubernetes-services.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -- hosts: masters:minions - sudo: yes - roles: - - common - -- hosts: masters - sudo: yes - roles: - - kubernetes - - master - -- hosts: minions - sudo: yes - roles: - - kubernetes - - minion From f274881b9469398ab9b11a12153d6cc28df1ba16 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Sat, 30 May 2015 10:59:51 -0400 Subject: [PATCH 05/26] do not register nodes to apiserver They now register themselves --- contrib/ansible/roles/master/tasks/main.yml | 23 ------------------- .../ansible/roles/master/templates/node.j2 | 16 ------------- 2 files changed, 39 deletions(-) delete mode 100644 contrib/ansible/roles/master/templates/node.j2 diff --git a/contrib/ansible/roles/master/tasks/main.yml b/contrib/ansible/roles/master/tasks/main.yml index 25c0f6ce14f..1ef7be8789a 100644 --- a/contrib/ansible/roles/master/tasks/main.yml +++ b/contrib/ansible/roles/master/tasks/main.yml @@ -23,29 +23,6 @@ - name: Enable scheduler service: name=kube-scheduler enabled=yes state=started -- name: Copy minion definition json files to master - template: src=node.j2 dest=/tmp/node-{{ item }}.json - changed_when: false - 
with_items: - groups['minions'] - when: inventory_hostname == groups['masters'][0] - -- name: Load minion definition into master - command: /usr/bin/kubectl create -f /tmp/node-{{ item }}.json - register: command_result - failed_when: command_result.rc != 0 and 'already exists' not in command_result.stderr - changed_when: "command_result.rc == 0" - with_items: - groups['minions'] - when: inventory_hostname == groups['masters'][0] - -- name: Delete minion definitions from master - file: path=/tmp/node-{{ item }}.json state=absent - changed_when: false - with_items: - groups['minions'] - when: inventory_hostname == groups['masters'][0] - - include: firewalld.yml when: has_firewalld diff --git a/contrib/ansible/roles/master/templates/node.j2 b/contrib/ansible/roles/master/templates/node.j2 deleted file mode 100644 index f96459f8e09..00000000000 --- a/contrib/ansible/roles/master/templates/node.j2 +++ /dev/null @@ -1,16 +0,0 @@ -{ - "apiVersion": "v1beta3", - "kind": "Node", - "metadata": { - "name": "{{ item }}" - }, - "spec": { - "externalID": "{{ item }}" - }, - "status": { - "capacity": { - "cpu": "1", - "memory": "1" - } - } -} From c4ba90337cecb2e58777691bcd39599f8d201806 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Sat, 30 May 2015 11:07:19 -0400 Subject: [PATCH 06/26] Only install kube-master on the master and node on the nodes --- contrib/ansible/roles/kubernetes/tasks/main.yml | 9 --------- contrib/ansible/roles/master/tasks/main.yml | 9 +++++++++ contrib/ansible/roles/minion/tasks/main.yml | 9 +++++++++ 3 files changed, 18 insertions(+), 9 deletions(-) diff --git a/contrib/ansible/roles/kubernetes/tasks/main.yml b/contrib/ansible/roles/kubernetes/tasks/main.yml index 55ff6a4b7b8..ac004f052b1 100644 --- a/contrib/ansible/roles/kubernetes/tasks/main.yml +++ b/contrib/ansible/roles/kubernetes/tasks/main.yml @@ -2,15 +2,6 @@ - include: fedora.yml when: ansible_distribution == "Fedora" -- name: Install kubernetes - action: "{{ ansible_pkg_mgr }}" - args: - 
name: kubernetes - state: latest - notify: - - restart daemons - when: not is_atomic - - name: write the global config file template: src=config.j2 dest=/etc/kubernetes/config notify: diff --git a/contrib/ansible/roles/master/tasks/main.yml b/contrib/ansible/roles/master/tasks/main.yml index 1ef7be8789a..aadbbd4bd52 100644 --- a/contrib/ansible/roles/master/tasks/main.yml +++ b/contrib/ansible/roles/master/tasks/main.yml @@ -1,4 +1,13 @@ --- +- name: Install kubernetes + action: "{{ ansible_pkg_mgr }}" + args: + name: kubernetes-master + state: latest + notify: + - restart daemons + when: not is_atomic + - name: write the config file for the api server template: src=apiserver.j2 dest=/etc/kubernetes/apiserver notify: diff --git a/contrib/ansible/roles/minion/tasks/main.yml b/contrib/ansible/roles/minion/tasks/main.yml index 2cafd18639a..a24d1293725 100644 --- a/contrib/ansible/roles/minion/tasks/main.yml +++ b/contrib/ansible/roles/minion/tasks/main.yml @@ -1,4 +1,13 @@ --- +- name: Install kubernetes + action: "{{ ansible_pkg_mgr }}" + args: + name: kubernetes-node + state: latest + notify: + - restart daemons + when: not is_atomic + - name: write the config files for kubelet template: src=kubelet.j2 dest=/etc/kubernetes/kubelet notify: From f66395283ed238d881f69afe36488b576fe1a909 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Sat, 30 May 2015 11:16:19 -0400 Subject: [PATCH 07/26] Rename minion->node --- contrib/ansible/README.md | 2 +- contrib/ansible/cluster.yml | 6 +++--- contrib/ansible/inventory | 8 ++++---- contrib/ansible/roles/etcd/tasks/firewalld.yml | 4 ++-- contrib/ansible/roles/master/tasks/firewalld.yml | 4 ++-- contrib/ansible/roles/master/templates/apiserver.j2 | 2 +- contrib/ansible/roles/{minion => node}/files/proxy | 0 contrib/ansible/roles/{minion => node}/handlers/main.yml | 0 .../ansible/roles/{minion => node}/tasks/firewalld.yml | 0 contrib/ansible/roles/{minion => node}/tasks/iptables.yml | 0 contrib/ansible/roles/{minion => 
node}/tasks/main.yml | 0 .../ansible/roles/{minion => node}/templates/kubelet.j2 | 2 +- 12 files changed, 14 insertions(+), 14 deletions(-) rename contrib/ansible/roles/{minion => node}/files/proxy (100%) rename contrib/ansible/roles/{minion => node}/handlers/main.yml (100%) rename contrib/ansible/roles/{minion => node}/tasks/firewalld.yml (100%) rename contrib/ansible/roles/{minion => node}/tasks/iptables.yml (100%) rename contrib/ansible/roles/{minion => node}/tasks/main.yml (100%) rename contrib/ansible/roles/{minion => node}/templates/kubelet.j2 (92%) diff --git a/contrib/ansible/README.md b/contrib/ansible/README.md index d5ba1ed7772..acaeea82ae0 100644 --- a/contrib/ansible/README.md +++ b/contrib/ansible/README.md @@ -8,7 +8,7 @@ can be real hardware, VMs, things in a public cloud, etc. * Record the IP address of which machine you want to be your master * Record the IP address of the machine you want to be your etcd server (often same as master) -* Record the IP addresses of the machines you want to be your minions. (master can be a minion) +* Record the IP addresses of the machines you want to be your nodes. (master can be a node) Stick the system information into the 'inventory' file. 
diff --git a/contrib/ansible/cluster.yml b/contrib/ansible/cluster.yml index 984c4b090ec..e951d91617b 100644 --- a/contrib/ansible/cluster.yml +++ b/contrib/ansible/cluster.yml @@ -19,11 +19,11 @@ - masters # install kubernetes on the nodes -- hosts: minions +- hosts: nodes sudo: yes roles: - common - kubernetes - - minion + - node tags: - - minions + - nodes diff --git a/contrib/ansible/inventory b/contrib/ansible/inventory index 07af7a61b30..7b43d91c3e6 100644 --- a/contrib/ansible/inventory +++ b/contrib/ansible/inventory @@ -1,10 +1,10 @@ -[masters] -10.0.0.1 - [etcd] 10.0.0.2 -[minions] +[masters] +10.0.0.1 + +[nodes] 10.0.0.3 10.0.0.4 10.0.0.5 diff --git a/contrib/ansible/roles/etcd/tasks/firewalld.yml b/contrib/ansible/roles/etcd/tasks/firewalld.yml index 8d730fd2e47..dc2978c314b 100644 --- a/contrib/ansible/roles/etcd/tasks/firewalld.yml +++ b/contrib/ansible/roles/etcd/tasks/firewalld.yml @@ -1,7 +1,7 @@ --- - name: Open firewalld port for etcd firewalld: port={{ item }}/tcp permanent=false state=enabled - # in case this is also a minion where firewalld turned off + # in case this is also a node where firewalld turned off ignore_errors: yes with_items: - 2379 @@ -9,7 +9,7 @@ - name: Save firewalld port for etcd firewalld: port={{ item }}/tcp permanent=true state=enabled - # in case this is also a minion where firewalld turned off + # in case this is also a node where firewalld turned off ignore_errors: yes with_items: - 2379 diff --git a/contrib/ansible/roles/master/tasks/firewalld.yml b/contrib/ansible/roles/master/tasks/firewalld.yml index 752094dc997..ffb7b65fbee 100644 --- a/contrib/ansible/roles/master/tasks/firewalld.yml +++ b/contrib/ansible/roles/master/tasks/firewalld.yml @@ -1,10 +1,10 @@ --- - name: Open firewalld port for apiserver firewalld: port=8080/tcp permanent=false state=enabled - # in case this is also a minion with firewalld turned off + # in case this is also a node with firewalld turned off ignore_errors: yes - name: Save firewalld 
port for apiserver firewalld: port=8080/tcp permanent=true state=enabled - # in case this is also a minion with firewalld turned off + # in case this is also a node with firewalld turned off ignore_errors: yes diff --git a/contrib/ansible/roles/master/templates/apiserver.j2 b/contrib/ansible/roles/master/templates/apiserver.j2 index 875db079f3d..82114506754 100644 --- a/contrib/ansible/roles/master/templates/apiserver.j2 +++ b/contrib/ansible/roles/master/templates/apiserver.j2 @@ -10,7 +10,7 @@ KUBE_API_ADDRESS="--address=0.0.0.0" # The port on the local server to listen on. # KUBE_API_PORT="--port=8080" -# Port minions listen on +# Port nodes listen on # KUBELET_PORT="--kubelet_port=10250" # Address range to use for services diff --git a/contrib/ansible/roles/minion/files/proxy b/contrib/ansible/roles/node/files/proxy similarity index 100% rename from contrib/ansible/roles/minion/files/proxy rename to contrib/ansible/roles/node/files/proxy diff --git a/contrib/ansible/roles/minion/handlers/main.yml b/contrib/ansible/roles/node/handlers/main.yml similarity index 100% rename from contrib/ansible/roles/minion/handlers/main.yml rename to contrib/ansible/roles/node/handlers/main.yml diff --git a/contrib/ansible/roles/minion/tasks/firewalld.yml b/contrib/ansible/roles/node/tasks/firewalld.yml similarity index 100% rename from contrib/ansible/roles/minion/tasks/firewalld.yml rename to contrib/ansible/roles/node/tasks/firewalld.yml diff --git a/contrib/ansible/roles/minion/tasks/iptables.yml b/contrib/ansible/roles/node/tasks/iptables.yml similarity index 100% rename from contrib/ansible/roles/minion/tasks/iptables.yml rename to contrib/ansible/roles/node/tasks/iptables.yml diff --git a/contrib/ansible/roles/minion/tasks/main.yml b/contrib/ansible/roles/node/tasks/main.yml similarity index 100% rename from contrib/ansible/roles/minion/tasks/main.yml rename to contrib/ansible/roles/node/tasks/main.yml diff --git a/contrib/ansible/roles/minion/templates/kubelet.j2 
b/contrib/ansible/roles/node/templates/kubelet.j2 similarity index 92% rename from contrib/ansible/roles/minion/templates/kubelet.j2 rename to contrib/ansible/roles/node/templates/kubelet.j2 index d1edc84cb56..1dc96df5393 100644 --- a/contrib/ansible/roles/minion/templates/kubelet.j2 +++ b/contrib/ansible/roles/node/templates/kubelet.j2 @@ -1,5 +1,5 @@ ### -# kubernetes kubelet (minion) config +# kubernetes kubelet (node) config # The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces) KUBELET_ADDRESS="--address=0.0.0.0" From fec5e789fd8feb340435d9e12df0dbf55a6dd1aa Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Sat, 30 May 2015 11:26:04 -0400 Subject: [PATCH 08/26] Shorthand to run the ansible setup command --- contrib/ansible/README.md | 31 ++++++++++++++++++------------- contrib/ansible/setup.sh | 18 ++++++++++++++++++ 2 files changed, 36 insertions(+), 13 deletions(-) create mode 100755 contrib/ansible/setup.sh diff --git a/contrib/ansible/README.md b/contrib/ansible/README.md index acaeea82ae0..189e4b5d554 100644 --- a/contrib/ansible/README.md +++ b/contrib/ansible/README.md @@ -4,13 +4,15 @@ This playbook helps you to set up a Kubernetes cluster on machines where you can't or don't want to use the salt scripts and cluster up/down tools. They can be real hardware, VMs, things in a public cloud, etc. -## Usage +## Before starting -* Record the IP address of which machine you want to be your master -* Record the IP address of the machine you want to be your etcd server (often same as master) -* Record the IP addresses of the machines you want to be your nodes. (master can be a node) +* Record the IP address/hostname of which machine you want to be your master (only support a single master) +* Record the IP address/hostname of the machine you want to be your etcd server (often same as master, only one) +* Record the IP addresses/hostname of the machines you want to be your nodes. 
(the master can also be a node) -Stick the system information into the 'inventory' file. +### Configure the inventory file + +Stick the system information gathered above into the 'inventory' file. ### Configure your cluster @@ -22,24 +24,27 @@ in full detail. Now run the setup: - $ ansible-playbook -i inventory cluster.yml +`$ ./setup.sh` In generel this will work on very recent Fedora, rawhide or F21. Future work to support RHEL7, CentOS, and possible other distros should be forthcoming. ### You can just set up certain parts instead of doing it all -Only the kubernetes daemons: - - $ ansible-playbook -i inventory kubernetes-services.yml - Only etcd: - $ ansible-playbook -i inventory etcd.yml +`$ ./setup.sh --tags=etcd` -Only flannel: +Only the kubernetes master: - $ ansible-playbook -i inventory flannel.yml +`$ ./setup.sh --tags=masters` +Only the kubernetes nodes: + +`$ ./setup.sh --tags=nodes` + +### You may overwrite the inventory file by doing + +`INVENTORY=myinventory ./setup.sh` [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/contrib/ansible/README.md?pixel)]() diff --git a/contrib/ansible/setup.sh b/contrib/ansible/setup.sh new file mode 100755 index 00000000000..43ea90affe6 --- /dev/null +++ b/contrib/ansible/setup.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +# Copyright 2015 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +inventory=${INVENTORY:-inventory} +ansible-playbook -i ${inventory} cluster.yml $@ From bc6c425ab4dd6011481d1a754112648fdd437eaf Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Thu, 18 Jun 2015 10:21:49 -0400 Subject: [PATCH 09/26] Use 'command' not 'shell' even thought it doesn't matter --- contrib/ansible/roles/etcd/tasks/iptables.yml | 2 +- contrib/ansible/roles/master/tasks/iptables.yml | 2 +- contrib/ansible/roles/node/tasks/iptables.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/contrib/ansible/roles/etcd/tasks/iptables.yml b/contrib/ansible/roles/etcd/tasks/iptables.yml index a1035247d94..d8329955342 100644 --- a/contrib/ansible/roles/etcd/tasks/iptables.yml +++ b/contrib/ansible/roles/etcd/tasks/iptables.yml @@ -1,6 +1,6 @@ --- - name: Get iptables rules - shell: iptables -L + command: iptables -L register: iptablesrules always_run: yes diff --git a/contrib/ansible/roles/master/tasks/iptables.yml b/contrib/ansible/roles/master/tasks/iptables.yml index a9570f108a9..e0aef1ef4c9 100644 --- a/contrib/ansible/roles/master/tasks/iptables.yml +++ b/contrib/ansible/roles/master/tasks/iptables.yml @@ -1,6 +1,6 @@ --- - name: Get iptables rules - shell: iptables -L + command: iptables -L register: iptablesrules always_run: yes diff --git a/contrib/ansible/roles/node/tasks/iptables.yml b/contrib/ansible/roles/node/tasks/iptables.yml index 5d5ae17f733..53fc09c9521 100644 --- a/contrib/ansible/roles/node/tasks/iptables.yml +++ b/contrib/ansible/roles/node/tasks/iptables.yml @@ -1,6 +1,6 @@ --- - name: Get iptables rules - shell: iptables -L + command: iptables -L register: iptablesrules always_run: yes From a95243450e16ecffe40cfeaa683f6087c8eb69fe Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Tue, 31 Mar 2015 18:05:36 -0400 Subject: [PATCH 10/26] Set up flannel in your cluster! 
--- contrib/ansible/README.md | 4 ++++ contrib/ansible/cluster.yml | 11 +++++++++ contrib/ansible/group_vars/all.yml | 19 +++++++++++++++ .../ansible/roles/flannel/handlers/main.yml | 18 +++++++++++++++ contrib/ansible/roles/flannel/meta/main.yml | 3 +++ .../ansible/roles/flannel/tasks/client.yml | 17 ++++++++++++++ .../ansible/roles/flannel/tasks/config.yml | 23 +++++++++++++++++++ contrib/ansible/roles/flannel/tasks/main.yml | 5 ++++ .../flannel/templates/flannel-conf.json.j2 | 1 + .../roles/flannel/templates/flanneld.j2 | 11 +++++++++ 10 files changed, 112 insertions(+) create mode 100644 contrib/ansible/roles/flannel/handlers/main.yml create mode 100644 contrib/ansible/roles/flannel/meta/main.yml create mode 100644 contrib/ansible/roles/flannel/tasks/client.yml create mode 100644 contrib/ansible/roles/flannel/tasks/config.yml create mode 100644 contrib/ansible/roles/flannel/tasks/main.yml create mode 100644 contrib/ansible/roles/flannel/templates/flannel-conf.json.j2 create mode 100644 contrib/ansible/roles/flannel/templates/flanneld.j2 diff --git a/contrib/ansible/README.md b/contrib/ansible/README.md index 189e4b5d554..8813f9d7739 100644 --- a/contrib/ansible/README.md +++ b/contrib/ansible/README.md @@ -47,4 +47,8 @@ Only the kubernetes nodes: `INVENTORY=myinventory ./setup.sh` +Only flannel: + + $ ./setup.sh --tags=flannel + [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/contrib/ansible/README.md?pixel)]() diff --git a/contrib/ansible/cluster.yml b/contrib/ansible/cluster.yml index e951d91617b..11829488a0e 100644 --- a/contrib/ansible/cluster.yml +++ b/contrib/ansible/cluster.yml @@ -8,6 +8,17 @@ tags: - etcd +# install flannel +- hosts: + - etcd + - masters + - nodes + sudo: yes + roles: + - flannel + tags: + - flannel + # install kube master services - hosts: masters sudo: yes diff --git a/contrib/ansible/group_vars/all.yml b/contrib/ansible/group_vars/all.yml index ae9d7dc2fb0..456854204a7 100644 --- 
a/contrib/ansible/group_vars/all.yml +++ b/contrib/ansible/group_vars/all.yml @@ -1,3 +1,7 @@ +# Only used for the location to store flannel info in etcd, but may be used +# for dns purposes and cluster id purposes in the future. +cluster_name: kube.local + # Account name of remote user. Ansible will use this user account to ssh into # the managed machines. The user must be able to use sudo without asking # for password unless ansible_sudo_pass is set @@ -15,3 +19,18 @@ ansible_ssh_user: root # This range must not conflict with anything in your infrastructure. These # addresses do not need to be routable and must just be an unused block of space. kube_service_addresses: 10.254.0.0/16 + +# Flannel internal network (optional). When flannel is used, it will assign IP +# addresses from this range to individual pods. +# This network must be unused in your network infrastructure! +flannel_subnet: 172.16.0.0 + +# Flannel internal network total size (optional). This is the prefix of the +# entire flannel overlay network. So the entirety of 172.16.0.0/12 must be +# unused in your environment. +flannel_prefix: 12 + +# Flannel internal network (optional). This is the size allocation that flannel +# will give to each node on your network. With these defaults you should have +# room for 4096 nodes with 254 pods per node. 
+flannel_host_prefix: 24 diff --git a/contrib/ansible/roles/flannel/handlers/main.yml b/contrib/ansible/roles/flannel/handlers/main.yml new file mode 100644 index 00000000000..e64c241cec4 --- /dev/null +++ b/contrib/ansible/roles/flannel/handlers/main.yml @@ -0,0 +1,18 @@ +--- +- name: restart flannel + service: name=flanneld state=restarted + notify: + - stop docker + - delete docker0 + - start docker + when: inventory_hostname in groups['nodes'] + +- name: stop docker + service: name=docker state=stopped + +- name: delete docker0 + command: ip link delete docker0 + ignore_errors: yes + +- name: start docker + service: name=docker state=started diff --git a/contrib/ansible/roles/flannel/meta/main.yml b/contrib/ansible/roles/flannel/meta/main.yml new file mode 100644 index 00000000000..0764e314824 --- /dev/null +++ b/contrib/ansible/roles/flannel/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - { role: common } diff --git a/contrib/ansible/roles/flannel/tasks/client.yml b/contrib/ansible/roles/flannel/tasks/client.yml new file mode 100644 index 00000000000..6f124404a72 --- /dev/null +++ b/contrib/ansible/roles/flannel/tasks/client.yml @@ -0,0 +1,17 @@ +--- +- name: Install flannel + action: "{{ ansible_pkg_mgr }}" + args: + name: flannel + state: latest + when: not is_atomic + +- name: Install Flannel config file + template: src=flanneld.j2 dest=/etc/sysconfig/flanneld + notify: + - restart flannel + +- name: Launch Flannel + service: name=flanneld state=started enabled=yes + notify: + - restart flannel diff --git a/contrib/ansible/roles/flannel/tasks/config.yml b/contrib/ansible/roles/flannel/tasks/config.yml new file mode 100644 index 00000000000..654576fe8b1 --- /dev/null +++ b/contrib/ansible/roles/flannel/tasks/config.yml @@ -0,0 +1,23 @@ +--- +- name: Set facts about etcdctl command + set_fact: + peers: "{% for hostname in groups['etcd'] %}http://{{ hostname }}:2379{% if not loop.last %},{% endif %}{% endfor %}" + conf_file: "/tmp/flannel-conf.json" + 
conf_loc: "/{{ cluster_name }}/network/config" + run_once: true + delegate_to: "{{ groups['etcd'][0] }}" + +- name: Create flannel config file to go in etcd + template: src=flannel-conf.json.j2 dest={{ conf_file }} + run_once: true + delegate_to: "{{ groups['etcd'][0] }}" + +- name: Load the flannel config file into etcd + shell: "/usr/bin/etcdctl --no-sync --peers={{ peers }} set {{ conf_loc }} < {{ conf_file }}" + run_once: true + delegate_to: "{{ groups['etcd'][0] }}" + +- name: Clean up the flannel config file + file: path=/tmp/flannel-config.json state=absent + run_once: true + delegate_to: "{{ groups['etcd'][0] }}" diff --git a/contrib/ansible/roles/flannel/tasks/main.yml b/contrib/ansible/roles/flannel/tasks/main.yml new file mode 100644 index 00000000000..be7e7501b91 --- /dev/null +++ b/contrib/ansible/roles/flannel/tasks/main.yml @@ -0,0 +1,5 @@ +--- +- include: config.yml + +- include: client.yml + when: inventory_hostname in groups['masters'] + groups['nodes'] diff --git a/contrib/ansible/roles/flannel/templates/flannel-conf.json.j2 b/contrib/ansible/roles/flannel/templates/flannel-conf.json.j2 new file mode 100644 index 00000000000..15c68e5934c --- /dev/null +++ b/contrib/ansible/roles/flannel/templates/flannel-conf.json.j2 @@ -0,0 +1 @@ +{ "Network": "{{ flannel_subnet }}/{{ flannel_prefix }}", "SubnetLen": {{ flannel_host_prefix }}, "Backend": { "Type": "vxlan" } } diff --git a/contrib/ansible/roles/flannel/templates/flanneld.j2 b/contrib/ansible/roles/flannel/templates/flanneld.j2 new file mode 100644 index 00000000000..7345c102168 --- /dev/null +++ b/contrib/ansible/roles/flannel/templates/flanneld.j2 @@ -0,0 +1,11 @@ +# Flanneld configuration options + +# etcd url location. Point this to the server where etcd runs +FLANNEL_ETCD="http://{{ groups['etcd'][0] }}:2379" + +# etcd config key. 
This is the configuration key that flannel queries +# For address range assignment +FLANNEL_ETCD_KEY="/{{ cluster_name }}/network" + +# Any additional options that you want to pass +#FLANNEL_OPTIONS="" From 70b7358d00dbfbf8f064e0ef35f89a87e411a0d9 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Sat, 30 May 2015 12:05:22 -0400 Subject: [PATCH 11/26] If multiple etcd servers defined, point at all of them (we still can only set up a single server though) --- contrib/ansible/roles/flannel/templates/flanneld.j2 | 2 +- contrib/ansible/roles/master/templates/apiserver.j2 | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/ansible/roles/flannel/templates/flanneld.j2 b/contrib/ansible/roles/flannel/templates/flanneld.j2 index 7345c102168..e24ce895bf2 100644 --- a/contrib/ansible/roles/flannel/templates/flanneld.j2 +++ b/contrib/ansible/roles/flannel/templates/flanneld.j2 @@ -1,7 +1,7 @@ # Flanneld configuration options # etcd url location. Point this to the server where etcd runs -FLANNEL_ETCD="http://{{ groups['etcd'][0] }}:2379" +FLANNEL_ETCD="{% for node in groups['etcd'] %}http://{{ node }}:2379{% if not loop.last %},{% endif %}{% endfor %}" # etcd config key. 
This is the configuration key that flannel queries # For address range assignment diff --git a/contrib/ansible/roles/master/templates/apiserver.j2 b/contrib/ansible/roles/master/templates/apiserver.j2 index 82114506754..778d2877395 100644 --- a/contrib/ansible/roles/master/templates/apiserver.j2 +++ b/contrib/ansible/roles/master/templates/apiserver.j2 @@ -17,7 +17,7 @@ KUBE_API_ADDRESS="--address=0.0.0.0" KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range={{ kube_service_addresses }}" # Location of the etcd cluster -KUBE_ETCD_SERVERS="--etcd_servers=http://{{ groups['etcd'][0] }}:2379" +KUBE_ETCD_SERVERS="--etcd_servers={% for node in groups['etcd'] %}http://{{ node }}:2379{% if not loop.last %},{% endif %}{% endfor %}" # default admission control policies KUBE_ADMISSION_CONTROL="--admission_control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota" From 9816488179149b3173e0dad86a53e7259b56c969 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Mon, 15 Jun 2015 15:43:03 -0400 Subject: [PATCH 12/26] Do not restart etcd quickly https://github.com/coreos/etcd/pull/2878 etcd destroys itself if you start and then restart etcd very rapidly. So don't do that any more. 
--- contrib/ansible/roles/etcd/handlers/main.yml | 1 + contrib/ansible/roles/etcd/tasks/main.yml | 6 +++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/contrib/ansible/roles/etcd/handlers/main.yml b/contrib/ansible/roles/etcd/handlers/main.yml index 9218108772c..11ec38485fb 100644 --- a/contrib/ansible/roles/etcd/handlers/main.yml +++ b/contrib/ansible/roles/etcd/handlers/main.yml @@ -1,6 +1,7 @@ --- - name: restart etcd service: name=etcd state=restarted + when: etcd_started.changed == false - name: Save iptables rules command: service iptables save diff --git a/contrib/ansible/roles/etcd/tasks/main.yml b/contrib/ansible/roles/etcd/tasks/main.yml index 67ff4d42c94..527bfd6abc0 100644 --- a/contrib/ansible/roles/etcd/tasks/main.yml +++ b/contrib/ansible/roles/etcd/tasks/main.yml @@ -14,7 +14,11 @@ - restart etcd - name: Enable etcd - service: name=etcd enabled=yes state=started + service: name=etcd enabled=yes + +- name: Start etcd + service: name=etcd state=started + register: etcd_started - include: firewalld.yml when: has_firewalld From f259892cc92d451b8d881fbd672f950e3f24fc8b Mon Sep 17 00:00:00 2001 From: Jeff Bean Date: Wed, 17 Jun 2015 12:02:39 -0400 Subject: [PATCH 13/26] Basic Generic File Cleanups No major changes, just renaming, whitespace, etc --- contrib/ansible/cluster.yml | 32 +++++++++---------- contrib/ansible/group_vars/all.yml | 2 +- contrib/ansible/inventory | 12 +++---- contrib/ansible/roles/common/tasks/rpm.yml | 2 +- .../ansible/roles/etcd/tasks/firewalld.yml | 8 ++--- contrib/ansible/roles/etcd/tasks/iptables.yml | 6 ++-- contrib/ansible/roles/etcd/tasks/main.yml | 4 +-- .../ansible/roles/flannel/tasks/client.yml | 4 +-- .../ansible/roles/kubernetes/tasks/fedora.yml | 2 +- .../ansible/roles/kubernetes/tasks/main.yml | 2 +- .../ansible/roles/master/handlers/main.yml | 6 ++-- .../ansible/roles/master/tasks/iptables.yml | 2 +- contrib/ansible/roles/master/tasks/main.yml | 12 +++---- contrib/ansible/roles/node/handlers/main.yml 
| 4 +-- contrib/ansible/roles/node/tasks/iptables.yml | 2 +- contrib/ansible/roles/node/tasks/main.yml | 10 +++--- 16 files changed, 55 insertions(+), 55 deletions(-) diff --git a/contrib/ansible/cluster.yml b/contrib/ansible/cluster.yml index 11829488a0e..4bdfd80b004 100644 --- a/contrib/ansible/cluster.yml +++ b/contrib/ansible/cluster.yml @@ -3,38 +3,38 @@ - hosts: etcd sudo: yes roles: - - common - - etcd + - common + - etcd tags: - - etcd + - etcd # install flannel - hosts: - - etcd - - masters - - nodes + - etcd + - masters + - nodes sudo: yes roles: - - flannel + - flannel tags: - - flannel + - flannel # install kube master services - hosts: masters sudo: yes roles: - - common - - kubernetes - - master + - common + - kubernetes + - master tags: - - masters + - masters # install kubernetes on the nodes - hosts: nodes sudo: yes roles: - - common - - kubernetes - - node + - common + - kubernetes + - node tags: - - nodes + - nodes diff --git a/contrib/ansible/group_vars/all.yml b/contrib/ansible/group_vars/all.yml index 456854204a7..89ffd8090f8 100644 --- a/contrib/ansible/group_vars/all.yml +++ b/contrib/ansible/group_vars/all.yml @@ -5,7 +5,7 @@ cluster_name: kube.local # Account name of remote user. Ansible will use this user account to ssh into # the managed machines. The user must be able to use sudo without asking # for password unless ansible_sudo_pass is set -ansible_ssh_user: root +#ansible_ssh_user: root # password for the ansible_ssh_user. If this is unset you will need to set up # ssh keys so a password is not needed. 
diff --git a/contrib/ansible/inventory b/contrib/ansible/inventory index 7b43d91c3e6..a03104b113b 100644 --- a/contrib/ansible/inventory +++ b/contrib/ansible/inventory @@ -1,10 +1,10 @@ -[etcd] -10.0.0.2 [masters] -10.0.0.1 +kube-master-test-01.example.com + +[etcd] +kube-master-test-01.example.com [nodes] -10.0.0.3 -10.0.0.4 -10.0.0.5 +kube-minion-test-01.example.com +kube-minion-test-02.example.com diff --git a/contrib/ansible/roles/common/tasks/rpm.yml b/contrib/ansible/roles/common/tasks/rpm.yml index 959be43dbdf..9206e07e4e7 100644 --- a/contrib/ansible/roles/common/tasks/rpm.yml +++ b/contrib/ansible/roles/common/tasks/rpm.yml @@ -1,5 +1,5 @@ --- -- name: Determine if firewalld installed +- name: RPM | Determine if firewalld installed command: "rpm -q firewalld" register: s changed_when: false diff --git a/contrib/ansible/roles/etcd/tasks/firewalld.yml b/contrib/ansible/roles/etcd/tasks/firewalld.yml index dc2978c314b..10a47e60254 100644 --- a/contrib/ansible/roles/etcd/tasks/firewalld.yml +++ b/contrib/ansible/roles/etcd/tasks/firewalld.yml @@ -4,13 +4,13 @@ # in case this is also a node where firewalld turned off ignore_errors: yes with_items: - - 2379 - - 2380 + - 2379 + - 2380 - name: Save firewalld port for etcd firewalld: port={{ item }}/tcp permanent=true state=enabled # in case this is also a node where firewalld turned off ignore_errors: yes with_items: - - 2379 - - 2380 + - 2379 + - 2380 diff --git a/contrib/ansible/roles/etcd/tasks/iptables.yml b/contrib/ansible/roles/etcd/tasks/iptables.yml index d8329955342..b6eb99eb3f5 100644 --- a/contrib/ansible/roles/etcd/tasks/iptables.yml +++ b/contrib/ansible/roles/etcd/tasks/iptables.yml @@ -11,7 +11,7 @@ command: /sbin/iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "etcd" when: etcd not in iptablesrules.stdout notify: - - Save iptables rules + - Save iptables rules with_items: - - 2379 - - 2380 + - 2379 + - 2380 diff --git a/contrib/ansible/roles/etcd/tasks/main.yml 
b/contrib/ansible/roles/etcd/tasks/main.yml index 527bfd6abc0..d5540f5af0f 100644 --- a/contrib/ansible/roles/etcd/tasks/main.yml +++ b/contrib/ansible/roles/etcd/tasks/main.yml @@ -5,13 +5,13 @@ name: etcd state: latest notify: - - restart etcd + - restart etcd when: not is_atomic - name: Write etcd config file template: src=etcd.conf.j2 dest=/etc/etcd/etcd.conf notify: - - restart etcd + - restart etcd - name: Enable etcd service: name=etcd enabled=yes diff --git a/contrib/ansible/roles/flannel/tasks/client.yml b/contrib/ansible/roles/flannel/tasks/client.yml index 6f124404a72..c99ae3b6947 100644 --- a/contrib/ansible/roles/flannel/tasks/client.yml +++ b/contrib/ansible/roles/flannel/tasks/client.yml @@ -9,9 +9,9 @@ - name: Install Flannel config file template: src=flanneld.j2 dest=/etc/sysconfig/flanneld notify: - - restart flannel + - restart flannel - name: Launch Flannel service: name=flanneld state=started enabled=yes notify: - - restart flannel + - restart flannel diff --git a/contrib/ansible/roles/kubernetes/tasks/fedora.yml b/contrib/ansible/roles/kubernetes/tasks/fedora.yml index be021d2d9d3..ad326e3c5c4 100644 --- a/contrib/ansible/roles/kubernetes/tasks/fedora.yml +++ b/contrib/ansible/roles/kubernetes/tasks/fedora.yml @@ -1,4 +1,4 @@ --- -- name: Remove docker window manager on F20 +- name: Fedora | Remove docker window manager on F20 yum: pkg=docker state=absent when: not is_atomic and ansible_distribution_major_version == "20" diff --git a/contrib/ansible/roles/kubernetes/tasks/main.yml b/contrib/ansible/roles/kubernetes/tasks/main.yml index ac004f052b1..f7eccd4425d 100644 --- a/contrib/ansible/roles/kubernetes/tasks/main.yml +++ b/contrib/ansible/roles/kubernetes/tasks/main.yml @@ -5,4 +5,4 @@ - name: write the global config file template: src=config.j2 dest=/etc/kubernetes/config notify: - - restart daemons + - restart daemons diff --git a/contrib/ansible/roles/master/handlers/main.yml b/contrib/ansible/roles/master/handlers/main.yml index 
fc40e9017de..10985570a91 100644 --- a/contrib/ansible/roles/master/handlers/main.yml +++ b/contrib/ansible/roles/master/handlers/main.yml @@ -2,9 +2,9 @@ - name: restart daemons command: /bin/true notify: - - restart apiserver - - restart controller-manager - - restart scheduler + - restart apiserver + - restart controller-manager + - restart scheduler - name: restart apiserver service: name=kube-apiserver state=restarted diff --git a/contrib/ansible/roles/master/tasks/iptables.yml b/contrib/ansible/roles/master/tasks/iptables.yml index e0aef1ef4c9..cc9e2d9d1ce 100644 --- a/contrib/ansible/roles/master/tasks/iptables.yml +++ b/contrib/ansible/roles/master/tasks/iptables.yml @@ -8,7 +8,7 @@ command: /sbin/iptables -I INPUT 1 -p tcp --dport 8080 -j ACCEPT -m comment --comment "kube-apiserver" when: kube-apiserver not in iptablesrules.stdout notify: - - restart iptables + - restart iptables - name: Save iptables rules command: service iptables save diff --git a/contrib/ansible/roles/master/tasks/main.yml b/contrib/ansible/roles/master/tasks/main.yml index aadbbd4bd52..d5b66204b7c 100644 --- a/contrib/ansible/roles/master/tasks/main.yml +++ b/contrib/ansible/roles/master/tasks/main.yml @@ -2,26 +2,26 @@ - name: Install kubernetes action: "{{ ansible_pkg_mgr }}" args: - name: kubernetes-master - state: latest + name: kubernetes-master + state: latest notify: - - restart daemons + - restart daemons when: not is_atomic - name: write the config file for the api server template: src=apiserver.j2 dest=/etc/kubernetes/apiserver notify: - - restart apiserver + - restart apiserver - name: write the config file for the controller-manager copy: src=controller-manager dest=/etc/kubernetes/controller-manager notify: - - restart controller-manager + - restart controller-manager - name: write the config file for the scheduler copy: src=scheduler dest=/etc/kubernetes/scheduler notify: - - restart scheduler + - restart scheduler - name: Enable apiserver service: name=kube-apiserver 
enabled=yes state=started diff --git a/contrib/ansible/roles/node/handlers/main.yml b/contrib/ansible/roles/node/handlers/main.yml index 196a3a6e5d9..fd00563b38b 100644 --- a/contrib/ansible/roles/node/handlers/main.yml +++ b/contrib/ansible/roles/node/handlers/main.yml @@ -2,8 +2,8 @@ - name: restart daemons command: /bin/true notify: - - restart kubelet - - restart proxy + - restart kubelet + - restart proxy - name: restart kubelet service: name=kubelet state=restarted diff --git a/contrib/ansible/roles/node/tasks/iptables.yml b/contrib/ansible/roles/node/tasks/iptables.yml index 53fc09c9521..afb801e98d8 100644 --- a/contrib/ansible/roles/node/tasks/iptables.yml +++ b/contrib/ansible/roles/node/tasks/iptables.yml @@ -11,7 +11,7 @@ command: /sbin/iptables -I INPUT 1 -p tcp --dport 10250 -j ACCEPT -m comment --comment "kubelet" when: kubelet not in iptablesrules.stdout notify: - - restart iptables + - restart iptables - name: Save iptables rules command: service iptables save diff --git a/contrib/ansible/roles/node/tasks/main.yml b/contrib/ansible/roles/node/tasks/main.yml index a24d1293725..501b3399c66 100644 --- a/contrib/ansible/roles/node/tasks/main.yml +++ b/contrib/ansible/roles/node/tasks/main.yml @@ -2,21 +2,21 @@ - name: Install kubernetes action: "{{ ansible_pkg_mgr }}" args: - name: kubernetes-node - state: latest + name: kubernetes-node + state: latest notify: - - restart daemons + - restart daemons when: not is_atomic - name: write the config files for kubelet template: src=kubelet.j2 dest=/etc/kubernetes/kubelet notify: - - restart kubelet + - restart kubelet - name: write the config files for proxy copy: src=proxy dest=/etc/kubernetes/proxy notify: - - restart proxy + - restart proxy - name: Enable kubelet service: name=kubelet enabled=yes state=started From 45bdfeac0cb5293e007b7cc1767d7d9b5b72e93a Mon Sep 17 00:00:00 2001 From: Jeff Bean Date: Wed, 17 Jun 2015 12:18:43 -0400 Subject: [PATCH 14/26] Packge install on CentOS 7 --- 
.../ansible/roles/common/files/virt7-testing.repo | 5 +++++ contrib/ansible/roles/common/tasks/centos.yml | 3 +++ contrib/ansible/roles/common/tasks/main.yml | 3 +++ contrib/ansible/roles/master/tasks/centos.yml | 5 +++++ .../ansible/roles/master/tasks/generic-install.yml | 7 +++++++ contrib/ansible/roles/master/tasks/main.yml | 13 +++++-------- contrib/ansible/roles/node/tasks/centos.yml | 5 +++++ .../ansible/roles/node/tasks/generic-install.yml | 7 +++++++ contrib/ansible/roles/node/tasks/main.yml | 13 +++++-------- 9 files changed, 45 insertions(+), 16 deletions(-) create mode 100644 contrib/ansible/roles/common/files/virt7-testing.repo create mode 100644 contrib/ansible/roles/common/tasks/centos.yml create mode 100644 contrib/ansible/roles/master/tasks/centos.yml create mode 100644 contrib/ansible/roles/master/tasks/generic-install.yml create mode 100644 contrib/ansible/roles/node/tasks/centos.yml create mode 100644 contrib/ansible/roles/node/tasks/generic-install.yml diff --git a/contrib/ansible/roles/common/files/virt7-testing.repo b/contrib/ansible/roles/common/files/virt7-testing.repo new file mode 100644 index 00000000000..d1d3a04c3fc --- /dev/null +++ b/contrib/ansible/roles/common/files/virt7-testing.repo @@ -0,0 +1,5 @@ +[virt7-testing] +name=virt7-testing +baseurl=http://cbs.centos.org/repos/virt7-testing/x86_64/os/ +enabled=0 +gpgcheck=0 diff --git a/contrib/ansible/roles/common/tasks/centos.yml b/contrib/ansible/roles/common/tasks/centos.yml new file mode 100644 index 00000000000..7d72d287d3e --- /dev/null +++ b/contrib/ansible/roles/common/tasks/centos.yml @@ -0,0 +1,3 @@ +--- +- name: CentOS | Install Testing centos7 repo for new tool versions + copy: src=virt7-testing.repo dest=/etc/yum.repos.d/virt7-testing.repo diff --git a/contrib/ansible/roles/common/tasks/main.yml b/contrib/ansible/roles/common/tasks/main.yml index 5404be01616..caccf33d6a5 100644 --- a/contrib/ansible/roles/common/tasks/main.yml +++ 
b/contrib/ansible/roles/common/tasks/main.yml @@ -16,3 +16,6 @@ # collect information about what packages are installed - include: rpm.yml when: ansible_pkg_mgr == "yum" + +- include: centos.yml + when: ansible_distribution == "CentOS" diff --git a/contrib/ansible/roles/master/tasks/centos.yml b/contrib/ansible/roles/master/tasks/centos.yml new file mode 100644 index 00000000000..88777cd2545 --- /dev/null +++ b/contrib/ansible/roles/master/tasks/centos.yml @@ -0,0 +1,5 @@ +--- +- name: CentOS | Install kubernetes CentOS style + yum: pkg=kubernetes state=latest enablerepo=virt7-testing + notify: + - restart daemons diff --git a/contrib/ansible/roles/master/tasks/generic-install.yml b/contrib/ansible/roles/master/tasks/generic-install.yml new file mode 100644 index 00000000000..6e69dbf3f63 --- /dev/null +++ b/contrib/ansible/roles/master/tasks/generic-install.yml @@ -0,0 +1,7 @@ +- name: Install kubernetes + action: "{{ ansible_pkg_mgr }}" + args: + name: kubernetes-master + state: latest + notify: + - restart daemons diff --git a/contrib/ansible/roles/master/tasks/main.yml b/contrib/ansible/roles/master/tasks/main.yml index d5b66204b7c..5ee28e74b80 100644 --- a/contrib/ansible/roles/master/tasks/main.yml +++ b/contrib/ansible/roles/master/tasks/main.yml @@ -1,12 +1,9 @@ --- -- name: Install kubernetes - action: "{{ ansible_pkg_mgr }}" - args: - name: kubernetes-master - state: latest - notify: - - restart daemons - when: not is_atomic +- include: generic-install.yml + when: not is_atomic and not ansible_distribution == "CentOS" + +- include: centos.yml + when: not is_atomic and ansible_distribution == "CentOS" - name: write the config file for the api server template: src=apiserver.j2 dest=/etc/kubernetes/apiserver diff --git a/contrib/ansible/roles/node/tasks/centos.yml b/contrib/ansible/roles/node/tasks/centos.yml new file mode 100644 index 00000000000..88777cd2545 --- /dev/null +++ b/contrib/ansible/roles/node/tasks/centos.yml @@ -0,0 +1,5 @@ +--- +- name: CentOS 
| Install kubernetes CentOS style + yum: pkg=kubernetes state=latest enablerepo=virt7-testing + notify: + - restart daemons diff --git a/contrib/ansible/roles/node/tasks/generic-install.yml b/contrib/ansible/roles/node/tasks/generic-install.yml new file mode 100644 index 00000000000..b3597237141 --- /dev/null +++ b/contrib/ansible/roles/node/tasks/generic-install.yml @@ -0,0 +1,7 @@ +- name: Install kubernetes + action: "{{ ansible_pkg_mgr }}" + args: + name: kubernetes-node + state: latest + notify: + - restart daemons diff --git a/contrib/ansible/roles/node/tasks/main.yml b/contrib/ansible/roles/node/tasks/main.yml index 501b3399c66..00046f7bea7 100644 --- a/contrib/ansible/roles/node/tasks/main.yml +++ b/contrib/ansible/roles/node/tasks/main.yml @@ -1,12 +1,9 @@ --- -- name: Install kubernetes - action: "{{ ansible_pkg_mgr }}" - args: - name: kubernetes-node - state: latest - notify: - - restart daemons - when: not is_atomic +- include: generic-install.yml + when: not is_atomic and not ansible_distribution == "CentOS" + +- include: centos.yml + when: not is_atomic and ansible_distribution == "CentOS" - name: write the config files for kubelet template: src=kubelet.j2 dest=/etc/kubernetes/kubelet From e08bd6f3c35f1e2130932cbe0eaa15b24559dae2 Mon Sep 17 00:00:00 2001 From: Jeff Bean Date: Wed, 17 Jun 2015 12:44:38 -0400 Subject: [PATCH 15/26] Configure docker daemon --- contrib/ansible/group_vars/all.yml | 8 +++++ .../ansible/roles/docker/defaults/main.yml | 1 + .../ansible/roles/docker/handlers/main.yml | 3 ++ .../roles/docker/tasks/generic-install.yml | 6 ++++ contrib/ansible/roles/docker/tasks/main.yml | 35 +++++++++++++++++++ contrib/ansible/roles/master/meta/main.yml | 4 +++ contrib/ansible/roles/node/meta/main.yml | 4 +++ 7 files changed, 61 insertions(+) create mode 100644 contrib/ansible/roles/docker/defaults/main.yml create mode 100644 contrib/ansible/roles/docker/handlers/main.yml create mode 100644 contrib/ansible/roles/docker/tasks/generic-install.yml 
create mode 100644 contrib/ansible/roles/docker/tasks/main.yml create mode 100644 contrib/ansible/roles/master/meta/main.yml create mode 100644 contrib/ansible/roles/node/meta/main.yml diff --git a/contrib/ansible/group_vars/all.yml b/contrib/ansible/group_vars/all.yml index 89ffd8090f8..b4a087c705e 100644 --- a/contrib/ansible/group_vars/all.yml +++ b/contrib/ansible/group_vars/all.yml @@ -14,6 +14,14 @@ cluster_name: kube.local # If a password is needed to sudo to root that password must be set here #ansible_sudo_pass: password +# A list of insecure registrys you night need to define +insecure_registrys: +# - "gcr.io" + +# If you need a proxy for the docker daemon define these here +#http_proxy: "http://proxy.example.com:3128" +#https_proxy: "http://proxy.example.com:3128" + # Kubernetes internal network for services. # Kubernetes services will get fake IP addresses from this range. # This range must not conflict with anything in your infrastructure. These diff --git a/contrib/ansible/roles/docker/defaults/main.yml b/contrib/ansible/roles/docker/defaults/main.yml new file mode 100644 index 00000000000..8a4bbb922a5 --- /dev/null +++ b/contrib/ansible/roles/docker/defaults/main.yml @@ -0,0 +1 @@ +no_proxy: "localhost,127.0.0.0/8,::1,/var/run/docker.sock" \ No newline at end of file diff --git a/contrib/ansible/roles/docker/handlers/main.yml b/contrib/ansible/roles/docker/handlers/main.yml new file mode 100644 index 00000000000..43016e002a2 --- /dev/null +++ b/contrib/ansible/roles/docker/handlers/main.yml @@ -0,0 +1,3 @@ +--- +- name: restart docker + service: name=docker state=restarted diff --git a/contrib/ansible/roles/docker/tasks/generic-install.yml b/contrib/ansible/roles/docker/tasks/generic-install.yml new file mode 100644 index 00000000000..694d54db591 --- /dev/null +++ b/contrib/ansible/roles/docker/tasks/generic-install.yml @@ -0,0 +1,6 @@ +--- +- name: Generic | Install Docker + action: "{{ ansible_pkg_mgr }}" + args: + name: docker + state: latest diff 
--git a/contrib/ansible/roles/docker/tasks/main.yml b/contrib/ansible/roles/docker/tasks/main.yml new file mode 100644 index 00000000000..86e8d115982 --- /dev/null +++ b/contrib/ansible/roles/docker/tasks/main.yml @@ -0,0 +1,35 @@ +--- +- include: generic-install.yml + when: not is_atomic + +- name: Turn down docker logging + lineinfile: dest=/etc/sysconfig/docker regexp=^OPTIONS= line=OPTIONS="--selinux-enabled --log-level=warn" + notify: + - restart docker + +- name: Install http_proxy into docker-network + lineinfile: dest=/etc/sysconfig/docker-network regexp=^HTTP_PROXY= line=HTTP_PROXY="{{ http_proxy }}" + when: http_proxy is defined + notify: + - restart docker + +- name: Install https_proxy into docker-network + lineinfile: dest=/etc/sysconfig/docker-network regexp=^HTTPS_PROXY= line=HTTPS_PROXY="{{ https_proxy }}" + when: https_proxy is defined + notify: + - restart docker + +- name: Install no-proxy into docker-network + lineinfile: dest=/etc/sysconfig/docker-network regexp=^NO_PROXY= line=NO_PROXY="{{ no_proxy }}" + when: no_proxy is defined + notify: + - restart docker + +- name: Add any insecure registrys to docker config + lineinfile: dest=/etc/sysconfig/docker regexp=^INSECURE_REGISTRY= line=INSECURE_REGISTRY='{% for reg in insecure_registrys %}--insecure-registry="{{ reg }}" {% endfor %}' + when: insecure_registrys is defined and insecure_registrys > 0 + notify: + - restart docker + +- name: Enable Docker + service: name=docker enabled=yes state=started diff --git a/contrib/ansible/roles/master/meta/main.yml b/contrib/ansible/roles/master/meta/main.yml new file mode 100644 index 00000000000..ca127c83f59 --- /dev/null +++ b/contrib/ansible/roles/master/meta/main.yml @@ -0,0 +1,4 @@ +--- +dependencies: + - { role: common } + - { role: kubernetes } diff --git a/contrib/ansible/roles/node/meta/main.yml b/contrib/ansible/roles/node/meta/main.yml new file mode 100644 index 00000000000..be654b3de8a --- /dev/null +++ 
b/contrib/ansible/roles/node/meta/main.yml @@ -0,0 +1,4 @@ +--- +dependencies: + - { role: docker } + - { role: kubernetes } From 3d10f00401ef74d04ae24b5fa7de4bf949a5b7bd Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Fri, 19 Jun 2015 16:18:12 -0400 Subject: [PATCH 16/26] Give kube-apiserver CAP_NET_BIND_SERVICE It is needed to bind on port 443 --- contrib/ansible/roles/master/tasks/main.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/contrib/ansible/roles/master/tasks/main.yml b/contrib/ansible/roles/master/tasks/main.yml index 5ee28e74b80..949453bc5bf 100644 --- a/contrib/ansible/roles/master/tasks/main.yml +++ b/contrib/ansible/roles/master/tasks/main.yml @@ -20,6 +20,10 @@ notify: - restart scheduler +- name: add cap_net_bind_service to kube-apiserver + capabilities: path=/usr/bin/kube-apiserver capability=cap_net_bind_service=ep state=present + when: not is_atomic + - name: Enable apiserver service: name=kube-apiserver enabled=yes state=started From c2e4df7c87db44f9893640ada47db1d38e4ac99c Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Fri, 19 Jun 2015 14:11:36 -0400 Subject: [PATCH 17/26] Create, distribute, and use certificates for TLS and identity --- .../roles/kubernetes/defaults/main.yml | 20 +++++ .../roles/kubernetes/files/make-ca-cert.sh | 80 +++++++++++++++++++ .../roles/kubernetes/files/make-cert.sh | 26 ++++++ .../ansible/roles/kubernetes/tasks/certs.yml | 52 ++++++++++++ .../roles/kubernetes/tasks/gen_certs.yml | 49 ++++++++++++ .../ansible/roles/kubernetes/tasks/main.yml | 17 +++- .../roles/kubernetes/tasks/place_certs.yml | 22 +++++ .../roles/kubernetes/templates/config.j2 | 2 +- .../ansible/roles/master/tasks/firewalld.yml | 4 +- .../ansible/roles/master/tasks/iptables.yml | 2 +- contrib/ansible/roles/master/tasks/main.yml | 16 +++- .../roles/master/templates/apiserver.j2 | 6 +- .../controller-manager.j2} | 2 +- .../controller-manager.kubeconfig.j2 | 19 +++++ .../scheduler => templates/scheduler.j2} | 2 +- 
.../master/templates/scheduler.kubeconfig.j2 | 19 +++++ contrib/ansible/roles/node/tasks/main.yml | 14 +++- .../ansible/roles/node/templates/kubelet.j2 | 4 +- .../node/templates/kubelet.kubeconfig.j2 | 19 +++++ .../node/{files/proxy => templates/proxy.j2} | 2 +- .../roles/node/templates/proxy.kubeconfig.j2 | 19 +++++ contrib/ansible/setup.sh | 1 + 22 files changed, 379 insertions(+), 18 deletions(-) create mode 100644 contrib/ansible/roles/kubernetes/defaults/main.yml create mode 100755 contrib/ansible/roles/kubernetes/files/make-ca-cert.sh create mode 100755 contrib/ansible/roles/kubernetes/files/make-cert.sh create mode 100644 contrib/ansible/roles/kubernetes/tasks/certs.yml create mode 100644 contrib/ansible/roles/kubernetes/tasks/gen_certs.yml create mode 100644 contrib/ansible/roles/kubernetes/tasks/place_certs.yml rename contrib/ansible/roles/master/{files/controller-manager => templates/controller-manager.j2} (62%) create mode 100644 contrib/ansible/roles/master/templates/controller-manager.kubeconfig.j2 rename contrib/ansible/roles/master/{files/scheduler => templates/scheduler.j2} (53%) create mode 100644 contrib/ansible/roles/master/templates/scheduler.kubeconfig.j2 create mode 100644 contrib/ansible/roles/node/templates/kubelet.kubeconfig.j2 rename contrib/ansible/roles/node/{files/proxy => templates/proxy.j2} (54%) create mode 100644 contrib/ansible/roles/node/templates/proxy.kubeconfig.j2 diff --git a/contrib/ansible/roles/kubernetes/defaults/main.yml b/contrib/ansible/roles/kubernetes/defaults/main.yml new file mode 100644 index 00000000000..805b069b476 --- /dev/null +++ b/contrib/ansible/roles/kubernetes/defaults/main.yml @@ -0,0 +1,20 @@ +# This directory is where all the additional scripts go +# that Kubernetes normally puts in /srv/kubernetes. +# This puts them in a sane location +kube_script_dir: /usr/libexec/kubernetes + +# This directory is where all the additional config stuff goes +# the kubernetes normally puts in /srv/kubernets. 
+# This puts them in a sane location. +# Editting this value will almost surely break something. Don't +# change it. Things like the systemd scripts are hard coded to +# look in here. Don't do it. +kube_config_dir: /etc/kubernetes + +# This is where all the cert scripts and certs will be located +kube_cert_dir: "{{ kube_config_dir }}/certs" + + +# This is the group that the cert creation scripts chgrp the +# cert files to. Not really changable... +kube_cert_group: kube-cert diff --git a/contrib/ansible/roles/kubernetes/files/make-ca-cert.sh b/contrib/ansible/roles/kubernetes/files/make-ca-cert.sh new file mode 100755 index 00000000000..babbaf8e193 --- /dev/null +++ b/contrib/ansible/roles/kubernetes/files/make-ca-cert.sh @@ -0,0 +1,80 @@ +#!/bin/bash + +# Copyright 2014 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +cert_ip=$1 +cert_dir=${CERT_DIR:-/srv/kubernetes} +cert_group=${CERT_GROUP:-kube-cert} + +mkdir -p "$cert_dir" + +use_cn=false + +# TODO: Add support for discovery on other providers? 
+if [ "$cert_ip" == "_use_gce_external_ip_" ]; then + cert_ip=$(curl -s -H Metadata-Flavor:Google http://metadata.google.internal./computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip) +fi + +if [ "$cert_ip" == "_use_aws_external_ip_" ]; then + cert_ip=$(curl -s http://169.254.169.254/latest/meta-data/public-ipv4) +fi + +if [ "$cert_ip" == "_use_azure_dns_name_" ]; then + cert_ip=$(uname -n | awk -F. '{ print $2 }').cloudapp.net + use_cn=true +fi + +tmpdir=$(mktemp -d -t kubernetes_cacert.XXXXXX) +trap 'rm -rf "${tmpdir}"' EXIT +cd "${tmpdir}" + +# TODO: For now, this is a patched tool that makes subject-alt-name work, when +# the fix is upstream move back to the upstream easyrsa. This is cached in GCS +# but is originally taken from: +# https://github.com/brendandburns/easy-rsa/archive/master.tar.gz +# +# To update, do the following: +# curl -o easy-rsa.tar.gz https://github.com/brendandburns/easy-rsa/archive/master.tar.gz +# gsutil cp easy-rsa.tar.gz gs://kubernetes-release/easy-rsa/easy-rsa.tar.gz +# gsutil acl ch -R -g all:R gs://kubernetes-release/easy-rsa/easy-rsa.tar.gz +# +# Due to GCS caching of public objects, it may take time for this to be widely +# distributed. 
+curl -L -O https://storage.googleapis.com/kubernetes-release/easy-rsa/easy-rsa.tar.gz > /dev/null 2>&1 +tar xzf easy-rsa.tar.gz > /dev/null 2>&1 + +cd easy-rsa-master/easyrsa3 +./easyrsa init-pki > /dev/null 2>&1 +./easyrsa --batch "--req-cn=$cert_ip@`date +%s`" build-ca nopass > /dev/null 2>&1 +if [ $use_cn = "true" ]; then + ./easyrsa build-server-full $cert_ip nopass > /dev/null 2>&1 + cp -p pki/issued/$cert_ip.crt "${cert_dir}/server.cert" > /dev/null 2>&1 + cp -p pki/private/$cert_ip.key "${cert_dir}/server.key" > /dev/null 2>&1 +else + ./easyrsa --subject-alt-name=IP:$cert_ip build-server-full kubernetes-master nopass > /dev/null 2>&1 + cp -p pki/issued/kubernetes-master.crt "${cert_dir}/server.cert" > /dev/null 2>&1 + cp -p pki/private/kubernetes-master.key "${cert_dir}/server.key" > /dev/null 2>&1 +fi +./easyrsa build-client-full kubecfg nopass > /dev/null 2>&1 +cp -p pki/ca.crt "${cert_dir}/ca.crt" +cp -p pki/issued/kubecfg.crt "${cert_dir}/kubecfg.crt" +cp -p pki/private/kubecfg.key "${cert_dir}/kubecfg.key" +# Make server certs accessible to apiserver. +chgrp $cert_group "${cert_dir}/server.key" "${cert_dir}/server.cert" "${cert_dir}/ca.crt" +chmod 660 "${cert_dir}/server.key" "${cert_dir}/server.cert" "${cert_dir}/ca.crt" diff --git a/contrib/ansible/roles/kubernetes/files/make-cert.sh b/contrib/ansible/roles/kubernetes/files/make-cert.sh new file mode 100755 index 00000000000..914ed1fd28f --- /dev/null +++ b/contrib/ansible/roles/kubernetes/files/make-cert.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +# Copyright 2014 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cert_dir=${CERT_DIR:-/srv/kubernetes} +cert_group=${CERT_GROUP:-kube-cert} + +mkdir -p "$cert_dir" + +openssl req -new -newkey rsa:4096 -days 365 -nodes -x509 \ + -subj "/CN=kubernetes.invalid/O=Kubernetes" \ + -keyout "${cert_dir}/server.key" -out "${cert_dir}/server.cert" +chgrp $cert_group "${cert_dir}/server.key" "${cert_dir}/server.cert" +chmod 660 "${cert_dir}/server.key" "${cert_dir}/server.cert" diff --git a/contrib/ansible/roles/kubernetes/tasks/certs.yml b/contrib/ansible/roles/kubernetes/tasks/certs.yml new file mode 100644 index 00000000000..338e975f61d --- /dev/null +++ b/contrib/ansible/roles/kubernetes/tasks/certs.yml @@ -0,0 +1,52 @@ +--- +- name: Create system kube-cert groups + group: name={{ kube_cert_group }} state=present system=yes + +- name: Create system kube user + user: + name=kube + comment="Kubernetes user" + shell=/sbin/nologin + state=present + system=yes + groups={{ kube_cert_group }} + +- name: make sure the certificate directory exits + file: + path={{ kube_cert_dir }} + state=directory + mode=o-rwx + group={{ kube_cert_group }} + +- name: Install rsync to push certs around + action: "{{ ansible_pkg_mgr }}" + args: + name: rsync + state: latest + when: not is_atomic + +- name: Generating RSA key for cert node to push to others + user: name=root generate_ssh_key=yes + run_once: true + delegate_to: "{{ groups['masters'][0] }}" + +- name: Downloading pub key + fetch: + src=/root/.ssh/id_rsa.pub + dest=/tmp/id_rsa.pub + flat=yes + fail_on_missing=true + run_once: true + delegate_to: "{{ groups['masters'][0] }}" + 
changed_when: false + +- include: gen_certs.yml + when: inventory_hostname == groups['masters'][0] + +- include: place_certs.yml + +- name: Delete the downloaded pub key + local_action: file path=/tmp/id_rsa.pub state=absent + sudo: false + run_once: true + changed_when: false diff --git a/contrib/ansible/roles/kubernetes/tasks/gen_certs.yml b/contrib/ansible/roles/kubernetes/tasks/gen_certs.yml new file mode 100644 index 00000000000..458049ff8bd --- /dev/null +++ b/contrib/ansible/roles/kubernetes/tasks/gen_certs.yml @@ -0,0 +1,49 @@ +--- +#- name: Get create cert script from Kubernetes +# get_url: +# url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/saltbase/salt/generate-cert/make-cert.sh +# dest={{ kube_script_dir }}/make-cert.sh mode=0500 +# force=yes + +#- name: Get create ca cert script from Kubernetes +# get_url: +# url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/saltbase/salt/generate-cert/make-ca-cert.sh +# dest={{ kube_script_dir }}/make-ca-cert.sh mode=0500 +# force=yes + +- name: HACK | overwrite make-cert.sh from local copy + copy: + src=make-cert.sh + dest={{ kube_script_dir }} + mode=0500 + changed_when: false + +- name: HACK | overwrite make-ca-cert.sh from local copy + copy: + src=make-ca-cert.sh + dest={{ kube_script_dir }} + mode=0500 + changed_when: false + +# FIXME This only generates a cert for one master... 
+- name: Run create cert script on master + command: + "{{ kube_script_dir }}/make-ca-cert.sh {{ inventory_hostname }}" + args: + creates: "{{ kube_cert_dir }}/server.cert" + environment: + CERT_DIR: "{{ kube_cert_dir }}" + CERT_GROUP: "{{ kube_cert_group }}" + +- name: Verify certificate permissions + file: + path={{ item }} + group={{ kube_cert_group }} + owner=kube + mode=0440 + with_items: + - "{{ kube_cert_dir }}/ca.crt" + - "{{ kube_cert_dir }}/server.cert" + - "{{ kube_cert_dir }}/server.key" + - "{{ kube_cert_dir }}/kubecfg.crt" + - "{{ kube_cert_dir }}/kubecfg.key" diff --git a/contrib/ansible/roles/kubernetes/tasks/main.yml b/contrib/ansible/roles/kubernetes/tasks/main.yml index f7eccd4425d..f1008991833 100644 --- a/contrib/ansible/roles/kubernetes/tasks/main.yml +++ b/contrib/ansible/roles/kubernetes/tasks/main.yml @@ -2,7 +2,22 @@ - include: fedora.yml when: ansible_distribution == "Fedora" +- name: Update {{ kube_script_dir }} if this is atomic + set_fact: + kube_script_dir: "/usr/local/libexec/kubernretes" + when: is_atomic and kube_script_dir == "/usr/libexec/kubernetes" + +- name: Create kubernetes config directory + file: path={{ kube_config_dir }} state=directory + +- name: Create kubernetes script directory + file: path={{ kube_script_dir }} state=directory + - name: write the global config file - template: src=config.j2 dest=/etc/kubernetes/config + template: src=config.j2 dest={{ kube_config_dir }}/config notify: - restart daemons + +- include: certs.yml + tags: + certs diff --git a/contrib/ansible/roles/kubernetes/tasks/place_certs.yml b/contrib/ansible/roles/kubernetes/tasks/place_certs.yml new file mode 100644 index 00000000000..a3c7d8e032d --- /dev/null +++ b/contrib/ansible/roles/kubernetes/tasks/place_certs.yml @@ -0,0 +1,22 @@ +--- +- name: place ssh public key so apiserver can push certs + authorized_key: user=root key="{{ item }}" state=present + with_file: + - '/tmp/id_rsa.pub' + changed_when: false + +- name: Copy certificates 
directly from the apiserver to nodes + synchronize: src={{ kube_cert_dir }}/{{ item }} dest={{ kube_cert_dir }}/{{ item }} + delegate_to: "{{ groups['masters'][0] }}" + with_items: + - "ca.crt" + - "kubecfg.crt" + - "kubecfg.key" + notify: + - restart daemons + +- name: remove ssh public key so apiserver can not push stuff + authorized_key: user=root key="{{ item }}" state=absent + with_file: + - '/tmp/id_rsa.pub' + changed_when: false diff --git a/contrib/ansible/roles/kubernetes/templates/config.j2 b/contrib/ansible/roles/kubernetes/templates/config.j2 index ccf38567ec1..8da21e4bb8d 100644 --- a/contrib/ansible/roles/kubernetes/templates/config.j2 +++ b/contrib/ansible/roles/kubernetes/templates/config.j2 @@ -20,4 +20,4 @@ KUBE_LOG_LEVEL="--v=0" KUBE_ALLOW_PRIV="--allow_privileged=true" # How the replication controller, scheduler, and proxy -KUBE_MASTER="--master=http://{{ groups['masters'][0] }}:8080" +KUBE_MASTER="--master=https://{{ groups['masters'][0] }}:443" diff --git a/contrib/ansible/roles/master/tasks/firewalld.yml b/contrib/ansible/roles/master/tasks/firewalld.yml index ffb7b65fbee..7dc5d7fa73f 100644 --- a/contrib/ansible/roles/master/tasks/firewalld.yml +++ b/contrib/ansible/roles/master/tasks/firewalld.yml @@ -1,10 +1,10 @@ --- - name: Open firewalld port for apiserver - firewalld: port=8080/tcp permanent=false state=enabled + firewalld: port=443/tcp permanent=false state=enabled # in case this is also a node with firewalld turned off ignore_errors: yes - name: Save firewalld port for apiserver - firewalld: port=8080/tcp permanent=true state=enabled + firewalld: port=443/tcp permanent=true state=enabled # in case this is also a node with firewalld turned off ignore_errors: yes diff --git a/contrib/ansible/roles/master/tasks/iptables.yml b/contrib/ansible/roles/master/tasks/iptables.yml index cc9e2d9d1ce..966de088ce8 100644 --- a/contrib/ansible/roles/master/tasks/iptables.yml +++ b/contrib/ansible/roles/master/tasks/iptables.yml @@ -5,7 +5,7 @@ 
always_run: yes - name: Open apiserver port with iptables - command: /sbin/iptables -I INPUT 1 -p tcp --dport 8080 -j ACCEPT -m comment --comment "kube-apiserver" + command: /sbin/iptables -I INPUT 1 -p tcp --dport 443 -j ACCEPT -m comment --comment "kube-apiserver" when: kube-apiserver not in iptablesrules.stdout notify: - restart iptables diff --git a/contrib/ansible/roles/master/tasks/main.yml b/contrib/ansible/roles/master/tasks/main.yml index 949453bc5bf..0637dae8940 100644 --- a/contrib/ansible/roles/master/tasks/main.yml +++ b/contrib/ansible/roles/master/tasks/main.yml @@ -6,17 +6,17 @@ when: not is_atomic and ansible_distribution == "CentOS" - name: write the config file for the api server - template: src=apiserver.j2 dest=/etc/kubernetes/apiserver + template: src=apiserver.j2 dest={{ kube_config_dir }}/apiserver notify: - restart apiserver - name: write the config file for the controller-manager - copy: src=controller-manager dest=/etc/kubernetes/controller-manager + template: src=controller-manager.j2 dest={{ kube_config_dir }}/controller-manager notify: - restart controller-manager - name: write the config file for the scheduler - copy: src=scheduler dest=/etc/kubernetes/scheduler + template: src=scheduler.j2 dest={{ kube_config_dir }}/scheduler notify: - restart scheduler @@ -24,6 +24,16 @@ capabilities: path=/usr/bin/kube-apiserver capability=cap_net_bind_service=ep state=present when: not is_atomic +- name: write the kubecfg (auth) file for controller-manager + template: src=controller-manager.kubeconfig.j2 dest={{ kube_config_dir }}/controller-manager.kubeconfig + notify: + - restart controller-manager + +- name: write the kubecfg (auth) file for scheduler + template: src=scheduler.kubeconfig.j2 dest={{ kube_config_dir }}/scheduler.kubeconfig + notify: + - restart scheduler + - name: Enable apiserver service: name=kube-apiserver enabled=yes state=started diff --git a/contrib/ansible/roles/master/templates/apiserver.j2 
b/contrib/ansible/roles/master/templates/apiserver.j2 index 778d2877395..c36c5d7de20 100644 --- a/contrib/ansible/roles/master/templates/apiserver.j2 +++ b/contrib/ansible/roles/master/templates/apiserver.j2 @@ -5,10 +5,10 @@ # # The address on the local server to listen to. -KUBE_API_ADDRESS="--address=0.0.0.0" +KUBE_API_ADDRESS="--insecure-bind-address=127.0.0.1" # The port on the local server to listen on. -# KUBE_API_PORT="--port=8080" +KUBE_API_PORT="--secure-port=443" # Port nodes listen on # KUBELET_PORT="--kubelet_port=10250" @@ -23,4 +23,4 @@ KUBE_ETCD_SERVERS="--etcd_servers={% for node in groups['etcd'] %}http://{{ node KUBE_ADMISSION_CONTROL="--admission_control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota" # Add your own! -KUBE_API_ARGS="" +KUBE_API_ARGS="--tls_cert_file={{ kube_cert_dir }}/server.cert --tls_private_key_file={{ kube_cert_dir }}/server.key --client_ca_file={{ kube_cert_dir }}/ca.crt " diff --git a/contrib/ansible/roles/master/files/controller-manager b/contrib/ansible/roles/master/templates/controller-manager.j2 similarity index 62% rename from contrib/ansible/roles/master/files/controller-manager rename to contrib/ansible/roles/master/templates/controller-manager.j2 index 1a9e3f204c9..9d9d259257b 100644 --- a/contrib/ansible/roles/master/files/controller-manager +++ b/contrib/ansible/roles/master/templates/controller-manager.j2 @@ -4,4 +4,4 @@ # defaults from config and apiserver should be adequate # Add your own! 
-KUBE_CONTROLLER_MANAGER_ARGS=""
+KUBE_CONTROLLER_MANAGER_ARGS="--kubeconfig={{ kube_config_dir }}/controller-manager.kubeconfig"
diff --git a/contrib/ansible/roles/master/templates/controller-manager.kubeconfig.j2 b/contrib/ansible/roles/master/templates/controller-manager.kubeconfig.j2
new file mode 100644
index 00000000000..adf812936b1
--- /dev/null
+++ b/contrib/ansible/roles/master/templates/controller-manager.kubeconfig.j2
@@ -0,0 +1,19 @@
+apiVersion: v1
+clusters:
+- cluster:
+    certificate-authority: {{ kube_cert_dir }}/ca.crt
+    server: https://{{ groups['masters'][0] }}:443
+  name: {{ cluster_name }}
+contexts:
+- context:
+    cluster: {{ cluster_name }}
+    user: kubelet
+  name: kubelet-to-{{ cluster_name }}
+current-context: kubelet-to-{{ cluster_name }}
+kind: Config
+preferences: {}
+users:
+- name: kubelet
+  user:
+    client-certificate: {{ kube_cert_dir }}/kubecfg.crt
+    client-key: {{ kube_cert_dir }}/kubecfg.key
diff --git a/contrib/ansible/roles/master/files/scheduler b/contrib/ansible/roles/master/templates/scheduler.j2
similarity index 53%
rename from contrib/ansible/roles/master/files/scheduler
rename to contrib/ansible/roles/master/templates/scheduler.j2
index f6fc507b72c..8af898d0bde 100644
--- a/contrib/ansible/roles/master/files/scheduler
+++ b/contrib/ansible/roles/master/templates/scheduler.j2
@@ -4,4 +4,4 @@
 # default config should be adequate
 
 # Add your own!
-KUBE_SCHEDULER_ARGS=""
+KUBE_SCHEDULER_ARGS="--kubeconfig={{ kube_config_dir }}/scheduler.kubeconfig"
diff --git a/contrib/ansible/roles/master/templates/scheduler.kubeconfig.j2 b/contrib/ansible/roles/master/templates/scheduler.kubeconfig.j2
new file mode 100644
index 00000000000..adf812936b1
--- /dev/null
+++ b/contrib/ansible/roles/master/templates/scheduler.kubeconfig.j2
@@ -0,0 +1,19 @@
+apiVersion: v1
+clusters:
+- cluster:
+    certificate-authority: {{ kube_cert_dir }}/ca.crt
+    server: https://{{ groups['masters'][0] }}:443
+  name: {{ cluster_name }}
+contexts:
+- context:
+    cluster: {{ cluster_name }}
+    user: kubelet
+  name: kubelet-to-{{ cluster_name }}
+current-context: kubelet-to-{{ cluster_name }}
+kind: Config
+preferences: {}
+users:
+- name: kubelet
+  user:
+    client-certificate: {{ kube_cert_dir }}/kubecfg.crt
+    client-key: {{ kube_cert_dir }}/kubecfg.key
diff --git a/contrib/ansible/roles/node/tasks/main.yml b/contrib/ansible/roles/node/tasks/main.yml
index 00046f7bea7..2e0b7d973a8 100644
--- a/contrib/ansible/roles/node/tasks/main.yml
+++ b/contrib/ansible/roles/node/tasks/main.yml
@@ -6,12 +6,22 @@
   when: not is_atomic and ansible_distribution == "CentOS"
 
 - name: write the config files for kubelet
-  template: src=kubelet.j2 dest=/etc/kubernetes/kubelet
+  template: src=kubelet.j2 dest={{ kube_config_dir }}/kubelet
   notify:
     - restart kubelet
 
 - name: write the config files for proxy
-  copy: src=proxy dest=/etc/kubernetes/proxy
+  template: src=proxy.j2 dest={{ kube_config_dir }}/proxy
+  notify:
+    - restart proxy
+
+- name: write the kubecfg (auth) file for kubelet
+  template: src=kubelet.kubeconfig.j2 dest={{ kube_config_dir }}/kubelet.kubeconfig
+  notify:
+    - restart kubelet
+
+- name: write the kubecfg (auth) file for kube-proxy
+  template: src=proxy.kubeconfig.j2 dest={{ kube_config_dir }}/proxy.kubeconfig
   notify:
     - restart proxy
diff --git a/contrib/ansible/roles/node/templates/kubelet.j2 b/contrib/ansible/roles/node/templates/kubelet.j2
index
1dc96df5393..d7937bdf677 100644
--- a/contrib/ansible/roles/node/templates/kubelet.j2
+++ b/contrib/ansible/roles/node/templates/kubelet.j2
@@ -11,7 +11,7 @@ KUBELET_ADDRESS="--address=0.0.0.0"
 KUBELET_HOSTNAME="--hostname_override={{ inventory_hostname }}"
 
 # location of the api-server
-KUBELET_API_SERVER="--api_servers=http://{{ groups['masters'][0]}}:8080"
+KUBELET_API_SERVER="--api_servers=https://{{ groups['masters'][0]}}:443"
 
 # Add your own!
-KUBELET_ARGS=""
+KUBELET_ARGS="--kubeconfig={{ kube_config_dir}}/kubelet.kubeconfig"
diff --git a/contrib/ansible/roles/node/templates/kubelet.kubeconfig.j2 b/contrib/ansible/roles/node/templates/kubelet.kubeconfig.j2
new file mode 100644
index 00000000000..adf812936b1
--- /dev/null
+++ b/contrib/ansible/roles/node/templates/kubelet.kubeconfig.j2
@@ -0,0 +1,19 @@
+apiVersion: v1
+clusters:
+- cluster:
+    certificate-authority: {{ kube_cert_dir }}/ca.crt
+    server: https://{{ groups['masters'][0] }}:443
+  name: {{ cluster_name }}
+contexts:
+- context:
+    cluster: {{ cluster_name }}
+    user: kubelet
+  name: kubelet-to-{{ cluster_name }}
+current-context: kubelet-to-{{ cluster_name }}
+kind: Config
+preferences: {}
+users:
+- name: kubelet
+  user:
+    client-certificate: {{ kube_cert_dir }}/kubecfg.crt
+    client-key: {{ kube_cert_dir }}/kubecfg.key
diff --git a/contrib/ansible/roles/node/files/proxy b/contrib/ansible/roles/node/templates/proxy.j2
similarity index 54%
rename from contrib/ansible/roles/node/files/proxy
rename to contrib/ansible/roles/node/templates/proxy.j2
index 034276831ba..1a1f7b19d9a 100644
--- a/contrib/ansible/roles/node/files/proxy
+++ b/contrib/ansible/roles/node/templates/proxy.j2
@@ -4,4 +4,4 @@
 # default config should be adequate
 
 # Add your own!
-KUBE_PROXY_ARGS=""
+KUBE_PROXY_ARGS="--kubeconfig={{ kube_config_dir }}/proxy.kubeconfig"
diff --git a/contrib/ansible/roles/node/templates/proxy.kubeconfig.j2 b/contrib/ansible/roles/node/templates/proxy.kubeconfig.j2
new file mode 100644
index 00000000000..adf812936b1
--- /dev/null
+++ b/contrib/ansible/roles/node/templates/proxy.kubeconfig.j2
@@ -0,0 +1,19 @@
+apiVersion: v1
+clusters:
+- cluster:
+    certificate-authority: {{ kube_cert_dir }}/ca.crt
+    server: https://{{ groups['masters'][0] }}:443
+  name: {{ cluster_name }}
+contexts:
+- context:
+    cluster: {{ cluster_name }}
+    user: kubelet
+  name: kubelet-to-{{ cluster_name }}
+current-context: kubelet-to-{{ cluster_name }}
+kind: Config
+preferences: {}
+users:
+- name: kubelet
+  user:
+    client-certificate: {{ kube_cert_dir }}/kubecfg.crt
+    client-key: {{ kube_cert_dir }}/kubecfg.key
diff --git a/contrib/ansible/setup.sh b/contrib/ansible/setup.sh
index 43ea90affe6..39fca65df6e 100755
--- a/contrib/ansible/setup.sh
+++ b/contrib/ansible/setup.sh
@@ -15,4 +15,5 @@
 # limitations under the License.
inventory=${INVENTORY:-inventory} + ansible-playbook -i ${inventory} cluster.yml $@ From 374658ef01937aab40787ed853759a519c678f16 Mon Sep 17 00:00:00 2001 From: Jeff Bean Date: Wed, 17 Jun 2015 19:00:11 -0400 Subject: [PATCH 18/26] support the upstream 'addons' nonsense --- contrib/ansible/cluster.yml | 8 + .../roles/kubernetes-addons/defaults/main.yml | 1 + .../files/kube-addon-update.sh | 445 ++++++++++++++++++ .../kubernetes-addons/files/kube-addons.sh | 178 +++++++ .../kubernetes-addons/files/kube-gen-token.sh | 30 ++ .../roles/kubernetes-addons/handlers/main.yml | 8 + .../roles/kubernetes-addons/meta/main.yml | 3 + .../tasks/generic-install.yml | 5 + .../roles/kubernetes-addons/tasks/main.yml | 53 +++ .../templates/kube-addons.service.j2 | 12 + contrib/ansible/roles/master/tasks/main.yml | 4 + .../roles/master/templates/apiserver.j2 | 2 +- 12 files changed, 748 insertions(+), 1 deletion(-) create mode 100644 contrib/ansible/roles/kubernetes-addons/defaults/main.yml create mode 100755 contrib/ansible/roles/kubernetes-addons/files/kube-addon-update.sh create mode 100644 contrib/ansible/roles/kubernetes-addons/files/kube-addons.sh create mode 100644 contrib/ansible/roles/kubernetes-addons/files/kube-gen-token.sh create mode 100644 contrib/ansible/roles/kubernetes-addons/handlers/main.yml create mode 100644 contrib/ansible/roles/kubernetes-addons/meta/main.yml create mode 100644 contrib/ansible/roles/kubernetes-addons/tasks/generic-install.yml create mode 100644 contrib/ansible/roles/kubernetes-addons/tasks/main.yml create mode 100644 contrib/ansible/roles/kubernetes-addons/templates/kube-addons.service.j2 diff --git a/contrib/ansible/cluster.yml b/contrib/ansible/cluster.yml index 4bdfd80b004..52a8bf35cf7 100644 --- a/contrib/ansible/cluster.yml +++ b/contrib/ansible/cluster.yml @@ -29,6 +29,14 @@ tags: - masters +# launch addons, like dns +- hosts: masters + sudo: yes + roles: + - kubernetes-addons + tags: + - addons + # install kubernetes on the nodes - hosts: 
nodes sudo: yes diff --git a/contrib/ansible/roles/kubernetes-addons/defaults/main.yml b/contrib/ansible/roles/kubernetes-addons/defaults/main.yml new file mode 100644 index 00000000000..203f93aa7f6 --- /dev/null +++ b/contrib/ansible/roles/kubernetes-addons/defaults/main.yml @@ -0,0 +1 @@ +local_temp_addon_dir: /tmp/kubernetes/addons diff --git a/contrib/ansible/roles/kubernetes-addons/files/kube-addon-update.sh b/contrib/ansible/roles/kubernetes-addons/files/kube-addon-update.sh new file mode 100755 index 00000000000..373ca7fce54 --- /dev/null +++ b/contrib/ansible/roles/kubernetes-addons/files/kube-addon-update.sh @@ -0,0 +1,445 @@ +#!/bin/bash + +# Copyright 2015 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# The business logic for whether a given object should be created +# was already enforced by salt, and /etc/kubernetes/addons is the +# managed result is of that. Start everything below that directory. + +# Parameters +# $1 path to add-ons + + +# LIMITATIONS +# 1. controllers are not updated unless their name is changed +# 3. Services will not be updated unless their name is changed, +# but for services we acually want updates without name change. +# 4. Json files are not handled at all. Currently addons must be +# in yaml files +# 5. exit code is probably not always correct (I haven't checked +# carefully if it works in 100% cases) +# 6. There are no unittests +# 8. 
Will not work if the total length of paths to addons is greater than +# bash can handle. Probably it is not a problem: ARG_MAX=2097152 on GCE. +# 9. Performance issue: yaml files are read many times in a single execution. + +# cosmetic improvements to be done +# 1. improve the log function; add timestamp, file name, etc. +# 2. logging doesn't work from files that print things out. +# 3. kubectl prints the output to stderr (the output should be captured and then +# logged) + + + +# global config +KUBECTL=${TEST_KUBECTL:-} # substitute for tests +KUBECTL=${KUBECTL:-${KUBECTL_BIN:-}} +KUBECTL=${KUBECTL:-/usr/local/bin/kubectl} +NUM_TRIES_FOR_CREATE=${TEST_NUM_TRIES:-100} +DELAY_AFTER_CREATE_ERROR_SEC=${TEST_DELAY_AFTER_ERROR_SEC:=10} +NUM_TRIES_FOR_STOP=${TEST_NUM_TRIES:-100} +DELAY_AFTER_STOP_ERROR_SEC=${TEST_DELAY_AFTER_ERROR_SEC:=10} + +if [[ ! -x ${KUBECTL} ]]; then + echo "ERROR: kubectl command (${KUBECTL}) not found or is not executable" 1>&2 + exit 1 +fi + + +# remember that you can't log from functions that print some output (because +# logs are also printed on stdout) +# $1 level +# $2 message +function log() { + # manage log levels manually here + + # add the timestamp if you find it useful + case $1 in + DB3 ) +# echo "$1: $2" + ;; + DB2 ) +# echo "$1: $2" + ;; + DBG ) +# echo "$1: $2" + ;; + INFO ) + echo "$1: $2" + ;; + WRN ) + echo "$1: $2" + ;; + ERR ) + echo "$1: $2" + ;; + * ) + echo "INVALID_LOG_LEVEL $1: $2" + ;; + esac +} + +#$1 yaml file path +function get-object-kind-from-file() { + # prints to stdout, so log cannot be used + #WARNING: only yaml is supported + cat $1 | python -c ''' +try: + import pipes,sys,yaml + y = yaml.load(sys.stdin) + labels = y["metadata"]["labels"] + if ("kubernetes.io/cluster-service", "true") not in labels.iteritems(): + # all add-ons must have the label "kubernetes.io/cluster-service". 
+ # Otherwise we are ignoring them (the update will not work anyway) + print "ERROR" + else: + print y["kind"] +except Exception, ex: + print "ERROR" + ''' +} + +# $1 yaml file path +function get-object-name-from-file() { + # prints to stdout, so log cannot be used + #WARNING: only yaml is supported + cat $1 | python -c ''' +try: + import pipes,sys,yaml + y = yaml.load(sys.stdin) + labels = y["metadata"]["labels"] + if ("kubernetes.io/cluster-service", "true") not in labels.iteritems(): + # all add-ons must have the label "kubernetes.io/cluster-service". + # Otherwise we are ignoring them (the update will not work anyway) + print "ERROR" + else: + print y["metadata"]["name"] +except Exception, ex: + print "ERROR" + ''' +} + +# $1 addon directory path +# $2 addon type (e.g. ReplicationController) +# echoes the string with paths to files containing addon for the given type +# works only for yaml files (!) (ignores json files) +function get-addons-from-disk() { + # prints to stdout, so log cannot be used + local -r addon_dir=$1 + local -r obj_type=$2 + local kind + local file_path + for file_path in $(find ${addon_dir} -name \*.yaml); do + kind=$(get-object-kind-from-file ${file_path}) + # WARNING: assumption that the topmost indentation is zero (I'm not sure yaml allows for topmost indentation) + if [[ "${kind}" == "${obj_type}" ]]; then + echo ${file_path} + fi + done +} + +# waits for all subprocesses +# returns 0 if all of them were successful and 1 otherwise +function wait-for-jobs() { + local rv=0 + local pid + for pid in $(jobs -p); do + wait ${pid} || (rv=1; log ERR "error in pid ${pid}") + log DB2 "pid ${pid} completed, current error code: ${rv}" + done + return ${rv} +} + + +function run-until-success() { + local -r command=$1 + local tries=$2 + local -r delay=$3 + local -r command_name=$1 + while [ ${tries} -gt 0 ]; do + log DBG "executing: '$command'" + # let's give the command as an argument to bash -c, so that we can use + # && and || inside the command 
itself + /bin/bash -c "${command}" && \ + log DB3 "== Successfully executed ${command_name} at $(date -Is) ==" && \ + return 0 + let tries=tries-1 + log INFO "== Failed to execute ${command_name} at $(date -Is). ${tries} tries remaining. ==" + sleep ${delay} + done + return 1 +} + +# $1 object type +function get-addons-from-server() { + local -r obj_type=$1 + "${KUBECTL}" get "${obj_type}" -o template -t "{{range.items}}{{.metadata.name}} {{end}}" --api-version=v1beta3 -l kubernetes.io/cluster-service=true +} + +# returns the characters after the last separator (including) +# If the separator is empty or if it doesn't appear in the string, +# an empty string is printed +# $1 input string +# $2 separator (must be single character, or empty) +function get-suffix() { + # prints to stdout, so log cannot be used + local -r input_string=$1 + local -r separator=$2 + local suffix + + if [[ "${separator}" == "" ]]; then + echo "" + return + fi + + if [[ "${input_string}" == *"${separator}"* ]]; then + suffix=$(echo "${input_string}" | rev | cut -d "${separator}" -f1 | rev) + echo "${separator}${suffix}" + else + echo "" + fi +} + +# returns the characters up to the last '-' (without it) +# $1 input string +# $2 separator +function get-basename() { + # prints to stdout, so log cannot be used + local -r input_string=$1 + local -r separator=$2 + local suffix + suffix="$(get-suffix ${input_string} ${separator})" + # this will strip the suffix (if matches) + echo ${input_string%$suffix} +} + +function stop-object() { + local -r obj_type=$1 + local -r obj_name=$2 + log INFO "Stopping ${obj_type} ${obj_name}" + run-until-success "${KUBECTL} stop ${obj_type} ${obj_name}" ${NUM_TRIES_FOR_STOP} ${DELAY_AFTER_STOP_ERROR_SEC} +} + +function create-object() { + local -r obj_type=$1 + local -r file_path=$2 + log INFO "Creating new ${obj_type} from file ${file_path}" + run-until-success "${KUBECTL} create -f ${file_path}" ${NUM_TRIES_FOR_CREATE} ${DELAY_AFTER_CREATE_ERROR_SEC} +} + 
+function update-object() { + local -r obj_type=$1 + local -r obj_name=$2 + local -r file_path=$3 + log INFO "updating the ${obj_type} ${obj_name} with the new definition ${file_path}" + stop-object ${obj_type} ${obj_name} + create-object ${obj_type} ${file_path} +} + +# deletes the objects from the server +# $1 object type +# $2 a list of object names +function stop-objects() { + local -r obj_type=$1 + local -r obj_names=$2 + local obj_name + for obj_name in ${obj_names}; do + stop-object ${obj_type} ${obj_names} & + done +} + +# creates objects from the given files +# $1 object type +# $2 a list of paths to definition files +function create-objects() { + local -r obj_type=$1 + local -r file_paths=$2 + local file_path + for file_path in ${file_paths}; do + create-object ${obj_type} ${file_path} & + done +} + +# updates objects +# $1 object type +# $2 a list of update specifications +# each update specification is a ';' separated pair: ; +function update-objects() { + local -r obj_type=$1 # ignored + local -r update_spec=$2 + local objdesc + for objdesc in ${update_spec}; do + IFS=';' read -a array <<< ${objdesc} + update-object ${obj_type} ${array[0]} ${array[1]} & + done +} + +# Global variables set by function match-objects. 
+for_delete="" # a list of object names to be deleted +for_update="" # a list of pairs ; for objects that should be updated +for_ignore="" # a list of object nanes that can be ignored +new_files="" # a list of file paths that weren't matched by any existing objects (these objects must be created now) + + +# $1 path to files with objects +# $2 object type in the API (ReplicationController or Service) +# $3 name separator (single character or empty) +function match-objects() { + local -r addon_dir=$1 + local -r obj_type=$2 + local -r separator=$3 + + # output variables (globals) + for_delete="" + for_update="" + for_ignore="" + new_files="" + + addon_names_on_server=$(get-addons-from-server "${obj_type}") + addon_paths_in_files=$(get-addons-from-disk "${addon_dir}" "${obj_type}") + + log DB2 "addon_names_on_server=${addon_names_on_server}" + log DB2 "addon_paths_in_files=${addon_paths_in_files}" + + local matched_files="" + + local basename_on_server="" + local name_on_server="" + local suffix_on_server="" + local name_from_file="" + local suffix_from_file="" + local found=0 + local addon_path="" + + for name_on_server in ${addon_names_on_server}; do + basename_on_server=$(get-basename ${name_on_server} ${separator}) + suffix_on_server="$(get-suffix ${name_on_server} ${separator})" + + log DB3 "Found existing addon ${name_on_server}, basename=${basename_on_server}" + + # check if the addon is present in the directory and decide + # what to do with it + # this is not optimal because we're reading the files over and over + # again. But for small number of addons it doesn't matter so much. + found=0 + for addon_path in ${addon_paths_in_files}; do + name_from_file=$(get-object-name-from-file ${addon_path}) + if [[ "${name_from_file}" == "ERROR" ]]; then + log INFO "Cannot read object name from ${addon_path}. 
Ignoring" + continue + else + log DB2 "Found object name '${name_from_file}' in file ${addon_path}" + fi + suffix_from_file="$(get-suffix ${name_from_file} ${separator})" + + log DB3 "matching: ${basename_on_server}${suffix_from_file} == ${name_from_file}" + if [[ "${basename_on_server}${suffix_from_file}" == "${name_from_file}" ]]; then + log DB3 "matched existing ${obj_type} ${name_on_server} to file ${addon_path}; suffix_on_server=${suffix_on_server}, suffix_from_file=${suffix_from_file}" + found=1 + matched_files="${matched_files} ${addon_path}" + if [[ "${suffix_on_server}" == "${suffix_from_file}" ]]; then + for_ignore="${for_ignore} ${name_from_file}" + else + for_update="${for_update} ${name_on_server};${addon_path}" + fi + break + fi + done + if [[ ${found} -eq 0 ]]; then + log DB2 "No definition file found for replication controller ${name_on_server}. Scheduling for deletion" + for_delete="${for_delete} ${name_on_server}" + fi + done + + log DB3 "matched_files=${matched_files}" + + for addon_path in ${addon_paths_in_files}; do + echo ${matched_files} | grep "${addon_path}" >/dev/null + if [[ $? -ne 0 ]]; then + new_files="${new_files} ${addon_path}" + fi + done +} + + + +function reconcile-objects() { + local -r addon_path=$1 + local -r obj_type=$2 + local -r separator=$3 # name separator + match-objects ${addon_path} ${obj_type} ${separator} + + log DBG "${obj_type}: for_delete=${for_delete}" + log DBG "${obj_type}: for_update=${for_update}" + log DBG "${obj_type}: for_ignore=${for_ignore}" + log DBG "${obj_type}: new_files=${new_files}" + + stop-objects "${obj_type}" "${for_delete}" + # wait for jobs below is a protection against changing the basename + # of a replication controllerm without changing the selector. + # If we don't wait, the new rc may be created before the old one is deleted + # In such case the old one will wait for all its pods to be gone, but the pods + # are created by the new replication controller. 
+ # passing --cascade=false could solve the problem, but we want + # all orphan pods to be deleted. + wait-for-jobs + stopResult=$? + + create-objects "${obj_type}" "${new_files}" + update-objects "${obj_type}" "${for_update}" + + local obj + for obj in ${for_ignore}; do + log DB2 "The ${obj_type} ${obj} is already up to date" + done + + wait-for-jobs + createUpdateResult=$? + + if [[ ${stopResult} -eq 0 ]] && [[ ${createUpdateResult} -eq 0 ]]; then + return 0 + else + return 1 + fi +} + +function update-addons() { + local -r addon_path=$1 + # be careful, reconcile-objects uses global variables + reconcile-objects ${addon_path} ReplicationController "-" & + + # We don't expect service names to be versioned, so + # we match entire name, ignoring version suffix. + # That's why we pass an empty string as the version separator. + # If the service description differs on disk, the service should be recreated. + # This is not implemented in this version. + reconcile-objects ${addon_path} Service "" & + + wait-for-jobs + if [[ $? -eq 0 ]]; then + log INFO "== Kubernetes addon update completed successfully at $(date -Is) ==" + else + log WRN "== Kubernetes addon update completed with errors at $(date -Is) ==" + fi +} + +if [[ $# -ne 1 ]]; then + echo "Illegal number of parameters" 1>&2 + exit 1 +fi + +addon_path=$1 +update-addons ${addon_path} + diff --git a/contrib/ansible/roles/kubernetes-addons/files/kube-addons.sh b/contrib/ansible/roles/kubernetes-addons/files/kube-addons.sh new file mode 100644 index 00000000000..c07491c8fc8 --- /dev/null +++ b/contrib/ansible/roles/kubernetes-addons/files/kube-addons.sh @@ -0,0 +1,178 @@ +#!/bin/bash + +# Copyright 2014 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# The business logic for whether a given object should be created +# was already enforced by salt, and /etc/kubernetes/addons is the +# managed result is of that. Start everything below that directory. +KUBECTL=${KUBECTL_BIN:-/usr/local/bin/kubectl} + +ADDON_CHECK_INTERVAL_SEC=${TEST_ADDON_CHECK_INTERVAL_SEC:-600} + +token_dir=${TOKEN_DIR:-/srv/kubernetes} + +function create-kubeconfig-secret() { + local -r token=$1 + local -r username=$2 + local -r server=$3 + local -r safe_username=$(tr -s ':_' '--' <<< "${username}") + + # Make a kubeconfig file with the token. + if [[ ! -z "${CA_CERT:-}" ]]; then + # If the CA cert is available, put it into the secret rather than using + # insecure-skip-tls-verify. 
+ read -r -d '' kubeconfig </dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) + echo "${token},${account},${account}" >> "${token_file}" + echo "Added ${account}" +done diff --git a/contrib/ansible/roles/kubernetes-addons/handlers/main.yml b/contrib/ansible/roles/kubernetes-addons/handlers/main.yml new file mode 100644 index 00000000000..d4ed09da21d --- /dev/null +++ b/contrib/ansible/roles/kubernetes-addons/handlers/main.yml @@ -0,0 +1,8 @@ +--- +- name: reload and restart kube-addons + command: systemctl daemon-reload + notify: + - restart kube-addons + +- name: restart kube-addons + service: name=kube-addons state=restarted diff --git a/contrib/ansible/roles/kubernetes-addons/meta/main.yml b/contrib/ansible/roles/kubernetes-addons/meta/main.yml new file mode 100644 index 00000000000..80ad2e6cdf7 --- /dev/null +++ b/contrib/ansible/roles/kubernetes-addons/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - { role: master } diff --git a/contrib/ansible/roles/kubernetes-addons/tasks/generic-install.yml b/contrib/ansible/roles/kubernetes-addons/tasks/generic-install.yml new file mode 100644 index 00000000000..4cca068e489 --- /dev/null +++ b/contrib/ansible/roles/kubernetes-addons/tasks/generic-install.yml @@ -0,0 +1,5 @@ +- name: Install PyYAML + action: "{{ ansible_pkg_mgr }}" + args: + name: PyYAML + state: latest diff --git a/contrib/ansible/roles/kubernetes-addons/tasks/main.yml b/contrib/ansible/roles/kubernetes-addons/tasks/main.yml new file mode 100644 index 00000000000..cf5482660fd --- /dev/null +++ b/contrib/ansible/roles/kubernetes-addons/tasks/main.yml @@ -0,0 +1,53 @@ +--- +- name: Assures /etc/kubernetes/addons/ dir exists + file: path=/etc/kubernetes/addons/ state=directory + +- include: generic-install.yml + when: not is_atomic + +- name: Assures local addon dir exists + local_action: file + path={{ local_temp_addon_dir }} + state=directory + sudo: no + +#- name: Get kube-addons script from Kubernetes +# get_url: +# 
url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/saltbase/salt/kube-addons/kube-addons.sh +# dest={{ kube_script_dir }}/kube-addons.sh mode=0755 +# force=yes + +#- name: Get kube-addon-update script from Kubernetes +# get_url: +# url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/saltbase/salt/kube-addons/kube-addon-update.sh +# dest={{ kube_script_dir }}/kube-addon-update.sh mode=0755 +# force=yes + +- name: HACK | copy local kube-addons.sh + copy: src=kube-addons.sh dest={{ kube_script_dir }}/kube-addons.sh mode=0755 + +- name: HACK | copy local kube-addon-update.sh + copy: src=kube-addon-update.sh dest={{ kube_script_dir }}/kube-addon-update.sh mode=0755 + +- name: Copy script to create known_tokens.csv + copy: src=kube-gen-token.sh dest={{ kube_script_dir }}/kube-gen-token.sh mode=0755 + +- name: Run kube-gen-token script to create {{ kube_config_dir }}/known_tokens.csv + command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item }}" + environment: + TOKEN_DIR: "{{ kube_config_dir }}" + with_items: + - + register: gentoken + changed_when: "'Added' in gentoken.stdout" + notify: + - restart apiserver + - restart kube-addons + +- name: Install kube-addons service + template: src=kube-addons.service.j2 dest=/etc/systemd/system/kube-addons.service + notify: + - reload and restart kube-addons + +- name: Enable and start kube addons + service: name=kube-addons.service enabled=yes state=started diff --git a/contrib/ansible/roles/kubernetes-addons/templates/kube-addons.service.j2 b/contrib/ansible/roles/kubernetes-addons/templates/kube-addons.service.j2 new file mode 100644 index 00000000000..38db35c8e5a --- /dev/null +++ b/contrib/ansible/roles/kubernetes-addons/templates/kube-addons.service.j2 @@ -0,0 +1,12 @@ +[Unit] +Description=Kubernetes Addon Object Manager +Documentation=https://github.com/GoogleCloudPlatform/kubernetes + +[Service] +Environment="TOKEN_DIR={{ kube_config_dir }}" 
+Environment="KUBECTL_BIN=/usr/bin/kubectl" +Environment="KUBERNETES_MASTER_NAME={{ groups['masters'][0] }}" +ExecStart={{ kube_script_dir }}/kube-addons.sh + +[Install] +WantedBy=multi-user.target diff --git a/contrib/ansible/roles/master/tasks/main.yml b/contrib/ansible/roles/master/tasks/main.yml index 0637dae8940..3bbd9e204e2 100644 --- a/contrib/ansible/roles/master/tasks/main.yml +++ b/contrib/ansible/roles/master/tasks/main.yml @@ -10,6 +10,10 @@ notify: - restart apiserver +- name: Ensure that a token auth file exists (addons may populate it) + file: path={{kube_config_dir }}/known_tokens.csv state=touch + changed_when: false + - name: write the config file for the controller-manager template: src=controller-manager.j2 dest={{ kube_config_dir }}/controller-manager notify: diff --git a/contrib/ansible/roles/master/templates/apiserver.j2 b/contrib/ansible/roles/master/templates/apiserver.j2 index c36c5d7de20..c0787286da7 100644 --- a/contrib/ansible/roles/master/templates/apiserver.j2 +++ b/contrib/ansible/roles/master/templates/apiserver.j2 @@ -23,4 +23,4 @@ KUBE_ETCD_SERVERS="--etcd_servers={% for node in groups['etcd'] %}http://{{ node KUBE_ADMISSION_CONTROL="--admission_control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota" # Add your own! 
-KUBE_API_ARGS="--tls_cert_file={{ kube_cert_dir }}/server.cert --tls_private_key_file={{ kube_cert_dir }}/server.key --client_ca_file={{ kube_cert_dir }}/ca.crt " +KUBE_API_ARGS="--tls_cert_file={{ kube_cert_dir }}/server.cert --tls_private_key_file={{ kube_cert_dir }}/server.key --client_ca_file={{ kube_cert_dir }}/ca.crt --token_auth_file={{ kube_config_dir }}/known_tokens.csv" From 7b8ed5a12cea1286be1fc684d93f23933c8c80c3 Mon Sep 17 00:00:00 2001 From: Jeff Bean Date: Wed, 17 Jun 2015 19:00:27 -0400 Subject: [PATCH 19/26] support dns in particular --- contrib/ansible/cluster.yml | 5 +- contrib/ansible/group_vars/all.yml | 19 +++++++ .../roles/kubernetes-addons/tasks/dns.yml | 55 +++++++++++++++++++ .../roles/kubernetes-addons/tasks/main.yml | 6 +- .../ansible/roles/node/templates/kubelet.j2 | 4 ++ 5 files changed, 86 insertions(+), 3 deletions(-) create mode 100644 contrib/ansible/roles/kubernetes-addons/tasks/dns.yml diff --git a/contrib/ansible/cluster.yml b/contrib/ansible/cluster.yml index 52a8bf35cf7..6454c2f8d12 100644 --- a/contrib/ansible/cluster.yml +++ b/contrib/ansible/cluster.yml @@ -33,9 +33,10 @@ - hosts: masters sudo: yes roles: - - kubernetes-addons + - kubernetes-addons tags: - - addons + - addons + - dns # install kubernetes on the nodes - hosts: nodes diff --git a/contrib/ansible/group_vars/all.yml b/contrib/ansible/group_vars/all.yml index b4a087c705e..413c4d22414 100644 --- a/contrib/ansible/group_vars/all.yml +++ b/contrib/ansible/group_vars/all.yml @@ -42,3 +42,22 @@ flannel_prefix: 12 # will give to each node on your network. With these defaults you should have # room for 4096 nodes with 254 pods per node. flannel_host_prefix: 24 + +# Turn this varable to 'false' to disable whole DNS configuration. +dns_setup: true +# How many replicas in the Replication Controller +dns_replicas: 1 + +# Internal DNS domain name. +# This domain must not be used in your network. Services will be discoverable +# under .., e.g. 
+# myservice.default.kube.local +dns_domain: kube.local + +# IP address of the DNS server. +# Kubernetes will create a pod with several containers, serving as the DNS +# server and expose it under this IP address. The IP address must be from +# the range specified as kube_service_addresses above. +# And this is the IP address you should use as address of the DNS server +# in your containers. +dns_server: 10.254.0.10 diff --git a/contrib/ansible/roles/kubernetes-addons/tasks/dns.yml b/contrib/ansible/roles/kubernetes-addons/tasks/dns.yml new file mode 100644 index 00000000000..a2e264c7b68 --- /dev/null +++ b/contrib/ansible/roles/kubernetes-addons/tasks/dns.yml @@ -0,0 +1,55 @@ +--- +- name: DNS | Assures {{ kube_config_dir }}/addons/dns dir exists + file: path={{ kube_config_dir }}/addons/dns state=directory + +- name: DNS | Assures local dns addon dir exists + local_action: file + path={{ local_temp_addon_dir }}/dns + state=directory + sudo: no + +- name: DNS | Download skydns-rc.yaml file from Kubernetes repo + local_action: get_url + url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/addons/dns/skydns-rc.yaml.in + dest="{{ local_temp_addon_dir }}/dns/skydns-rc.yaml.j2" + force=yes + sudo: no + +- name: DNS | Convert pillar vars to ansible vars for skydns-rc.yaml + local_action: replace + dest="{{ local_temp_addon_dir }}/dns/skydns-rc.yaml.j2" + regexp="pillar\[\'(\w*)\'\]" + replace="\1" + sudo: no + +- name: DNS | Install Template from converted saltfile + template: + args: + src: "{{ local_temp_addon_dir }}/dns/skydns-rc.yaml.j2" + dest: "{{ kube_config_dir }}/addons/dns/skydns-rc.yaml" + mode: 0755 + owner: root + group: root + +- name: DNS | Download skydns-svc.yaml file from Kubernetes repo + local_action: get_url + url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/addons/dns/skydns-svc.yaml.in + dest="{{ local_temp_addon_dir }}/dns/skydns-svc.yaml.j2" + force=yes + sudo: no + +- name: DNS | 
Convert pillar vars to ansible vars for skydns-rc.yaml + local_action: replace + dest="{{ local_temp_addon_dir }}/dns/skydns-svc.yaml.j2" + regexp="pillar\[\'(\w*)\'\]" + replace="\1" + sudo: no + +- name: DNS | Install Template from converted saltfile + template: + args: + src: "{{ local_temp_addon_dir }}/dns/skydns-svc.yaml.j2" + dest: "{{ kube_config_dir }}/addons/dns/skydns-svc.yaml" + mode: 0755 + owner: root + group: root diff --git a/contrib/ansible/roles/kubernetes-addons/tasks/main.yml b/contrib/ansible/roles/kubernetes-addons/tasks/main.yml index cf5482660fd..3da6617954c 100644 --- a/contrib/ansible/roles/kubernetes-addons/tasks/main.yml +++ b/contrib/ansible/roles/kubernetes-addons/tasks/main.yml @@ -11,6 +11,10 @@ state=directory sudo: no +- include: dns.yml + when: dns_setup + tags: dns + #- name: Get kube-addons script from Kubernetes # get_url: # url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/saltbase/salt/kube-addons/kube-addons.sh @@ -37,7 +41,7 @@ environment: TOKEN_DIR: "{{ kube_config_dir }}" with_items: - - + - "system:dns" register: gentoken changed_when: "'Added' in gentoken.stdout" notify: diff --git a/contrib/ansible/roles/node/templates/kubelet.j2 b/contrib/ansible/roles/node/templates/kubelet.j2 index d7937bdf677..953e5e05fbc 100644 --- a/contrib/ansible/roles/node/templates/kubelet.j2 +++ b/contrib/ansible/roles/node/templates/kubelet.j2 @@ -14,4 +14,8 @@ KUBELET_HOSTNAME="--hostname_override={{ inventory_hostname }}" KUBELET_API_SERVER="--api_servers=https://{{ groups['masters'][0]}}:443" # Add your own! 
+{% if dns_setup %} +KUBELET_ARGS="--cluster_dns={{ dns_server }} --cluster_domain={{ dns_domain }} --kubeconfig={{ kube_config_dir}}/kubelet.kubeconfig" +{% else %} KUBELET_ARGS="--kubeconfig={{ kube_config_dir}}/kubelet.kubeconfig" +{% endif %} From 7996f39413eb6886fa9bcad38569381c3bbbc753 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Mon, 22 Jun 2015 11:01:13 -0400 Subject: [PATCH 20/26] Vagrant? Did we mean to include this? --- contrib/ansible/vagrant/Vagrantfile | 51 +++++++++++++++++++++++++++++ contrib/ansible/vagrant/vinventory | 8 +++++ 2 files changed, 59 insertions(+) create mode 100644 contrib/ansible/vagrant/Vagrantfile create mode 100644 contrib/ansible/vagrant/vinventory diff --git a/contrib/ansible/vagrant/Vagrantfile b/contrib/ansible/vagrant/Vagrantfile new file mode 100644 index 00000000000..dd469edca1b --- /dev/null +++ b/contrib/ansible/vagrant/Vagrantfile @@ -0,0 +1,51 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +# All Vagrant configuration is done below. The "2" in Vagrant.configure +# configures the configuration version (we support older styles for +# backwards compatibility). Please don't change it unless you know what +# you're doing. 
+Vagrant.configure(2) do |config| + config.vm.box = "chef/centos-7.0" + +# config.vm.network "public_network" + + config.vm.define "master", primary: true do |master| + master.vm.hostname = "master.vms.local" + master.vm.network "private_network", ip: "192.168.1.100" + end + + (1..1).each do |i| + config.vm.define "node-#{i}" do |node| + node.vm.hostname = "node-#{i}.vms.local" + node.vm.network "private_network", ip: "192.168.1.1#{i}" + node.vm.provision :ansible do |ansible| + ansible.groups = { + "masters" => ["master"], + "nodes" => ["node-1", "node-2"], + "etcd" => ["master"] + } + ansible.host_key_checking = false + ansible.extra_vars = { + ansible_ssh_user: 'vagrant', + ansible_ssh_pass: 'vagrant', + user: 'vagrant' + } + #ansible.verbose = 'vvv' + ansible.playbook = "../cluster.yml" + ansible.inventory_path = "vinventory" + + ansible.limit = 'all' + end + end + end + + + config.vm.provider "virtualbox" do |vb| + # Display the VirtualBox GUI when booting the machine + vb.gui = false + # Customize the amount of memory on the VM: + vb.memory = "2048" + # vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"] + end +end diff --git a/contrib/ansible/vagrant/vinventory b/contrib/ansible/vagrant/vinventory new file mode 100644 index 00000000000..3cde8796a0f --- /dev/null +++ b/contrib/ansible/vagrant/vinventory @@ -0,0 +1,8 @@ +[masters] +192.168.1.100 + +[etcd] +192.168.1.100 + +[nodes] +192.168.1.11 From fc5c063a2d8ba957a04991324c643979fda6fb52 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Tue, 23 Jun 2015 13:16:08 -0400 Subject: [PATCH 21/26] Support Fedora Rawhide Fedora rawhide has neither yum, nor python2, nor python2 libselinux bindings. Ansible needs all of these. This adds a new play which can be used to get machines setup so they can then be managed with ansible. 
--- contrib/ansible/cluster.yml | 8 ++++++++ contrib/ansible/roles/common/tasks/main.yml | 2 +- .../ansible/roles/pre-ansible/tasks/fedora-dnf.yml | 8 ++++++++ contrib/ansible/roles/pre-ansible/tasks/main.yml | 11 +++++++++++ 4 files changed, 28 insertions(+), 1 deletion(-) create mode 100644 contrib/ansible/roles/pre-ansible/tasks/fedora-dnf.yml create mode 100644 contrib/ansible/roles/pre-ansible/tasks/main.yml diff --git a/contrib/ansible/cluster.yml b/contrib/ansible/cluster.yml index 6454c2f8d12..dc67ebc4352 100644 --- a/contrib/ansible/cluster.yml +++ b/contrib/ansible/cluster.yml @@ -1,4 +1,12 @@ --- +- hosts: all + gather_facts: false + sudo: yes + roles: + - pre-ansible + tags: + - pre-ansible + # Install etcd - hosts: etcd sudo: yes diff --git a/contrib/ansible/roles/common/tasks/main.yml b/contrib/ansible/roles/common/tasks/main.yml index caccf33d6a5..71b637bd6dd 100644 --- a/contrib/ansible/roles/common/tasks/main.yml +++ b/contrib/ansible/roles/common/tasks/main.yml @@ -15,7 +15,7 @@ # collect information about what packages are installed - include: rpm.yml - when: ansible_pkg_mgr == "yum" + when: ansible_pkg_mgr == "yum" or ansible_pkg_mgr == "dnf" - include: centos.yml when: ansible_distribution == "CentOS" diff --git a/contrib/ansible/roles/pre-ansible/tasks/fedora-dnf.yml b/contrib/ansible/roles/pre-ansible/tasks/fedora-dnf.yml new file mode 100644 index 00000000000..1c70564a658 --- /dev/null +++ b/contrib/ansible/roles/pre-ansible/tasks/fedora-dnf.yml @@ -0,0 +1,8 @@ +--- +- name: Install minimal packages + raw: dnf install -y {{ item }} + with_items: + - python # everyone need python2 + - python-dnf # some versions of ansible (2.0) use dnf directly + - yum # some versions of ansible use yum + - libselinux-python diff --git a/contrib/ansible/roles/pre-ansible/tasks/main.yml b/contrib/ansible/roles/pre-ansible/tasks/main.yml new file mode 100644 index 00000000000..92abb057c6a --- /dev/null +++ b/contrib/ansible/roles/pre-ansible/tasks/main.yml @@ 
-0,0 +1,11 @@ +--- +- name: Get os_version from /etc/os-release + raw: "grep '^VERSION_ID=' /etc/os-release | sed s'/VERSION_ID=//'" + register: os_version + +- name: Get distro name from /etc/os-release + raw: "grep '^NAME=' /etc/os-release | sed s'/NAME=//'" + register: distro + +- include: fedora-dnf.yml + when: os_version.stdout|int >= 22 and 'Fedora' in distro.stdout From 83bd4d09032d29402d4202b41bae176e800199e2 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Tue, 23 Jun 2015 14:05:07 -0400 Subject: [PATCH 22/26] Install python-firewall as ansible needs python2 firewalld support --- contrib/ansible/roles/common/tasks/fedora-install.yml | 7 +++++++ contrib/ansible/roles/common/tasks/main.yml | 3 +++ 2 files changed, 10 insertions(+) create mode 100644 contrib/ansible/roles/common/tasks/fedora-install.yml diff --git a/contrib/ansible/roles/common/tasks/fedora-install.yml b/contrib/ansible/roles/common/tasks/fedora-install.yml new file mode 100644 index 00000000000..48454fa235f --- /dev/null +++ b/contrib/ansible/roles/common/tasks/fedora-install.yml @@ -0,0 +1,7 @@ +--- +- name: Generic | Install Firewalld Python2 Package + action: "{{ ansible_pkg_mgr }}" + args: + name: python-firewall + state: latest + when: ansible_distribution_major_version|int >= 22 diff --git a/contrib/ansible/roles/common/tasks/main.yml b/contrib/ansible/roles/common/tasks/main.yml index 71b637bd6dd..3404365eb79 100644 --- a/contrib/ansible/roles/common/tasks/main.yml +++ b/contrib/ansible/roles/common/tasks/main.yml @@ -19,3 +19,6 @@ - include: centos.yml when: ansible_distribution == "CentOS" + +- include: fedora-install.yml + when: not is_atomic and ansible_distribution == "Fedora" From 8cf36d7402687d84319e8f0b02a789768af4a5d7 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Tue, 23 Jun 2015 16:01:56 -0400 Subject: [PATCH 23/26] Declare etcd roles in meta rather then top level playbook --- contrib/ansible/cluster.yml | 5 ----- contrib/ansible/roles/etcd/meta/main.yml | 3 +++ 
contrib/ansible/roles/node/meta/main.yml | 1 + 3 files changed, 4 insertions(+), 5 deletions(-) create mode 100644 contrib/ansible/roles/etcd/meta/main.yml diff --git a/contrib/ansible/cluster.yml b/contrib/ansible/cluster.yml index dc67ebc4352..a5b3532fd81 100644 --- a/contrib/ansible/cluster.yml +++ b/contrib/ansible/cluster.yml @@ -11,7 +11,6 @@ - hosts: etcd sudo: yes roles: - - common - etcd tags: - etcd @@ -31,8 +30,6 @@ - hosts: masters sudo: yes roles: - - common - - kubernetes - master tags: - masters @@ -50,8 +47,6 @@ - hosts: nodes sudo: yes roles: - - common - - kubernetes - node tags: - nodes diff --git a/contrib/ansible/roles/etcd/meta/main.yml b/contrib/ansible/roles/etcd/meta/main.yml new file mode 100644 index 00000000000..0764e314824 --- /dev/null +++ b/contrib/ansible/roles/etcd/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - { role: common } diff --git a/contrib/ansible/roles/node/meta/main.yml b/contrib/ansible/roles/node/meta/main.yml index be654b3de8a..e05b1275fc2 100644 --- a/contrib/ansible/roles/node/meta/main.yml +++ b/contrib/ansible/roles/node/meta/main.yml @@ -1,4 +1,5 @@ --- dependencies: + - { role: common } - { role: docker } - { role: kubernetes } From 97b88fa5637766b66e472e167794bed161cedbd8 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Tue, 23 Jun 2015 19:24:38 -0400 Subject: [PATCH 24/26] Set SELinux permissive to allow DNS to work --- contrib/ansible/roles/node/tasks/main.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/contrib/ansible/roles/node/tasks/main.yml b/contrib/ansible/roles/node/tasks/main.yml index 2e0b7d973a8..08c815878a6 100644 --- a/contrib/ansible/roles/node/tasks/main.yml +++ b/contrib/ansible/roles/node/tasks/main.yml @@ -1,4 +1,13 @@ --- +- name: Check if selinux enforcing + command: getenforce + register: selinux + changed_when: false + +- name: Set selinux permissive because tokens and selinux don't work together + selinux: state=permissive policy=targeted + when: "'Enforcing' in 
selinux.stdout" + - include: generic-install.yml when: not is_atomic and not ansible_distribution == "CentOS" From 52999cd29290702774f1689888a95a02d1326770 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Wed, 24 Jun 2015 12:01:29 -0400 Subject: [PATCH 25/26] Check for rpm directly, don't imply from ansible_pkg_manager Atomic host have neither dnf nor yum. So we cannot use that as a hueristic if they have rpm. --- contrib/ansible/roles/common/tasks/main.yml | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/contrib/ansible/roles/common/tasks/main.yml b/contrib/ansible/roles/common/tasks/main.yml index 3404365eb79..f35b503d7ff 100644 --- a/contrib/ansible/roles/common/tasks/main.yml +++ b/contrib/ansible/roles/common/tasks/main.yml @@ -13,9 +13,23 @@ is_atomic: true when: s.stat.exists +- name: Determine if has rpm + stat: path=/usr/bin/rpm + register: s + changed_when: false + +- name: Init the has_rpm fact + set_fact: + has_rpm: false + +- name: Set the has_rpm fact + set_fact: + has_rpm: true + when: s.stat.exists + # collect information about what packages are installed - include: rpm.yml - when: ansible_pkg_mgr == "yum" or ansible_pkg_mgr == "dnf" + when: has_rpm - include: centos.yml when: ansible_distribution == "CentOS" From a0c312f22421a84223be1c78455ff4cbdbb16a53 Mon Sep 17 00:00:00 2001 From: Jeff Bean Date: Thu, 25 Jun 2015 15:15:35 -0700 Subject: [PATCH 26/26] Changing CentOS install of master and nodes The testing repo now had updated packages for kubernetes Also adding some options to rsync task to work with different users ssh settings --- contrib/ansible/roles/kubernetes/tasks/place_certs.yml | 8 ++++++-- contrib/ansible/roles/master/tasks/centos.yml | 5 ++++- contrib/ansible/roles/node/tasks/centos.yml | 2 +- contrib/ansible/vagrant/Vagrantfile | 5 ----- 4 files changed, 11 insertions(+), 9 deletions(-) diff --git a/contrib/ansible/roles/kubernetes/tasks/place_certs.yml 
b/contrib/ansible/roles/kubernetes/tasks/place_certs.yml index a3c7d8e032d..2271fd1115a 100644 --- a/contrib/ansible/roles/kubernetes/tasks/place_certs.yml +++ b/contrib/ansible/roles/kubernetes/tasks/place_certs.yml @@ -1,12 +1,16 @@ --- -- name: place ssh public key so apiserver can push certs +- name: place ssh public key on other nodes so apiserver can push certs authorized_key: user=root key="{{ item }}" state=present with_file: - '/tmp/id_rsa.pub' changed_when: false - name: Copy certificates directly from the apiserver to nodes - synchronize: src={{ kube_cert_dir }}/{{ item }} dest={{ kube_cert_dir }}/{{ item }} + synchronize: + src={{ kube_cert_dir }}/{{ item }} + dest={{ kube_cert_dir }}/{{ item }} + rsync_timeout=30 + set_remote_user=no delegate_to: "{{ groups['masters'][0] }}" with_items: - "ca.crt" diff --git a/contrib/ansible/roles/master/tasks/centos.yml b/contrib/ansible/roles/master/tasks/centos.yml index 88777cd2545..4564d18cf42 100644 --- a/contrib/ansible/roles/master/tasks/centos.yml +++ b/contrib/ansible/roles/master/tasks/centos.yml @@ -1,5 +1,8 @@ --- - name: CentOS | Install kubernetes CentOS style - yum: pkg=kubernetes state=latest enablerepo=virt7-testing + yum: + pkg=kubernetes-master + state=latest + enablerepo=virt7-testing notify: - restart daemons diff --git a/contrib/ansible/roles/node/tasks/centos.yml b/contrib/ansible/roles/node/tasks/centos.yml index 88777cd2545..638ab440ef0 100644 --- a/contrib/ansible/roles/node/tasks/centos.yml +++ b/contrib/ansible/roles/node/tasks/centos.yml @@ -1,5 +1,5 @@ --- - name: CentOS | Install kubernetes CentOS style - yum: pkg=kubernetes state=latest enablerepo=virt7-testing + yum: pkg=kubernetes-node state=latest enablerepo=virt7-testing notify: - restart daemons diff --git a/contrib/ansible/vagrant/Vagrantfile b/contrib/ansible/vagrant/Vagrantfile index dd469edca1b..05220d2c281 100644 --- a/contrib/ansible/vagrant/Vagrantfile +++ b/contrib/ansible/vagrant/Vagrantfile @@ -20,11 +20,6 @@ 
Vagrant.configure(2) do |config| node.vm.hostname = "node-#{i}.vms.local" node.vm.network "private_network", ip: "192.168.1.1#{i}" node.vm.provision :ansible do |ansible| - ansible.groups = { - "masters" => ["master"], - "nodes" => ["node-1", "node-2"], - "etcd" => ["master"] - } ansible.host_key_checking = false ansible.extra_vars = { ansible_ssh_user: 'vagrant',