Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-23 19:56:01 +00:00
remove most of contrib as it's moving to a separate repo
This commit is contained in:
parent d9555aed67
commit 00cf4c9c94
contrib/ansible/.gitignore (vendored): 3 lines removed
@@ -1,3 +0,0 @@
myinventory
inventory
*.swp
@@ -1,59 +0,0 @@
# Kubernetes Ansible

This playbook and set of roles set up a Kubernetes cluster on a set of machines. They
can be real hardware, VMs, or instances in a public cloud: anything you can connect to via SSH.

## Before starting

* Record the IP address/hostname of the machine you want to be your master (only a single master is supported)
* Record the IP address/hostname of the machine you want to be your etcd server (often the same as the master, only one)
* Record the IP addresses/hostnames of the machines you want to be your nodes. (the master can also be a node)
* Make sure the machine you run Ansible from has Ansible 1.9 and python-netaddr installed.

## Setup

### Configure inventory

Add the system information gathered above into the 'inventory' file, or create a new inventory file for the cluster.

### Configure Cluster options

Look through all of the options in `group_vars/all.yml` and
set the variables to reflect your needs. The options are described there
in full detail.

## Running the playbook

After going through the setup, run the setup script provided:

`$ ./setup.sh`

You may override the inventory file by doing:

`INVENTORY=myinventory ./setup.sh`

In general this will work on very recent Fedora, rawhide or F21. Future work to
support RHEL7, CentOS, and possibly other distros should be forthcoming.

### Targeted runs

You can set up just certain parts instead of doing it all.

#### etcd

`$ ./setup.sh --tags=etcd`

#### Kubernetes master

`$ ./setup.sh --tags=masters`

#### Kubernetes nodes

`$ ./setup.sh --tags=nodes`

#### flannel

`$ ./setup.sh --tags=flannel`
@@ -1,55 +0,0 @@
---
# This playbook deploys a kubernetes cluster
# with the default addons.

- hosts: all
  gather_facts: false
  sudo: yes
  roles:
    - pre-ansible
  tags:
    - pre-ansible

# Install etcd
- hosts: etcd
  sudo: yes
  roles:
    - etcd
  tags:
    - etcd

# install flannel
- hosts:
    - etcd
    - masters
    - nodes
  sudo: yes
  roles:
    - flannel
  tags:
    - flannel

# install kube master services
- hosts: masters
  sudo: yes
  roles:
    - master
  tags:
    - masters

# launch addons, like dns
- hosts: masters
  sudo: yes
  roles:
    - kubernetes-addons
  tags:
    - addons
    - dns

# install kubernetes on the nodes
- hosts: nodes
  sudo: yes
  roles:
    - node
  tags:
    - nodes
@@ -1,71 +0,0 @@
# This value determines how kubernetes binaries, config files, and service
# files are loaded onto the target machines. The following are the only
# valid options:
#
# localBuild - requires make release to have been run to build local binaries
# packageManager - will install packages from your distribution using yum/dnf/apt
source_type: localBuild

# Will be used as the internal DNS domain name if DNS is enabled. Services
# will be discoverable under <service-name>.<namespace>.svc.<domainname>, e.g.
# myservice.default.svc.cluster.local
cluster_name: cluster.local

# Account name of remote user. Ansible will use this user account to ssh into
# the managed machines. The user must be able to use sudo without asking
# for a password unless ansible_sudo_pass is set
#ansible_ssh_user: root

# Password for the ansible_ssh_user. If this is unset you will need to set up
# ssh keys so a password is not needed.
#ansible_ssh_pass: password

# If a password is needed to sudo to root that password must be set here
#ansible_sudo_pass: password

# A list of insecure registries you might need to define
insecure_registrys:
# - "gcr.io"

# If you need a proxy for the docker daemon define these here
#http_proxy: "http://proxy.example.com:3128"
#https_proxy: "http://proxy.example.com:3128"

# Kubernetes internal network for services.
# Kubernetes services will get fake IP addresses from this range.
# This range must not conflict with anything in your infrastructure. These
# addresses do not need to be routable and must just be an unused block of space.
kube_service_addresses: 10.254.0.0/16

# Flannel internal network (optional). When flannel is used, it will assign IP
# addresses from this range to individual pods.
# This network must be unused in your network infrastructure!
flannel_subnet: 172.16.0.0

# Flannel internal network total size (optional). This is the prefix of the
# entire flannel overlay network. So the entirety of 172.16.0.0/12 must be
# unused in your environment.
flannel_prefix: 12

# Flannel internal network (optional). This is the size allocation that flannel
# will give to each node on your network. With these defaults you should have
# room for 4096 nodes with 254 pods per node.
flannel_host_prefix: 24

# Set to false to disable logging with elasticsearch
cluster_logging: true

# Set to false to disable cluster monitoring with heapster and influxdb
cluster_monitoring: true

# Set to false to disable the kube-ui addon for this cluster
kube-ui: false

# Set this variable to 'false' to disable the whole DNS configuration.
dns_setup: true
# How many replicas in the Replication Controller
dns_replicas: 1

# There are other variables in roles/kubernetes/defaults/main.yml, but changing
# them comes with a much higher risk to your cluster, so proceed over there
# with caution.
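A quick sanity check of the sizing comment above (illustrative only, not part of the playbook): with flannel_prefix=12 and flannel_host_prefix=24, the /12 overlay splits into /24 per-host subnets.

flannel_prefix=12
flannel_host_prefix=24
echo "host subnets:  $(( 1 << (flannel_host_prefix - flannel_prefix) ))"   # 4096
echo "pods per host: $(( (1 << (32 - flannel_host_prefix)) - 2 ))"         # 254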
@@ -1,9 +0,0 @@

[masters]
kube-master-test-[1:3].example.com

[etcd:children]
masters

[nodes]
kube-minion-test-[1:2].example.com
@@ -1,10 +0,0 @@

[masters]
kube-master-test-01.example.com

[etcd]
kube-master-test-01.example.com

[nodes]
kube-minion-test-01.example.com
kube-minion-test-02.example.com
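Usage sketch (hypothetical, combining the two invocation styles shown in the README above): save a copy of this inventory as myinventory (one of the names the role's .gitignore expects) and run a targeted play against it:

INVENTORY=myinventory ./setup.sh --tags=masters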
@@ -1,5 +0,0 @@
[virt7-docker-common-candidate]
name=virt7-docker-common-candidate
baseurl=http://cbs.centos.org/repos/virt7-docker-common-candidate/x86_64/os/
enabled=0
gpgcheck=0
@@ -1,5 +0,0 @@
[virt7-testing]
name=virt7-testing
baseurl=http://cbs.centos.org/repos/virt7-testing/x86_64/os/
enabled=0
gpgcheck=0
@@ -1,6 +0,0 @@
---
- name: CentOS | Install Testing centos7 repo
  copy: src=virt7-testing.repo dest=/etc/yum.repos.d/virt7-testing.repo

- name: CentOS | Install docker-common-candidate centos7 repo
  copy: src=virt7-docker-common-candidate.repo dest=/etc/yum.repos.d/virt7-docker-common-candidate.repo
@@ -1,7 +0,0 @@
---
- name: Generic | Install Firewalld Python2 Package
  action: "{{ ansible_pkg_mgr }}"
  args:
    name: python-firewall
    state: latest
  when: ansible_distribution_major_version|int >= 22
@@ -1,46 +0,0 @@
---
- name: Determine if Atomic
  stat: path=/run/ostree-booted
  register: s
  changed_when: false

- name: Init the is_atomic fact
  set_fact:
    is_atomic: false

- name: Set the is_atomic fact
  set_fact:
    is_atomic: true
  when: s.stat.exists

- name: Determine if rpm is present
  stat: path=/usr/bin/rpm
  register: s
  changed_when: false

- name: Init the has_rpm fact
  set_fact:
    has_rpm: false

- name: Set the has_rpm fact
  set_fact:
    has_rpm: true
  when: s.stat.exists

- name: Init the has_firewalld fact
  set_fact:
    has_firewalld: false

- name: Init the has_iptables fact
  set_fact:
    has_iptables: false

# collect information about what packages are installed
- include: rpm.yml
  when: has_rpm

- include: centos.yml
  when: ansible_distribution == "CentOS"

- include: fedora-install.yml
  when: not is_atomic and ansible_distribution == "Fedora"
@@ -1,22 +0,0 @@
---
- name: RPM | Determine if firewalld installed
  command: "rpm -q firewalld"
  register: s
  changed_when: false
  failed_when: false

- name: Set the has_firewalld fact
  set_fact:
    has_firewalld: true
  when: s.rc == 0

- name: Determine if iptables-services installed
  command: "rpm -q iptables-services"
  register: s
  changed_when: false
  failed_when: false

- name: Set the has_iptables fact
  set_fact:
    has_iptables: true
  when: s.rc == 0
@@ -1 +0,0 @@
no_proxy: "localhost,127.0.0.0/8,::1,/var/run/docker.sock"
@@ -1,3 +0,0 @@
---
- name: restart docker
  service: name=docker state=restarted
@@ -1,10 +0,0 @@
---
- name: DEBIAN | Make sure this is stretch or sid, jessie does not have docker
  fail: msg="docker.io is only available in sid and stretch, https://wiki.debian.org/Docker"
  when: ansible_lsb.codename != "stretch" and ansible_lsb.codename != "sid"

- name: DEBIAN | Install Docker
  action: "{{ ansible_pkg_mgr }}"
  args:
    name: docker.io
    state: latest
@@ -1,7 +0,0 @@
---
- name: Generic | Install Docker
  action: "{{ ansible_pkg_mgr }}"
  args:
    name: docker
    state: latest
  when: not is_atomic
@@ -1,54 +0,0 @@
---
- include: debian-install.yml
  when: ansible_distribution == "Debian"

- include: generic-install.yml
  when: ansible_distribution != "Debian"

- name: Set docker config file directory
  set_fact:
    docker_config_dir: "/etc/sysconfig"

- name: Override docker config file directory for Debian
  set_fact:
    docker_config_dir: "/etc/default"
  when: ansible_distribution == "Debian"

- name: Verify docker config files exist
  file: path={{ docker_config_dir }}/{{ item }} state=touch
  changed_when: false
  with_items:
    - docker
    - docker-network

- name: Turn down docker logging
  lineinfile: dest={{ docker_config_dir }}/docker regexp=^OPTIONS= line=OPTIONS="--selinux-enabled --log-level=warn"
  notify:
    - restart docker

- name: Install http_proxy into docker-network
  lineinfile: dest={{ docker_config_dir }}/docker-network regexp=^HTTP_PROXY= line=HTTP_PROXY="{{ http_proxy }}"
  when: http_proxy is defined
  notify:
    - restart docker

- name: Install https_proxy into docker-network
  lineinfile: dest={{ docker_config_dir }}/docker-network regexp=^HTTPS_PROXY= line=HTTPS_PROXY="{{ https_proxy }}"
  when: https_proxy is defined
  notify:
    - restart docker

- name: Install no-proxy into docker-network
  lineinfile: dest={{ docker_config_dir }}/docker-network regexp=^NO_PROXY= line=NO_PROXY="{{ no_proxy }}"
  when: no_proxy is defined
  notify:
    - restart docker

- name: Add any insecure registries to docker config
  lineinfile: dest={{ docker_config_dir }}/docker regexp=^INSECURE_REGISTRY= line=INSECURE_REGISTRY='{% for reg in insecure_registrys %}--insecure-registry="{{ reg }}" {% endfor %}'
  when: insecure_registrys is defined and insecure_registrys
  notify:
    - restart docker

- name: Enable Docker
  service: name=docker enabled=yes state=started
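For reference, if insecure_registrys were set to ["gcr.io"] in group_vars/all.yml (it is only shown commented out there), the Jinja loop in the last lineinfile task above would render the sysconfig line roughly as:

INSECURE_REGISTRY='--insecure-registry="gcr.io" '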
@@ -1,36 +0,0 @@
etcd
====

Configures an etcd cluster for an arbitrary number of hosts

Role Variables
--------------

TODO

Dependencies
------------

None

Example Playbook
----------------

    - hosts: etcd
      roles:
        - { role: etcd }

License
-------

MIT

Author Information
------------------

Scott Dodson <sdodson@redhat.com>, Tim St. Clair <tstclair@redhat.com>
Adapted from https://github.com/retr0h/ansible-etcd. We
should at some point submit a PR to merge this with that module.
@@ -1,25 +0,0 @@
---
etcd_client_port: 2379
etcd_peer_port: 2380
etcd_peers_group: etcd
etcd_url_scheme: http
etcd_peer_url_scheme: http
etcd_conf_dir: /etc/etcd
etcd_script_dir: /usr/libexec/etcd
etcd_ca_file: "{{ etcd_conf_dir }}/ca.crt"
etcd_cert_file: "{{ etcd_conf_dir }}/server.crt"
etcd_key_file: "{{ etcd_conf_dir }}/server.key"
etcd_peer_ca_file: "{{ etcd_conf_dir }}/ca.crt"
etcd_peer_cert_file: "{{ etcd_conf_dir }}/peer.crt"
etcd_peer_key_file: "{{ etcd_conf_dir }}/peer.key"

etcd_initial_cluster_state: new
etcd_initial_cluster_token: etcd-k8-cluster

etcd_initial_advertise_peer_urls: "{{ etcd_peer_url_scheme }}://{{ ansible_fqdn }}:{{ etcd_peer_port }}"
etcd_listen_peer_urls: "{{ etcd_peer_url_scheme }}://{{ ansible_fqdn }}:{{ etcd_peer_port }}"
etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ ansible_fqdn }}:{{ etcd_client_port }}"
etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ ansible_fqdn }}:{{ etcd_client_port }}"

etcd_data_dir: /var/lib/etcd
@@ -1,15 +0,0 @@
[Unit]
Description=Etcd Server
After=network.target

[Service]
Type=simple
WorkingDirectory=/var/lib/etcd/
EnvironmentFile=-/etc/etcd/etcd.conf
User=etcd
ExecStart=/usr/bin/etcd
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
@@ -1,10 +0,0 @@
---
- name: reload systemd
  command: systemctl --system daemon-reload

- name: restart etcd
  service: name=etcd state=restarted
  when: not etcd_started.changed

- name: Save iptables rules
  command: service iptables save
@@ -1,3 +0,0 @@
---
dependencies:
  - { role: common }
@@ -1,16 +0,0 @@
---
- name: Open firewalld port for etcd
  firewalld: port={{ item }}/tcp permanent=false state=enabled
  # in case this is also a node where firewalld is turned off
  ignore_errors: yes
  with_items:
    - 2379
    - 2380

- name: Save firewalld port for etcd
  firewalld: port={{ item }}/tcp permanent=true state=enabled
  # in case this is also a node where firewalld is turned off
  ignore_errors: yes
  with_items:
    - 2379
    - 2380
@@ -1,17 +0,0 @@
---
- name: Get iptables rules
  command: iptables -L
  register: iptablesrules
  always_run: yes

- name: Enable iptables at boot
  service: name=iptables enabled=yes state=started

- name: Open etcd client and peer ports with iptables
  command: /sbin/iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "etcd"
  when: "'etcd' not in iptablesrules.stdout"
  notify:
    - Save iptables rules
  with_items:
    - 2379
    - 2380
@@ -1,34 +0,0 @@
---
- name: Install etcd
  action: "{{ ansible_pkg_mgr }}"
  args:
    name: etcd
    state: latest
  notify:
    - restart etcd
  when: not is_atomic

- name: Write etcd config file
  template: src=etcd.conf.j2 dest=/etc/etcd/etcd.conf
  notify:
    - restart etcd

- name: Write etcd systemd unit file for Debian
  copy: src=etcd.service dest=/etc/systemd/system
  notify:
    - reload systemd
    - restart etcd
  when: ansible_distribution == "Debian"

- name: Enable etcd
  service: name=etcd enabled=yes

- name: Start etcd
  service: name=etcd state=started
  register: etcd_started

- include: firewalld.yml
  when: has_firewalld

- include: iptables.yml
  when: not has_firewalld and has_iptables
@@ -1,52 +0,0 @@
{% macro initial_cluster() -%}
{% for host in groups[etcd_peers_group] -%}
{% if loop.last -%}
{{ hostvars[host]['ansible_hostname'] }}={{ etcd_peer_url_scheme }}://{{ hostvars[host]['ansible_fqdn'] }}:{{ etcd_peer_port }}
{%- else -%}
{{ hostvars[host]['ansible_hostname'] }}={{ etcd_peer_url_scheme }}://{{ hostvars[host]['ansible_fqdn'] }}:{{ etcd_peer_port }},
{%- endif -%}
{% endfor -%}
{% endmacro -%}

{% if groups[etcd_peers_group] and groups[etcd_peers_group] | length > 1 %}
ETCD_NAME={{ ansible_hostname }}
ETCD_LISTEN_PEER_URLS={{ etcd_listen_peer_urls }}
{% else %}
ETCD_NAME=default
{% endif %}
ETCD_DATA_DIR={{ etcd_data_dir }}
#ETCD_SNAPSHOT_COUNTER="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
ETCD_LISTEN_CLIENT_URLS={{ etcd_listen_client_urls }}
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""

{% if groups[etcd_peers_group] and groups[etcd_peers_group] | length > 1 %}
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_initial_advertise_peer_urls }}
ETCD_INITIAL_CLUSTER={{ initial_cluster() }}
ETCD_INITIAL_CLUSTER_STATE={{ etcd_initial_cluster_state }}
ETCD_INITIAL_CLUSTER_TOKEN={{ etcd_initial_cluster_token }}
#ETCD_DISCOVERY=""
#ETCD_DISCOVERY_SRV=""
#ETCD_DISCOVERY_FALLBACK="proxy"
#ETCD_DISCOVERY_PROXY=""
{% endif %}
ETCD_ADVERTISE_CLIENT_URLS={{ etcd_advertise_client_urls }}

#[proxy]
#ETCD_PROXY="off"

#[security]
{% if etcd_url_scheme == 'https' -%}
ETCD_CA_FILE={{ etcd_ca_file }}
ETCD_CERT_FILE={{ etcd_cert_file }}
ETCD_KEY_FILE={{ etcd_key_file }}
{% endif -%}
{% if etcd_peer_url_scheme == 'https' -%}
ETCD_PEER_CA_FILE={{ etcd_peer_ca_file }}
ETCD_PEER_CERT_FILE={{ etcd_peer_cert_file }}
ETCD_PEER_KEY_FILE={{ etcd_peer_key_file }}
{% endif -%}
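For orientation, with a single host in the etcd group (for example kube-master-test-01.example.com from the sample inventory, assuming ansible_fqdn resolves to that name) the multi-member branches above are skipped and the rendered /etc/etcd/etcd.conf boils down to roughly this (commented defaults omitted):

ETCD_NAME=default
ETCD_DATA_DIR=/var/lib/etcd
ETCD_LISTEN_CLIENT_URLS=http://kube-master-test-01.example.com:2379
ETCD_ADVERTISE_CLIENT_URLS=http://kube-master-test-01.example.com:2379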
@@ -1,20 +0,0 @@
---
- name: restart flannel
  service: name=flanneld state=restarted
  notify:
    - stop docker
    - delete docker0
    - start docker
  when: inventory_hostname in groups['nodes']

- name: stop docker
  service: name=docker state=stopped

- name: delete docker0
  command: ip link delete docker0
  ignore_errors: yes

- name: start docker
  service: name=docker state=started
  # This might fail if docker isn't installed yet
  ignore_errors: yes
@@ -1,3 +0,0 @@
---
dependencies:
  - { role: common }
@@ -1,17 +0,0 @@
---
- name: Install flannel
  action: "{{ ansible_pkg_mgr }}"
  args:
    name: flannel
    state: latest
  when: not is_atomic

- name: Install Flannel config file
  template: src=flanneld.j2 dest=/etc/sysconfig/flanneld
  notify:
    - restart flannel

- name: Launch Flannel
  service: name=flanneld state=started enabled=yes
  notify:
    - restart flannel
@@ -1,23 +0,0 @@
---
- name: Set facts about etcdctl command
  set_fact:
    peers: "{% for hostname in groups['etcd'] %}http://{{ hostname }}:2379{% if not loop.last %},{% endif %}{% endfor %}"
    conf_file: "/tmp/flannel-conf.json"
    conf_loc: "/{{ cluster_name }}/network/config"
  run_once: true
  delegate_to: "{{ groups['etcd'][0] }}"

- name: Create flannel config file to go in etcd
  template: src=flannel-conf.json.j2 dest={{ conf_file }}
  run_once: true
  delegate_to: "{{ groups['etcd'][0] }}"

- name: Load the flannel config file into etcd
  shell: "/usr/bin/etcdctl --no-sync --peers={{ peers }} set {{ conf_loc }} < {{ conf_file }}"
  run_once: true
  delegate_to: "{{ groups['etcd'][0] }}"

- name: Clean up the flannel config file
  file: path={{ conf_file }} state=absent
  run_once: true
  delegate_to: "{{ groups['etcd'][0] }}"
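Roughly, assuming the group_vars defaults (cluster_name=cluster.local, flannel_subnet=172.16.0.0, flannel_prefix=12, flannel_host_prefix=24) and the single-master sample inventory, the tasks above amount to running this on the first etcd host (illustration only, not part of the role):

cat > /tmp/flannel-conf.json <<'EOF'
{ "Network": "172.16.0.0/12", "SubnetLen": 24, "Backend": { "Type": "vxlan" } }
EOF
/usr/bin/etcdctl --no-sync \
  --peers=http://kube-master-test-01.example.com:2379 \
  set /cluster.local/network/config < /tmp/flannel-conf.json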
@@ -1,5 +0,0 @@
---
- include: config.yml

- include: client.yml
  when: inventory_hostname in groups['masters'] + groups['nodes']
@@ -1 +0,0 @@
{ "Network": "{{ flannel_subnet }}/{{ flannel_prefix }}", "SubnetLen": {{ flannel_host_prefix }}, "Backend": { "Type": "vxlan" } }
@@ -1,11 +0,0 @@
# Flanneld configuration options

# etcd url location. Point this to the server where etcd runs
FLANNEL_ETCD="{% for node in groups['etcd'] %}http://{{ node }}:2379{% if not loop.last %},{% endif %}{% endfor %}"

# etcd config key. This is the configuration key that flannel queries
# For address range assignment
FLANNEL_ETCD_KEY="/{{ cluster_name }}/network"

# Any additional options that you want to pass
#FLANNEL_OPTIONS=""
@@ -1,4 +0,0 @@
kube_addons_dir: "{{ kube_config_dir }}/addons"


local_temp_addon_dir: /tmp/kubernetes/addons
@ -1,501 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# The business logic for whether a given object should be created
|
||||
# was already enforced by salt, and /etc/kubernetes/addons is the
|
||||
# managed result of that. Start everything below that directory.
|
||||
|
||||
# Parameters
|
||||
# $1 path to add-ons
|
||||
|
||||
|
||||
# LIMITATIONS
|
||||
# 1. controllers are not updated unless their name is changed
|
||||
# 3. Services will not be updated unless their name is changed,
|
||||
# but for services we actually want updates without name change.
|
||||
# 4. Json files are not handled at all. Currently addons must be
|
||||
# in yaml files
|
||||
# 5. exit code is probably not always correct (I haven't checked
|
||||
# carefully if it works in 100% cases)
|
||||
# 6. There are no unittests
|
||||
# 8. Will not work if the total length of paths to addons is greater than
|
||||
# bash can handle. Probably it is not a problem: ARG_MAX=2097152 on GCE.
|
||||
# 9. Performance issue: yaml files are read many times in a single execution.
|
||||
|
||||
# cosmetic improvements to be done
|
||||
# 1. improve the log function; add timestamp, file name, etc.
|
||||
# 2. logging doesn't work from files that print things out.
|
||||
# 3. kubectl prints the output to stderr (the output should be captured and then
|
||||
# logged)
|
||||
|
||||
|
||||
|
||||
# global config
|
||||
KUBECTL=${TEST_KUBECTL:-} # substitute for tests
|
||||
KUBECTL=${KUBECTL:-${KUBECTL_BIN:-}}
|
||||
KUBECTL=${KUBECTL:-/usr/local/bin/kubectl}
|
||||
if [[ ! -x ${KUBECTL} ]]; then
|
||||
echo "ERROR: kubectl command (${KUBECTL}) not found or is not executable" 1>&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# If an add-on definition is incorrect, or a definition has just disappeared
|
||||
# from the local directory, the script will still keep on retrying.
|
||||
# The script does not end until all retries are done, so
|
||||
# one invalid manifest may block updates of other add-ons.
|
||||
# Be careful how you set these parameters
|
||||
NUM_TRIES=1 # will be updated based on input parameters
|
||||
DELAY_AFTER_ERROR_SEC=${TEST_DELAY_AFTER_ERROR_SEC:=10}
|
||||
|
||||
|
||||
# remember that you can't log from functions that print some output (because
|
||||
# logs are also printed on stdout)
|
||||
# $1 level
|
||||
# $2 message
|
||||
function log() {
|
||||
# manage log levels manually here
|
||||
|
||||
# add the timestamp if you find it useful
|
||||
case $1 in
|
||||
DB3 )
|
||||
# echo "$1: $2"
|
||||
;;
|
||||
DB2 )
|
||||
# echo "$1: $2"
|
||||
;;
|
||||
DBG )
|
||||
# echo "$1: $2"
|
||||
;;
|
||||
INFO )
|
||||
echo "$1: $2"
|
||||
;;
|
||||
WRN )
|
||||
echo "$1: $2"
|
||||
;;
|
||||
ERR )
|
||||
echo "$1: $2"
|
||||
;;
|
||||
* )
|
||||
echo "INVALID_LOG_LEVEL $1: $2"
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
#$1 yaml file path
|
||||
function get-object-kind-from-file() {
|
||||
# prints to stdout, so log cannot be used
|
||||
#WARNING: only yaml is supported
|
||||
cat $1 | python -c '''
|
||||
try:
|
||||
import pipes,sys,yaml
|
||||
y = yaml.load(sys.stdin)
|
||||
labels = y["metadata"]["labels"]
|
||||
if ("kubernetes.io/cluster-service", "true") not in labels.iteritems():
|
||||
# all add-ons must have the label "kubernetes.io/cluster-service".
|
||||
# Otherwise we are ignoring them (the update will not work anyway)
|
||||
print "ERROR"
|
||||
else:
|
||||
print y["kind"]
|
||||
except Exception, ex:
|
||||
print "ERROR"
|
||||
'''
|
||||
}
|
||||
|
||||
# $1 yaml file path
|
||||
# returns a string of the form <namespace>/<name> (we call it nsnames)
|
||||
function get-object-nsname-from-file() {
|
||||
# prints to stdout, so log cannot be used
|
||||
#WARNING: only yaml is supported
|
||||
#addons that do not specify a namespace are assumed to be in "default".
|
||||
cat $1 | python -c '''
|
||||
try:
|
||||
import pipes,sys,yaml
|
||||
y = yaml.load(sys.stdin)
|
||||
labels = y["metadata"]["labels"]
|
||||
if ("kubernetes.io/cluster-service", "true") not in labels.iteritems():
|
||||
# all add-ons must have the label "kubernetes.io/cluster-service".
|
||||
# Otherwise we are ignoring them (the update will not work anyway)
|
||||
print "ERROR"
|
||||
else:
|
||||
try:
|
||||
print "%s/%s" % (y["metadata"]["namespace"], y["metadata"]["name"])
|
||||
except Exception, ex:
|
||||
print "default/%s" % y["metadata"]["name"]
|
||||
except Exception, ex:
|
||||
print "ERROR"
|
||||
'''
|
||||
}
|
||||
|
||||
# $1 addon directory path
|
||||
# $2 addon type (e.g. ReplicationController)
|
||||
# echoes the string with paths to files containing addon for the given type
|
||||
# works only for yaml files (!) (ignores json files)
|
||||
function get-addon-paths-from-disk() {
|
||||
# prints to stdout, so log cannot be used
|
||||
local -r addon_dir=$1
|
||||
local -r obj_type=$2
|
||||
local kind
|
||||
local file_path
|
||||
for file_path in $(find ${addon_dir} -name \*.yaml); do
|
||||
kind=$(get-object-kind-from-file ${file_path})
|
||||
# WARNING: assumption that the topmost indentation is zero (I'm not sure yaml allows for topmost indentation)
|
||||
if [[ "${kind}" == "${obj_type}" ]]; then
|
||||
echo ${file_path}
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
# waits for all subprocesses
|
||||
# returns 0 if all of them were successful and 1 otherwise
|
||||
function wait-for-jobs() {
|
||||
local rv=0
|
||||
local pid
|
||||
for pid in $(jobs -p); do
|
||||
wait ${pid} || (rv=1; log ERR "error in pid ${pid}")
|
||||
log DB2 "pid ${pid} completed, current error code: ${rv}"
|
||||
done
|
||||
return ${rv}
|
||||
}
|
||||
|
||||
|
||||
function run-until-success() {
|
||||
local -r command=$1
|
||||
local tries=$2
|
||||
local -r delay=$3
|
||||
local -r command_name=$1
|
||||
while [ ${tries} -gt 0 ]; do
|
||||
log DBG "executing: '$command'"
|
||||
# let's give the command as an argument to bash -c, so that we can use
|
||||
# && and || inside the command itself
|
||||
/bin/bash -c "${command}" && \
|
||||
log DB3 "== Successfully executed ${command_name} at $(date -Is) ==" && \
|
||||
return 0
|
||||
let tries=tries-1
|
||||
log INFO "== Failed to execute ${command_name} at $(date -Is). ${tries} tries remaining. =="
|
||||
sleep ${delay}
|
||||
done
|
||||
return 1
|
||||
}
|
||||
|
||||
# $1 object type
|
||||
# returns a list of <namespace>/<name> pairs (nsnames)
|
||||
function get-addon-nsnames-from-server() {
|
||||
local -r obj_type=$1
|
||||
"${KUBECTL}" get "${obj_type}" --all-namespaces -o template -t "{{range.items}}{{.metadata.namespace}}/{{.metadata.name}} {{end}}" --api-version=v1 -l kubernetes.io/cluster-service=true
|
||||
}
|
||||
|
||||
# returns the characters after the last separator (including)
|
||||
# If the separator is empty or if it doesn't appear in the string,
|
||||
# an empty string is printed
|
||||
# $1 input string
|
||||
# $2 separator (must be single character, or empty)
|
||||
function get-suffix() {
|
||||
# prints to stdout, so log cannot be used
|
||||
local -r input_string=$1
|
||||
local -r separator=$2
|
||||
local suffix
|
||||
|
||||
if [[ "${separator}" == "" ]]; then
|
||||
echo ""
|
||||
return
|
||||
fi
|
||||
|
||||
if [[ "${input_string}" == *"${separator}"* ]]; then
|
||||
suffix=$(echo "${input_string}" | rev | cut -d "${separator}" -f1 | rev)
|
||||
echo "${separator}${suffix}"
|
||||
else
|
||||
echo ""
|
||||
fi
|
||||
}
|
||||
|
||||
# returns the characters up to the last '-' (without it)
|
||||
# $1 input string
|
||||
# $2 separator
|
||||
function get-basename() {
|
||||
# prints to stdout, so log cannot be used
|
||||
local -r input_string=$1
|
||||
local -r separator=$2
|
||||
local suffix
|
||||
suffix="$(get-suffix ${input_string} ${separator})"
|
||||
# this will strip the suffix (if matches)
|
||||
echo ${input_string%$suffix}
|
||||
}
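# Illustrative example (not in the original script), using a hypothetical
# versioned controller name with separator '-':
#   get-suffix   kube-dns-v8 -   ->  "-v8"
#   get-basename kube-dns-v8 -   ->  "kube-dns"
# i.e. the version suffix is split off so differently versioned controllers
# can be matched to the same base name.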
|
||||
|
||||
function stop-object() {
|
||||
local -r obj_type=$1
|
||||
local -r namespace=$2
|
||||
local -r obj_name=$3
|
||||
log INFO "Stopping ${obj_type} ${namespace}/${obj_name}"
|
||||
|
||||
run-until-success "${KUBECTL} stop --namespace=${namespace} ${obj_type} ${obj_name}" ${NUM_TRIES} ${DELAY_AFTER_ERROR_SEC}
|
||||
}
|
||||
|
||||
function create-object() {
|
||||
local -r obj_type=$1
|
||||
local -r file_path=$2
|
||||
|
||||
local nsname_from_file
|
||||
nsname_from_file=$(get-object-nsname-from-file ${file_path})
|
||||
if [[ "${nsname_from_file}" == "ERROR" ]]; then
|
||||
log INFO "Cannot read object name from ${file_path}. Ignoring"
|
||||
return 1
|
||||
fi
|
||||
IFS='/' read namespace obj_name <<< "${nsname_from_file}"
|
||||
|
||||
log INFO "Creating new ${obj_type} from file ${file_path} in namespace ${namespace}, name: ${obj_name}"
|
||||
# this will keep on failing if the ${file_path} disappeared in the meantime.
|
||||
# Do not use too many retries.
|
||||
run-until-success "${KUBECTL} create --namespace=${namespace} -f ${file_path}" ${NUM_TRIES} ${DELAY_AFTER_ERROR_SEC}
|
||||
}
|
||||
|
||||
function update-object() {
|
||||
local -r obj_type=$1
|
||||
local -r namespace=$2
|
||||
local -r obj_name=$3
|
||||
local -r file_path=$4
|
||||
log INFO "updating the ${obj_type} ${namespace}/${obj_name} with the new definition ${file_path}"
|
||||
stop-object ${obj_type} ${namespace} ${obj_name}
|
||||
create-object ${obj_type} ${file_path}
|
||||
}
|
||||
|
||||
# deletes the objects from the server
|
||||
# $1 object type
|
||||
# $2 a list of object nsnames
|
||||
function stop-objects() {
|
||||
local -r obj_type=$1
|
||||
local -r obj_nsnames=$2
|
||||
local namespace
|
||||
local obj_name
|
||||
for nsname in ${obj_nsnames}; do
|
||||
IFS='/' read namespace obj_name <<< "${nsname}"
|
||||
stop-object ${obj_type} ${namespace} ${obj_name} &
|
||||
done
|
||||
}
|
||||
|
||||
# creates objects from the given files
|
||||
# $1 object type
|
||||
# $2 a list of paths to definition files
|
||||
function create-objects() {
|
||||
local -r obj_type=$1
|
||||
local -r file_paths=$2
|
||||
local file_path
|
||||
for file_path in ${file_paths}; do
|
||||
# Remember that the file may have disappeared by now
|
||||
# But we don't want to check it here because
|
||||
# such race condition may always happen after
|
||||
# we check it. Let's have the race
|
||||
# condition happen a bit more often so that
|
||||
# we see that our tests pass anyway.
|
||||
create-object ${obj_type} ${file_path} &
|
||||
done
|
||||
}
|
||||
|
||||
# updates objects
|
||||
# $1 object type
|
||||
# $2 a list of update specifications
|
||||
# each update specification is a ';' separated pair: <nsname>;<file path>
|
||||
function update-objects() {
|
||||
local -r obj_type=$1 # ignored
|
||||
local -r update_spec=$2
|
||||
local objdesc
|
||||
local nsname
|
||||
local obj_name
|
||||
local namespace
|
||||
|
||||
for objdesc in ${update_spec}; do
|
||||
IFS=';' read nsname file_path <<< "${objdesc}"
|
||||
IFS='/' read namespace obj_name <<< "${nsname}"
|
||||
|
||||
update-object ${obj_type} ${namespace} ${obj_name} ${file_path} &
|
||||
done
|
||||
}
|
||||
|
||||
# Global variables set by function match-objects.
|
||||
nsnames_for_delete="" # a list of object nsnames to be deleted
|
||||
for_update="" # a list of pairs <nsname>;<filePath> for objects that should be updated
|
||||
nsnames_for_ignore="" # a list of object nsnames that will be ignored
|
||||
new_files="" # a list of file paths that weren't matched by any existing objects (these objects must be created now)
|
||||
|
||||
|
||||
# $1 path to files with objects
|
||||
# $2 object type in the API (ReplicationController or Service)
|
||||
# $3 name separator (single character or empty)
|
||||
function match-objects() {
|
||||
local -r addon_dir=$1
|
||||
local -r obj_type=$2
|
||||
local -r separator=$3
|
||||
|
||||
# output variables (globals)
|
||||
nsnames_for_delete=""
|
||||
for_update=""
|
||||
nsnames_for_ignore=""
|
||||
new_files=""
|
||||
|
||||
addon_nsnames_on_server=$(get-addon-nsnames-from-server "${obj_type}")
|
||||
addon_paths_in_files=$(get-addon-paths-from-disk "${addon_dir}" "${obj_type}")
|
||||
|
||||
log DB2 "addon_nsnames_on_server=${addon_nsnames_on_server}"
|
||||
log DB2 "addon_paths_in_files=${addon_paths_in_files}"
|
||||
|
||||
local matched_files=""
|
||||
|
||||
local basensname_on_server=""
|
||||
local nsname_on_server=""
|
||||
local suffix_on_server=""
|
||||
local nsname_from_file=""
|
||||
local suffix_from_file=""
|
||||
local found=0
|
||||
local addon_path=""
|
||||
|
||||
# objects that were moved between namespaces will have different nsname
|
||||
# because the namespace is included. So they will be treated
|
||||
# like different objects and not updated but deleted and created again
|
||||
# (in the current version update is also delete+create, so it does not matter)
|
||||
for nsname_on_server in ${addon_nsnames_on_server}; do
|
||||
basensname_on_server=$(get-basename ${nsname_on_server} ${separator})
|
||||
suffix_on_server="$(get-suffix ${nsname_on_server} ${separator})"
|
||||
|
||||
log DB3 "Found existing addon ${nsname_on_server}, basename=${basensname_on_server}"
|
||||
|
||||
# check if the addon is present in the directory and decide
|
||||
# what to do with it
|
||||
# this is not optimal because we're reading the files over and over
|
||||
# again. But for small number of addons it doesn't matter so much.
|
||||
found=0
|
||||
for addon_path in ${addon_paths_in_files}; do
|
||||
nsname_from_file=$(get-object-nsname-from-file ${addon_path})
|
||||
if [[ "${nsname_from_file}" == "ERROR" ]]; then
|
||||
log INFO "Cannot read object name from ${addon_path}. Ignoring"
|
||||
continue
|
||||
else
|
||||
log DB2 "Found object name '${nsname_from_file}' in file ${addon_path}"
|
||||
fi
|
||||
suffix_from_file="$(get-suffix ${nsname_from_file} ${separator})"
|
||||
|
||||
log DB3 "matching: ${basensname_on_server}${suffix_from_file} == ${nsname_from_file}"
|
||||
if [[ "${basensname_on_server}${suffix_from_file}" == "${nsname_from_file}" ]]; then
|
||||
log DB3 "matched existing ${obj_type} ${nsname_on_server} to file ${addon_path}; suffix_on_server=${suffix_on_server}, suffix_from_file=${suffix_from_file}"
|
||||
found=1
|
||||
matched_files="${matched_files} ${addon_path}"
|
||||
if [[ "${suffix_on_server}" == "${suffix_from_file}" ]]; then
|
||||
nsnames_for_ignore="${nsnames_for_ignore} ${nsname_from_file}"
|
||||
else
|
||||
for_update="${for_update} ${nsname_on_server};${addon_path}"
|
||||
fi
|
||||
break
|
||||
fi
|
||||
done
|
||||
if [[ ${found} -eq 0 ]]; then
|
||||
log DB2 "No definition file found for replication controller ${nsname_on_server}. Scheduling for deletion"
|
||||
nsnames_for_delete="${nsnames_for_delete} ${nsname_on_server}"
|
||||
fi
|
||||
done
|
||||
|
||||
log DB3 "matched_files=${matched_files}"
|
||||
|
||||
|
||||
# note that if the addon file is invalid (or got removed after listing files
|
||||
# but before we managed to match it) it will not be matched to any
|
||||
# of the existing objects. So we will treat it as a new file
|
||||
# and try to create its object.
|
||||
for addon_path in ${addon_paths_in_files}; do
|
||||
echo ${matched_files} | grep "${addon_path}" >/dev/null
|
||||
if [[ $? -ne 0 ]]; then
|
||||
new_files="${new_files} ${addon_path}"
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
|
||||
|
||||
function reconcile-objects() {
|
||||
local -r addon_path=$1
|
||||
local -r obj_type=$2
|
||||
local -r separator=$3 # name separator
|
||||
match-objects ${addon_path} ${obj_type} ${separator}
|
||||
|
||||
log DBG "${obj_type}: nsnames_for_delete=${nsnames_for_delete}"
|
||||
log DBG "${obj_type}: for_update=${for_update}"
|
||||
log DBG "${obj_type}: nsnames_for_ignore=${nsnames_for_ignore}"
|
||||
log DBG "${obj_type}: new_files=${new_files}"
|
||||
|
||||
stop-objects "${obj_type}" "${nsnames_for_delete}"
|
||||
# wait for jobs below is a protection against changing the basename
|
||||
# of a replication controller without changing the selector.
|
||||
# If we don't wait, the new rc may be created before the old one is deleted
|
||||
# In such case the old one will wait for all its pods to be gone, but the pods
|
||||
# are created by the new replication controller.
|
||||
# passing --cascade=false could solve the problem, but we want
|
||||
# all orphan pods to be deleted.
|
||||
wait-for-jobs
|
||||
stopResult=$?
|
||||
|
||||
create-objects "${obj_type}" "${new_files}"
|
||||
update-objects "${obj_type}" "${for_update}"
|
||||
|
||||
local nsname
|
||||
for nsname in ${nsnames_for_ignore}; do
|
||||
log DB2 "The ${obj_type} ${nsname} is already up to date"
|
||||
done
|
||||
|
||||
wait-for-jobs
|
||||
createUpdateResult=$?
|
||||
|
||||
if [[ ${stopResult} -eq 0 ]] && [[ ${createUpdateResult} -eq 0 ]]; then
|
||||
return 0
|
||||
else
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
function update-addons() {
|
||||
local -r addon_path=$1
|
||||
# be careful, reconcile-objects uses global variables
|
||||
reconcile-objects ${addon_path} ReplicationController "-" &
|
||||
|
||||
# We don't expect service names to be versioned, so
|
||||
# we match entire name, ignoring version suffix.
|
||||
# That's why we pass an empty string as the version separator.
|
||||
# If the service description differs on disk, the service should be recreated.
|
||||
# This is not implemented in this version.
|
||||
reconcile-objects ${addon_path} Service "" &
|
||||
|
||||
wait-for-jobs
|
||||
if [[ $? -eq 0 ]]; then
|
||||
log INFO "== Kubernetes addon update completed successfully at $(date -Is) =="
|
||||
else
|
||||
log WRN "== Kubernetes addon update completed with errors at $(date -Is) =="
|
||||
fi
|
||||
}
|
||||
|
||||
# input parameters:
|
||||
# $1 input directory
|
||||
# $2 retry period in seconds - the script will retry api-server errors for approximately
|
||||
# this amount of time (it is not very precise), at intervals equal to $DELAY_AFTER_ERROR_SEC.
|
||||
#
|
||||
|
||||
if [[ $# -ne 2 ]]; then
|
||||
echo "Illegal number of parameters. Usage $0 addon-dir [retry-period]" 1>&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
NUM_TRIES=$(($2 / ${DELAY_AFTER_ERROR_SEC}))
|
||||
if [[ ${NUM_TRIES} -le 0 ]]; then
|
||||
NUM_TRIES=1
|
||||
fi
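# Worked example: kube-addons.sh calls this script with its
# ADDON_CHECK_INTERVAL_SEC (600 by default) as the retry period, so with
# DELAY_AFTER_ERROR_SEC=10 this gives NUM_TRIES = 600 / 10 = 60 attempts.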
|
||||
|
||||
addon_path=$1
|
||||
update-addons ${addon_path}
|
@ -1,202 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# The business logic for whether a given object should be created
|
||||
# was already enforced by salt, and /etc/kubernetes/addons is the
|
||||
# managed result of that. Start everything below that directory.
|
||||
KUBECTL=${KUBECTL_BIN:-/usr/local/bin/kubectl}
|
||||
|
||||
ADDON_CHECK_INTERVAL_SEC=${TEST_ADDON_CHECK_INTERVAL_SEC:-600}
|
||||
|
||||
SYSTEM_NAMESPACE=kube-system
|
||||
|
||||
token_dir=${TOKEN_DIR:-/srv/kubernetes}
|
||||
|
||||
function create-kubeconfig-secret() {
|
||||
local -r token=$1
|
||||
local -r username=$2
|
||||
local -r server=$3
|
||||
local -r safe_username=$(tr -s ':_' '--' <<< "${username}")
|
||||
|
||||
# Make a kubeconfig file with the token.
|
||||
if [[ ! -z "${CA_CERT:-}" ]]; then
|
||||
# If the CA cert is available, put it into the secret rather than using
|
||||
# insecure-skip-tls-verify.
|
||||
read -r -d '' kubeconfig <<EOF
|
||||
apiVersion: v1
|
||||
kind: Config
|
||||
users:
|
||||
- name: ${username}
|
||||
user:
|
||||
token: ${token}
|
||||
clusters:
|
||||
- name: local
|
||||
cluster:
|
||||
server: ${server}
|
||||
certificate-authority-data: ${CA_CERT}
|
||||
contexts:
|
||||
- context:
|
||||
cluster: local
|
||||
user: ${username}
|
||||
namespace: ${SYSTEM_NAMESPACE}
|
||||
name: service-account-context
|
||||
current-context: service-account-context
|
||||
EOF
|
||||
else
|
||||
read -r -d '' kubeconfig <<EOF
|
||||
apiVersion: v1
|
||||
kind: Config
|
||||
users:
|
||||
- name: ${username}
|
||||
user:
|
||||
token: ${token}
|
||||
clusters:
|
||||
- name: local
|
||||
cluster:
|
||||
server: ${server}
|
||||
insecure-skip-tls-verify: true
|
||||
contexts:
|
||||
- context:
|
||||
cluster: local
|
||||
user: ${username}
|
||||
namespace: ${SYSTEM_NAMESPACE}
|
||||
name: service-account-context
|
||||
current-context: service-account-context
|
||||
EOF
|
||||
fi
|
||||
|
||||
local -r kubeconfig_base64=$(echo "${kubeconfig}" | base64 -w0)
|
||||
read -r -d '' secretyaml <<EOF
|
||||
apiVersion: v1
|
||||
data:
|
||||
kubeconfig: ${kubeconfig_base64}
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: token-${safe_username}
|
||||
type: Opaque
|
||||
EOF
|
||||
create-resource-from-string "${secretyaml}" 100 10 "Secret-for-token-for-user-${username}" "${SYSTEM_NAMESPACE}" &
|
||||
}
|
||||
|
||||
# $1 filename of addon to start.
|
||||
# $2 count of tries to start the addon.
|
||||
# $3 delay in seconds between two consecutive tries
|
||||
# $4 namespace
|
||||
function start_addon() {
|
||||
local -r addon_filename=$1;
|
||||
local -r tries=$2;
|
||||
local -r delay=$3;
|
||||
local -r namespace=$4
|
||||
|
||||
create-resource-from-string "$(cat ${addon_filename})" "${tries}" "${delay}" "${addon_filename}" "${namespace}"
|
||||
}
|
||||
|
||||
# $1 string with json or yaml.
|
||||
# $2 count of tries to start the addon.
|
||||
# $3 delay in seconds between two consecutive tries
|
||||
# $4 name of this object to use when logging about it.
|
||||
# $5 namespace for this object
|
||||
function create-resource-from-string() {
|
||||
local -r config_string=$1;
|
||||
local tries=$2;
|
||||
local -r delay=$3;
|
||||
local -r config_name=$4;
|
||||
local -r namespace=$5;
|
||||
while [ ${tries} -gt 0 ]; do
|
||||
echo "${config_string}" | ${KUBECTL} --namespace="${namespace}" create -f - && \
|
||||
echo "== Successfully started ${config_name} in namespace ${namespace} at $(date -Is)" && \
|
||||
return 0;
|
||||
let tries=tries-1;
|
||||
echo "== Failed to start ${config_name} in namespace ${namespace} at $(date -Is). ${tries} tries remaining. =="
|
||||
sleep ${delay};
|
||||
done
|
||||
return 1;
|
||||
}
|
||||
|
||||
# The business logic for whether a given object should be created
|
||||
# was already enforced by salt, and /etc/kubernetes/addons is the
|
||||
# managed result of that. Start everything below that directory.
|
||||
echo "== Kubernetes addon manager started at $(date -Is) with ADDON_CHECK_INTERVAL_SEC=${ADDON_CHECK_INTERVAL_SEC} =="
|
||||
|
||||
# Load the kube-env, which has all the environment variables we care
|
||||
# about, in a flat yaml format.
|
||||
kube_env_yaml="/var/cache/kubernetes-install/kube_env.yaml"
|
||||
if [ ! -e "${kubelet_kubeconfig_file}" ]; then
|
||||
eval $(python -c '''
|
||||
import pipes,sys,yaml
|
||||
|
||||
for k,v in yaml.load(sys.stdin).iteritems():
|
||||
print "readonly {var}={value}".format(var = k, value = pipes.quote(str(v)))
|
||||
''' < "${kube_env_yaml}")
|
||||
fi
|
||||
|
||||
# Create the namespace that will be used to host the cluster-level add-ons.
|
||||
start_addon /etc/kubernetes/addons/namespace.yaml 100 10 "" &
|
||||
|
||||
# Wait for the default service account to be created in the kube-system namespace.
|
||||
token_found=""
|
||||
while [ -z "${token_found}" ]; do
|
||||
sleep .5
|
||||
token_found=$(${KUBECTL} get --namespace="${SYSTEM_NAMESPACE}" serviceaccount default -o template -t "{{with index .secrets 0}}{{.name}}{{end}}" || true)
|
||||
done
|
||||
|
||||
echo "== default service account in the ${SYSTEM_NAMESPACE} namespace has token ${token_found} =="
|
||||
|
||||
# Generate secrets for "internal service accounts".
|
||||
# TODO(etune): move to a completely yaml/object based
|
||||
# workflow so that service accounts can be created
|
||||
# at the same time as the services that use them.
|
||||
# NOTE: needs to run as root to read this file.
|
||||
# Read each line in the csv file of tokens.
|
||||
# Expect errors when the script is started again.
|
||||
# NOTE: secrets are created asynchronously, in background.
|
||||
while read line; do
|
||||
# Split each line into the token and username.
|
||||
IFS=',' read -a parts <<< "${line}"
|
||||
token=${parts[0]}
|
||||
username=${parts[1]}
|
||||
# DNS is special, since it's necessary for cluster bootstrapping.
|
||||
if [[ "${username}" == "system:dns" ]] && [[ ! -z "${KUBERNETES_MASTER_NAME:-}" ]]; then
|
||||
create-kubeconfig-secret "${token}" "${username}" "https://${KUBERNETES_MASTER_NAME}"
|
||||
else
|
||||
# Set the server to https://kubernetes. Pods/components that
|
||||
# do not have DNS available will have to override the server.
|
||||
create-kubeconfig-secret "${token}" "${username}" "https://kubernetes.default"
|
||||
fi
|
||||
done < "${token_dir}/known_tokens.csv"
|
||||
|
||||
# Create admission_control objects if defined before any other addon services. If the limits
|
||||
# are defined in a namespace other than default, we should still create the limits for the
|
||||
# default namespace.
|
||||
for obj in $(find /etc/kubernetes/admission-controls \( -name \*.yaml -o -name \*.json \)); do
|
||||
start_addon ${obj} 100 10 default &
|
||||
echo "++ obj ${obj} is created ++"
|
||||
done
|
||||
|
||||
# Check if the configuration has changed recently - in case the user
|
||||
# created/updated/deleted the files on the master.
|
||||
while true; do
|
||||
start_sec=$(date +"%s")
|
||||
#kube-addon-update.sh must be deployed in the same directory as this file
|
||||
`dirname $0`/kube-addon-update.sh /etc/kubernetes/addons ${ADDON_CHECK_INTERVAL_SEC}
|
||||
end_sec=$(date +"%s")
|
||||
len_sec=$((${end_sec}-${start_sec}))
|
||||
# subtract the time passed from the sleep time
|
||||
if [[ ${len_sec} -lt ${ADDON_CHECK_INTERVAL_SEC} ]]; then
|
||||
sleep_time=$((${ADDON_CHECK_INTERVAL_SEC}-${len_sec}))
|
||||
sleep ${sleep_time}
|
||||
fi
|
||||
done
|
@@ -1,8 +0,0 @@
---
- name: reload and restart kube-addons
  command: systemctl --system daemon-reload
  notify:
    - restart kube-addons

- name: restart kube-addons
  service: name=kube-addons state=restarted
@@ -1,3 +0,0 @@
---
dependencies:
  - { role: master }
@@ -1,14 +0,0 @@
---
- name: LOGGING | Ensure {{ kube_addons_dir }}/cluster-logging dir exists
  file: path={{ kube_addons_dir }}/cluster-logging state=directory

- name: LOGGING | Download logging files from Kubernetes repo
  get_url:
    url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/addons/fluentd-elasticsearch/{{ item }}
    dest="{{ kube_addons_dir }}/cluster-logging/"
    force=yes
  with_items:
    - es-controller.yaml
    - es-service.yaml
    - kibana-controller.yaml
    - kibana-service.yaml
@@ -1,15 +0,0 @@
---
- name: MONITORING | Ensure {{ kube_addons_dir }}/cluster-monitoring dir exists
  file: path={{ kube_addons_dir }}/cluster-monitoring state=directory

- name: MONITORING | Download monitoring files from Kubernetes repo
  get_url:
    url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/addons/cluster-monitoring/influxdb/{{ item }}
    dest="{{ kube_addons_dir }}/cluster-monitoring/"
    force=yes
  with_items:
    - grafana-service.yaml
    - heapster-controller.yaml
    - heapster-service.yaml
    - influxdb-grafana-controller.yaml
    - influxdb-service.yaml
@@ -1,55 +0,0 @@
---
- name: DNS | Ensure {{ kube_addons_dir }}/dns dir exists
  file: path={{ kube_addons_dir }}/dns state=directory

- name: DNS | Ensure local dns addon dir exists
  local_action: file
    path={{ local_temp_addon_dir }}/dns
    state=directory
  sudo: no

- name: DNS | Download skydns-rc.yaml file from Kubernetes repo
  local_action: get_url
    url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/addons/dns/skydns-rc.yaml.in
    dest="{{ local_temp_addon_dir }}/dns/skydns-rc.yaml.j2"
    force=yes
  sudo: no

- name: DNS | Convert pillar vars to ansible vars for skydns-rc.yaml
  local_action: replace
    dest="{{ local_temp_addon_dir }}/dns/skydns-rc.yaml.j2"
    regexp="pillar\[\'(\w*)\'\]"
    replace="\1"
  sudo: no

- name: DNS | Install skydns-rc template from converted saltfile
  template:
  args:
    src: "{{ local_temp_addon_dir }}/dns/skydns-rc.yaml.j2"
    dest: "{{ kube_addons_dir }}/dns/skydns-rc.yaml"
    mode: 0755
    owner: root
    group: root

- name: DNS | Download skydns-svc.yaml file from Kubernetes repo
  local_action: get_url
    url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/addons/dns/skydns-svc.yaml.in
    dest="{{ local_temp_addon_dir }}/dns/skydns-svc.yaml.j2"
    force=yes
  sudo: no

- name: DNS | Convert pillar vars to ansible vars for skydns-svc.yaml
  local_action: replace
    dest="{{ local_temp_addon_dir }}/dns/skydns-svc.yaml.j2"
    regexp="pillar\[\'(\w*)\'\]"
    replace="\1"
  sudo: no

- name: DNS | Install skydns-svc template from converted saltfile
  template:
  args:
    src: "{{ local_temp_addon_dir }}/dns/skydns-svc.yaml.j2"
    dest: "{{ kube_addons_dir }}/dns/skydns-svc.yaml"
    mode: 0755
    owner: root
    group: root
@@ -1,14 +0,0 @@
- name: Set pyyaml package name
  set_fact:
    pyyaml_name: python-yaml

- name: Overwrite pyyaml package name for non-Debian
  set_fact:
    pyyaml_name: PyYAML
  when: ansible_distribution != "Debian"

- name: Install PyYAML
  action: "{{ ansible_pkg_mgr }}"
  args:
    name: "{{ pyyaml_name }}"
    state: latest
@@ -1,12 +0,0 @@
---
- name: KUBE-UI | Ensure {{ kube_addons_dir }}/kube-ui dir exists
  file: path={{ kube_addons_dir }}/kube-ui state=directory

- name: KUBE-UI | Download kube-ui files from Kubernetes repo
  get_url:
    url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/addons/kube-ui/{{ item }}
    dest="{{ kube_addons_dir }}/kube-ui/"
    force=yes
  with_items:
    - kube-ui-rc.yaml
    - kube-ui-svc.yaml
@@ -1,70 +0,0 @@
---
- name: Ensure addons dir exists
  file: path={{ kube_addons_dir }} state=directory

- include: generic-install.yml
  when: not is_atomic

- name: Ensure local addon dir exists
  local_action: file
    path={{ local_temp_addon_dir }}
    state=directory
  sudo: no

- name: Make sure the system services namespace exists
  get_url:
    url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/saltbase/salt/kube-addons/namespace.yaml
    dest="{{ kube_config_dir }}/addons/"
    force=yes

- include: dns.yml
  when: dns_setup

- include: cluster-monitoring.yml
  when: cluster_monitoring

- include: cluster-logging.yml
  when: cluster_logging

- include: kube-ui.yml
  when: kube-ui

#- name: Get kube-addons script from Kubernetes
#  get_url:
#    url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/saltbase/salt/kube-addons/kube-addons.sh
#    dest={{ kube_script_dir }}/kube-addons.sh mode=0755
#    force=yes

#- name: Get kube-addon-update script from Kubernetes
#  get_url:
#    url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/saltbase/salt/kube-addons/kube-addon-update.sh
#    dest={{ kube_script_dir }}/kube-addon-update.sh mode=0755
#    force=yes

- name: HACK | copy local kube-addons.sh
  copy: src=kube-addons.sh dest={{ kube_script_dir }}/kube-addons.sh mode=0755

- name: HACK | copy local kube-addon-update.sh
  copy: src=kube-addon-update.sh dest={{ kube_script_dir }}/kube-addon-update.sh mode=0755

- name: Run kube-gen-token script to create {{ kube_token_dir }}/known_tokens.csv
  command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item }}"
  environment:
    TOKEN_DIR: "{{ kube_token_dir }}"
  with_items:
    - "system:dns"
    - "system:monitoring"
    - "system:logging"
  register: gentoken
  changed_when: "'Added' in gentoken.stdout"
  notify:
    - restart apiserver
    - restart kube-addons

- name: Install kube-addons service
  template: src=kube-addons.service.j2 dest=/etc/systemd/system/kube-addons.service
  notify:
    - reload and restart kube-addons

- name: Enable and start kube addons
  service: name=kube-addons.service enabled=yes state=started
@@ -1,12 +0,0 @@
[Unit]
Description=Kubernetes Addon Object Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
Environment="TOKEN_DIR={{ kube_token_dir }}"
Environment="KUBECTL_BIN=/usr/bin/kubectl"
Environment="KUBERNETES_MASTER_NAME={{ groups['masters'][0] }}"
ExecStart={{ kube_script_dir }}/kube-addons.sh

[Install]
WantedBy=multi-user.target
@ -1,42 +0,0 @@
|
||||
# The port that the Kubernetes apiserver component listens on.
|
||||
kube_master_api_port: 443
|
||||
|
||||
# This directory is where all the additional scripts go
|
||||
# that Kubernetes normally puts in /srv/kubernetes.
|
||||
# This puts them in a sane location
|
||||
kube_script_dir: /usr/libexec/kubernetes
|
||||
|
||||
# This directory is where all the additional config stuff goes
|
||||
# that Kubernetes normally puts in /srv/kubernetes.
|
||||
# This puts them in a sane location.
|
||||
# Editing this value will almost surely break something. Don't
|
||||
# change it. Things like the systemd scripts are hard coded to
|
||||
# look in here. Don't do it.
|
||||
kube_config_dir: /etc/kubernetes
|
||||
|
||||
# This is where all the cert scripts and certs will be located
|
||||
kube_cert_dir: "{{ kube_config_dir }}/certs"
|
||||
|
||||
# This is where all of the bearer tokens will be stored
|
||||
kube_token_dir: "{{ kube_config_dir }}/tokens"
|
||||
|
||||
# This is where you can drop yaml/json files and the kubelet will run those
|
||||
# pods on startup
|
||||
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
|
||||
|
||||
# This is the group that the cert creation scripts chgrp the
|
||||
# cert files to. Not really changeable...
|
||||
kube_cert_group: kube-cert
|
||||
|
||||
# Internal DNS domain name.
|
||||
# This domain must not be used in your network. Services will be discoverable
|
||||
# under <service-name>.<namespace>.<domainname>, e.g.
|
||||
# myservice.default.svc.cluster.local
|
||||
dns_domain: "{{ cluster_name }}"
|
||||
|
||||
# IP address of the DNS server.
|
||||
# Kubernetes will create a pod with several containers, serving as the DNS
|
||||
# server and expose it under this IP address. The IP address must be from
|
||||
# the range specified as kube_service_addresses. This magic will actually
|
||||
# pick the 10th ip address in the kube_service_addresses range and use that.
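# For example, a kube_service_addresses value of 10.254.0.0/16 would make this resolve to 10.254.0.10.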
|
||||
dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(10)|ipaddr('address') }}"
|
@ -1,31 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
token_dir=${TOKEN_DIR:-/var/srv/kubernetes}
|
||||
token_file="${token_dir}/known_tokens.csv"
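# Each line of known_tokens.csv has the form: <token>,<account>,<account>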
|
||||
|
||||
create_accounts=($@)
|
||||
|
||||
touch "${token_file}"
|
||||
for account in "${create_accounts[@]}"; do
|
||||
if grep ",${account}," "${token_file}" ; then
|
||||
continue
|
||||
fi
|
||||
token=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
|
||||
echo "${token},${account},${account}" >> "${token_file}"
|
||||
echo "${token}" > "${token_dir}/${account}.token"
|
||||
echo "Added ${account}"
|
||||
done
|
@ -1,115 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
# Caller should set in the env:
|
||||
# MASTER_IP - this may be an ip or things like "_use_gce_external_ip_"
|
||||
# MASTER_NAME - DNS name for the master
|
||||
# DNS_DOMAIN - which will be passed to minions in --cluster-domain
|
||||
# SERVICE_CLUSTER_IP_RANGE - where all service IPs are allocated
|
||||
|
||||
# Also the following will be respected
|
||||
# CERT_DIR - where to place the finished certs
|
||||
# CERT_GROUP - who the group owner of the cert files should be
|
||||
|
||||
cert_ip="${MASTER_IP:="${1}"}"
|
||||
master_name="${MASTER_NAME:="kubernetes"}"
|
||||
service_range="${SERVICE_CLUSTER_IP_RANGE:="10.0.0.0/16"}"
|
||||
dns_domain="${DNS_DOMAIN:="cluster.local"}"
|
||||
cert_dir="${CERT_DIR:-"/srv/kubernetes"}"
|
||||
cert_group="${CERT_GROUP:="kube-cert"}"
|
||||
|
||||
# The following certificate pairs are created:
|
||||
#
|
||||
# - ca (the cluster's certificate authority)
|
||||
# - server
|
||||
# - kubelet
|
||||
# - kubecfg (for kubectl)
|
||||
#
|
||||
# TODO(roberthbailey): Replace easyrsa with a simple Go program to generate
|
||||
# the certs that we need.
|
||||
|
||||
# TODO: Add support for discovery on other providers?
|
||||
if [ "$cert_ip" == "_use_gce_external_ip_" ]; then
|
||||
cert_ip=$(curl -s -H Metadata-Flavor:Google http://metadata.google.internal./computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip)
|
||||
fi
|
||||
|
||||
if [ "$cert_ip" == "_use_aws_external_ip_" ]; then
|
||||
cert_ip=$(curl -s http://169.254.169.254/latest/meta-data/public-ipv4)
|
||||
fi
|
||||
|
||||
if [ "$cert_ip" == "_use_azure_dns_name_" ]; then
|
||||
cert_ip=$(uname -n | awk -F. '{ print $2 }').cloudapp.net
|
||||
fi
|
||||
|
||||
tmpdir=$(mktemp -d --tmpdir kubernetes_cacert.XXXXXX)
|
||||
trap 'rm -rf "${tmpdir}"' EXIT
|
||||
cd "${tmpdir}"
|
||||
|
||||
# TODO: For now, this is a patched tool that makes subject-alt-name work, when
|
||||
# the fix is upstream move back to the upstream easyrsa. This is cached in GCS
|
||||
# but is originally taken from:
|
||||
# https://github.com/brendandburns/easy-rsa/archive/master.tar.gz
|
||||
#
|
||||
# To update, do the following:
|
||||
# curl -o easy-rsa.tar.gz https://github.com/brendandburns/easy-rsa/archive/master.tar.gz
|
||||
# gsutil cp easy-rsa.tar.gz gs://kubernetes-release/easy-rsa/easy-rsa.tar.gz
|
||||
# gsutil acl ch -R -g all:R gs://kubernetes-release/easy-rsa/easy-rsa.tar.gz
|
||||
#
|
||||
# Due to GCS caching of public objects, it may take time for this to be widely
|
||||
# distributed.
|
||||
|
||||
# Calculate the first ip address in the service range
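# (e.g. the default service range of 10.0.0.0/16 yields a service_ip of 10.0.0.1)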
|
||||
octets=($(echo "${service_range}" | sed -e 's|/.*||' -e 's/\./ /g'))
|
||||
((octets[3]+=1))
|
||||
service_ip=$(echo "${octets[*]}" | sed 's/ /./g')
|
||||
|
||||
# Determine appropriate subject alt names
|
||||
sans="IP:${cert_ip},IP:${service_ip},DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.${dns_domain},DNS:${master_name}"
|
||||
|
||||
curl -L -O https://storage.googleapis.com/kubernetes-release/easy-rsa/easy-rsa.tar.gz > /dev/null 2>&1
|
||||
tar xzf easy-rsa.tar.gz > /dev/null
|
||||
cd easy-rsa-master/easyrsa3
|
||||
|
||||
(./easyrsa init-pki > /dev/null 2>&1
|
||||
./easyrsa --batch "--req-cn=${cert_ip}@$(date +%s)" build-ca nopass > /dev/null 2>&1
|
||||
./easyrsa --subject-alt-name="${sans}" build-server-full "${master_name}" nopass > /dev/null 2>&1
|
||||
./easyrsa build-client-full kubelet nopass > /dev/null 2>&1
|
||||
./easyrsa build-client-full kubecfg nopass > /dev/null 2>&1) || {
|
||||
# If there was an error in the subshell, just die.
|
||||
# TODO(roberthbailey): add better error handling here
|
||||
echo "=== Failed to generate certificates: Aborting ==="
|
||||
exit 2
|
||||
}
|
||||
|
||||
mkdir -p "$cert_dir"
|
||||
|
||||
cp -p pki/ca.crt "${cert_dir}/ca.crt"
|
||||
cp -p "pki/issued/${master_name}.crt" "${cert_dir}/server.crt" > /dev/null 2>&1
|
||||
cp -p "pki/private/${master_name}.key" "${cert_dir}/server.key" > /dev/null 2>&1
|
||||
cp -p pki/issued/kubecfg.crt "${cert_dir}/kubecfg.crt"
|
||||
cp -p pki/private/kubecfg.key "${cert_dir}/kubecfg.key"
|
||||
cp -p pki/issued/kubelet.crt "${cert_dir}/kubelet.crt"
|
||||
cp -p pki/private/kubelet.key "${cert_dir}/kubelet.key"
|
||||
|
||||
CERTS=("ca.crt" "server.key" "server.crt" "kubelet.key" "kubelet.crt" "kubecfg.key" "kubecfg.crt")
|
||||
for cert in "${CERTS[@]}"; do
|
||||
chgrp "${cert_group}" "${cert_dir}/${cert}"
|
||||
chmod 660 "${cert_dir}/${cert}"
|
||||
done
|
@ -1,4 +0,0 @@
|
||||
---
|
||||
- name: Fedora | Remove docker window manager on F20
|
||||
yum: pkg=docker state=absent
|
||||
when: ansible_distribution_major_version == "20"
|
@ -1,50 +0,0 @@
|
||||
---
|
||||
- name: Install openssl for easy-rsa stuff
|
||||
action: "{{ ansible_pkg_mgr }}"
|
||||
args:
|
||||
name: "{{ item }}"
|
||||
state: latest
|
||||
with_items:
|
||||
- openssl
|
||||
- curl
|
||||
when: not is_atomic
|
||||
|
||||
#- name: Get create ca cert script from Kubernetes
|
||||
# get_url:
|
||||
# url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/saltbase/salt/generate-cert/make-ca-cert.sh
|
||||
# dest={{ kube_script_dir }}/make-ca-cert.sh mode=0500
|
||||
# force=yes
|
||||
|
||||
- name: HACK | overwrite make-ca-cert.sh from local copy
|
||||
copy:
|
||||
src=make-ca-cert.sh
|
||||
dest={{ kube_script_dir }}
|
||||
mode=0500
|
||||
changed_when: false
|
||||
|
||||
# FIXME This only generates a cert for one master...
|
||||
- name: Run create cert script on master
|
||||
command:
|
||||
"{{ kube_script_dir }}/make-ca-cert.sh"
|
||||
args:
|
||||
creates: "{{ kube_cert_dir }}/server.crt"
|
||||
environment:
|
||||
MASTER_IP: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
|
||||
MASTER_NAME: "{{ inventory_hostname }}"
|
||||
DNS_DOMAIN: "{{ dns_domain }}"
|
||||
SERVICE_CLUSTER_IP_RANGE: "{{ kube_service_addresses }}"
|
||||
CERT_DIR: "{{ kube_cert_dir }}"
|
||||
CERT_GROUP: "{{ kube_cert_group }}"
|
||||
|
||||
- name: Verify certificate permissions
|
||||
file:
|
||||
path={{ item }}
|
||||
group={{ kube_cert_group }}
|
||||
owner=kube
|
||||
mode=0440
|
||||
with_items:
|
||||
- "{{ kube_cert_dir }}/ca.crt"
|
||||
- "{{ kube_cert_dir }}/server.crt"
|
||||
- "{{ kube_cert_dir }}/server.key"
|
||||
- "{{ kube_cert_dir }}/kubecfg.crt"
|
||||
- "{{ kube_cert_dir }}/kubecfg.key"
|
@ -1,30 +0,0 @@
|
||||
---
|
||||
- name: Copy the token gen script
|
||||
copy:
|
||||
src=kube-gen-token.sh
|
||||
dest={{ kube_script_dir }}
|
||||
mode=u+x
|
||||
|
||||
- name: Generate tokens for master components
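# kube-gen-token.sh writes one <account>.token file per item, e.g. system:controller_manager-<master hostname>.token under {{ kube_token_dir }}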
|
||||
command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
|
||||
environment:
|
||||
TOKEN_DIR: "{{ kube_token_dir }}"
|
||||
with_nested:
|
||||
- [ "system:controller_manager", "system:scheduler", "system:kubectl" ]
|
||||
- "{{ groups['masters'] }}"
|
||||
register: gentoken
|
||||
changed_when: "'Added' in gentoken.stdout"
|
||||
notify:
|
||||
- restart daemons
|
||||
|
||||
- name: Generate tokens for node components
|
||||
command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
|
||||
environment:
|
||||
TOKEN_DIR: "{{ kube_token_dir }}"
|
||||
with_nested:
|
||||
- [ 'system:kubelet', 'system:proxy' ]
|
||||
- "{{ groups['nodes'] }}"
|
||||
register: gentoken
|
||||
changed_when: "'Added' in gentoken.stdout"
|
||||
notify:
|
||||
- restart daemons
|
@ -1,23 +0,0 @@
|
||||
---
|
||||
- include: fedora.yml
|
||||
when: ansible_distribution == "Fedora"
|
||||
|
||||
- name: Update {{ kube_script_dir }} if this is atomic
|
||||
set_fact:
|
||||
kube_script_dir: "/usr/local/libexec/kubernetes"
|
||||
when: is_atomic and kube_script_dir == "/usr/libexec/kubernetes"
|
||||
|
||||
- name: Create kubernetes config directory
|
||||
file: path={{ kube_config_dir }} state=directory
|
||||
|
||||
- name: Create kubernetes script directory
|
||||
file: path={{ kube_script_dir }} state=directory
|
||||
|
||||
- name: write the global config file
|
||||
template: src=config.j2 dest={{ kube_config_dir }}/config
|
||||
notify:
|
||||
- restart daemons
|
||||
|
||||
- include: secrets.yml
|
||||
tags:
|
||||
secrets
|
@ -1,48 +0,0 @@
|
||||
---
|
||||
- name: Create system kube-cert groups
|
||||
group: name={{ kube_cert_group }} state=present system=yes
|
||||
|
||||
- name: Create system kube user
|
||||
user:
|
||||
name=kube
|
||||
comment="Kubernetes user"
|
||||
shell=/sbin/nologin
|
||||
state=present
|
||||
system=yes
|
||||
groups={{ kube_cert_group }}
|
||||
|
||||
- name: make sure the certificate directory exists
|
||||
file:
|
||||
path={{ kube_cert_dir }}
|
||||
state=directory
|
||||
mode=o-rwx
|
||||
group={{ kube_cert_group }}
|
||||
|
||||
- name: make sure the tokens directory exists
|
||||
file:
|
||||
path={{ kube_token_dir }}
|
||||
state=directory
|
||||
mode=o-rwx
|
||||
group={{ kube_cert_group }}
|
||||
|
||||
- include: gen_certs.yml
|
||||
when: inventory_hostname == groups['masters'][0]
|
||||
|
||||
- name: Read back the CA certificate
|
||||
slurp:
|
||||
src: "{{ kube_cert_dir }}/ca.crt"
|
||||
register: ca_cert
|
||||
run_once: true
|
||||
delegate_to: "{{ groups['masters'][0] }}"
|
||||
|
||||
- name: Register the CA certificate as a fact so it can be used later
|
||||
set_fact:
|
||||
kube_ca_cert: "{{ ca_cert.content|b64decode }}"
|
||||
|
||||
- name: Place CA certificate everywhere
|
||||
copy: content="{{ kube_ca_cert }}" dest="{{ kube_cert_dir }}/ca.crt"
|
||||
notify:
|
||||
- restart daemons
|
||||
|
||||
- include: gen_tokens.yml
|
||||
when: inventory_hostname == groups['masters'][0]
|
@ -1,23 +0,0 @@
|
||||
###
|
||||
# kubernetes system config
|
||||
#
|
||||
# The following values are used to configure various aspects of all
|
||||
# kubernetes services, including
|
||||
#
|
||||
# kube-apiserver.service
|
||||
# kube-controller-manager.service
|
||||
# kube-scheduler.service
|
||||
# kubelet.service
|
||||
# kube-proxy.service
|
||||
|
||||
# logging to stderr means we get it in the systemd journal
|
||||
KUBE_LOGTOSTDERR="--logtostderr=true"
|
||||
|
||||
# journal message level, 0 is debug
|
||||
KUBE_LOG_LEVEL="--v=0"
|
||||
|
||||
# Should this cluster be allowed to run privileged docker containers
|
||||
KUBE_ALLOW_PRIV="--allow-privileged=true"
|
||||
|
||||
# How the replication controller, scheduler, and proxy find the apiserver
|
||||
KUBE_MASTER="--master=https://{{ groups['masters'][0] }}:{{ kube_master_api_port }}"
|
@ -1,24 +0,0 @@
|
||||
---
|
||||
- name: reload systemd
|
||||
command: systemctl --system daemon-reload
|
||||
notify:
|
||||
- restart daemons
|
||||
|
||||
- name: restart daemons
|
||||
command: /bin/true
|
||||
notify:
|
||||
- restart apiserver
|
||||
- restart controller-manager
|
||||
- restart scheduler
|
||||
|
||||
- name: restart apiserver
|
||||
service: name=kube-apiserver state=restarted
|
||||
|
||||
- name: restart controller-manager
|
||||
service: name=kube-controller-manager state=restarted
|
||||
|
||||
- name: restart scheduler
|
||||
service: name=kube-scheduler state=restarted
|
||||
|
||||
- name: restart iptables
|
||||
service: name=iptables state=restarted
|
@ -1,4 +0,0 @@
|
||||
---
|
||||
dependencies:
|
||||
- { role: common }
|
||||
- { role: kubernetes }
|
@ -1,10 +0,0 @@
|
||||
---
|
||||
- name: Open firewalld port for apiserver
|
||||
firewalld: port={{ kube_master_api_port }}/tcp permanent=false state=enabled
|
||||
# in case this is also a node with firewalld turned off
|
||||
ignore_errors: yes
|
||||
|
||||
- name: Save firewalld port for apiserver
|
||||
firewalld: port={{ kube_master_api_port }}/tcp permanent=true state=enabled
|
||||
# in case this is also a node with firewalld turned off
|
||||
ignore_errors: yes
|
@ -1,15 +0,0 @@
|
||||
---
|
||||
- name: Get iptables rules
|
||||
command: iptables -L
|
||||
register: iptablesrules
|
||||
always_run: yes
|
||||
|
||||
- name: Open apiserver port with iptables
|
||||
command: /sbin/iptables -I INPUT 1 -p tcp --dport {{ kube_master_api_port }} -j ACCEPT -m comment --comment "kube-apiserver"
|
||||
when: "'kube-apiserver' not in iptablesrules.stdout"
|
||||
notify:
|
||||
- restart iptables
|
||||
|
||||
- name: Save iptables rules
|
||||
command: service iptables save
|
||||
when: "'kube-apiserver' not in iptablesrules.stdout"
|
@ -1,30 +0,0 @@
|
||||
---
|
||||
- name: Copy master binaries
|
||||
copy:
|
||||
src: ../../_output/local/go/bin/{{ item }}
|
||||
dest: /usr/bin/
|
||||
mode: 0755
|
||||
with_items:
|
||||
- kube-apiserver
|
||||
- kube-scheduler
|
||||
- kube-controller-manager
|
||||
- kubectl
|
||||
notify: restart daemons
|
||||
|
||||
- name: Copy master service files
|
||||
copy:
|
||||
src: ../init/systemd/{{ item }}
|
||||
dest: /etc/systemd/system/
|
||||
mode: 0644
|
||||
with_items:
|
||||
- kube-apiserver.service
|
||||
- kube-scheduler.service
|
||||
- kube-controller-manager.service
|
||||
notify: reload systemd
|
||||
|
||||
- name: Copy systemd tmpfile for apiserver
|
||||
copy:
|
||||
src: ../init/systemd/tmpfiles.d/
|
||||
dest: /etc/tmpfiles.d/
|
||||
mode: 0644
|
||||
notify: reload systemd
|
@ -1,77 +0,0 @@
|
||||
---
|
||||
- include: packageManagerInstall.yml
|
||||
when: source_type == "packageManager"
|
||||
tags:
|
||||
- binary-update
|
||||
|
||||
- include: localBuildInstall.yml
|
||||
when: source_type == "localBuild"
|
||||
tags:
|
||||
- binary-update
|
||||
|
||||
- name: write the config file for the api server
|
||||
template: src=apiserver.j2 dest={{ kube_config_dir }}/apiserver
|
||||
notify:
|
||||
- restart apiserver
|
||||
|
||||
- name: Ensure that a token auth file exists (addons may populate it)
|
||||
file: path={{ kube_token_dir }}/known_tokens.csv state=touch
|
||||
changed_when: false
|
||||
|
||||
- name: add cap_net_bind_service to kube-apiserver
|
||||
capabilities: path=/usr/bin/kube-apiserver capability=cap_net_bind_service=ep state=present
|
||||
when: not is_atomic
|
||||
|
||||
- name: Enable apiserver
|
||||
service: name=kube-apiserver enabled=yes state=started
|
||||
|
||||
- name: Get the master token values
|
||||
slurp:
|
||||
src: "{{ kube_token_dir }}/{{ item }}-{{ inventory_hostname }}.token"
|
||||
with_items:
|
||||
- "system:controller_manager"
|
||||
- "system:scheduler"
|
||||
- "system:kubectl"
|
||||
register: tokens
|
||||
delegate_to: "{{ groups['masters'][0] }}"
|
||||
|
||||
- name: Set token facts
|
||||
set_fact:
|
||||
controller_manager_token: "{{ tokens.results[0].content|b64decode }}"
|
||||
scheduler_token: "{{ tokens.results[1].content|b64decode }}"
|
||||
kubectl_token: "{{ tokens.results[2].content|b64decode }}"
|
||||
|
||||
- name: write the config file for the controller-manager
|
||||
template: src=controller-manager.j2 dest={{ kube_config_dir }}/controller-manager
|
||||
notify:
|
||||
- restart controller-manager
|
||||
|
||||
- name: write the kubecfg (auth) file for controller-manager
|
||||
template: src=controller-manager.kubeconfig.j2 dest={{ kube_config_dir }}/controller-manager.kubeconfig
|
||||
notify:
|
||||
- restart controller-manager
|
||||
|
||||
- name: Enable controller-manager
|
||||
service: name=kube-controller-manager enabled=yes state=started
|
||||
|
||||
- name: write the config file for the scheduler
|
||||
template: src=scheduler.j2 dest={{ kube_config_dir }}/scheduler
|
||||
notify:
|
||||
- restart scheduler
|
||||
|
||||
- name: write the kubecfg (auth) file for scheduler
|
||||
template: src=scheduler.kubeconfig.j2 dest={{ kube_config_dir }}/scheduler.kubeconfig
|
||||
notify:
|
||||
- restart scheduler
|
||||
|
||||
- name: Enable scheduler
|
||||
service: name=kube-scheduler enabled=yes state=started
|
||||
|
||||
- name: write the kubecfg (auth) file for kubectl
|
||||
template: src=kubectl.kubeconfig.j2 dest={{ kube_config_dir }}/kubectl.kubeconfig
|
||||
|
||||
- include: firewalld.yml
|
||||
when: has_firewalld
|
||||
|
||||
- include: iptables.yml
|
||||
when: not has_firewalld and has_iptables
|
@ -1,28 +0,0 @@
|
||||
---
|
||||
- name: Set fact for Atomic Host package install
|
||||
set_fact:
|
||||
did_install: true
|
||||
when: is_atomic
|
||||
|
||||
- include: pkgMgrInstallers/centos-install.yml
|
||||
when: ansible_distribution == "CentOS"
|
||||
|
||||
- name: Set fact saying we did CentOS package install
|
||||
set_fact:
|
||||
did_install: true
|
||||
when: ansible_distribution == "CentOS"
|
||||
|
||||
|
||||
|
||||
- include: pkgMgrInstallers/fedora-install.yml
|
||||
when: ansible_distribution == "Fedora" and ansible_distribution_release != "Rawhide"
|
||||
|
||||
- name: Set fact saying we did Fedora package install
|
||||
set_fact:
|
||||
did_install: true
|
||||
when: ansible_distribution == "Fedora" and ansible_distribution_release != "Rawhide"
|
||||
|
||||
|
||||
|
||||
- include: pkgMgrInstallers/generic-install.yml
|
||||
when: not did_install
|
@ -1,8 +0,0 @@
|
||||
---
|
||||
- name: CentOS | Install kubernetes master
|
||||
yum:
|
||||
pkg=kubernetes-master
|
||||
state=latest
|
||||
enablerepo=virt7-docker-common-candidate
|
||||
notify:
|
||||
- restart daemons
|
@ -1,8 +0,0 @@
|
||||
- name: Fedora | Install kubernetes master
|
||||
action: "{{ ansible_pkg_mgr }}"
|
||||
args:
|
||||
name: kubernetes-master
|
||||
state: latest
|
||||
enablerepo: "updates-testing"
|
||||
notify:
|
||||
- restart daemons
|
@ -1,7 +0,0 @@
|
||||
- name: Generic | Install kubernetes master
|
||||
action: "{{ ansible_pkg_mgr }}"
|
||||
args:
|
||||
name: kubernetes-master
|
||||
state: latest
|
||||
notify:
|
||||
- restart daemons
|
@ -1,26 +0,0 @@
|
||||
###
|
||||
# kubernetes system config
|
||||
#
|
||||
# The following values are used to configure the kube-apiserver
|
||||
#
|
||||
|
||||
# The address on the local server to listen to.
|
||||
KUBE_API_ADDRESS="--insecure-bind-address=127.0.0.1"
|
||||
|
||||
# The port on the local server to listen on.
|
||||
KUBE_API_PORT="--secure-port={{ kube_master_api_port }}"
|
||||
|
||||
# Port nodes listen on
|
||||
# KUBELET_PORT="--kubelet-port=10250"
|
||||
|
||||
# Address range to use for services
|
||||
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range={{ kube_service_addresses }}"
|
||||
|
||||
# Location of the etcd cluster
|
||||
KUBE_ETCD_SERVERS="--etcd-servers={% for node in groups['etcd'] %}http://{{ node }}:2379{% if not loop.last %},{% endif %}{% endfor %}"
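# e.g. with two etcd hosts named etcd1 and etcd2 this renders as --etcd-servers=http://etcd1:2379,http://etcd2:2379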
|
||||
|
||||
# default admission control policies
|
||||
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
|
||||
|
||||
# Add your own!
|
||||
KUBE_API_ARGS="--tls-cert-file={{ kube_cert_dir }}/server.crt --tls-private-key-file={{ kube_cert_dir }}/server.key --client-ca-file={{ kube_cert_dir }}/ca.crt --token-auth-file={{ kube_token_dir }}/known_tokens.csv --service-account-key-file={{ kube_cert_dir }}/server.crt"
|
@ -1,7 +0,0 @@
|
||||
###
|
||||
# The following values are used to configure the kubernetes controller-manager
|
||||
|
||||
# defaults from config and apiserver should be adequate
|
||||
|
||||
# Add your own!
|
||||
KUBE_CONTROLLER_MANAGER_ARGS="--kubeconfig={{ kube_config_dir }}/controller-manager.kubeconfig --service-account-private-key-file={{ kube_cert_dir }}/server.key --root-ca-file={{ kube_cert_dir }}/ca.crt"
|
@ -1,18 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Config
|
||||
current-context: controller-manager-to-{{ cluster_name }}
|
||||
preferences: {}
|
||||
clusters:
|
||||
- cluster:
|
||||
certificate-authority: {{ kube_cert_dir }}/ca.crt
|
||||
server: https://{{ groups['masters'][0] }}:{{ kube_master_api_port }}
|
||||
name: {{ cluster_name }}
|
||||
contexts:
|
||||
- context:
|
||||
cluster: {{ cluster_name }}
|
||||
user: controller-manager
|
||||
name: controller-manager-to-{{ cluster_name }}
|
||||
users:
|
||||
- name: controller-manager
|
||||
user:
|
||||
token: {{ controller_manager_token }}
|
@ -1,18 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Config
|
||||
current-context: kubectl-to-{{ cluster_name }}
|
||||
preferences: {}
|
||||
clusters:
|
||||
- cluster:
|
||||
certificate-authority-data: {{ kube_ca_cert|b64encode }}
|
||||
server: https://{{ groups['masters'][0] }}:{{ kube_master_api_port }}
|
||||
name: {{ cluster_name }}
|
||||
contexts:
|
||||
- context:
|
||||
cluster: {{ cluster_name }}
|
||||
user: kubectl
|
||||
name: kubectl-to-{{ cluster_name }}
|
||||
users:
|
||||
- name: kubectl
|
||||
user:
|
||||
token: {{ kubectl_token }}
|
@ -1,7 +0,0 @@
|
||||
###
|
||||
# kubernetes scheduler config
|
||||
|
||||
# default config should be adequate
|
||||
|
||||
# Add your own!
|
||||
KUBE_SCHEDULER_ARGS="--kubeconfig={{ kube_config_dir }}/scheduler.kubeconfig"
|
@ -1,18 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Config
|
||||
current-context: scheduler-to-{{ cluster_name }}
|
||||
preferences: {}
|
||||
clusters:
|
||||
- cluster:
|
||||
certificate-authority: {{ kube_cert_dir }}/ca.crt
|
||||
server: https://{{ groups['masters'][0] }}:{{ kube_master_api_port }}
|
||||
name: {{ cluster_name }}
|
||||
contexts:
|
||||
- context:
|
||||
cluster: {{ cluster_name }}
|
||||
user: scheduler
|
||||
name: scheduler-to-{{ cluster_name }}
|
||||
users:
|
||||
- name: scheduler
|
||||
user:
|
||||
token: {{ scheduler_token }}
|
@ -1,20 +0,0 @@
|
||||
---
|
||||
- name: reload systemd
|
||||
command: systemctl --system daemon-reload
|
||||
notify:
|
||||
- restart daemons
|
||||
|
||||
- name: restart daemons
|
||||
command: /bin/true
|
||||
notify:
|
||||
- restart kubelet
|
||||
- restart proxy
|
||||
|
||||
- name: restart kubelet
|
||||
service: name=kubelet state=restarted
|
||||
|
||||
- name: restart proxy
|
||||
service: name=kube-proxy state=restarted
|
||||
|
||||
- name: restart iptables
|
||||
service: name=iptables state=restarted
|
@ -1,5 +0,0 @@
|
||||
---
|
||||
dependencies:
|
||||
- { role: common }
|
||||
- { role: docker }
|
||||
- { role: kubernetes }
|
@ -1,10 +0,0 @@
|
||||
---
|
||||
# https://bugzilla.redhat.com/show_bug.cgi?id=1033606 and I think others say firewalld+docker == bad
|
||||
- name: disable firewalld
|
||||
service: name=firewalld enabled=no state=stopped
|
||||
|
||||
#- name: Open firewalld port for the kubelet
|
||||
#firewalld: port=10250/tcp permanent=false state=enabled
|
||||
|
||||
#- name: Save firewalld port for the kubelet
|
||||
#firewalld: port=10250/tcp permanent=true state=enabled
|
@ -1,18 +0,0 @@
|
||||
---
|
||||
- name: Get iptables rules
|
||||
command: iptables -L
|
||||
register: iptablesrules
|
||||
always_run: yes
|
||||
|
||||
- name: Enable iptables at boot
|
||||
service: name=iptables enabled=yes state=started
|
||||
|
||||
- name: Open kubelet port with iptables
|
||||
command: /sbin/iptables -I INPUT 1 -p tcp --dport 10250 -j ACCEPT -m comment --comment "kubelet"
|
||||
when: "'kubelet' not in iptablesrules.stdout"
|
||||
notify:
|
||||
- restart iptables
|
||||
|
||||
- name: Save iptables rules
|
||||
command: service iptables save
|
||||
when: "'kubelet' not in iptablesrules.stdout"
|
@ -1,27 +0,0 @@
|
||||
---
|
||||
- name: Copy node binaries
|
||||
copy:
|
||||
src: ../../_output/local/go/bin/{{ item }}
|
||||
dest: /usr/bin/
|
||||
mode: 0755
|
||||
with_items:
|
||||
- kubelet
|
||||
- kube-proxy
|
||||
- kubectl
|
||||
notify:
|
||||
- restart daemons
|
||||
|
||||
- name: Copy node service files
|
||||
copy:
|
||||
src: ../init/systemd/{{ item }}
|
||||
dest: /etc/systemd/system/
|
||||
mode: 0644
|
||||
with_items:
|
||||
- kube-proxy.service
|
||||
- kubelet.service
|
||||
notify: reload systemd
|
||||
|
||||
- name: Create the /var/lib/kubelet working directory
|
||||
file:
|
||||
path: /var/lib/kubelet
|
||||
state: directory
|
@ -1,70 +0,0 @@
|
||||
---
|
||||
- name: Set selinux permissive because tokens and selinux don't work together
|
||||
selinux: state=permissive policy={{ ansible_selinux.type }}
|
||||
when: ansible_selinux is defined and ansible_selinux.status == "enabled"
|
||||
|
||||
- include: packageManagerInstall.yml
|
||||
when: source_type == "packageManager"
|
||||
tags:
|
||||
- binary-update
|
||||
|
||||
- include: localBuildInstall.yml
|
||||
when: source_type == "localBuild"
|
||||
tags:
|
||||
- binary-update
|
||||
|
||||
- name: Make sure manifest directory exists
|
||||
file: path={{ kube_manifest_dir }} state=directory
|
||||
|
||||
- name: Install fluentd pod into each node
|
||||
get_url:
|
||||
url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/saltbase/salt/fluentd-es/fluentd-es.yaml
|
||||
dest="{{ kube_manifest_dir }}"
|
||||
force=yes
|
||||
when: cluster_logging
|
||||
|
||||
- name: Get the node token values
|
||||
slurp:
|
||||
src: "{{ kube_token_dir }}/{{ item }}-{{ inventory_hostname }}.token"
|
||||
with_items:
|
||||
- "system:kubelet"
|
||||
- "system:proxy"
|
||||
register: tokens
|
||||
delegate_to: "{{ groups['masters'][0] }}"
|
||||
|
||||
- name: Set token facts
|
||||
set_fact:
|
||||
kubelet_token: "{{ tokens.results[0].content|b64decode }}"
|
||||
proxy_token: "{{ tokens.results[1].content|b64decode }}"
|
||||
|
||||
- name: write the config files for kubelet
|
||||
template: src=kubelet.j2 dest={{ kube_config_dir }}/kubelet
|
||||
notify:
|
||||
- restart kubelet
|
||||
|
||||
- name: write the kubecfg (auth) file for kubelet
|
||||
template: src=kubelet.kubeconfig.j2 dest={{ kube_config_dir }}/kubelet.kubeconfig
|
||||
notify:
|
||||
- restart kubelet
|
||||
|
||||
- name: Enable kubelet
|
||||
service: name=kubelet enabled=yes state=started
|
||||
|
||||
- name: write the config files for proxy
|
||||
template: src=proxy.j2 dest={{ kube_config_dir }}/proxy
|
||||
notify:
|
||||
- restart proxy
|
||||
|
||||
- name: write the kubecfg (auth) file for kube-proxy
|
||||
template: src=proxy.kubeconfig.j2 dest={{ kube_config_dir }}/proxy.kubeconfig
|
||||
notify:
|
||||
- restart proxy
|
||||
|
||||
- name: Enable proxy
|
||||
service: name=kube-proxy enabled=yes state=started
|
||||
|
||||
- include: firewalld.yml
|
||||
when: has_firewalld
|
||||
|
||||
- include: iptables.yml
|
||||
when: not has_firewalld and has_iptables
|
@ -1,28 +0,0 @@
|
||||
---
|
||||
- name: Set fact for Atomic Host package install
|
||||
set_fact:
|
||||
did_install: true
|
||||
when: is_atomic
|
||||
|
||||
- include: pkgMgrInstallers/centos-install.yml
|
||||
when: ansible_distribution == "CentOS"
|
||||
|
||||
- name: Set fact saying we did CentOS package install
|
||||
set_fact:
|
||||
did_install: true
|
||||
when: ansible_distribution == "CentOS"
|
||||
|
||||
|
||||
|
||||
- include: pkgMgrInstallers/fedora-install.yml
|
||||
when: ansible_distribution == "Fedora" and ansible_distribution_release != "Rawhide"
|
||||
|
||||
- name: Set fact saying we did Fedora package install
|
||||
set_fact:
|
||||
did_install: true
|
||||
when: ansible_distribution == "Fedora" and ansible_distribution_release != "Rawhide"
|
||||
|
||||
|
||||
|
||||
- include: pkgMgrInstallers/generic-install.yml
|
||||
when: not did_install
|
@ -1,8 +0,0 @@
|
||||
---
|
||||
- name: CentOS | Install kubernetes node
|
||||
yum:
|
||||
pkg=kubernetes-node
|
||||
state=latest
|
||||
enablerepo=virt7-docker-common-candidate
|
||||
notify:
|
||||
- restart daemons
|
@ -1,8 +0,0 @@
|
||||
- name: Fedora | Install kubernetes node
|
||||
action: "{{ ansible_pkg_mgr }}"
|
||||
args:
|
||||
name: kubernetes-node
|
||||
state: latest
|
||||
enablerepo: "updates-testing"
|
||||
notify:
|
||||
- restart daemons
|
@ -1,7 +0,0 @@
|
||||
- name: Generic | Install kubernetes node
|
||||
action: "{{ ansible_pkg_mgr }}"
|
||||
args:
|
||||
name: kubernetes-node
|
||||
state: latest
|
||||
notify:
|
||||
- restart daemons
|
@ -1,21 +0,0 @@
|
||||
###
|
||||
# kubernetes kubelet (node) config
|
||||
|
||||
# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
|
||||
KUBELET_ADDRESS="--address=0.0.0.0"
|
||||
|
||||
# The port for the info server to serve on
|
||||
# KUBELET_PORT="--port=10250"
|
||||
|
||||
# You may leave this blank to use the actual hostname
|
||||
KUBELET_HOSTNAME="--hostname-override={{ inventory_hostname }}"
|
||||
|
||||
# location of the api-server
|
||||
KUBELET_API_SERVER="--api-servers=https://{{ groups['masters'][0] }}:{{ kube_master_api_port }}"
|
||||
|
||||
# Add your own!
|
||||
{% if dns_setup %}
|
||||
KUBELET_ARGS="--cluster-dns={{ dns_server }} --cluster-domain={{ dns_domain }} --kubeconfig={{ kube_config_dir}}/kubelet.kubeconfig --config={{ kube_manifest_dir }}"
|
||||
{% else %}
|
||||
KUBELET_ARGS="--kubeconfig={{ kube_config_dir }}/kubelet.kubeconfig --config={{ kube_manifest_dir }}"
|
||||
{% endif %}
|
@ -1,18 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Config
|
||||
current-context: kubelet-to-{{ cluster_name }}
|
||||
preferences: {}
|
||||
clusters:
|
||||
- cluster:
|
||||
certificate-authority: {{ kube_cert_dir }}/ca.crt
|
||||
server: https://{{ groups['masters'][0] }}:{{ kube_master_api_port }}
|
||||
name: {{ cluster_name }}
|
||||
contexts:
|
||||
- context:
|
||||
cluster: {{ cluster_name }}
|
||||
user: kubelet
|
||||
name: kubelet-to-{{ cluster_name }}
|
||||
users:
|
||||
- name: kubelet
|
||||
user:
|
||||
token: {{ kubelet_token }}
|
@ -1,7 +0,0 @@
|
||||
###
|
||||
# kubernetes proxy config
|
||||
|
||||
# default config should be adequate
|
||||
|
||||
# Add your own!
|
||||
KUBE_PROXY_ARGS="--kubeconfig={{ kube_config_dir }}/proxy.kubeconfig"
|
@ -1,18 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Config
|
||||
current-context: proxy-to-{{ cluster_name }}
|
||||
preferences: {}
|
||||
contexts:
|
||||
- context:
|
||||
cluster: {{ cluster_name }}
|
||||
user: proxy
|
||||
name: proxy-to-{{ cluster_name }}
|
||||
clusters:
|
||||
- cluster:
|
||||
certificate-authority: {{ kube_cert_dir }}/ca.crt
|
||||
server: https://{{ groups['masters'][0] }}:{{ kube_master_api_port }}
|
||||
name: {{ cluster_name }}
|
||||
users:
|
||||
- name: proxy
|
||||
user:
|
||||
token: {{ proxy_token }}
|
@ -1,8 +0,0 @@
|
||||
---
|
||||
- name: Install minimal packages
|
||||
raw: dnf install -y {{ item }}
|
||||
with_items:
|
||||
- python # everyone needs python2
|
||||
- python-dnf # some versions of ansible (2.0) use dnf directly
|
||||
- yum # some versions of ansible use yum
|
||||
- libselinux-python
|
@ -1,11 +0,0 @@
|
||||
---
|
||||
- name: Get os_version from /etc/os-release
|
||||
raw: "grep '^VERSION_ID=' /etc/os-release | sed s'/VERSION_ID=//'"
|
||||
register: os_version
|
||||
|
||||
- name: Get distro name from /etc/os-release
|
||||
raw: "grep '^NAME=' /etc/os-release | sed s'/NAME=//'"
|
||||
register: distro
|
||||
|
||||
- include: fedora-dnf.yml
|
||||
when: os_version.stdout|int >= 22 and 'Fedora' in distro.stdout
|
@ -1,19 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
inventory=${INVENTORY:-inventory}
|
||||
|
||||
ansible-playbook -i "${inventory}" cluster.yml "$@"
|
1
contrib/ansible/vagrant/.gitignore
vendored
@ -1 +0,0 @@
|
||||
openstack_config.yml
|
@ -1,65 +0,0 @@
|
||||
## Vagrant deployer for Kubernetes Ansible
|
||||
|
||||
This deployer sets up a vagrant cluster and installs kubernetes with flannel on it.
|
||||
|
||||
## Before you start!
|
||||
|
||||
You will need a functioning vagrant provider. Currently supported are openstack, libvirt, and virtualbox.
|
||||
|
||||
## Usage
|
||||
|
||||
In general, all that should be needed is to run
|
||||
|
||||
```
|
||||
vagrant up
|
||||
```
|
||||
|
||||
If you export an env variable such as
|
||||
```
|
||||
export NUM_NODES=4
|
||||
```
|
||||
|
||||
The system will create that number of nodes. Default is 2.
|
||||
|
||||
## Provider Specific Information
|
||||
Vagrant tries to be intelligent and pick the first provider supported by your installation. If you want to specify a provider you can do so by running vagrant like so:
|
||||
```
|
||||
vagrant up --provider=openstack
|
||||
```
|
||||
|
||||
### OpenStack
|
||||
Make sure to install the openstack provider for vagrant.
|
||||
```
|
||||
vagrant plugin install vagrant-openstack-provider --plugin-version ">= 0.6.1"
|
||||
```
|
||||
NOTE: This is a more up-to-date provider than the similar `vagrant-openstack-plugin`.
|
||||
|
||||
Also note that current (required) versions of `vagrant-openstack-provider` are not compatible with ruby 2.2 (see https://github.com/ggiamarchi/vagrant-openstack-provider/pull/237), so make sure you get at least version 0.6.1.
|
||||
|
||||
To use the vagrant openstack provider you will need to:
|
||||
- Copy `openstack_config.yml.example` to `openstack_config.yml`
|
||||
- Edit `openstack_config.yml` to include your relevant details.
|
||||
|
||||
Vagrant (1.7.2) does not seem to ever want to pick openstack as the provider, so you will need to tell it to use openstack explicitly.
|
||||
|
||||
### Libvirt
|
||||
|
||||
The libvirt vagrant provider is non-deterministic when launching VMs. This is a problem as we need ansible to only run after all of the VMs are running. To solve this when using libvirt one must
|
||||
do the following:
|
||||
```
|
||||
vagrant up --no-provision
|
||||
vagrant provision
|
||||
```
|
||||
|
||||
### VirtualBox
|
||||
Nothing special with VirtualBox. Hopefully `vagrant up` just works.
|
||||
|
||||
|
||||
## Random Information
|
||||
If you just want to update the binaries on your systems (either pkgManager or localBuild) you can do so using the ansible binary-update tag. To do so with vagrant provision you would need to run
|
||||
```
|
||||
ANSIBLE_TAGS="binary-update" vagrant provision
|
||||
```
|
||||
|
142
contrib/ansible/vagrant/Vagrantfile
vendored
@ -1,142 +0,0 @@
|
||||
# -*- mode: ruby -*-
|
||||
# vi: set ft=ruby :
|
||||
require "yaml"
|
||||
|
||||
### This is a new provider, different from cloudbau's.
|
||||
### RUN: "vagrant plugin uninstall vagrant-openstack-plugin"
|
||||
### Then RUN: "vagrant plugin install vagrant-openstack-provider"
|
||||
require 'vagrant-openstack-provider'
|
||||
|
||||
$num_nodes = (ENV['NUM_NODES'] || 2).to_i
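# node count for the cluster; override with e.g. `NUM_NODES=4 vagrant up` (defaults to 2)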
|
||||
ansible_tags = ENV['ANSIBLE_TAGS']
|
||||
|
||||
VAGRANTFILE_API_VERSION = "2"
|
||||
|
||||
# Openstack providers are best used with latest versions.
|
||||
Vagrant.require_version ">= 1.7"
|
||||
|
||||
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
|
||||
# By default, Vagrant 1.7+ automatically inserts a different
|
||||
# insecure keypair for each new VM created. The easiest way
|
||||
# to use the same keypair for all the machines is to disable
|
||||
# this feature and rely on the legacy insecure key.
|
||||
config.ssh.insert_key = false
|
||||
|
||||
# This explicitly sets the order that vagrant will use by default if no --provider given
|
||||
config.vm.provider "openstack"
|
||||
config.vm.provider "libvirt"
|
||||
config.vm.provider "virtualbox"
|
||||
|
||||
def set_openstack(os, config, n)
|
||||
# common config
|
||||
config.vm.box = "dummy"
|
||||
config.vm.box_url = "https://github.com/cloudbau/vagrant-openstack-plugin/raw/master/dummy.box"
|
||||
|
||||
# this crap is to make it not fail if the file doesn't exist (which is ok if we are using a different provisioner)
|
||||
__filename = File.join(File.dirname(__FILE__), "openstack_config.yml")
|
||||
if File.exist?(__filename)
|
||||
_config = YAML.load(File.open(__filename, File::RDONLY).read)
|
||||
else
|
||||
_config = Hash.new("")
|
||||
_config['security_group'] = []
|
||||
end
|
||||
|
||||
config.ssh.username = "fedora"
|
||||
config.ssh.private_key_path = "~/.ssh/id_rsa"
|
||||
config.vm.boot_timeout = 60*10
|
||||
|
||||
### The below parameters need to be modified per your openstack instance.
|
||||
os.username = _config['os_username']
|
||||
os.password = _config['os_password']
|
||||
os.tenant_name = _config['os_tenant']
|
||||
os.keypair_name = _config['os_ssh_key_name']
|
||||
os.openstack_auth_url = _config['os_auth_url']
|
||||
os.region = _config['os_region_name']
|
||||
os.floating_ip_pool = _config['os_floating_ip_pool']
|
||||
os.flavor = _config['os_flavor']
|
||||
os.image = _config['os_image']
|
||||
os.security_groups = _config['os_security_groups']
|
||||
os.server_name = n.vm.hostname
|
||||
end
|
||||
|
||||
def set_vbox(vb, config)
|
||||
config.vm.box = "chef/centos-7.0"
|
||||
config.vm.network "private_network", type: "dhcp"
|
||||
vb.gui = false
|
||||
vb.memory = 2048
|
||||
vb.cpus = 2
|
||||
|
||||
# Use faster paravirtualized networking
|
||||
vb.customize ["modifyvm", :id, "--nictype1", "virtio"]
|
||||
vb.customize ["modifyvm", :id, "--nictype2", "virtio"]
|
||||
end
|
||||
|
||||
def set_libvirt(lv, config)
|
||||
config.vm.box = "kube-centos-7"
|
||||
config.vm.box_url = "http://cloud.centos.org/centos/7/vagrant/x86_64/images/CentOS-7.LibVirt.box"
|
||||
lv.memory = 2048
|
||||
lv.cpus = 2
|
||||
lv.nested = true
|
||||
lv.volume_cache = 'none'
|
||||
end
|
||||
|
||||
def set_provider(n)
|
||||
n.vm.provider :openstack do |os, override|
|
||||
set_openstack(os, override, n)
|
||||
end
|
||||
n.vm.provider :virtualbox do |vb, override|
|
||||
set_vbox(vb, override)
|
||||
end
|
||||
n.vm.provider :libvirt do |lv, override|
|
||||
set_libvirt(lv, override)
|
||||
end
|
||||
end
|
||||
|
||||
config.vm.synced_folder ".", "/vagrant", disabled: true
|
||||
|
||||
nodes = Array.new()
|
||||
$num_nodes.times do |i|
|
||||
# multi vm config
|
||||
name = "kube-node-#{i+1}"
|
||||
nodes.push(name)
|
||||
config.vm.define "#{name}" do |n|
|
||||
n.vm.hostname = name
|
||||
set_provider(n)
|
||||
end
|
||||
end
|
||||
|
||||
# This is how we create the ansible inventory, see it in .vagrant
|
||||
# if you want to debug, run 'VAGRANT_LOG=info vagrant up'
|
||||
# and you'll see exactly how the cluster comes up via ansible inv.
|
||||
groups = {
|
||||
"etcd" => ["kube-master"],
|
||||
"masters" => ["kube-master"],
|
||||
"nodes" => nodes,
|
||||
"all_groups:children" => ["etcd","masters","nodes"]
|
||||
}
|
||||
|
||||
config.vm.define "kube-master" do |n|
|
||||
name = "kube-master"
|
||||
n.vm.hostname = name
|
||||
set_provider(n)
|
||||
|
||||
if ansible_tags.nil?
|
||||
# This sets up the vagrant hosts before we run the main playbook
|
||||
# Today this just creates /etc/hosts so machines can talk via their
|
||||
# 'internal' IPs instead of the openstack public ip.
|
||||
n.vm.provision :ansible do |ansible|
|
||||
ansible.groups = groups
|
||||
ansible.playbook = "./vagrant-ansible.yml"
|
||||
ansible.limit = "all" # otherwise the metadata won't be there for ipv4?
|
||||
end
|
||||
end
|
||||
|
||||
# This sets up both flannel and kube.
|
||||
n.vm.provision :ansible do |ansible|
|
||||
ansible.groups = groups
|
||||
ansible.playbook = "../cluster.yml"
|
||||
ansible.limit = "all" # otherwise the metadata won't be there for ipv4?
|
||||
ansible.tags = ansible_tags
|
||||
end
|
||||
end
|
||||
end
|
@ -1,12 +0,0 @@
|
||||
os_username: eparis
|
||||
os_password: redhat
|
||||
os_tenant: "RH US Business Group"
|
||||
os_auth_url: "http://os1-public.osop.rhcloud.com:5000/v2.0"
|
||||
os_region_name: "OS1Public"
|
||||
os_ssh_key_name: "eparis"
|
||||
os_flavor: "m1.small"
|
||||
os_image: "Fedora 22 Cloud Base x86_64 (final)"
|
||||
os_security_groups:
|
||||
- "default"
|
||||
#- some_other_group
|
||||
os_floating_ip_pool: "os1_public"
|
@ -1,12 +0,0 @@
|
||||
os_username: eparis
|
||||
os_password: password
|
||||
os_tenant: "RH US Business Group"
|
||||
os_auth_url: "http://os1-public.osop.rhcloud.com:5000/v2.0"
|
||||
os_region_name: "OS1Public"
|
||||
os_ssh_key_name: "eparis"
|
||||
os_flavor: "m1.small"
|
||||
os_image: "Fedora 22 Cloud Base x86_64 (final)"
|
||||
os_security_groups:
|
||||
- "default"
|
||||
#- some_other_group
|
||||
os_floating_ip_pool: "os1_public"
|
@ -1,11 +0,0 @@
|
||||
- hosts: all
|
||||
sudo: yes
|
||||
tasks:
|
||||
- name: "Build hosts file"
|
||||
lineinfile:
|
||||
dest=/etc/hosts
|
||||
regexp=".*{{ item }}$"
|
||||
line="{{ hostvars[item].ansible_default_ipv4.address }} {{item}}"
|
||||
state=present
|
||||
when: hostvars[item].ansible_default_ipv4.address is defined
|
||||
with_items: groups['all']
|
@ -1,8 +0,0 @@
|
||||
FROM busybox
|
||||
MAINTAINER Muhammed Uluyol "uluyol@google.com"
|
||||
|
||||
ADD dc /diurnal
|
||||
|
||||
RUN chown root:users /diurnal && chmod 755 /diurnal
|
||||
|
||||
ENTRYPOINT ["/diurnal"]
|
@ -1,24 +0,0 @@
|
||||
.PHONY: build push vet test clean
|
||||
|
||||
TAG = 0.5
|
||||
REPO = uluyol/kube-diurnal
|
||||
|
||||
BIN = dc
|
||||
|
||||
dc: dc.go time.go
|
||||
CGO_ENABLED=0 godep go build -a -installsuffix cgo -o dc dc.go time.go
|
||||
|
||||
vet:
|
||||
godep go vet .
|
||||
|
||||
test:
|
||||
godep go test .
|
||||
|
||||
build: $(BIN)
|
||||
docker build -t $(REPO):$(TAG) .
|
||||
|
||||
push:
|
||||
docker push $(REPO):$(TAG)
|
||||
|
||||
clean:
|
||||
rm -f $(BIN)
|
@ -1,44 +0,0 @@
|
||||
## Diurnal Controller
|
||||
This controller manipulates the number of replicas maintained by a replication controller throughout the day based on a provided list of times of day (according to ISO 8601) and replica counts. It should be run under a replication controller that is in the same namespace as the replication controller that it is manipulating.
|
||||
|
||||
For example, to set the replica counts of the pods with the labels "tier=backend,track=canary" to 10 at noon UTC and 6 at midnight UTC, we can use `-labels tier=backend,track=canary -times 00:00Z,12:00Z -counts 6,10`. An example replication controller config can be found [here](example-diurnal-controller.yaml).
|
||||
|
||||
Instead of providing replica counts and times of day directly, you may use a script like the one below to generate them using mathematical functions.
|
||||
|
||||
```python
|
||||
from math import *
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
def _day_to_2pi(t):
|
||||
return float(t) * 2 * pi / (24*3600)
|
||||
|
||||
def main(args):
|
||||
if len(args) < 3:
|
||||
print "Usage: %s sample_interval func" % (args[0],)
|
||||
print "func should be a function of the variable t, where t will range from 0"
|
||||
print "to 2pi over the course of the day"
|
||||
sys.exit(1)
|
||||
sampling_interval = int(args[1])
|
||||
exec "def f(t): return " + args[2]
|
||||
i = 0
|
||||
times = []
|
||||
counts = []
|
||||
while i < 24*60*60:
|
||||
hours = i / 3600
|
||||
left = i - hours*3600
|
||||
min = left / 60
|
||||
sec = left - min*60
|
||||
times.append("%dh%dm%ds" % (hours, min, sec))
|
||||
count = int(round(f(_day_to_2pi(i))))
|
||||
counts.append(str(count))
|
||||
i += sampling_interval
|
||||
print "-times %s -counts %s" % (",".join(times), ",".join(counts))
|
||||
|
||||
if __name__ == "__main__":
|
||||
main(sys.argv)
|
||||
```
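For instance, assuming the script above has been saved as `gen_counts.py` (a placeholder name, it is not named in this repo), sampling once an hour with a sinusoidal replica count between 4 and 12 might look like:
```
# gen_counts.py is a placeholder name for the generator script above
python gen_counts.py 3600 "8 + 4*sin(t)"
# prints a flag string such as: -times 0h0m0s,1h0m0s,... -counts 8,9,...
# which is intended to be passed to the diurnal controller as described above
```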
|
||||
|
||||
|
||||
|
@ -1,283 +0,0 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// An external diurnal controller for kubernetes. With this, it's possible to manage
|
||||
// known replica counts that vary throughout the day.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/signal"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
kclient "k8s.io/kubernetes/pkg/client"
|
||||
"k8s.io/kubernetes/pkg/labels"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
const dayPeriod = 24 * time.Hour
|
||||
|
||||
type timeCount struct {
|
||||
time time.Duration
|
||||
count int
|
||||
}
|
||||
|
||||
func (tc timeCount) String() string {
|
||||
h := tc.time / time.Hour
|
||||
m := (tc.time % time.Hour) / time.Minute
|
||||
s := (tc.time % time.Minute) / time.Second
|
||||
if m == 0 && s == 0 {
|
||||
return fmt.Sprintf("(%02dZ, %d)", h, tc.count)
|
||||
} else if s == 0 {
|
||||
return fmt.Sprintf("(%02d:%02dZ, %d)", h, m, tc.count)
|
||||
}
|
||||
return fmt.Sprintf("(%02d:%02d:%02dZ, %d)", h, m, s, tc.count)
|
||||
}
|
||||
|
||||
type byTime []timeCount
|
||||
|
||||
func (tc byTime) Len() int { return len(tc) }
|
||||
func (tc byTime) Swap(i, j int) { tc[i], tc[j] = tc[j], tc[i] }
|
||||
func (tc byTime) Less(i, j int) bool { return tc[i].time < tc[j].time }
|
||||
|
||||
func timeMustParse(layout, s string) time.Time {
|
||||
t, err := time.Parse(layout, s)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// first argument is a format string equivalent to HHMMSS. See time.Parse for details.
|
||||
var epoch = timeMustParse("150405", "000000")
|
||||
|
||||
func parseTimeRelative(s string) (time.Duration, error) {
|
||||
t, err := parseTimeISO8601(s)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("unable to parse %s: %v", s, err)
|
||||
}
|
||||
return (t.Sub(epoch) + dayPeriod) % dayPeriod, nil
|
||||
}
|
||||
|
||||
func parseTimeCounts(times string, counts string) ([]timeCount, error) {
|
||||
ts := strings.Split(times, ",")
|
||||
cs := strings.Split(counts, ",")
|
||||
if len(ts) != len(cs) {
|
||||
return nil, fmt.Errorf("provided %d times but %d replica counts", len(ts), len(cs))
|
||||
}
|
||||
var tc []timeCount
|
||||
for i := range ts {
|
||||
t, err := parseTimeRelative(ts[i])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c, err := strconv.ParseInt(cs[i], 10, 64)
|
||||
if c < 0 {
|
||||
return nil, errors.New("counts must be non-negative")
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tc = append(tc, timeCount{t, int(c)})
|
||||
}
|
||||
sort.Sort(byTime(tc))
|
||||
return tc, nil
|
||||
}
|
||||
|
||||
type Scaler struct {
|
||||
timeCounts []timeCount
|
||||
selector labels.Selector
|
||||
start time.Time
|
||||
pos int
|
||||
done chan struct{}
|
||||
}
|
||||
|
||||
var posError = errors.New("could not find position")
|
||||
|
||||
func findPos(tc []timeCount, cur int, offset time.Duration) int {
|
||||
first := true
|
||||
for i := cur; i != cur || first; i = (i + 1) % len(tc) {
|
||||
if tc[i].time > offset {
|
||||
return i
|
||||
}
|
||||
first = false
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (s *Scaler) setCount(c int) {
|
||||
glog.Infof("scaling to %d replicas", c)
|
||||
rcList, err := client.ReplicationControllers(namespace).List(s.selector)
|
||||
if err != nil {
|
||||
glog.Errorf("could not get replication controllers: %v", err)
|
||||
return
|
||||
}
|
||||
for _, rc := range rcList.Items {
|
||||
rc.Spec.Replicas = c
|
||||
if _, err = client.ReplicationControllers(namespace).Update(&rc); err != nil {
|
||||
glog.Errorf("unable to scale replication controller: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Scaler) timeOffset() time.Duration {
|
||||
return time.Since(s.start) % dayPeriod
|
||||
}
|
||||
|
||||
func (s *Scaler) curpos(offset time.Duration) int {
|
||||
return findPos(s.timeCounts, s.pos, offset)
|
||||
}
|
||||
|
||||
func (s *Scaler) scale() {
|
||||
for {
|
||||
select {
|
||||
case <-s.done:
|
||||
return
|
||||
default:
|
||||
offset := s.timeOffset()
|
||||
s.pos = s.curpos(offset)
|
||||
if s.timeCounts[s.pos].time < offset {
|
||||
time.Sleep(dayPeriod - offset)
|
||||
continue
|
||||
}
|
||||
time.Sleep(s.timeCounts[s.pos].time - offset)
|
||||
s.setCount(s.timeCounts[s.pos].count)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Scaler) Start() error {
|
||||
now := time.Now().UTC()
|
||||
s.start = time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location())
|
||||
if *startNow {
|
||||
s.start = now
|
||||
}
|
||||
|
||||
// set initial count
|
||||
pos := s.curpos(s.timeOffset())
|
||||
// add the len to avoid getting a negative index
|
||||
pos = (pos - 1 + len(s.timeCounts)) % len(s.timeCounts)
|
||||
s.setCount(s.timeCounts[pos].count)
|
||||
|
||||
s.done = make(chan struct{})
|
||||
go s.scale()
|
||||
return nil
|
||||
}
|
||||
|
||||
func safeclose(c chan<- struct{}) (err error) {
|
||||
defer func() {
|
||||
if e := recover(); e != nil {
|
||||
err = e.(error)
|
||||
}
|
||||
}()
|
||||
close(c)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Scaler) Stop() error {
|
||||
if err := safeclose(s.done); err != nil {
|
||||
return errors.New("already stopped scaling")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
counts = flag.String("counts", "", "replica counts, must have at least one (csv)")
|
||||
times = flag.String("times", "", "times to set replica counts relative to UTC following ISO 8601 (csv)")
|
||||
userLabels = flag.String("labels", "", "replication controller labels, syntax should follow https://godoc.org/k8s.io/kubernetes/pkg/labels#Parse")
|
||||
startNow = flag.Bool("now", false, "times are relative to now not 0:00 UTC (for demos)")
|
||||
local = flag.Bool("local", false, "set to true if running on local machine not within cluster")
|
||||
localPort = flag.Int("localport", 8001, "port that kubectl proxy is running on (local must be true)")
|
||||
|
||||
namespace string = os.Getenv("POD_NAMESPACE")
|
||||
|
||||
client *kclient.Client
|
||||
)
|
||||
|
||||
const usageNotes = `
|
||||
counts and times must both be set and be of equal length. Example usage:
|
||||
diurnal -labels name=redis-slave -times 00:00:00Z,06:00:00Z -counts 3,9
|
||||
diurnal -labels name=redis-slave -times 0600-0500,0900-0500,1700-0500,2200-0500 -counts 15,20,13,6
|
||||
`
|
||||
|
||||
func usage() {
|
||||
fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
|
||||
flag.PrintDefaults()
|
||||
fmt.Fprint(os.Stderr, usageNotes)
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Usage = usage
|
||||
flag.Parse()
|
||||
|
||||
var (
|
||||
cfg *kclient.Config
|
||||
err error
|
||||
)
|
||||
if *local {
|
||||
cfg = &kclient.Config{Host: fmt.Sprintf("http://localhost:%d", *localPort)}
|
||||
} else {
|
||||
cfg, err = kclient.InClusterConfig()
|
||||
if err != nil {
|
||||
glog.Errorf("failed to load config: %v", err)
|
||||
flag.Usage()
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
client, err = kclient.New(cfg)
	if err != nil {
		glog.Fatalf("failed to create client: %v", err)
	}
|
||||
|
||||
selector, err := labels.Parse(*userLabels)
|
||||
if err != nil {
|
||||
glog.Fatal(err)
|
||||
}
|
||||
tc, err := parseTimeCounts(*times, *counts)
|
||||
if err != nil {
|
||||
glog.Fatal(err)
|
||||
}
|
||||
if namespace == "" {
|
||||
glog.Fatal("POD_NAMESPACE is not set. Set to the namespace of the replication controller if running locally.")
|
||||
}
|
||||
scaler := Scaler{timeCounts: tc, selector: selector}
|
||||
if err != nil {
|
||||
glog.Fatal(err)
|
||||
}
|
||||
|
||||
sigChan := make(chan os.Signal, 1)
|
||||
signal.Notify(sigChan,
|
||||
syscall.SIGHUP,
|
||||
syscall.SIGINT,
|
||||
syscall.SIGQUIT,
|
||||
syscall.SIGTERM)
|
||||
|
||||
glog.Info("starting scaling")
|
||||
if err := scaler.Start(); err != nil {
|
||||
glog.Fatal(err)
|
||||
}
|
||||
<-sigChan
|
||||
glog.Info("stopping scaling")
|
||||
if err := scaler.Stop(); err != nil {
|
||||
glog.Fatal(err)
|
||||
}
|
||||
}
|
@ -1,100 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
	"testing"
	"time"
)

func equalsTimeCounts(a, b []timeCount) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if a[i].time != b[i].time || a[i].count != b[i].count {
			return false
		}
	}
	return true
}

func TestParseTimeCounts(t *testing.T) {
	cases := []struct {
		times  string
		counts string
		out    []timeCount
		err    bool
	}{
		{
			"00:00:01Z,00:02Z,03:00Z,04:00Z", "1,4,1,8", []timeCount{
				{time.Second, 1},
				{2 * time.Minute, 4},
				{3 * time.Hour, 1},
				{4 * time.Hour, 8},
			}, false,
		},
		{
			"00:01Z,00:02Z,00:05Z,00:03Z", "1,2,3,4", []timeCount{
				{1 * time.Minute, 1},
				{2 * time.Minute, 2},
				{3 * time.Minute, 4},
				{5 * time.Minute, 3},
			}, false,
		},
		{"00:00Z,00:01Z", "1,0", []timeCount{{0, 1}, {1 * time.Minute, 0}}, false},
		{"00:00+00,00:01+00:00,01:00Z", "0,-1,0", nil, true},
		{"-00:01Z,01:00Z", "0,1", nil, true},
		{"00:00Z", "1,2,3", nil, true},
	}
	for i, test := range cases {
		out, err := parseTimeCounts(test.times, test.counts)
		if test.err && err == nil {
			t.Errorf("case %d: expected error", i)
		} else if !test.err && err != nil {
			t.Errorf("case %d: unexpected error: %v", i, err)
		}
		if !test.err {
			if !equalsTimeCounts(test.out, out) {
				t.Errorf("case %d: expected timeCounts: %v got %v", i, test.out, out)
			}
		}
	}
}

func TestFindPos(t *testing.T) {
	cases := []struct {
		tc       []timeCount
		cur      int
		offset   time.Duration
		expected int
	}{
		{[]timeCount{{0, 1}, {4, 0}}, 1, 1, 1},
		{[]timeCount{{0, 1}, {4, 0}}, 0, 1, 1},
		{[]timeCount{{0, 1}, {4, 0}}, 1, 70, 0},
		{[]timeCount{{5, 1}, {100, 9000}, {4000, 2}, {10000, 4}}, 0, 0, 0},
		{[]timeCount{{5, 1}, {100, 9000}, {4000, 2}, {10000, 4}}, 1, 5000, 3},
		{[]timeCount{{5, 1}, {100, 9000}, {4000, 2}, {10000, 4}}, 2, 10000000, 0},
		{[]timeCount{{5, 1}, {100, 9000}, {4000, 2}, {10000, 4}}, 0, 50, 1},
	}
	for i, test := range cases {
		pos := findPos(test.tc, test.cur, test.offset)
		if pos != test.expected {
			t.Errorf("case %d: expected %d got %d", i, test.expected, pos)
		}
	}
}
@ -1,27 +0,0 @@
apiVersion: v1
kind: ReplicationController
metadata:
  labels:
    name: diurnal-controller
  name: diurnal-controller
spec:
  replicas: 1
  selector:
    name: diurnal-controller
  template:
    metadata:
      labels:
        name: diurnal-controller
    spec:
      containers:
      - args: ["-labels", "name=redis-slave", "-times", "00:00Z,00:02Z,01:00Z,02:30Z", "-counts", "3,7,6,9"]
        resources:
          limits:
            cpu: 0.1
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        image: uluyol/kube-diurnal:0.5
        name: diurnal-controller
@ -1,226 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
	"errors"
	"fmt"
	"time"
)

type parseTimeState int

const (
	sHour parseTimeState = iota + 1
	sMinute
	sSecond
	sUTC
	sOffHour
	sOffMinute
)

var parseTimeStateString = map[parseTimeState]string{
	sHour:      "hour",
	sMinute:    "minute",
	sSecond:    "second",
	sUTC:       "UTC",
	sOffHour:   "offset hour",
	sOffMinute: "offset minute",
}

type timeParseErr struct {
	state parseTimeState
}

func (t timeParseErr) Error() string {
	return "expected two digits for " + parseTimeStateString[t.state]
}

// getTwoDigits parses the first two characters of s as a two-digit decimal
// number, reporting whether the parse succeeded.
func getTwoDigits(s string) (int, bool) {
	if len(s) >= 2 && '0' <= s[0] && s[0] <= '9' && '0' <= s[1] && s[1] <= '9' {
		return int(s[0]-'0')*10 + int(s[1]-'0'), true
	}
	return 0, false
}

// zoneChar reports whether b starts a time zone designator.
func zoneChar(b byte) bool {
	return b == 'Z' || b == '+' || b == '-'
}

func validate(x, min, max int, name string) error {
	if x < min || max < x {
		return fmt.Errorf("the %s must be within the range %d...%d", name, min, max)
	}
	return nil
}

type triState int

const (
	unset triState = iota
	setFalse
	setTrue
)

// parseTimeISO8601 parses times (without dates) according to the ISO 8601
// standard. The standard time package can understand layouts which accept
// valid ISO 8601 input. However, these layouts also accept input which is
// not valid ISO 8601 (in particular, a negative zero time offset, "-00").
// Furthermore, there are a number of acceptable layouts, and handling
// all of them using the time package requires trying them one at a time.
// This is error-prone, slow, not obviously correct, and again, allows
// a wider range of input to be accepted than is desirable. For these
// reasons, we implement ISO 8601 parsing without the use of the time
// package.
func parseTimeISO8601(s string) (time.Time, error) {
	theTime := struct {
		hour      int
		minute    int
		second    int
		utc       triState
		offNeg    bool
		offHour   int
		offMinute int
	}{}
	state := sHour
	isExtended := false
	for s != "" {
		switch state {
		case sHour:
			v, ok := getTwoDigits(s)
			if !ok {
				return time.Time{}, timeParseErr{state}
			}
			theTime.hour = v
			s = s[2:]
		case sMinute:
			if !zoneChar(s[0]) {
				if s[0] == ':' {
					isExtended = true
					s = s[1:]
				}
				v, ok := getTwoDigits(s)
				if !ok {
					return time.Time{}, timeParseErr{state}
				}
				theTime.minute = v
				s = s[2:]
			}
		case sSecond:
			if !zoneChar(s[0]) {
				if s[0] == ':' {
					if isExtended {
						s = s[1:]
					} else {
						return time.Time{}, errors.New("unexpected ':' before 'second' value")
					}
				} else if isExtended {
					return time.Time{}, errors.New("expected ':' before 'second' value")
				}
				v, ok := getTwoDigits(s)
				if !ok {
					return time.Time{}, timeParseErr{state}
				}
				theTime.second = v
				s = s[2:]
			}
		case sUTC:
			if s[0] == 'Z' {
				theTime.utc = setTrue
				s = s[1:]
			} else {
				theTime.utc = setFalse
			}
		case sOffHour:
			if theTime.utc == setTrue {
				return time.Time{}, errors.New("unexpected offset, already specified UTC")
			}
			var sign int
			if s[0] == '+' {
				sign = 1
			} else if s[0] == '-' {
				sign = -1
				theTime.offNeg = true
			} else {
				return time.Time{}, errors.New("offset must begin with '+' or '-'")
			}
			s = s[1:]
			v, ok := getTwoDigits(s)
			if !ok {
				return time.Time{}, timeParseErr{state}
			}
			theTime.offHour = sign * v
			s = s[2:]
		case sOffMinute:
			if s[0] == ':' {
				if isExtended {
					s = s[1:]
				} else {
					return time.Time{}, errors.New("unexpected ':' before 'offset minute' value")
				}
			} else if isExtended {
				return time.Time{}, errors.New("expected ':' before 'offset minute' value")
			}
			v, ok := getTwoDigits(s)
			if !ok {
				return time.Time{}, timeParseErr{state}
			}
			theTime.offMinute = v
			s = s[2:]
		default:
			return time.Time{}, errors.New("an unknown error occurred")
		}
		state++
	}
	if err := validate(theTime.hour, 0, 23, "hour"); err != nil {
		return time.Time{}, err
	}
	if err := validate(theTime.minute, 0, 59, "minute"); err != nil {
		return time.Time{}, err
	}
	if err := validate(theTime.second, 0, 59, "second"); err != nil {
		return time.Time{}, err
	}
	if err := validate(theTime.offHour, -12, 14, "offset hour"); err != nil {
		return time.Time{}, err
	}
	if err := validate(theTime.offMinute, 0, 59, "offset minute"); err != nil {
		return time.Time{}, err
	}
	if theTime.offNeg && theTime.offHour == 0 && theTime.offMinute == 0 {
		return time.Time{}, errors.New("an offset of -00 may not be used, must use +00")
	}
	var (
		loc *time.Location
		err error
	)
	if theTime.utc == setTrue {
		loc, err = time.LoadLocation("UTC")
		if err != nil {
			panic(err)
		}
	} else if theTime.utc == setFalse {
		loc = time.FixedZone("Zone", theTime.offMinute*60+theTime.offHour*3600)
	} else {
		loc, err = time.LoadLocation("Local")
		if err != nil {
			panic(err)
		}
	}
	t := time.Date(1, time.January, 1, theTime.hour, theTime.minute, theTime.second, 0, loc)
	return t, nil
}
Some files were not shown because too many files have changed in this diff.