diff --git a/cluster/saltbase/pillar/systemd.sls b/cluster/saltbase/pillar/systemd.sls new file mode 100644 index 00000000000..83f4804f0b2 --- /dev/null +++ b/cluster/saltbase/pillar/systemd.sls @@ -0,0 +1,9 @@ +{% if grains['oscodename'] in [ 'vivid', 'jessie' ] %} +is_systemd: True +systemd_system_path: /lib/systemd/system +{% elif grains['os_family'] == 'RedHat' %} +is_systemd: True +systemd_system_path: /usr/lib/systemd/system +{% else %} +is_systemd: False +{% endif %} diff --git a/cluster/saltbase/pillar/top.sls b/cluster/saltbase/pillar/top.sls index 273eb165f02..d9ad7dbfc34 100644 --- a/cluster/saltbase/pillar/top.sls +++ b/cluster/saltbase/pillar/top.sls @@ -5,3 +5,4 @@ base: - logging - docker-images - privilege + - systemd diff --git a/cluster/saltbase/salt/docker/docker-defaults b/cluster/saltbase/salt/docker/docker-defaults index f325b4945d5..4c1aee2c6cf 100644 --- a/cluster/saltbase/salt/docker/docker-defaults +++ b/cluster/saltbase/salt/docker/docker-defaults @@ -1,6 +1,3 @@ -DOCKER_OPTS="" -{% if grains.docker_opts is defined and grains.docker_opts %} -DOCKER_OPTS="${DOCKER_OPTS} {{grains.docker_opts}}" -{% endif %} -DOCKER_OPTS="${DOCKER_OPTS} --bridge=cbr0 --iptables=false --ip-masq=false" +{% set grains_opts = grains.get('docker_opts', '') -%} +DOCKER_OPTS="{{grains_opts}} --bridge=cbr0 --iptables=false --ip-masq=false" DOCKER_NOFILE=1000000 diff --git a/cluster/saltbase/salt/docker/docker.service b/cluster/saltbase/salt/docker/docker.service new file mode 100644 index 00000000000..ee6c377e56e --- /dev/null +++ b/cluster/saltbase/salt/docker/docker.service @@ -0,0 +1,20 @@ +[Unit] +Description=Docker Application Container Engine +Documentation=http://docs.docker.com +After=network.target docker.socket +Requires=docker.socket + +[Service] +EnvironmentFile={{ environment_file }} +ExecStart=/usr/bin/docker -d -H fd:// "$DOCKER_OPTS" +MountFlags=slave +LimitNOFILE=1048576 +LimitNPROC=1048576 +LimitCORE=infinity +Restart=always +RestartSec=2s 
+StartLimitInterval=0 + +[Install] +WantedBy=multi-user.target + diff --git a/cluster/saltbase/salt/docker/init.sls b/cluster/saltbase/salt/docker/init.sls index e425f32e708..be9e0558380 100644 --- a/cluster/saltbase/salt/docker/init.sls +++ b/cluster/saltbase/salt/docker/init.sls @@ -1,4 +1,4 @@ -{% if grains['os_family'] == 'RedHat' %} +{% if pillar.get('is_systemd') %} {% set environment_file = '/etc/sysconfig/docker' %} {% else %} {% set environment_file = '/etc/default/docker' %} @@ -116,6 +116,36 @@ lxc-docker-{{ override_docker_ver }}: - file: /var/cache/docker-install/{{ override_deb }} {% endif %} # end override_docker_ver != '' +# Default docker systemd unit file doesn't use an EnvironmentFile; replace it with one that does. +{% if pillar.get('is_systemd') %} + +{{ pillar.get('systemd_system_path') }}/docker.service: + file.managed: + - source: salt://docker/docker.service + - template: jinja + - user: root + - group: root + - mode: 644 + - defaults: + environment_file: {{ environment_file }} + +# The docker service.running block below doesn't work reliably +# Instead we run our script which e.g. does a systemd daemon-reload +# But we keep the service block below, so it can be used by dependencies +# TODO: Fix this +fix-service-docker: + cmd.wait: + - name: /opt/kubernetes/helpers/services bounce docker + - watch: + - file: {{ pillar.get('systemd_system_path') }}/docker.service + - file: {{ environment_file }} +{% if override_docker_ver != '' %} + - require: + - pkg: lxc-docker-{{ override_docker_ver }} +{% endif %} + +{% endif %} + docker: service.running: # Starting Docker is racy on aws for some reason. 
To be honest, since Monit diff --git a/cluster/saltbase/salt/etcd/init.sls b/cluster/saltbase/salt/etcd/init.sls index 45310cfe60d..ca828bca94b 100644 --- a/cluster/saltbase/salt/etcd/init.sls +++ b/cluster/saltbase/salt/etcd/init.sls @@ -24,9 +24,11 @@ delete_etcd_default: file.absent: - name: /etc/default/etcd +{% if pillar.get('is_systemd') %} delete_etcd_service_file: file.absent: - - name: /usr/lib/systemd/system/etcd.service + - name: {{ pillar.get('systemd_system_path') }}/etcd.service +{% endif %} delete_etcd_initd: file.absent: diff --git a/cluster/saltbase/salt/kube-addons/init.sls b/cluster/saltbase/salt/kube-addons/init.sls index c5a7b31831c..46cba18b029 100644 --- a/cluster/saltbase/salt/kube-addons/init.sls +++ b/cluster/saltbase/salt/kube-addons/init.sls @@ -119,13 +119,17 @@ addon-dir-create: - group: root - mode: 755 -{% if grains['os_family'] == 'RedHat' %} +{% if pillar.get('is_systemd') %} -/usr/lib/systemd/system/kube-addons.service: +{{ pillar.get('systemd_system_path') }}/kube-addons.service: file.managed: - source: salt://kube-addons/kube-addons.service - user: root - group: root + cmd.wait: + - name: /opt/kubernetes/helpers/services bounce kube-addons + - watch: + - file: {{ pillar.get('systemd_system_path') }}/kube-addons.service {% else %} @@ -136,8 +140,6 @@ addon-dir-create: - group: root - mode: 755 -{% endif %} - # Stop kube-addons service each time salt is executed, just in case # there was a modification of addons. 
# Actually, this should be handled by watching file changes, but @@ -151,3 +153,5 @@ kube-addons: - enable: True - require: - service: service-kube-addon-stop + +{% endif %} diff --git a/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest b/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest index b8d7db0f92e..34d91da223b 100644 --- a/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest +++ b/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest @@ -1,5 +1,5 @@ {% set daemon_args = "$DAEMON_ARGS" -%} -{% if grains['os_family'] == 'RedHat' -%} +{% if pillar.get('is_systemd') -%} {% set daemon_args = "" -%} {% endif -%} diff --git a/cluster/saltbase/salt/kube-master-addons/init.sls b/cluster/saltbase/salt/kube-master-addons/init.sls index 91186052205..572ce3c1ce3 100644 --- a/cluster/saltbase/salt/kube-master-addons/init.sls +++ b/cluster/saltbase/salt/kube-master-addons/init.sls @@ -5,25 +5,6 @@ - group: root - mode: 755 -{% if grains['os_family'] == 'RedHat' %} - -/usr/lib/systemd/system/kube-master-addons.service: - file.managed: - - source: salt://kube-master-addons/kube-master-addons.service - - user: root - - group: root - -{% else %} - -/etc/init.d/kube-master-addons: - file.managed: - - source: salt://kube-master-addons/initd - - user: root - - group: root - - mode: 755 - -{% endif %} - # Used to restart kube-master-addons service each time salt is run # Actually, it doens't work (the service is not restarted), # but master-addon service always terminates after it does it job, @@ -37,6 +18,42 @@ master-docker-image-tags: file.touch: - name: /srv/pillar/docker-images.sls +{% if pillar.get('is_systemd') %} + +{{ pillar.get('systemd_system_path') }}/kube-master-addons.service: + file.managed: + - source: salt://kube-master-addons/kube-master-addons.service + - user: root + - group: root + cmd.wait: + - name: /opt/kubernetes/helpers/services bounce kube-master-addons + - watch: + - file: master-docker-image-tags + - file: 
/etc/kubernetes/kube-master-addons.sh + - file: {{ pillar.get('systemd_system_path') }}/kube-master-addons.service + +{% else %} + +/etc/init.d/kube-master-addons: + file.managed: + - source: salt://kube-master-addons/initd + - user: root + - group: root + - mode: 755 + +# Current containervm image by default has both docker and kubelet +# running. But during cluster creation stage, docker and kubelet +# could be overwritten completely, or restarted due to flag changes. +# The ordering of salt states for service docker, kubelet and +# master-addon below is very important to avoid the race between +# salt restart docker or kubelet and kubelet start master components. +# Without the ordering of salt states, when gce instance boot up, +# configure-vm.sh will run and download the release. At the end of +# boot, run-salt will run kube-master-addons service which installs +# master component manifest files to kubelet config directory before +# the installation of proper version kubelet. Please see +# https://github.com/GoogleCloudPlatform/kubernetes/issues/10122#issuecomment-114566063 +# for detail explanation on this very issue. 
kube-master-addons: service.running: - enable: True @@ -44,3 +61,5 @@ kube-master-addons: - watch: - file: master-docker-image-tags - file: /etc/kubernetes/kube-master-addons.sh + +{% endif %} diff --git a/cluster/saltbase/salt/kube-proxy/default b/cluster/saltbase/salt/kube-proxy/default index 84d9b7232e3..7d7a9470c37 100644 --- a/cluster/saltbase/salt/kube-proxy/default +++ b/cluster/saltbase/salt/kube-proxy/default @@ -1,5 +1,5 @@ {% set daemon_args = "$DAEMON_ARGS" -%} -{% if grains['os_family'] == 'RedHat' -%} +{% if pillar.get('is_systemd') -%} {% set daemon_args = "" -%} {% endif -%} {# TODO(azure-maintainer): add support for distributing kubeconfig with token to kube-proxy #} diff --git a/cluster/saltbase/salt/kube-proxy/init.sls b/cluster/saltbase/salt/kube-proxy/init.sls index 3d54452b2d2..7dabce92a2e 100644 --- a/cluster/saltbase/salt/kube-proxy/init.sls +++ b/cluster/saltbase/salt/kube-proxy/init.sls @@ -1,4 +1,4 @@ -{% if grains['os_family'] == 'RedHat' %} +{% if pillar.get('is_systemd') %} {% set environment_file = '/etc/sysconfig/kube-proxy' %} {% else %} {% set environment_file = '/etc/default/kube-proxy' %} @@ -11,25 +11,6 @@ - group: root - mode: 755 -{% if grains['os_family'] == 'RedHat' %} - -/usr/lib/systemd/system/kube-proxy.service: - file.managed: - - source: salt://kube-proxy/kube-proxy.service - - user: root - - group: root - -{% else %} - -/etc/init.d/kube-proxy: - file.managed: - - source: salt://kube-proxy/initd - - user: root - - group: root - - mode: 755 - -{% endif %} - {{ environment_file }}: file.managed: - source: salt://kube-proxy/default @@ -48,15 +29,41 @@ kube-proxy: - home: /var/kube-proxy - require: - group: kube-proxy + +{% if pillar.get('is_systemd') %} + +{{ pillar.get('systemd_system_path') }}/kube-proxy.service: + file.managed: + - source: salt://kube-proxy/kube-proxy.service + - user: root + - group: root + cmd.wait: + - name: /opt/kubernetes/helpers/services bounce kube-proxy + - watch: + - file: {{ environment_file 
}} + - file: {{ pillar.get('systemd_system_path') }}/kube-proxy.service + - file: /var/lib/kube-proxy/kubeconfig + +{% else %} + +/etc/init.d/kube-proxy: + file.managed: + - source: salt://kube-proxy/initd + - user: root + - group: root + - mode: 755 + +kube-proxy-service: service.running: + - name: kube-proxy - enable: True - watch: - file: {{ environment_file }} -{% if grains['os_family'] != 'RedHat' %} - file: /etc/init.d/kube-proxy -{% endif %} - file: /var/lib/kube-proxy/kubeconfig +{% endif %} + /var/lib/kube-proxy/kubeconfig: file.managed: - source: salt://kube-proxy/kubeconfig diff --git a/cluster/saltbase/salt/kubelet/default b/cluster/saltbase/salt/kubelet/default index 40c3d2b9893..7ae38e1284a 100644 --- a/cluster/saltbase/salt/kubelet/default +++ b/cluster/saltbase/salt/kubelet/default @@ -1,5 +1,5 @@ {% set daemon_args = "$DAEMON_ARGS" -%} -{% if grains['os_family'] == 'RedHat' -%} +{% if pillar.get('is_systemd') -%} {% set daemon_args = "" -%} {% endif -%} diff --git a/cluster/saltbase/salt/kubelet/init.sls b/cluster/saltbase/salt/kubelet/init.sls index 8a8838a46a4..2dd0399a71d 100644 --- a/cluster/saltbase/salt/kubelet/init.sls +++ b/cluster/saltbase/salt/kubelet/init.sls @@ -1,4 +1,4 @@ -{% if grains['os_family'] == 'RedHat' %} +{% if pillar.get('is_systemd') %} {% set environment_file = '/etc/sysconfig/kubelet' %} {% else %} {% set environment_file = '/etc/default/kubelet' %} @@ -19,25 +19,6 @@ - group: root - mode: 755 -{% if grains['os_family'] == 'RedHat' %} - -/usr/lib/systemd/system/kubelet.service: - file.managed: - - source: salt://kubelet/kubelet.service - - user: root - - group: root - -{% else %} - -/etc/init.d/kubelet: - file.managed: - - source: salt://kubelet/initd - - user: root - - group: root - - mode: 755 - -{% endif %} - # The default here is that this file is blank. If this is the case, the kubelet # won't be able to parse it as JSON and will try to use the kubernetes_auth file # instead. 
You'll see a single error line in the kubelet start up file @@ -64,12 +45,46 @@ - mode: 400 - makedirs: true +{% if pillar.get('is_systemd') %} + +{{ pillar.get('systemd_system_path') }}/kubelet.service: + file.managed: + - source: salt://kubelet/kubelet.service + - user: root + - group: root + +# The service.running block below doesn't work reliably +# Instead we run our script which e.g. does a systemd daemon-reload +# But we keep the service block below, so it can be used by dependencies +# TODO: Fix this +fix-service-kubelet: + cmd.wait: + - name: /opt/kubernetes/helpers/services bounce kubelet + - watch: + - file: /usr/local/bin/kubelet + - file: {{ pillar.get('systemd_system_path') }}/kubelet.service + - file: {{ environment_file }} + - file: /var/lib/kubelet/kubernetes_auth + +{% else %} + +/etc/init.d/kubelet: + file.managed: + - source: salt://kubelet/initd + - user: root + - group: root + - mode: 755 + +{% endif %} + kubelet: service.running: - enable: True - watch: - file: /usr/local/bin/kubelet -{% if grains['os_family'] != 'RedHat' %} +{% if pillar.get('is_systemd') %} + - file: {{ pillar.get('systemd_system_path') }}/kubelet.service +{% else %} - file: /etc/init.d/kubelet {% endif %} {% if grains['os_family'] == 'RedHat' %} diff --git a/cluster/saltbase/salt/monit/init.sls b/cluster/saltbase/salt/monit/init.sls index 9a189e4e8da..97a35ca3b08 100644 --- a/cluster/saltbase/salt/monit/init.sls +++ b/cluster/saltbase/salt/monit/init.sls @@ -1,4 +1,4 @@ -{% if grains['os_family'] != 'RedHat' %} +{% if not pillar.get('is_systemd') %} monit: pkg: diff --git a/cluster/saltbase/salt/salt-helpers/init.sls b/cluster/saltbase/salt/salt-helpers/init.sls new file mode 100644 index 00000000000..5298e4f63a3 --- /dev/null +++ b/cluster/saltbase/salt/salt-helpers/init.sls @@ -0,0 +1,15 @@ +{% if pillar.get('is_systemd') %} +/opt/kubernetes/helpers: + file.directory: + - user: root + - group: root + - makedirs: True + - dir_mode: 755 + +/opt/kubernetes/helpers/services: 
+ file.managed: + - source: salt://salt-helpers/services + - user: root + - group: root + - mode: 755 +{% endif %} diff --git a/cluster/saltbase/salt/salt-helpers/services b/cluster/saltbase/salt/salt-helpers/services new file mode 100644 index 00000000000..c1ff8a07791 --- /dev/null +++ b/cluster/saltbase/salt/salt-helpers/services @@ -0,0 +1,67 @@ +#!/bin/bash + +# Copyright 2015 The Kubernetes Authors All rights reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +ACTION=${1} +SERVICE=${2} + +if [[ -z "${ACTION}" || -z "${SERVICE}" ]]; then + echo "Syntax: ${0} <action> <service>" + exit 1 +fi + + +function reload_state() { + systemctl daemon-reload +} + +function start_service() { + systemctl start ${SERVICE} +} + +function stop_service() { + systemctl stop ${SERVICE} +} + +function enable_service() { + systemctl enable ${SERVICE} +} + +function disable_service() { + systemctl disable ${SERVICE} +} + +function restart_service() { + systemctl restart ${SERVICE} +} + +if [[ "${ACTION}" == "up" ]]; then + reload_state + enable_service + start_service +elif [[ "${ACTION}" == "bounce" ]]; then + reload_state + enable_service + restart_service +elif [[ "${ACTION}" == "down" ]]; then + reload_state + disable_service + stop_service +else + echo "Unknown action: ${ACTION}" + exit 1 +fi diff --git a/cluster/saltbase/salt/top.sls b/cluster/saltbase/salt/top.sls index 30c42dd4538..de1e711d905 100644 --- a/cluster/saltbase/salt/top.sls +++ b/cluster/saltbase/salt/top.sls @@ -2,6
+2,7 @@ base: '*': - base - debian-auto-upgrades + - salt-helpers 'roles:kubernetes-pool': - match: grain