diff --git a/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest b/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest
index 1202be4269a..1191e0766dc 100644
--- a/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest
+++ b/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest
@@ -9,7 +9,9 @@
 {% set cloud_config_volume = "" -%}
 {% if grains.cloud is defined -%}
-  {% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
+  {% if grains.cloud != 'vagrant' -%}
+    {% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
+  {% endif -%}
   {% if grains.cloud in [ 'aws', 'gce' ] and grains.cloud_config is defined -%}
     {% set cloud_config = "--cloud-config=" + grains.cloud_config -%}
diff --git a/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest b/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest
index d3df6419226..bd6e454edd9 100644
--- a/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest
+++ b/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest
@@ -27,7 +27,9 @@
 {% set cloud_config_volume = "" -%}
 {% if grains.cloud is defined -%}
-  {% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
+  {% if grains.cloud != 'vagrant' -%}
+    {% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
+  {% endif -%}
   {% set service_account_key = " --service-account-private-key-file=/srv/kubernetes/server.key " -%}
   {% if grains.cloud in [ 'aws', 'gce' ] and grains.cloud_config is defined -%}
diff --git a/cluster/saltbase/salt/kube-node-unpacker/init.sls b/cluster/saltbase/salt/kube-node-unpacker/init.sls
index eb711a14094..db93b91755d 100644
--- a/cluster/saltbase/salt/kube-node-unpacker/init.sls
+++ b/cluster/saltbase/salt/kube-node-unpacker/init.sls
@@ -1,6 +1,7 @@
 /etc/kubernetes/kube-node-unpacker.sh:
   file.managed:
     - source: salt://kube-node-unpacker/kube-node-unpacker.sh
+    - makedirs: True
     - user: root
     - group: root
     - mode: 755
diff --git a/cluster/saltbase/salt/kubelet/default b/cluster/saltbase/salt/kubelet/default
index c642e4acc9f..bca9340676f 100644
--- a/cluster/saltbase/salt/kubelet/default
+++ b/cluster/saltbase/salt/kubelet/default
@@ -46,7 +46,7 @@
 {% endif -%}
 {% set cloud_provider = "" -%}
-{% if grains.cloud is defined -%}
+{% if grains.cloud is defined and grains.cloud != 'vagrant' -%}
   {% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
 {% endif -%}
diff --git a/cluster/vagrant/provision-master.sh b/cluster/vagrant/provision-master.sh
index 5a16929087b..366d7731802 100755
--- a/cluster/vagrant/provision-master.sh
+++ b/cluster/vagrant/provision-master.sh
@@ -14,8 +14,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# exit on any error
-set -e
+set -o errexit
+set -o nounset
+set -o pipefail
 
 # Set the host name explicitly
 # See: https://github.com/mitchellh/vagrant/issues/2430
@@ -47,26 +48,6 @@ function release_not_found() {
   exit 1
 }
 
-# Look for our precompiled binary releases. When running from a source repo,
-# these are generated under _output. When running from an release tarball these
-# are under ./server.
-server_binary_tar="/vagrant/server/kubernetes-server-linux-amd64.tar.gz"
-if [[ ! -f "$server_binary_tar" ]]; then
-  server_binary_tar="/vagrant/_output/release-tars/kubernetes-server-linux-amd64.tar.gz"
-fi
-if [[ ! -f "$server_binary_tar" ]]; then
-  release_not_found
-fi
-
-salt_tar="/vagrant/server/kubernetes-salt.tar.gz"
-if [[ ! -f "$salt_tar" ]]; then
-  salt_tar="/vagrant/_output/release-tars/kubernetes-salt.tar.gz"
-fi
-if [[ ! -f "$salt_tar" ]]; then
-  release_not_found
-fi
-
 # Setup hosts file to support ping by hostname to each minion in the cluster from apiserver
 for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
   minion=${NODE_NAMES[$i]}
@@ -82,87 +63,7 @@ echo "$MASTER_IP $MASTER_NAME" >> /etc/hosts
 # Configure the master network
 provision-network-master
 
-# Update salt configuration
-mkdir -p /etc/salt/minion.d
-cat <<EOF >/etc/salt/minion.d/master.conf
-master: '$(echo "$MASTER_NAME" | sed -e "s/'/''/g")'
-auth_timeout: 10
-auth_tries: 2
-auth_safemode: True
-ping_interval: 1
-random_reauth_delay: 3
-state_aggregrate:
-  - pkg
-EOF
-
-cat <<EOF >/etc/salt/minion.d/grains.conf
-grains:
-  node_ip: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
-  publicAddressOverride: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
-  network_mode: openvswitch
-  networkInterfaceName: '$(echo "$NETWORK_IF_NAME" | sed -e "s/'/''/g")'
-  api_servers: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
-  cloud: vagrant
-  roles:
-    - kubernetes-master
-  runtime_config: '$(echo "$RUNTIME_CONFIG" | sed -e "s/'/''/g")'
-  docker_opts: '$(echo "$DOCKER_OPTS" | sed -e "s/'/''/g")'
-  master_extra_sans: '$(echo "$MASTER_EXTRA_SANS" | sed -e "s/'/''/g")'
-  keep_host_etcd: true
-EOF
-
-mkdir -p /srv/salt-overlay/pillar
-cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
-  service_cluster_ip_range: '$(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
-  cert_ip: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
-  enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")'
-  enable_cluster_logging: '$(echo "$ENABLE_CLUSTER_LOGGING" | sed -e "s/'/''/g")'
-  enable_cluster_ui: '$(echo "$ENABLE_CLUSTER_UI" | sed -e "s/'/''/g")'
-  enable_node_logging: '$(echo "$ENABLE_NODE_LOGGING" | sed -e "s/'/''/g")'
-  logging_destination: '$(echo "$LOGGING_DESTINATION" | sed -e "s/'/''/g")'
-  elasticsearch_replicas: '$(echo "$ELASTICSEARCH_LOGGING_REPLICAS" | sed -e "s/'/''/g")'
-  enable_cluster_dns: '$(echo "$ENABLE_CLUSTER_DNS" | sed -e "s/'/''/g")'
-  dns_replicas: '$(echo "$DNS_REPLICAS" | sed -e "s/'/''/g")'
-  dns_server: '$(echo "$DNS_SERVER_IP" | sed -e "s/'/''/g")'
-  dns_domain: '$(echo "$DNS_DOMAIN" | sed -e "s/'/''/g")'
-  instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")'
-  admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")'
-  enable_cpu_cfs_quota: '$(echo "$ENABLE_CPU_CFS_QUOTA" | sed -e "s/'/''/g")'
-  network_provider: '$(echo "$NETWORK_PROVIDER" | sed -e "s/'/''/g")'
-  opencontrail_tag: '$(echo "$OPENCONTRAIL_TAG" | sed -e "s/'/''/g")'
-  opencontrail_kubernetes_tag: '$(echo "$OPENCONTRAIL_KUBERNETES_TAG" | sed -e "s/'/''/g")'
-  opencontrail_public_subnet: '$(echo "$OPENCONTRAIL_PUBLIC_SUBNET" | sed -e "s/'/''/g")'
-  e2e_storage_test_environment: '$(echo "$E2E_STORAGE_TEST_ENVIRONMENT" | sed -e "s/'/''/g")'
-EOF
-
-# Configure the salt-master
-# Auto accept all keys from minions that try to join
-mkdir -p /etc/salt/master.d
-cat <<EOF >/etc/salt/master.d/auto-accept.conf
-open_mode: True
-auto_accept: True
-EOF
-
-cat <<EOF >/etc/salt/master.d/reactor.conf
-# React to new minions starting by running highstate on them.
-reactor:
-  - 'salt/minion/*/start':
-    - /srv/reactor/highstate-new.sls
-EOF
-
-cat <<EOF >/etc/salt/master.d/salt-output.conf
-# Minimize the amount of output to terminal
-state_verbose: False
-state_output: mixed
-log_level: debug
-log_level_logfile: debug
-EOF
-
-cat <<EOF >/etc/salt/minion.d/log-level-debug.conf
-log_level: debug
-log_level_logfile: debug
-EOF
-
+write-salt-config kubernetes-master
 
 # Generate and distribute a shared secret (bearer token) to
 # apiserver and kubelet so that kubelet can authenticate to
@@ -179,57 +80,9 @@ if [[ ! -f "${known_tokens_file}" ]]; then
 
   mkdir -p /srv/salt-overlay/salt/kubelet
   kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth"
   (umask u=rw,go= ; echo "{\"BearerToken\": \"$KUBELET_TOKEN\", \"Insecure\": true }" > $kubelet_auth_file)
-  kubelet_kubeconfig_file="/srv/salt-overlay/salt/kubelet/kubeconfig"
-
-  mkdir -p /srv/salt-overlay/salt/kubelet
-  (umask 077;
-  cat > "${kubelet_kubeconfig_file}" << EOF
-apiVersion: v1
-kind: Config
-clusters:
-- cluster:
-    insecure-skip-tls-verify: true
-  name: local
-contexts:
-- context:
-    cluster: local
-    user: kubelet
-  name: service-account-context
-current-context: service-account-context
-users:
-- name: kubelet
-  user:
-    token: ${KUBELET_TOKEN}
-EOF
-)
-
-  mkdir -p /srv/salt-overlay/salt/kube-proxy
-  kube_proxy_kubeconfig_file="/srv/salt-overlay/salt/kube-proxy/kubeconfig"
-  # Make a kubeconfig file with the token.
-  # TODO(etune): put apiserver certs into secret too, and reference from authfile,
-  # so that "Insecure" is not needed.
-  (umask 077;
-  cat > "${kube_proxy_kubeconfig_file}" << EOF
-apiVersion: v1
-kind: Config
-clusters:
-- cluster:
-    insecure-skip-tls-verify: true
-  name: local
-contexts:
-- context:
-    cluster: local
-    user: kube-proxy
-  name: service-account-context
-current-context: service-account-context
-users:
-- name: kube-proxy
-  user:
-    token: ${KUBE_PROXY_TOKEN}
-EOF
-)
+  create-salt-kubelet-auth
+  create-salt-kubeproxy-auth
 
   # Generate tokens for other "service accounts". Append to known_tokens.
   #
   # NB: If this list ever changes, this script actually has to
@@ -250,94 +103,19 @@ if [ ! -e "${BASIC_AUTH_FILE}" ]; then
     echo "${MASTER_USER},${MASTER_PASSWD},admin" > "${BASIC_AUTH_FILE}")
 fi
 
-echo "Running release install script"
-rm -rf /kube-install
-mkdir -p /kube-install
-pushd /kube-install
-  tar xzf "$salt_tar"
-  cp "$server_binary_tar" .
-  ./kubernetes/saltbase/install.sh "${server_binary_tar##*/}"
-popd
-
 # Enable Fedora Cockpit on host to support Kubernetes administration
 # Access it by going to <master-ip>:9090 and login as vagrant/vagrant
 if ! which /usr/libexec/cockpit-ws &>/dev/null; then
-
+
   pushd /etc/yum.repos.d
     wget https://copr.fedoraproject.org/coprs/sgallagh/cockpit-preview/repo/fedora-21/sgallagh-cockpit-preview-fedora-21.repo
-    yum install -y cockpit cockpit-kubernetes 
+    yum install -y cockpit cockpit-kubernetes
   popd
 
   systemctl enable cockpit.socket
   systemctl start cockpit.socket
 fi
 
-# we will run provision to update code each time we test, so we do not want to do salt installs each time
-if ! which salt-master &>/dev/null; then
+install-salt
 
-  # Configure the salt-api
-  cat <<EOF >/etc/salt/master.d/salt-api.conf
-# Set vagrant user as REST API user
-external_auth:
-  pam:
-    vagrant:
-      - .*
-rest_cherrypy:
-  port: 8000
-  host: ${MASTER_IP}
-  disable_ssl: True
-  webhook_disable_auth: True
-EOF
-
-  # Install Salt Master
-  #
-  # -M installs the master
-  # -N does not install the minion
-  curl -sS -L --connect-timeout 20 --retry 6 --retry-delay 10 https://bootstrap.saltstack.com | sh -s -- -M -N
-
-  # Install salt-api
-  #
-  # This is used to provide the network transport for salt-api
-  yum install -y python-cherrypy
-  # This is used to inform the cloud provider used in the vagrant cluster
-  yum install -y salt-api
-  # Set log level to a level higher than "info" to prevent the message about
-  # enabling the service (which is not an error) from being printed to stderr.
-  SYSTEMD_LOG_LEVEL=notice systemctl enable salt-api
-  systemctl start salt-api
-fi
-
-if ! which salt-minion >/dev/null 2>&1; then
-
-  # Install Salt minion
-  curl -sS -L --connect-timeout 20 --retry 6 --retry-delay 10 https://bootstrap.saltstack.com | sh -s
-
-  # Edit the Salt minion unit file to do restart always
-  # needed because vagrant uses this as basis for registration of nodes in cloud provider
-  # set a oom_score_adj to -999 to prevent our node from being killed with salt-master and then making kubelet NotReady
-  # because its not found in salt cloud provider call
-  cat <<EOF >/usr/lib/systemd/system/salt-minion.service
-[Unit]
-Description=The Salt Minion
-After=syslog.target network.target
-
-[Service]
-Type=simple
-ExecStart=/usr/bin/salt-minion
-Restart=Always
-OOMScoreAdjust=-999
-
-[Install]
-WantedBy=multi-user.target
-EOF
-
-  systemctl daemon-reload
-  systemctl restart salt-minion.service
-
-else
-  # Only run highstate when updating the config. In the first-run case, Salt is
-  # set up to run highstate as new minions join for the first time.
-  echo "Executing configuration"
-  salt '*' mine.update
-  salt --force-color '*' state.highstate
-fi
+run-salt
diff --git a/cluster/vagrant/provision-minion.sh b/cluster/vagrant/provision-minion.sh
index f5d2927c8cc..916c1d31858 100755
--- a/cluster/vagrant/provision-minion.sh
+++ b/cluster/vagrant/provision-minion.sh
@@ -14,59 +14,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# exit on any error
-set -e
-
-#setup kubelet config
-mkdir -p "/var/lib/kubelet"
-(umask 077;
-cat > "/var/lib/kubelet/kubeconfig" << EOF
-apiVersion: v1
-kind: Config
-users:
-- name: kubelet
-  user:
-    token: ${KUBELET_TOKEN}
-clusters:
-- name: local
-  cluster:
-    insecure-skip-tls-verify: true
-contexts:
-- context:
-    cluster: local
-    user: kubelet
-  name: service-account-context
-current-context: service-account-context
-EOF
-)
-
-#setup proxy config
-mkdir -p "/var/lib/kube-proxy/"
-# Make a kubeconfig file with the token.
-# TODO(etune): put apiserver certs into secret too, and reference from authfile,
-# so that "Insecure" is not needed.
-(umask 077;
-cat > "/var/lib/kube-proxy/kubeconfig" << EOF
-apiVersion: v1
-kind: Config
-users:
-- name: kube-proxy
-  user:
-    token: ${KUBE_PROXY_TOKEN}
-clusters:
-- name: local
-  cluster:
-    insecure-skip-tls-verify: true
-contexts:
-- context:
-    cluster: local
-    user: kube-proxy
-  name: service-account-context
-current-context: service-account-context
-EOF
-)
-
+set -o errexit
+set -o nounset
+set -o pipefail
 
 # Set the host name explicitly
 # See: https://github.com/mitchellh/vagrant/issues/2430
@@ -109,85 +59,11 @@ done
 # Configure network
 provision-network-minion
 
-# Placeholder for any other manifests that may be per-node.
-mkdir -p /etc/kubernetes/manifests
+write-salt-config kubernetes-pool
 
-# Let the minion know who its master is
-# Recover the salt-minion if the salt-master network changes
-## auth_timeout - how long we want to wait for a time out
-## auth_tries - how many times we will retry before restarting salt-minion
-## auth_safemode - if our cert is rejected, we will restart salt minion
-## ping_interval - restart the minion if we cannot ping the master after 1 minute
-## random_reauth_delay - wait 0-3 seconds when reauthenticating
-## recon_default - how long to wait before reconnecting
-## recon_max - how long you will wait upper bound
-## state_aggregrate - try to do a single yum command to install all referenced packages where possible at once, should improve startup times
-##
-mkdir -p /etc/salt/minion.d
-cat <<EOF >/etc/salt/minion.d/master.conf
-master: '$(echo "$MASTER_NAME" | sed -e "s/'/''/g")'
-auth_timeout: 10
-auth_tries: 2
-auth_safemode: True
-ping_interval: 1
-random_reauth_delay: 3
-state_aggregrate:
-  - pkg
-EOF
+create-salt-kubelet-auth
+create-salt-kubeproxy-auth
 
-cat <<EOF >/etc/salt/minion.d/log-level-debug.conf
-log_level: debug
-log_level_logfile: debug
-EOF
+install-salt
 
-# Our minions will have a pool role to distinguish them from the master.
-cat <<EOF >/etc/salt/minion.d/grains.conf
-grains:
-  cloud: vagrant
-  network_mode: openvswitch
-  node_ip: '$(echo "$NODE_IP" | sed -e "s/'/''/g")'
-  api_servers: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
-  networkInterfaceName: '$(echo "$NETWORK_IF_NAME" | sed -e "s/'/''/g")'
-  roles:
-    - kubernetes-pool
-  cbr-cidr: '$(echo "$CONTAINER_SUBNET" | sed -e "s/'/''/g")'
-  hostname_override: '$(echo "$NODE_IP" | sed -e "s/'/''/g")'
-  docker_opts: '$(echo "$DOCKER_OPTS" | sed -e "s/'/''/g")'
-EOF
-
-# QoS support requires that swap memory is disabled on each of the minions
-echo "Disable swap memory to ensure proper QoS"
-swapoff -a
-
-# we will run provision to update code each time we test, so we do not want to do salt install each time
-if ! which salt-minion >/dev/null 2>&1; then
-
-  # Install Salt
-  curl -sS -L --connect-timeout 20 --retry 6 --retry-delay 10 https://bootstrap.saltstack.com | sh -s
-
-  # Edit the Salt minion unit file to do restart always
-  # needed because vagrant uses this as basis for registration of nodes in cloud provider
-  # set a oom_score_adj to -999 to prevent our node from being killed with salt-master and then making kubelet NotReady
-  # because its not found in salt cloud provider call
-  cat <<EOF >/usr/lib/systemd/system/salt-minion.service
-[Unit]
-Description=The Salt Minion
-After=syslog.target network.target
-
-[Service]
-Type=simple
-ExecStart=/usr/bin/salt-minion
-Restart=Always
-OOMScoreAdjust=-999
-
-[Install]
-WantedBy=multi-user.target
-EOF
-
-  systemctl daemon-reload
-  systemctl restart salt-minion.service
-
-else
-  # Sometimes the minion gets wedged when it comes up along with the master.
-  # Restarting it here un-wedges it.
-  systemctl restart salt-minion.service
-fi
+run-salt
diff --git a/cluster/vagrant/provision-utils.sh b/cluster/vagrant/provision-utils.sh
new file mode 100755
index 00000000000..a3dc22be6a7
--- /dev/null
+++ b/cluster/vagrant/provision-utils.sh
@@ -0,0 +1,153 @@
+#!/bin/bash
+
+# Copyright 2015 The Kubernetes Authors All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+function write-salt-config() {
+  local role="$1"
+
+  # Update salt configuration
+  mkdir -p /etc/salt/minion.d
+
+  mkdir -p /srv/salt-overlay/pillar
+  cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
+service_cluster_ip_range: '$(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
+cert_ip: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
+enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")'
+enable_cluster_logging: '$(echo "$ENABLE_CLUSTER_LOGGING" | sed -e "s/'/''/g")'
+enable_cluster_ui: '$(echo "$ENABLE_CLUSTER_UI" | sed -e "s/'/''/g")'
+enable_node_logging: '$(echo "$ENABLE_NODE_LOGGING" | sed -e "s/'/''/g")'
+logging_destination: '$(echo "$LOGGING_DESTINATION" | sed -e "s/'/''/g")'
+elasticsearch_replicas: '$(echo "$ELASTICSEARCH_LOGGING_REPLICAS" | sed -e "s/'/''/g")'
+enable_cluster_dns: '$(echo "$ENABLE_CLUSTER_DNS" | sed -e "s/'/''/g")'
+dns_replicas: '$(echo "$DNS_REPLICAS" | sed -e "s/'/''/g")'
+dns_server: '$(echo "$DNS_SERVER_IP" | sed -e "s/'/''/g")'
+dns_domain: '$(echo "$DNS_DOMAIN" | sed -e "s/'/''/g")'
+instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")'
+admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")'
+enable_cpu_cfs_quota: '$(echo "$ENABLE_CPU_CFS_QUOTA" | sed -e "s/'/''/g")'
+network_provider: '$(echo "$NETWORK_PROVIDER" | sed -e "s/'/''/g")'
+opencontrail_tag: '$(echo "$OPENCONTRAIL_TAG" | sed -e "s/'/''/g")'
+opencontrail_kubernetes_tag: '$(echo "$OPENCONTRAIL_KUBERNETES_TAG" | sed -e "s/'/''/g")'
+opencontrail_public_subnet: '$(echo "$OPENCONTRAIL_PUBLIC_SUBNET" | sed -e "s/'/''/g")'
+e2e_storage_test_environment: '$(echo "$E2E_STORAGE_TEST_ENVIRONMENT" | sed -e "s/'/''/g")'
+EOF
+
+  cat <<EOF >/etc/salt/minion.d/log-level-debug.conf
+log_level: info
+log_level_logfile: debug
+EOF
+
+  cat <<EOF >/etc/salt/minion.d/grains.conf
+grains:
+  node_ip: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
+  publicAddressOverride: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
+  network_mode: openvswitch
+  networkInterfaceName: '$(echo "$NETWORK_IF_NAME" | sed -e "s/'/''/g")'
+  api_servers: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
+  cloud: vagrant
+  roles:
+    - $role
+  runtime_config: '$(echo "$RUNTIME_CONFIG" | sed -e "s/'/''/g")'
+  docker_opts: '$(echo "$DOCKER_OPTS" | sed -e "s/'/''/g")'
+  master_extra_sans: '$(echo "$MASTER_EXTRA_SANS" | sed -e "s/'/''/g")'
+  keep_host_etcd: true
+EOF
+}
+
+function install-salt() {
+  server_binary_tar="/vagrant/server/kubernetes-server-linux-amd64.tar.gz"
+  if [[ ! -f "$server_binary_tar" ]]; then
+    server_binary_tar="/vagrant/_output/release-tars/kubernetes-server-linux-amd64.tar.gz"
+  fi
+  if [[ ! -f "$server_binary_tar" ]]; then
+    release_not_found
+  fi
+
+  salt_tar="/vagrant/server/kubernetes-salt.tar.gz"
+  if [[ ! -f "$salt_tar" ]]; then
+    salt_tar="/vagrant/_output/release-tars/kubernetes-salt.tar.gz"
+  fi
+  if [[ ! -f "$salt_tar" ]]; then
+    release_not_found
+  fi
+
+  echo "Running release install script"
+  rm -rf /kube-install
+  mkdir -p /kube-install
+  pushd /kube-install
+    tar xzf "$salt_tar"
+    cp "$server_binary_tar" .
+    ./kubernetes/saltbase/install.sh "${server_binary_tar##*/}"
+  popd
+
+  if ! which salt-call >/dev/null 2>&1; then
+    # Install salt binaries
+    curl -sS -L --connect-timeout 20 --retry 6 --retry-delay 10 https://bootstrap.saltstack.com | sh -s
+  fi
+}
+
+function run-salt() {
+  salt-call --local state.highstate
+}
+
+function create-salt-kubelet-auth() {
+  local -r kubelet_kubeconfig_folder="/srv/salt-overlay/salt/kubelet"
+  mkdir -p "${kubelet_kubeconfig_folder}"
+  (umask 077;
+  cat > "${kubelet_kubeconfig_folder}/kubeconfig" << EOF
+apiVersion: v1
+kind: Config
+clusters:
+- cluster:
+    insecure-skip-tls-verify: true
+  name: local
+contexts:
+- context:
+    cluster: local
+    user: kubelet
+  name: service-account-context
+current-context: service-account-context
+users:
+- name: kubelet
+  user:
+    token: ${KUBELET_TOKEN}
+EOF
+  )
+}
+
+function create-salt-kubeproxy-auth() {
+  kube_proxy_kubeconfig_folder="/srv/salt-overlay/salt/kube-proxy"
+  mkdir -p "${kube_proxy_kubeconfig_folder}"
+  (umask 077;
+  cat > "${kube_proxy_kubeconfig_folder}/kubeconfig" << EOF
+apiVersion: v1
+kind: Config
+clusters:
+- cluster:
+    insecure-skip-tls-verify: true
+  name: local
+contexts:
+- context:
+    cluster: local
+    user: kube-proxy
+  name: service-account-context
+current-context: service-account-context
+users:
+- name: kube-proxy
+  user:
+    token: ${KUBE_PROXY_TOKEN}
+EOF
+  )
+}
diff --git a/cluster/vagrant/util.sh b/cluster/vagrant/util.sh
old mode 100644
new mode 100755
index e41cbb1b2fd..7aeeefc9106
--- a/cluster/vagrant/util.sh
+++ b/cluster/vagrant/util.sh
@@ -120,45 +120,11 @@ function create-provision-scripts {
   (
     echo "#! /bin/bash"
-    echo "KUBE_ROOT=/vagrant"
-    echo "INSTANCE_PREFIX='${INSTANCE_PREFIX}'"
-    echo "MASTER_NAME='${INSTANCE_PREFIX}-master'"
-    echo "MASTER_IP='${MASTER_IP}'"
-    echo "NODE_NAMES=(${NODE_NAMES[@]})"
-    echo "NODE_IPS=(${NODE_IPS[@]})"
+    echo-kube-env
     echo "NODE_IP='${MASTER_IP}'"
-    echo "CONTAINER_SUBNET='${CONTAINER_SUBNET}'"
-    echo "CONTAINER_NETMASK='${MASTER_CONTAINER_NETMASK}'"
-    echo "MASTER_CONTAINER_SUBNET='${MASTER_CONTAINER_SUBNET}'"
     echo "CONTAINER_ADDR='${MASTER_CONTAINER_ADDR}'"
-    echo "NODE_CONTAINER_NETMASKS='${NODE_CONTAINER_NETMASKS[@]}'"
-    echo "NODE_CONTAINER_SUBNETS=(${NODE_CONTAINER_SUBNETS[@]})"
-    echo "SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'"
-    echo "MASTER_USER='${MASTER_USER}'"
-    echo "MASTER_PASSWD='${MASTER_PASSWD}'"
-    echo "KUBE_USER='${KUBE_USER}'"
-    echo "KUBE_PASSWORD='${KUBE_PASSWORD}'"
-    echo "ENABLE_CLUSTER_MONITORING='${ENABLE_CLUSTER_MONITORING}'"
-    echo "ENABLE_NODE_LOGGING='${ENABLE_NODE_LOGGING:-false}'"
-    echo "ENABLE_CLUSTER_UI='${ENABLE_CLUSTER_UI}'"
-    echo "LOGGING_DESTINATION='${LOGGING_DESTINATION:-}'"
-    echo "ENABLE_CLUSTER_DNS='${ENABLE_CLUSTER_DNS:-false}'"
-    echo "DNS_SERVER_IP='${DNS_SERVER_IP:-}'"
-    echo "DNS_DOMAIN='${DNS_DOMAIN:-}'"
-    echo "DNS_REPLICAS='${DNS_REPLICAS:-}'"
-    echo "RUNTIME_CONFIG='${RUNTIME_CONFIG:-}'"
-    echo "ADMISSION_CONTROL='${ADMISSION_CONTROL:-}'"
-    echo "DOCKER_OPTS='${EXTRA_DOCKER_OPTS:-}'"
-    echo "VAGRANT_DEFAULT_PROVIDER='${VAGRANT_DEFAULT_PROVIDER:-}'"
-    echo "KUBELET_TOKEN='${KUBELET_TOKEN:-}'"
-    echo "KUBE_PROXY_TOKEN='${KUBE_PROXY_TOKEN:-}'"
-    echo "MASTER_EXTRA_SANS='${MASTER_EXTRA_SANS:-}'"
-    echo "ENABLE_CPU_CFS_QUOTA='${ENABLE_CPU_CFS_QUOTA}'"
-    echo "NETWORK_PROVIDER='${NETWORK_PROVIDER:-}'"
-    echo "OPENCONTRAIL_TAG='${OPENCONTRAIL_TAG:-}'"
-    echo "OPENCONTRAIL_KUBERNETES_TAG='${OPENCONTRAIL_KUBERNETES_TAG:-}'"
-    echo "OPENCONTRAIL_PUBLIC_SUBNET='${OPENCONTRAIL_PUBLIC_SUBNET:-}'"
-    echo "E2E_STORAGE_TEST_ENVIRONMENT='${E2E_STORAGE_TEST_ENVIRONMENT:-}'"
+    echo "CONTAINER_NETMASK='${MASTER_CONTAINER_NETMASK}'"
+    awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-utils.sh"
     awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-network-master.sh"
     awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-master.sh"
   ) > "${KUBE_TEMP}/master-start.sh"
@@ -166,31 +132,60 @@ function create-provision-scripts {
   for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
     (
       echo "#! /bin/bash"
-      echo "MASTER_NAME='${MASTER_NAME}'"
-      echo "MASTER_IP='${MASTER_IP}'"
-      echo "NODE_NAMES=(${NODE_NAMES[@]})"
+      echo-kube-env
       echo "NODE_NAME=(${NODE_NAMES[$i]})"
-      echo "NODE_IPS=(${NODE_IPS[@]})"
      echo "NODE_IP='${NODE_IPS[$i]}'"
      echo "NODE_ID='$i'"
-      echo "NODE_IP='${NODE_IPS[$i]}'"
-      echo "MASTER_CONTAINER_SUBNET='${MASTER_CONTAINER_SUBNET}'"
      echo "CONTAINER_ADDR='${NODE_CONTAINER_ADDRS[$i]}'"
      echo "CONTAINER_NETMASK='${NODE_CONTAINER_NETMASKS[$i]}'"
-      echo "NODE_CONTAINER_SUBNETS=(${NODE_CONTAINER_SUBNETS[@]})"
-      echo "CONTAINER_SUBNET='${CONTAINER_SUBNET}'"
-      echo "DOCKER_OPTS='${EXTRA_DOCKER_OPTS:-}'"
-      echo "VAGRANT_DEFAULT_PROVIDER='${VAGRANT_DEFAULT_PROVIDER:-}'"
-      echo "KUBELET_TOKEN='${KUBELET_TOKEN:-}'"
-      echo "KUBE_PROXY_TOKEN='${KUBE_PROXY_TOKEN:-}'"
-      echo "MASTER_EXTRA_SANS='${MASTER_EXTRA_SANS:-}'"
-      echo "E2E_STORAGE_TEST_ENVIRONMENT='${E2E_STORAGE_TEST_ENVIRONMENT:-}'"
+      awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-utils.sh"
       awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-network-minion.sh"
       awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-minion.sh"
     ) > "${KUBE_TEMP}/minion-start-${i}.sh"
   done
 }
 
+function echo-kube-env() {
+  echo "KUBE_ROOT=/vagrant"
+  echo "INSTANCE_PREFIX='${INSTANCE_PREFIX}'"
+  echo "MASTER_NAME='${INSTANCE_PREFIX}-master'"
+  echo "MASTER_IP='${MASTER_IP}'"
+  echo "NODE_NAMES=(${NODE_NAMES[@]})"
+  echo "NODE_IPS=(${NODE_IPS[@]})"
+  echo "CONTAINER_SUBNET='${CONTAINER_SUBNET}'"
+  echo "MASTER_CONTAINER_SUBNET='${MASTER_CONTAINER_SUBNET}'"
+  echo "NODE_CONTAINER_NETMASKS='${NODE_CONTAINER_NETMASKS[@]}'"
+  echo "NODE_CONTAINER_SUBNETS=(${NODE_CONTAINER_SUBNETS[@]})"
+  echo "SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'"
+  echo "MASTER_USER='${MASTER_USER}'"
+  echo "MASTER_PASSWD='${MASTER_PASSWD}'"
+  echo "KUBE_USER='${KUBE_USER}'"
+  echo "KUBE_PASSWORD='${KUBE_PASSWORD}'"
+  echo "ENABLE_CLUSTER_MONITORING='${ENABLE_CLUSTER_MONITORING}'"
+  echo "ENABLE_CLUSTER_LOGGING='${ENABLE_CLUSTER_LOGGING:-false}'"
+  echo "ELASTICSEARCH_LOGGING_REPLICAS='${ELASTICSEARCH_LOGGING_REPLICAS:-1}'"
+  echo "ENABLE_NODE_LOGGING='${ENABLE_NODE_LOGGING:-false}'"
+  echo "ENABLE_CLUSTER_UI='${ENABLE_CLUSTER_UI}'"
+  echo "LOGGING_DESTINATION='${LOGGING_DESTINATION:-}'"
+  echo "ENABLE_CLUSTER_DNS='${ENABLE_CLUSTER_DNS:-false}'"
+  echo "DNS_SERVER_IP='${DNS_SERVER_IP:-}'"
+  echo "DNS_DOMAIN='${DNS_DOMAIN:-}'"
+  echo "DNS_REPLICAS='${DNS_REPLICAS:-}'"
+  echo "RUNTIME_CONFIG='${RUNTIME_CONFIG:-}'"
+  echo "ADMISSION_CONTROL='${ADMISSION_CONTROL:-}'"
+  echo "DOCKER_OPTS='${EXTRA_DOCKER_OPTS:-}'"
+  echo "VAGRANT_DEFAULT_PROVIDER='${VAGRANT_DEFAULT_PROVIDER:-}'"
+  echo "KUBELET_TOKEN='${KUBELET_TOKEN:-}'"
+  echo "KUBE_PROXY_TOKEN='${KUBE_PROXY_TOKEN:-}'"
+  echo "MASTER_EXTRA_SANS='${MASTER_EXTRA_SANS:-}'"
+  echo "ENABLE_CPU_CFS_QUOTA='${ENABLE_CPU_CFS_QUOTA}'"
+  echo "NETWORK_PROVIDER='${NETWORK_PROVIDER:-}'"
+  echo "OPENCONTRAIL_TAG='${OPENCONTRAIL_TAG:-}'"
+  echo "OPENCONTRAIL_KUBERNETES_TAG='${OPENCONTRAIL_KUBERNETES_TAG:-}'"
+  echo "OPENCONTRAIL_PUBLIC_SUBNET='${OPENCONTRAIL_PUBLIC_SUBNET:-}'"
+  echo "E2E_STORAGE_TEST_ENVIRONMENT='${E2E_STORAGE_TEST_ENVIRONMENT:-}'"
+}
+
 function verify-cluster {
   # TODO: How does the user know the difference between "tak[ing] some
   # time" and "loop[ing] forever"?  Can we give more specific feedback on
@@ -203,16 +198,12 @@ function verify-cluster {
   # verify master has all required daemons
   echo "Validating master"
   local machine="master"
-  local -a required_daemon=("salt-master" "salt-minion" "kubelet")
+  local -a required_processes=("kube-apiserver" "kube-scheduler" "kube-controller-manager" "kubelet" "docker")
   local validated="1"
-  # This is a hack, but sometimes the salt-minion gets stuck on the master, so we just restart it
-  # to ensure that users never wait forever
-  vagrant ssh "$machine" -c "sudo systemctl restart salt-minion"
   until [[ "$validated" == "0" ]]; do
     validated="0"
-    local daemon
-    for daemon in "${required_daemon[@]}"; do
-      vagrant ssh "$machine" -c "which '${daemon}'" >/dev/null 2>&1 || {
+    for process in "${required_processes[@]}"; do
+      vagrant ssh "${machine}" -c "pgrep -f ${process}" >/dev/null 2>&1 || {
        printf "."
        validated="1"
        sleep 2
@@ -225,13 +216,12 @@ function verify-cluster {
   for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
     echo "Validating ${VAGRANT_NODE_NAMES[$i]}"
     local machine=${VAGRANT_NODE_NAMES[$i]}
-    local -a required_daemon=("salt-minion" "kubelet" "docker")
+    local -a required_processes=("kube-proxy" "kubelet" "docker")
     local validated="1"
-    until [[ "$validated" == "0" ]]; do
+    until [[ "${validated}" == "0" ]]; do
       validated="0"
-      local daemon
-      for daemon in "${required_daemon[@]}"; do
-        vagrant ssh "$machine" -c "which $daemon" >/dev/null 2>&1 || {
+      for process in "${required_processes[@]}"; do
+        vagrant ssh "${machine}" -c "pgrep -f ${process}" >/dev/null 2>&1 || {
          printf "."
          validated="1"
          sleep 2
@@ -242,16 +232,14 @@ function verify-cluster {
   echo
 
   echo "Waiting for each minion to be registered with cloud provider"
-  for (( i=0; i<${#NODE_IPS[@]}; i++)); do
-    local machine="${NODE_IPS[$i]}"
-    local count="0"
-    until [[ "$count" == "1" ]]; do
-      local minions
-      minions=$("${KUBE_ROOT}/cluster/kubectl.sh" get nodes -o go-template='{{range.items}}{{.metadata.name}}:{{end}}' --api-version=v1)
-      count=$(echo $minions | grep -c "${NODE_IPS[i]}") || {
+  for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
+    local validated="0"
+    until [[ "$validated" == "1" ]]; do
+      local minions=$("${KUBE_ROOT}/cluster/kubectl.sh" get nodes -o name --api-version=v1)
+      validated=$(echo $minions | grep -c "${NODE_NAMES[i]}") || {
        printf "."
        sleep 2
-        count="0"
+        validated="0"
      }
    done
  done
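
Reviewer note: the generated start scripts stay self-contained because create-provision-scripts now inlines provision-utils.sh ahead of the per-role scripts. A minimal sketch of the shape of the resulting ${KUBE_TEMP}/master-start.sh; the addresses below are illustrative placeholders, not values from this diff (the real values are printed by echo-kube-env):

    #! /bin/bash
    # Variables emitted by echo-kube-env (placeholder values for illustration):
    KUBE_ROOT=/vagrant
    MASTER_IP='10.245.1.2'
    NODE_IP='10.245.1.2'
    CONTAINER_NETMASK='255.255.255.0'

    # ...function bodies inlined by awk '!/^#/' from provision-utils.sh and
    # provision-network-master.sh follow here, defining write-salt-config,
    # create-salt-kubelet-auth, create-salt-kubeproxy-auth, install-salt,
    # run-salt, and provision-network-master...

    # The inlined provision-master.sh body then drives provisioning, roughly:
    provision-network-master
    write-salt-config kubernetes-master
    create-salt-kubelet-auth      # runs inside the known_tokens first-run branch
    create-salt-kubeproxy-auth   # runs inside the known_tokens first-run branch
    install-salt
    run-salt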
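
One subtlety worth flagging: awk '!/^#/' keeps every line that does not begin with '#' in column one, so inlining provision-utils.sh strips its shebang and license header while preserving the indented comments inside the functions. A quick way to inspect what actually gets embedded:

    # Prints provision-utils.sh as it will appear in the generated start script.
    awk '!/^#/' cluster/vagrant/provision-utils.sh | less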
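
The verify-cluster change is a behavioral fix as well as a rename: "which" only proved a binary was installed, while pgrep proves the process is running. Since pgrep -f matches against the full command line, a short pattern also matches a full-path invocation; a sketch of how the check behaves on the guest:

    # Exits 0 if any process command line contains the pattern, so the pattern
    # "kube-apiserver" also matches "/usr/local/bin/kube-apiserver --v=2".
    if pgrep -f kube-apiserver >/dev/null 2>&1; then
      echo "kube-apiserver is running"
    fi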