move vagrant to masterless salt

commit e2c5c898fb (parent 11574ee990)
mirror of https://github.com/k3s-io/kubernetes.git
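This commit drops the salt-master/salt-minion pair from the Vagrant cluster and has every VM apply its Salt states locally. A rough sketch of the difference; the second command is exactly what the new run-salt helper below runs:

    # mastered: a salt-master daemon pushes highstate to registered minions
    salt '*' state.highstate

    # masterless: the node renders and applies its own state tree locally
    salt-call --local state.highstate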
@@ -9,7 +9,9 @@
 {% set cloud_config_volume = "" -%}
 
 {% if grains.cloud is defined -%}
+{% if grains.cloud != 'vagrant' -%}
 {% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
+{% endif -%}
 
 {% if grains.cloud in [ 'aws', 'gce' ] and grains.cloud_config is defined -%}
 {% set cloud_config = "--cloud-config=" + grains.cloud_config -%}
@@ -27,7 +27,9 @@
 {% set cloud_config_volume = "" -%}
 
 {% if grains.cloud is defined -%}
+{% if grains.cloud != 'vagrant' -%}
 {% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
+{% endif -%}
 {% set service_account_key = " --service-account-private-key-file=/srv/kubernetes/server.key " -%}
 
 {% if grains.cloud in [ 'aws', 'gce' ] and grains.cloud_config is defined -%}
@ -1,6 +1,7 @@
|
|||||||
/etc/kubernetes/kube-node-unpacker.sh:
|
/etc/kubernetes/kube-node-unpacker.sh:
|
||||||
file.managed:
|
file.managed:
|
||||||
- source: salt://kube-node-unpacker/kube-node-unpacker.sh
|
- source: salt://kube-node-unpacker/kube-node-unpacker.sh
|
||||||
|
- makedirs: True
|
||||||
- user: root
|
- user: root
|
||||||
- group: root
|
- group: root
|
||||||
- mode: 755
|
- mode: 755
|
||||||
|
@@ -46,7 +46,7 @@
 {% endif -%}
 
 {% set cloud_provider = "" -%}
-{% if grains.cloud is defined -%}
+{% if grains.cloud is defined and grains.cloud != 'vagrant' -%}
 {% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
 {% endif -%}
 
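The recurring change in these Salt template hunks is a guard so that nodes whose cloud grain is vagrant stop passing --cloud-provider=vagrant to the Kubernetes daemons; Kubernetes has no such cloud provider, so the flag has to be omitted entirely. As an illustrative check (not part of this commit), the grain can be queried locally to confirm which branch a node takes:

    # prints "vagrant" on a Vagrant node, so cloud_provider stays empty
    salt-call --local grains.get cloud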
@@ -14,8 +14,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# exit on any error
-set -e
+set -o errexit
+set -o nounset
+set -o pipefail
 
 # Set the host name explicitly
 # See: https://github.com/mitchellh/vagrant/issues/2430
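The provisioning scripts also tighten their shell error handling here, replacing the bare set -e with the long-form options. For reference:

    set -o errexit    # same as set -e: abort when any command fails
    set -o nounset    # same as set -u: abort when expanding an unset variable
    set -o pipefail   # a pipeline fails when any stage fails, not just the last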
@@ -47,26 +48,6 @@ function release_not_found() {
   exit 1
 }
 
-# Look for our precompiled binary releases. When running from a source repo,
-# these are generated under _output. When running from an release tarball these
-# are under ./server.
-server_binary_tar="/vagrant/server/kubernetes-server-linux-amd64.tar.gz"
-if [[ ! -f "$server_binary_tar" ]]; then
-  server_binary_tar="/vagrant/_output/release-tars/kubernetes-server-linux-amd64.tar.gz"
-fi
-if [[ ! -f "$server_binary_tar" ]]; then
-  release_not_found
-fi
-
-salt_tar="/vagrant/server/kubernetes-salt.tar.gz"
-if [[ ! -f "$salt_tar" ]]; then
-  salt_tar="/vagrant/_output/release-tars/kubernetes-salt.tar.gz"
-fi
-if [[ ! -f "$salt_tar" ]]; then
-  release_not_found
-fi
-
-
 # Setup hosts file to support ping by hostname to each minion in the cluster from apiserver
 for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
   minion=${NODE_NAMES[$i]}
@@ -82,87 +63,7 @@ echo "$MASTER_IP $MASTER_NAME" >> /etc/hosts
 # Configure the master network
 provision-network-master
 
-# Update salt configuration
-mkdir -p /etc/salt/minion.d
-cat <<EOF >/etc/salt/minion.d/master.conf
-master: '$(echo "$MASTER_NAME" | sed -e "s/'/''/g")'
-auth_timeout: 10
-auth_tries: 2
-auth_safemode: True
-ping_interval: 1
-random_reauth_delay: 3
-state_aggregrate:
-  - pkg
-EOF
-
-cat <<EOF >/etc/salt/minion.d/grains.conf
-grains:
-  node_ip: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
-  publicAddressOverride: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
-  network_mode: openvswitch
-  networkInterfaceName: '$(echo "$NETWORK_IF_NAME" | sed -e "s/'/''/g")'
-  api_servers: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
-  cloud: vagrant
-  roles:
-    - kubernetes-master
-  runtime_config: '$(echo "$RUNTIME_CONFIG" | sed -e "s/'/''/g")'
-  docker_opts: '$(echo "$DOCKER_OPTS" | sed -e "s/'/''/g")'
-  master_extra_sans: '$(echo "$MASTER_EXTRA_SANS" | sed -e "s/'/''/g")'
-  keep_host_etcd: true
-EOF
-
-mkdir -p /srv/salt-overlay/pillar
-cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
-service_cluster_ip_range: '$(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
-cert_ip: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
-enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")'
-enable_cluster_logging: '$(echo "$ENABLE_CLUSTER_LOGGING" | sed -e "s/'/''/g")'
-enable_cluster_ui: '$(echo "$ENABLE_CLUSTER_UI" | sed -e "s/'/''/g")'
-enable_node_logging: '$(echo "$ENABLE_NODE_LOGGING" | sed -e "s/'/''/g")'
-logging_destination: '$(echo "$LOGGING_DESTINATION" | sed -e "s/'/''/g")'
-elasticsearch_replicas: '$(echo "$ELASTICSEARCH_LOGGING_REPLICAS" | sed -e "s/'/''/g")'
-enable_cluster_dns: '$(echo "$ENABLE_CLUSTER_DNS" | sed -e "s/'/''/g")'
-dns_replicas: '$(echo "$DNS_REPLICAS" | sed -e "s/'/''/g")'
-dns_server: '$(echo "$DNS_SERVER_IP" | sed -e "s/'/''/g")'
-dns_domain: '$(echo "$DNS_DOMAIN" | sed -e "s/'/''/g")'
-instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")'
-admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")'
-enable_cpu_cfs_quota: '$(echo "$ENABLE_CPU_CFS_QUOTA" | sed -e "s/'/''/g")'
-network_provider: '$(echo "$NETWORK_PROVIDER" | sed -e "s/'/''/g")'
-opencontrail_tag: '$(echo "$OPENCONTRAIL_TAG" | sed -e "s/'/''/g")'
-opencontrail_kubernetes_tag: '$(echo "$OPENCONTRAIL_KUBERNETES_TAG" | sed -e "s/'/''/g")'
-opencontrail_public_subnet: '$(echo "$OPENCONTRAIL_PUBLIC_SUBNET" | sed -e "s/'/''/g")'
-e2e_storage_test_environment: '$(echo "$E2E_STORAGE_TEST_ENVIRONMENT" | sed -e "s/'/''/g")'
-EOF
-
-# Configure the salt-master
-# Auto accept all keys from minions that try to join
-mkdir -p /etc/salt/master.d
-cat <<EOF >/etc/salt/master.d/auto-accept.conf
-open_mode: True
-auto_accept: True
-EOF
-
-cat <<EOF >/etc/salt/master.d/reactor.conf
-# React to new minions starting by running highstate on them.
-reactor:
-  - 'salt/minion/*/start':
-    - /srv/reactor/highstate-new.sls
-EOF
-
-cat <<EOF >/etc/salt/master.d/salt-output.conf
-# Minimize the amount of output to terminal
-state_verbose: False
-state_output: mixed
-log_level: debug
-log_level_logfile: debug
-EOF
-
-cat <<EOF >/etc/salt/minion.d/log-level-debug.conf
-log_level: debug
-log_level_logfile: debug
-EOF
-
+write-salt-config kubernetes-master
 
 # Generate and distribute a shared secret (bearer token) to
 # apiserver and kubelet so that kubelet can authenticate to
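Every value interpolated into these YAML heredocs, here and in the new write-salt-config helper, passes through the same sed filter, which doubles embedded single quotes so the value is safe inside a YAML single-quoted scalar. A minimal illustration:

    # YAML escapes ' inside single-quoted strings by doubling it,
    # so 'don''t' parses back as: don't
    echo "don't" | sed -e "s/'/''/g"    # prints don''t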
@@ -179,57 +80,9 @@ if [[ ! -f "${known_tokens_file}" ]]; then
   mkdir -p /srv/salt-overlay/salt/kubelet
   kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth"
   (umask u=rw,go= ; echo "{\"BearerToken\": \"$KUBELET_TOKEN\", \"Insecure\": true }" > $kubelet_auth_file)
-kubelet_kubeconfig_file="/srv/salt-overlay/salt/kubelet/kubeconfig"
-
-mkdir -p /srv/salt-overlay/salt/kubelet
-(umask 077;
-cat > "${kubelet_kubeconfig_file}" << EOF
-apiVersion: v1
-kind: Config
-clusters:
-- cluster:
-    insecure-skip-tls-verify: true
-  name: local
-contexts:
-- context:
-    cluster: local
-    user: kubelet
-  name: service-account-context
-current-context: service-account-context
-users:
-- name: kubelet
-  user:
-    token: ${KUBELET_TOKEN}
-EOF
-)
-
-
-mkdir -p /srv/salt-overlay/salt/kube-proxy
-kube_proxy_kubeconfig_file="/srv/salt-overlay/salt/kube-proxy/kubeconfig"
-# Make a kubeconfig file with the token.
-# TODO(etune): put apiserver certs into secret too, and reference from authfile,
-# so that "Insecure" is not needed.
-(umask 077;
-cat > "${kube_proxy_kubeconfig_file}" << EOF
-apiVersion: v1
-kind: Config
-clusters:
-- cluster:
-    insecure-skip-tls-verify: true
-  name: local
-contexts:
-- context:
-    cluster: local
-    user: kube-proxy
-  name: service-account-context
-current-context: service-account-context
-users:
-- name: kube-proxy
-  user:
-    token: ${KUBE_PROXY_TOKEN}
-EOF
-)
 
+create-salt-kubelet-auth
+create-salt-kubeproxy-auth
 # Generate tokens for other "service accounts". Append to known_tokens.
 #
 # NB: If this list ever changes, this script actually has to
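The inline kubeconfig heredocs move into create-salt-kubelet-auth and create-salt-kubeproxy-auth in the new provision-utils.sh, shown later in this diff. As an illustrative sanity check of the generated file (not part of this commit):

    # inspect the kubelet kubeconfig; kubectl redacts the token by default
    kubectl config view --kubeconfig=/srv/salt-overlay/salt/kubelet/kubeconfig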
@@ -250,15 +103,6 @@ if [ ! -e "${BASIC_AUTH_FILE}" ]; then
   echo "${MASTER_USER},${MASTER_PASSWD},admin" > "${BASIC_AUTH_FILE}")
 fi
 
-echo "Running release install script"
-rm -rf /kube-install
-mkdir -p /kube-install
-pushd /kube-install
-tar xzf "$salt_tar"
-cp "$server_binary_tar" .
-./kubernetes/saltbase/install.sh "${server_binary_tar##*/}"
-popd
-
 # Enable Fedora Cockpit on host to support Kubernetes administration
 # Access it by going to <master-ip>:9090 and login as vagrant/vagrant
 if ! which /usr/libexec/cockpit-ws &>/dev/null; then
@@ -272,72 +116,6 @@ if ! which /usr/libexec/cockpit-ws &>/dev/null; then
   systemctl start cockpit.socket
 fi
 
-# we will run provision to update code each time we test, so we do not want to do salt installs each time
-if ! which salt-master &>/dev/null; then
+install-salt
 
-  # Configure the salt-api
-  cat <<EOF >/etc/salt/master.d/salt-api.conf
-# Set vagrant user as REST API user
-external_auth:
-  pam:
-    vagrant:
-      - .*
-rest_cherrypy:
-  port: 8000
-  host: ${MASTER_IP}
-  disable_ssl: True
-  webhook_disable_auth: True
-EOF
-
-  # Install Salt Master
-  #
-  # -M installs the master
-  # -N does not install the minion
-  curl -sS -L --connect-timeout 20 --retry 6 --retry-delay 10 https://bootstrap.saltstack.com | sh -s -- -M -N
-
-  # Install salt-api
-  #
-  # This is used to provide the network transport for salt-api
-  yum install -y python-cherrypy
-  # This is used to inform the cloud provider used in the vagrant cluster
-  yum install -y salt-api
-  # Set log level to a level higher than "info" to prevent the message about
-  # enabling the service (which is not an error) from being printed to stderr.
-  SYSTEMD_LOG_LEVEL=notice systemctl enable salt-api
-  systemctl start salt-api
-fi
-
-if ! which salt-minion >/dev/null 2>&1; then
-
-  # Install Salt minion
-  curl -sS -L --connect-timeout 20 --retry 6 --retry-delay 10 https://bootstrap.saltstack.com | sh -s
-
-  # Edit the Salt minion unit file to do restart always
-  # needed because vagrant uses this as basis for registration of nodes in cloud provider
-  # set a oom_score_adj to -999 to prevent our node from being killed with salt-master and then making kubelet NotReady
-  # because its not found in salt cloud provider call
-  cat <<EOF >/usr/lib/systemd/system/salt-minion.service
-[Unit]
-Description=The Salt Minion
-After=syslog.target network.target
-
-[Service]
-Type=simple
-ExecStart=/usr/bin/salt-minion
-Restart=Always
-OOMScoreAdjust=-999
-
-[Install]
-WantedBy=multi-user.target
-EOF
-
-  systemctl daemon-reload
-  systemctl restart salt-minion.service
-
-else
-  # Only run highstate when updating the config. In the first-run case, Salt is
-  # set up to run highstate as new minions join for the first time.
-  echo "Executing configuration"
-  salt '*' mine.update
-  salt --force-color '*' state.highstate
-fi
+run-salt
@@ -14,59 +14,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# exit on any error
-set -e
+set -o errexit
+set -o nounset
+set -o pipefail
 
-#setup kubelet config
-mkdir -p "/var/lib/kubelet"
-(umask 077;
-cat > "/var/lib/kubelet/kubeconfig" << EOF
-apiVersion: v1
-kind: Config
-users:
-- name: kubelet
-  user:
-    token: ${KUBELET_TOKEN}
-clusters:
-- name: local
-  cluster:
-    insecure-skip-tls-verify: true
-contexts:
-- context:
-    cluster: local
-    user: kubelet
-  name: service-account-context
-current-context: service-account-context
-EOF
-)
-
-#setup proxy config
-mkdir -p "/var/lib/kube-proxy/"
-# Make a kubeconfig file with the token.
-# TODO(etune): put apiserver certs into secret too, and reference from authfile,
-# so that "Insecure" is not needed.
-(umask 077;
-cat > "/var/lib/kube-proxy/kubeconfig" << EOF
-apiVersion: v1
-kind: Config
-users:
-- name: kube-proxy
-  user:
-    token: ${KUBE_PROXY_TOKEN}
-clusters:
-- name: local
-  cluster:
-    insecure-skip-tls-verify: true
-contexts:
-- context:
-    cluster: local
-    user: kube-proxy
-  name: service-account-context
-current-context: service-account-context
-EOF
-)
-
-
 # Set the host name explicitly
 # See: https://github.com/mitchellh/vagrant/issues/2430
@@ -109,85 +59,11 @@ done
 # Configure network
 provision-network-minion
 
-# Placeholder for any other manifests that may be per-node.
-mkdir -p /etc/kubernetes/manifests
+write-salt-config kubernetes-pool
 
-# Let the minion know who its master is
-# Recover the salt-minion if the salt-master network changes
-## auth_timeout - how long we want to wait for a time out
-## auth_tries - how many times we will retry before restarting salt-minion
-## auth_safemode - if our cert is rejected, we will restart salt minion
-## ping_interval - restart the minion if we cannot ping the master after 1 minute
-## random_reauth_delay - wait 0-3 seconds when reauthenticating
-## recon_default - how long to wait before reconnecting
-## recon_max - how long you will wait upper bound
-## state_aggregrate - try to do a single yum command to install all referenced packages where possible at once, should improve startup times
-##
-mkdir -p /etc/salt/minion.d
-cat <<EOF >/etc/salt/minion.d/master.conf
-master: '$(echo "$MASTER_NAME" | sed -e "s/'/''/g")'
-auth_timeout: 10
-auth_tries: 2
-auth_safemode: True
-ping_interval: 1
-random_reauth_delay: 3
-state_aggregrate:
-  - pkg
-EOF
+create-salt-kubelet-auth
+create-salt-kubeproxy-auth
 
-cat <<EOF >/etc/salt/minion.d/log-level-debug.conf
-log_level: debug
-log_level_logfile: debug
-EOF
+install-salt
 
-# Our minions will have a pool role to distinguish them from the master.
-cat <<EOF >/etc/salt/minion.d/grains.conf
-grains:
-  cloud: vagrant
-  network_mode: openvswitch
-  node_ip: '$(echo "$NODE_IP" | sed -e "s/'/''/g")'
-  api_servers: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
-  networkInterfaceName: '$(echo "$NETWORK_IF_NAME" | sed -e "s/'/''/g")'
-  roles:
-    - kubernetes-pool
-  cbr-cidr: '$(echo "$CONTAINER_SUBNET" | sed -e "s/'/''/g")'
-  hostname_override: '$(echo "$NODE_IP" | sed -e "s/'/''/g")'
-  docker_opts: '$(echo "$DOCKER_OPTS" | sed -e "s/'/''/g")'
-EOF
-
-# QoS support requires that swap memory is disabled on each of the minions
-echo "Disable swap memory to ensure proper QoS"
-swapoff -a
-
-# we will run provision to update code each time we test, so we do not want to do salt install each time
-if ! which salt-minion >/dev/null 2>&1; then
-  # Install Salt
-  curl -sS -L --connect-timeout 20 --retry 6 --retry-delay 10 https://bootstrap.saltstack.com | sh -s
-
-  # Edit the Salt minion unit file to do restart always
-  # needed because vagrant uses this as basis for registration of nodes in cloud provider
-  # set a oom_score_adj to -999 to prevent our node from being killed with salt-master and then making kubelet NotReady
-  # because its not found in salt cloud provider call
-  cat <<EOF >/usr/lib/systemd/system/salt-minion.service
-[Unit]
-Description=The Salt Minion
-After=syslog.target network.target
-
-[Service]
-Type=simple
-ExecStart=/usr/bin/salt-minion
-Restart=Always
-OOMScoreAdjust=-999
-
-[Install]
-WantedBy=multi-user.target
-EOF
-
-  systemctl daemon-reload
-  systemctl restart salt-minion.service
-
-else
-  # Sometimes the minion gets wedged when it comes up along with the master.
-  # Restarting it here un-wedges it.
-  systemctl restart salt-minion.service
-fi
+run-salt
cluster/vagrant/provision-utils.sh (new executable file, 153 lines)
@@ -0,0 +1,153 @@
+#!/bin/bash
+
+# Copyright 2015 The Kubernetes Authors All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+function write-salt-config() {
+  local role="$1"
+
+  # Update salt configuration
+  mkdir -p /etc/salt/minion.d
+
+  mkdir -p /srv/salt-overlay/pillar
+  cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
+service_cluster_ip_range: '$(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
+cert_ip: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
+enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")'
+enable_cluster_logging: '$(echo "$ENABLE_CLUSTER_LOGGING" | sed -e "s/'/''/g")'
+enable_cluster_ui: '$(echo "$ENABLE_CLUSTER_UI" | sed -e "s/'/''/g")'
+enable_node_logging: '$(echo "$ENABLE_NODE_LOGGING" | sed -e "s/'/''/g")'
+logging_destination: '$(echo "$LOGGING_DESTINATION" | sed -e "s/'/''/g")'
+elasticsearch_replicas: '$(echo "$ELASTICSEARCH_LOGGING_REPLICAS" | sed -e "s/'/''/g")'
+enable_cluster_dns: '$(echo "$ENABLE_CLUSTER_DNS" | sed -e "s/'/''/g")'
+dns_replicas: '$(echo "$DNS_REPLICAS" | sed -e "s/'/''/g")'
+dns_server: '$(echo "$DNS_SERVER_IP" | sed -e "s/'/''/g")'
+dns_domain: '$(echo "$DNS_DOMAIN" | sed -e "s/'/''/g")'
+instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")'
+admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")'
+enable_cpu_cfs_quota: '$(echo "$ENABLE_CPU_CFS_QUOTA" | sed -e "s/'/''/g")'
+network_provider: '$(echo "$NETWORK_PROVIDER" | sed -e "s/'/''/g")'
+opencontrail_tag: '$(echo "$OPENCONTRAIL_TAG" | sed -e "s/'/''/g")'
+opencontrail_kubernetes_tag: '$(echo "$OPENCONTRAIL_KUBERNETES_TAG" | sed -e "s/'/''/g")'
+opencontrail_public_subnet: '$(echo "$OPENCONTRAIL_PUBLIC_SUBNET" | sed -e "s/'/''/g")'
+e2e_storage_test_environment: '$(echo "$E2E_STORAGE_TEST_ENVIRONMENT" | sed -e "s/'/''/g")'
+EOF
+
+  cat <<EOF >/etc/salt/minion.d/log-level-debug.conf
+log_level: info
+log_level_logfile: debug
+EOF
+
+  cat <<EOF >/etc/salt/minion.d/grains.conf
+grains:
+  node_ip: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
+  publicAddressOverride: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
+  network_mode: openvswitch
+  networkInterfaceName: '$(echo "$NETWORK_IF_NAME" | sed -e "s/'/''/g")'
+  api_servers: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
+  cloud: vagrant
+  roles:
+    - $role
+  runtime_config: '$(echo "$RUNTIME_CONFIG" | sed -e "s/'/''/g")'
+  docker_opts: '$(echo "$DOCKER_OPTS" | sed -e "s/'/''/g")'
+  master_extra_sans: '$(echo "$MASTER_EXTRA_SANS" | sed -e "s/'/''/g")'
+  keep_host_etcd: true
+EOF
+}
+
+function install-salt() {
+  server_binary_tar="/vagrant/server/kubernetes-server-linux-amd64.tar.gz"
+  if [[ ! -f "$server_binary_tar" ]]; then
+    server_binary_tar="/vagrant/_output/release-tars/kubernetes-server-linux-amd64.tar.gz"
+  fi
+  if [[ ! -f "$server_binary_tar" ]]; then
+    release_not_found
+  fi
+
+  salt_tar="/vagrant/server/kubernetes-salt.tar.gz"
+  if [[ ! -f "$salt_tar" ]]; then
+    salt_tar="/vagrant/_output/release-tars/kubernetes-salt.tar.gz"
+  fi
+  if [[ ! -f "$salt_tar" ]]; then
+    release_not_found
+  fi
+
+  echo "Running release install script"
+  rm -rf /kube-install
+  mkdir -p /kube-install
+  pushd /kube-install
+  tar xzf "$salt_tar"
+  cp "$server_binary_tar" .
+  ./kubernetes/saltbase/install.sh "${server_binary_tar##*/}"
+  popd
+
+  if ! which salt-call >/dev/null 2>&1; then
+    # Install salt binaries
+    curl -sS -L --connect-timeout 20 --retry 6 --retry-delay 10 https://bootstrap.saltstack.com | sh -s
+  fi
+}
+
+function run-salt() {
+  salt-call --local state.highstate
+}
+
+function create-salt-kubelet-auth() {
+  local -r kubelet_kubeconfig_folder="/srv/salt-overlay/salt/kubelet"
+  mkdir -p "${kubelet_kubeconfig_folder}"
+  (umask 077;
+  cat > "${kubelet_kubeconfig_folder}/kubeconfig" << EOF
+apiVersion: v1
+kind: Config
+clusters:
+- cluster:
+    insecure-skip-tls-verify: true
+  name: local
+contexts:
+- context:
+    cluster: local
+    user: kubelet
+  name: service-account-context
+current-context: service-account-context
+users:
+- name: kubelet
+  user:
+    token: ${KUBELET_TOKEN}
+EOF
+  )
+}
+
+function create-salt-kubeproxy-auth() {
+  kube_proxy_kubeconfig_folder="/srv/salt-overlay/salt/kube-proxy"
+  mkdir -p "${kube_proxy_kubeconfig_folder}"
+  (umask 077;
+  cat > "${kube_proxy_kubeconfig_folder}/kubeconfig" << EOF
+apiVersion: v1
+kind: Config
+clusters:
+- cluster:
+    insecure-skip-tls-verify: true
+  name: local
+contexts:
+- context:
+    cluster: local
+    user: kube-proxy
+  name: service-account-context
+current-context: service-account-context
+users:
+- name: kube-proxy
+  user:
+    token: ${KUBE_PROXY_TOKEN}
+EOF
+  )
+}
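These helpers are not sourced at runtime; as the util.sh hunk below shows, create-provision-scripts inlines them into the generated start scripts with awk. Their intended call order on a master is still easy to read off; a minimal sketch, assuming the cluster environment variables (MASTER_IP, SERVICE_CLUSTER_IP_RANGE, and so on) are already exported:

    write-salt-config kubernetes-master   # write pillar + grains for this role
    install-salt                          # unpack release tars, bootstrap salt-call
    run-salt                              # apply the state tree locally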
cluster/vagrant/util.sh (changed from normal file to executable file, 94 lines changed)
@@ -120,17 +120,40 @@ function create-provision-scripts {
 
   (
     echo "#! /bin/bash"
+    echo-kube-env
+    echo "NODE_IP='${MASTER_IP}'"
+    echo "CONTAINER_ADDR='${MASTER_CONTAINER_ADDR}'"
+    echo "CONTAINER_NETMASK='${MASTER_CONTAINER_NETMASK}'"
+    awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-utils.sh"
+    awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-network-master.sh"
+    awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-master.sh"
+  ) > "${KUBE_TEMP}/master-start.sh"
+
+  for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
+    (
+      echo "#! /bin/bash"
+      echo-kube-env
+      echo "NODE_NAME=(${NODE_NAMES[$i]})"
+      echo "NODE_IP='${NODE_IPS[$i]}'"
+      echo "NODE_ID='$i'"
+      echo "CONTAINER_ADDR='${NODE_CONTAINER_ADDRS[$i]}'"
+      echo "CONTAINER_NETMASK='${NODE_CONTAINER_NETMASKS[$i]}'"
+      awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-utils.sh"
+      awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-network-minion.sh"
+      awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-minion.sh"
+    ) > "${KUBE_TEMP}/minion-start-${i}.sh"
+  done
+}
+
+function echo-kube-env() {
     echo "KUBE_ROOT=/vagrant"
     echo "INSTANCE_PREFIX='${INSTANCE_PREFIX}'"
     echo "MASTER_NAME='${INSTANCE_PREFIX}-master'"
     echo "MASTER_IP='${MASTER_IP}'"
     echo "NODE_NAMES=(${NODE_NAMES[@]})"
     echo "NODE_IPS=(${NODE_IPS[@]})"
-    echo "NODE_IP='${MASTER_IP}'"
     echo "CONTAINER_SUBNET='${CONTAINER_SUBNET}'"
-    echo "CONTAINER_NETMASK='${MASTER_CONTAINER_NETMASK}'"
     echo "MASTER_CONTAINER_SUBNET='${MASTER_CONTAINER_SUBNET}'"
-    echo "CONTAINER_ADDR='${MASTER_CONTAINER_ADDR}'"
     echo "NODE_CONTAINER_NETMASKS='${NODE_CONTAINER_NETMASKS[@]}'"
     echo "NODE_CONTAINER_SUBNETS=(${NODE_CONTAINER_SUBNETS[@]})"
     echo "SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'"
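The awk '!/^#/' filter prints only lines that do not start with '#', stripping each helper's shebang and license header so the concatenated start script keeps a single shebang at the top. A minimal illustration:

    printf '#!/bin/bash\n# comment\necho hi\n' | awk '!/^#/'
    # output: echo hi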
@@ -139,6 +162,8 @@ function create-provision-scripts {
     echo "KUBE_USER='${KUBE_USER}'"
     echo "KUBE_PASSWORD='${KUBE_PASSWORD}'"
     echo "ENABLE_CLUSTER_MONITORING='${ENABLE_CLUSTER_MONITORING}'"
+    echo "ENABLE_CLUSTER_LOGGING='${ENABLE_CLUSTER_LOGGING:-false}'"
+    echo "ELASTICSEARCH_LOGGING_REPLICAS='${ELASTICSEARCH_LOGGING_REPLICAS:-1}'"
     echo "ENABLE_NODE_LOGGING='${ENABLE_NODE_LOGGING:-false}'"
     echo "ENABLE_CLUSTER_UI='${ENABLE_CLUSTER_UI}'"
     echo "LOGGING_DESTINATION='${LOGGING_DESTINATION:-}'"
@@ -159,36 +184,6 @@ function create-provision-scripts {
     echo "OPENCONTRAIL_KUBERNETES_TAG='${OPENCONTRAIL_KUBERNETES_TAG:-}'"
     echo "OPENCONTRAIL_PUBLIC_SUBNET='${OPENCONTRAIL_PUBLIC_SUBNET:-}'"
     echo "E2E_STORAGE_TEST_ENVIRONMENT='${E2E_STORAGE_TEST_ENVIRONMENT:-}'"
-    awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-network-master.sh"
-    awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-master.sh"
-  ) > "${KUBE_TEMP}/master-start.sh"
-
-  for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
-    (
-      echo "#! /bin/bash"
-      echo "MASTER_NAME='${MASTER_NAME}'"
-      echo "MASTER_IP='${MASTER_IP}'"
-      echo "NODE_NAMES=(${NODE_NAMES[@]})"
-      echo "NODE_NAME=(${NODE_NAMES[$i]})"
-      echo "NODE_IPS=(${NODE_IPS[@]})"
-      echo "NODE_IP='${NODE_IPS[$i]}'"
-      echo "NODE_ID='$i'"
-      echo "NODE_IP='${NODE_IPS[$i]}'"
-      echo "MASTER_CONTAINER_SUBNET='${MASTER_CONTAINER_SUBNET}'"
-      echo "CONTAINER_ADDR='${NODE_CONTAINER_ADDRS[$i]}'"
-      echo "CONTAINER_NETMASK='${NODE_CONTAINER_NETMASKS[$i]}'"
-      echo "NODE_CONTAINER_SUBNETS=(${NODE_CONTAINER_SUBNETS[@]})"
-      echo "CONTAINER_SUBNET='${CONTAINER_SUBNET}'"
-      echo "DOCKER_OPTS='${EXTRA_DOCKER_OPTS:-}'"
-      echo "VAGRANT_DEFAULT_PROVIDER='${VAGRANT_DEFAULT_PROVIDER:-}'"
-      echo "KUBELET_TOKEN='${KUBELET_TOKEN:-}'"
-      echo "KUBE_PROXY_TOKEN='${KUBE_PROXY_TOKEN:-}'"
-      echo "MASTER_EXTRA_SANS='${MASTER_EXTRA_SANS:-}'"
-      echo "E2E_STORAGE_TEST_ENVIRONMENT='${E2E_STORAGE_TEST_ENVIRONMENT:-}'"
-      awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-network-minion.sh"
-      awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-minion.sh"
-    ) > "${KUBE_TEMP}/minion-start-${i}.sh"
-  done
 }
 
 function verify-cluster {
@@ -203,16 +198,12 @@ function verify-cluster {
   # verify master has all required daemons
   echo "Validating master"
   local machine="master"
-  local -a required_daemon=("salt-master" "salt-minion" "kubelet")
+  local -a required_processes=("kube-apiserver" "kube-scheduler" "kube-controller-manager" "kubelet" "docker")
   local validated="1"
-  # This is a hack, but sometimes the salt-minion gets stuck on the master, so we just restart it
-  # to ensure that users never wait forever
-  vagrant ssh "$machine" -c "sudo systemctl restart salt-minion"
   until [[ "$validated" == "0" ]]; do
     validated="0"
-    local daemon
-    for daemon in "${required_daemon[@]}"; do
-      vagrant ssh "$machine" -c "which '${daemon}'" >/dev/null 2>&1 || {
+    for process in "${required_processes[@]}"; do
+      vagrant ssh "${machine}" -c "pgrep -f ${process}" >/dev/null 2>&1 || {
        printf "."
        validated="1"
        sleep 2
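Validation now asserts that the control-plane processes are actually running rather than merely installed: which only looks for a binary on PATH, while pgrep -f matches live processes against their full command line. For illustration:

    which kubelet       # succeeds if the binary exists, even when nothing is running
    pgrep -f kubelet    # succeeds only while a kubelet process is alive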
@@ -225,13 +216,12 @@
   for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
     echo "Validating ${VAGRANT_NODE_NAMES[$i]}"
     local machine=${VAGRANT_NODE_NAMES[$i]}
-    local -a required_daemon=("salt-minion" "kubelet" "docker")
+    local -a required_processes=("kube-proxy" "kubelet" "docker")
     local validated="1"
-    until [[ "$validated" == "0" ]]; do
+    until [[ "${validated}" == "0" ]]; do
      validated="0"
-      local daemon
-      for daemon in "${required_daemon[@]}"; do
-        vagrant ssh "$machine" -c "which $daemon" >/dev/null 2>&1 || {
+      for process in "${required_processes[@]}"; do
+        vagrant ssh "${machine}" -c "pgrep -f ${process}" >/dev/null 2>&1 || {
        printf "."
        validated="1"
        sleep 2
@@ -242,16 +232,14 @@
 
   echo
   echo "Waiting for each minion to be registered with cloud provider"
-  for (( i=0; i<${#NODE_IPS[@]}; i++)); do
-    local machine="${NODE_IPS[$i]}"
-    local count="0"
-    until [[ "$count" == "1" ]]; do
-      local minions
-      minions=$("${KUBE_ROOT}/cluster/kubectl.sh" get nodes -o go-template='{{range.items}}{{.metadata.name}}:{{end}}' --api-version=v1)
-      count=$(echo $minions | grep -c "${NODE_IPS[i]}") || {
+  for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
+    local validated="0"
+    until [[ "$validated" == "1" ]]; do
+      local minions=$("${KUBE_ROOT}/cluster/kubectl.sh" get nodes -o name --api-version=v1)
+      validated=$(echo $minions | grep -c "${NODE_NAMES[i]}") || {
        printf "."
        sleep 2
-        count="0"
+        validated="0"
       }
     done
   done
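Node registration is now keyed on node names instead of IPs: kubectl get nodes -o name emits one node/<name> entry per node, and grep -c both counts the matches and exits nonzero when there are none, which is what drives the retry loop. The same pattern in isolation, using a hypothetical node name:

    minions=$(kubectl get nodes -o name)
    echo "$minions" | grep -c "node-1" || echo "node-1 not registered yet"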