Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-31 07:20:13 +00:00
Remove the --machines SaltStack configuration on GCE
Per https://github.com/GoogleCloudPlatform/kubernetes/issues/6072#issuecomment-87074456, this is no longer necessary: the master no longer needs a static node list. Woo!
This commit is contained in:
parent a2801a5a18
commit 68ccb97907
@@ -73,22 +73,12 @@ for k,v in yaml.load(sys.stdin).iteritems():
     KUBERNETES_MASTER="true"
   fi
 
-  if [[ "${KUBERNETES_MASTER}" == "true" ]]; then
-    # TODO(zmerlynn): This block of code should disappear once #4561 & #4562 are done
-    if [[ -z "${KUBERNETES_NODE_NAMES:-}" ]]; then
-      until KUBERNETES_NODE_NAMES=$(curl-metadata kube-node-names); do
-        echo 'Waiting for metadata KUBERNETES_NODE_NAMES...'
-        sleep 3
-      done
-    fi
-  else
-    # And this should go away once the master can allocate CIDRs
-    if [[ -z "${MINION_IP_RANGE:-}" ]]; then
-      until MINION_IP_RANGE=$(curl-metadata node-ip-range); do
-        echo 'Waiting for metadata MINION_IP_RANGE...'
-        sleep 3
-      done
-    fi
+  if [[ "${KUBERNETES_MASTER}" != "true" ]] && [[ -z "${MINION_IP_RANGE:-}" ]]; then
+    # This block of code should go away once the master can allocate CIDRs
+    until MINION_IP_RANGE=$(curl-metadata node-ip-range); do
+      echo 'Waiting for metadata MINION_IP_RANGE...'
+      sleep 3
+    done
   fi
 }
 
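For context on the poll loop kept above: curl-metadata is a small helper defined elsewhere in this script, and the sketch below is only an assumption about its shape, based on how GCE instance attributes are normally fetched. The key behavior is that curl's --fail flag makes it exit non-zero while the attribute is unset, which is what drives the until loop.

# Assumed shape of the curl-metadata helper (not part of this diff):
# fetch one instance attribute from the GCE metadata server, exiting
# non-zero while the key is absent so callers can poll with `until`.
curl-metadata() {
  curl --fail --silent -H 'Metadata-Flavor: Google' \
    "http://metadata.google.internal/computeMetadata/v1/instance/attributes/${1}"
}

# Polling pattern as used in the retained block:
until MINION_IP_RANGE=$(curl-metadata node-ip-range); do
  echo 'Waiting for metadata MINION_IP_RANGE...'
  sleep 3
done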
@@ -238,12 +228,6 @@ dns_server: '$(echo "$DNS_SERVER_IP" | sed -e "s/'/''/g")'
 dns_domain: '$(echo "$DNS_DOMAIN" | sed -e "s/'/''/g")'
 admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")'
 EOF
-
-  if [[ "${KUBERNETES_MASTER}" == "true" ]]; then
-    cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
-gce_node_names: '$(echo "$KUBERNETES_NODE_NAMES" | sed -e "s/'/''/g")'
-EOF
-  fi
 }
 
 # This should only happen on cluster initialization
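The sed expression in these pillar writes doubles any embedded single quote, which is the escape YAML requires inside a single-quoted scalar. A quick illustration, with a hypothetical value:

# Hypothetical value containing a single quote:
DNS_DOMAIN="it's.cluster.local"
echo "dns_domain: '$(echo "$DNS_DOMAIN" | sed -e "s/'/''/g")'"
# Output: dns_domain: 'it''s.cluster.local'  (valid single-quoted YAML)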
@@ -584,18 +584,7 @@ function kube-up {
   # command returns, but currently it returns before the instances come up due
   # to gcloud's deficiency.
   wait-for-minions-to-run
 
-  # Give the master an initial node list (it's waiting in
-  # startup). This resolves a bit of a chicken-egg issue: The minions
-  # need to know the master's ip, so we boot the master first. The
-  # master still needs to know the initial minion list (until all the
-  # pieces #156 are complete), so we have it wait on the minion
-  # boot. (The minions further wait until the loop below, where CIDRs
-  # get filled in.)
-  detect-minion-names
-  local kube_node_names
-  kube_node_names=$(IFS=,; echo "${MINION_NAMES[*]}")
-  add-instance-metadata "${MASTER_NAME}" "kube-node-names=${kube_node_names}"
 
   # Create the routes and set IP ranges to instance metadata, 5 instances at a time.
   for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
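The deleted lines used the IFS-in-a-subshell idiom to join the MINION_NAMES array into a comma-separated string, then pushed it to the master as instance metadata. A sketch of that idiom, with add-instance-metadata assumed to wrap gcloud (the helper's real definition is elsewhere in the GCE scripts, not in this diff):

# Hypothetical node names for illustration:
MINION_NAMES=(kubernetes-minion-1 kubernetes-minion-2)

# Setting IFS inside $(...) scopes the change to the subshell, so
# "${MINION_NAMES[*]}" joins on commas without disturbing the caller:
kube_node_names=$(IFS=,; echo "${MINION_NAMES[*]}")
# -> kubernetes-minion-1,kubernetes-minion-2

# Assumed shape of the helper: attach key=value metadata to an instance.
add-instance-metadata() {
  gcloud compute instances add-metadata "$1" --metadata "$2"
}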
@@ -21,7 +21,6 @@
 {% if grains.cloud is defined -%}
   {% if grains.cloud == 'gce' -%}
     {% set cloud_provider = "--cloud_provider=gce" -%}
-    {% set machines = "--machines=" + pillar['gce_node_names'] -%}
   {% endif -%}
   {% if grains.cloud == 'aws' -%}
     {% set cloud_provider = "--cloud_provider=aws" -%}
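With the machines template variable gone, the assembled controller-manager command line on GCE no longer carries a static node list. Roughly, and with hypothetical node names (the flags themselves come straight from the template above):

# Before this commit: node list baked in at provisioning time.
kube-controller-manager --cloud_provider=gce --machines=minion-1,minion-2
# After: no static list; see issue #6072 for why it is no longer needed.
kube-controller-manager --cloud_provider=gce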