diff --git a/cluster/gce/configure-vm.sh b/cluster/gce/configure-vm.sh
index 3c1d8eaa6e1..4dfbcdb9ae0 100644
--- a/cluster/gce/configure-vm.sh
+++ b/cluster/gce/configure-vm.sh
@@ -73,22 +73,12 @@ for k,v in yaml.load(sys.stdin).iteritems():
     KUBERNETES_MASTER="true"
   fi
 
-  if [[ "${KUBERNETES_MASTER}" == "true" ]]; then
-    # TODO(zmerlynn): This block of code should disappear once #4561 & #4562 are done
-    if [[ -z "${KUBERNETES_NODE_NAMES:-}" ]]; then
-      until KUBERNETES_NODE_NAMES=$(curl-metadata kube-node-names); do
-        echo 'Waiting for metadata KUBERNETES_NODE_NAMES...'
-        sleep 3
-      done
-    fi
-  else
-    # And this should go away once the master can allocate CIDRs
-    if [[ -z "${MINION_IP_RANGE:-}" ]]; then
-      until MINION_IP_RANGE=$(curl-metadata node-ip-range); do
-        echo 'Waiting for metadata MINION_IP_RANGE...'
-        sleep 3
-      done
-    fi
+  if [[ "${KUBERNETES_MASTER}" != "true" ]] && [[ -z "${MINION_IP_RANGE:-}" ]]; then
+    # This block of code should go away once the master can allocate CIDRs
+    until MINION_IP_RANGE=$(curl-metadata node-ip-range); do
+      echo 'Waiting for metadata MINION_IP_RANGE...'
+      sleep 3
+    done
   fi
 }
 
@@ -238,12 +228,6 @@ dns_server: '$(echo "$DNS_SERVER_IP" | sed -e "s/'/''/g")'
 dns_domain: '$(echo "$DNS_DOMAIN" | sed -e "s/'/''/g")'
 admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")'
 EOF
-
-  if [[ "${KUBERNETES_MASTER}" == "true" ]]; then
-    cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
-gce_node_names: '$(echo "$KUBERNETES_NODE_NAMES" | sed -e "s/'/''/g")'
-EOF
-  fi
 }
 
 # This should only happen on cluster initialization
diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh
index 17cabc423a4..eca1716cdf9 100755
--- a/cluster/gce/util.sh
+++ b/cluster/gce/util.sh
@@ -584,18 +584,7 @@ function kube-up {
   # command returns, but currently it returns before the instances come up due
   # to gcloud's deficiency.
   wait-for-minions-to-run
-
-  # Give the master an initial node list (it's waiting in
-  # startup). This resolves a bit of a chicken-egg issue: The minions
-  # need to know the master's ip, so we boot the master first. The
-  # master still needs to know the initial minion list (until all the
-  # pieces #156 are complete), so we have it wait on the minion
-  # boot. (The minions further wait until the loop below, where CIDRs
-  # get filled in.)
   detect-minion-names
-  local kube_node_names
-  kube_node_names=$(IFS=,; echo "${MINION_NAMES[*]}")
-  add-instance-metadata "${MASTER_NAME}" "kube-node-names=${kube_node_names}"
 
   # Create the routes and set IP ranges to instance metadata, 5 instances at a time.
   for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
diff --git a/cluster/saltbase/salt/kube-controller-manager/default b/cluster/saltbase/salt/kube-controller-manager/default
index 585c54f8685..3f9b8d38be3 100644
--- a/cluster/saltbase/salt/kube-controller-manager/default
+++ b/cluster/saltbase/salt/kube-controller-manager/default
@@ -21,7 +21,6 @@
 {% if grains.cloud is defined -%}
   {% if grains.cloud == 'gce' -%}
     {% set cloud_provider = "--cloud_provider=gce" -%}
-    {% set machines = "--machines=" + pillar['gce_node_names'] -%}
   {% endif -%}
   {% if grains.cloud == 'aws' -%}
     {% set cloud_provider = "--cloud_provider=aws" -%}