Merge pull request #31467 from vmware/fix-vsphere-cloud-provider.kerneltime

Automatic merge from submit-queue

Add support for the vSphere cloud provider in kube-up


**What this PR does / why we need it**:
The vSphere cloud provider added in 1.3 was not being configured when deploying via kube-up; this change adds that configuration for the vSphere kube-up path.
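
Concretely, kube-up now renders a cloud config on the master and the nodes and the Salt templates pass it to the Kubernetes components. A rough sketch of the net effect (the path, section names, and flags come from the templates in this PR; the concrete values are placeholders, not from a real setup):

```sh
# Sketch only: kube-up writes a cloud config from the GOVC_* environment
# variables (placeholder values shown here).
cat <<EOF > /etc/vsphere_cloud.config
[Global]
user = administrator@vsphere.local
password = secret
server = vcenter.example.com
port = 443
insecure-flag = 1
datacenter = ha-datacenter
datastore = datastore1
[Disk]
scsicontrollertype = pvscsi
EOF

# The Salt templates then pass these flags to the kubelet, kube-apiserver,
# and kube-controller-manager:
#   --cloud-provider=vsphere --cloud-config=/etc/vsphere_cloud.config
```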

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, #<issue_number>, ...)` format, will close that issue when PR gets merged)*: fixes #

**Special notes for your reviewer**:

**Release note**:
```release-note
Add support for vSphere Cloud Provider when deploying via kubeup on vSphere.
```

When deploying on vSphere using kube-up, add configuration for the vSphere cloud provider.
Merged by Kubernetes Submit Queue on 2016-10-05 19:12:17 -07:00, committed by GitHub.
commit 7766b408b8
9 changed files with 87 additions and 19 deletions

View File

@@ -14,7 +14,7 @@
{% set srv_sshproxy_path = "/srv/sshproxy" -%}
{% if grains.cloud is defined -%}
{% if grains.cloud not in ['vagrant', 'vsphere', 'photon-controller', 'azure-legacy'] -%}
{% if grains.cloud not in ['vagrant', 'photon-controller', 'azure-legacy'] -%}
{% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
{% endif -%}
@@ -22,7 +22,7 @@
{% set cloud_config = "--cloud-config=" + grains.cloud_config -%}
{% endif -%}
{% if grains.cloud in [ 'aws', 'gce' ] and grains.cloud_config is defined -%}
{% if grains.cloud in [ 'vsphere', 'aws', 'gce' ] and grains.cloud_config is defined -%}
{% set cloud_config = "--cloud-config=" + grains.cloud_config -%}
{% set cloud_config_mount = "{\"name\": \"cloudconfigmount\",\"mountPath\": \"" + grains.cloud_config + "\", \"readOnly\": true}," -%}
{% set cloud_config_volume = "{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"" + grains.cloud_config + "\"}}," -%}

View File

@@ -37,7 +37,7 @@
{% set srv_kube_path = "/srv/kubernetes" -%}
{% if grains.cloud is defined -%}
{% if grains.cloud not in ['vagrant', 'vsphere', 'photon-controller', 'azure-legacy'] -%}
{% if grains.cloud not in ['vagrant', 'photon-controller', 'azure-legacy'] -%}
{% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
{% endif -%}
{% set service_account_key = "--service-account-private-key-file=/srv/kubernetes/server.key" -%}
@@ -46,7 +46,7 @@
{% set cloud_config = "--cloud-config=" + grains.cloud_config -%}
{% endif -%}
{% if grains.cloud in [ 'aws', 'gce' ] and grains.cloud_config is defined -%}
{% if grains.cloud in [ 'vsphere', 'aws', 'gce' ] and grains.cloud_config is defined -%}
{% set cloud_config = "--cloud-config=" + grains.cloud_config -%}
{% set cloud_config_mount = "{\"name\": \"cloudconfigmount\",\"mountPath\": \"" + grains.cloud_config + "\", \"readOnly\": true}," -%}
{% set cloud_config_volume = "{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"" + grains.cloud_config + "\"}}," -%}

View File

@@ -48,12 +48,12 @@
{% endif -%}
{% set cloud_provider = "" -%}
{% if grains.cloud is defined and grains.cloud not in ['vagrant', 'vsphere', 'photon-controller', 'azure-legacy'] -%}
{% if grains.cloud is defined and grains.cloud not in ['vagrant', 'photon-controller', 'azure-legacy'] -%}
{% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
{% endif -%}
{% set cloud_config = "" -%}
{% if grains.cloud in [ 'openstack' ] and grains.cloud_config is defined -%}
{% if grains.cloud in [ 'openstack', 'vsphere' ] and grains.cloud_config is defined -%}
{% set cloud_config = "--cloud-config=" + grains.cloud_config -%}
{% endif -%}

View File

@@ -17,12 +17,19 @@
SSH_OPTS="-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oLogLevel=ERROR -C"
# These need to be set
#export GOVC_URL=
#export GOVC_DATACENTER=
#export GOVC_DATASTORE=
#export GOVC_NETWORK=
#export GOVC_GUEST_LOGIN=
# export GOVC_URL='hostname' # hostname of the vc
# export GOVC_USERNAME='username' # username for logging into vSphere.
# export GOVC_PASSWORD='password' # password for the above username
# export GOVC_NETWORK='Network Name' # Name of the network the vms should join. Many times it could be "VM Network"
# export GOVC_DATASTORE='target datastore'
# To get resource pool via govc: govc ls -l 'host/*' | grep ResourcePool | awk '{print $1}' | xargs -n1 -t govc pool.info
# export GOVC_RESOURCE_POOL='resource pool or cluster with access to datastore'
# export GOVC_GUEST_LOGIN='kube:kube' # Used for logging into kube.vmdk during deployment.
# export GOVC_PORT=443 # The port to be used by vSphere cloud provider plugin
# To get datacenter via govc: govc datacenter.info
# export GOVC_DATACENTER='ha-datacenter' # The datacenter to be used by vSphere cloud provider plugin
# export GOVC_GUEST_LOGIN='kube:kube' # Used for logging into kube.vmdk during deployment.
# Set GOVC_INSECURE if the host in GOVC_URL is using a certificate that cannot
# be verified (i.e. a self-signed certificate), but IS trusted.
#export GOVC_INSECURE=1
# export GOVC_INSECURE=1
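
As a usage sketch, the environment might be populated like this before running kube-up (all values are placeholders; the `KUBERNETES_PROVIDER=vsphere` invocation follows the standard kube-up entry point):

```sh
# Placeholder credentials and inventory names; substitute your own vCenter/ESXi details.
export GOVC_URL='vcenter.example.com'
export GOVC_USERNAME='administrator@vsphere.local'
export GOVC_PASSWORD='secret'
export GOVC_PORT=443
export GOVC_INSECURE=1                 # only if the certificate is self-signed but trusted
export GOVC_DATACENTER='ha-datacenter'
export GOVC_DATASTORE='datastore1'
export GOVC_NETWORK='VM Network'
export GOVC_RESOURCE_POOL='Resources'  # resource pool or cluster with access to the datastore
export GOVC_GUEST_LOGIN='kube:kube'

KUBERNETES_PROVIDER=vsphere ./cluster/kube-up.sh
```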

View File

@@ -17,6 +17,7 @@
NUM_NODES=4
DISK=./kube/kube.vmdk
GUEST_ID=debian7_64Guest
ENABLE_UUID=TRUE
INSTANCE_PREFIX=kubernetes
MASTER_TAG="${INSTANCE_PREFIX}-master"

View File

@@ -20,6 +20,22 @@ sed -i -e "s/http.us.debian.org/mirrors.kernel.org/" /etc/apt/sources.list
# Prepopulate the name of the Master
mkdir -p /etc/salt/minion.d
echo "master: $MASTER_NAME" > /etc/salt/minion.d/master.conf
CLOUD_CONFIG=/etc/vsphere_cloud.config
# Configuration to initialize vsphere cloud provider
cat <<EOF > $CLOUD_CONFIG
[Global]
user = $GOVC_USERNAME
password = $GOVC_PASSWORD
server = $GOVC_URL
port = $GOVC_PORT
insecure-flag = $GOVC_INSECURE
datacenter = $GOVC_DATACENTER
datastore = $GOVC_DATASTORE
[Disk]
scsicontrollertype = pvscsi
EOF
cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
@@ -29,6 +45,7 @@
cloud: vsphere
master_extra_sans: $MASTER_EXTRA_SANS
kube_user: $KUBE_USER
cloud_config: $CLOUD_CONFIG
EOF
# Auto accept all keys from minions that try to join

View File

@@ -30,6 +30,23 @@ echo "master: $KUBE_MASTER" > /etc/salt/minion.d/master.conf
# Turn on debugging for salt-minion
# echo "DAEMON_ARGS=\"\$DAEMON_ARGS --log-file-level=debug\"" > /etc/default/salt-minion
# Configuration to initialize vsphere cloud provider
CLOUD_CONFIG=/etc/vsphere_cloud.config
cat <<EOF > $CLOUD_CONFIG
[Global]
user = $GOVC_USERNAME
password = $GOVC_PASSWORD
server = $GOVC_URL
port = $GOVC_PORT
insecure-flag = $GOVC_INSECURE
datacenter = $GOVC_DATACENTER
datastore = $GOVC_DATASTORE
[Disk]
scsicontrollertype = pvscsi
EOF
# Our minions will have a pool role to distinguish them from the master.
#
# Setting the "minion_ip" here causes the kubelet to use its IP for
@@ -37,11 +54,11 @@ echo "master: $KUBE_MASTER" > /etc/salt/minion.d/master.conf
#
cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
hostname_override: $(ip route get 1.1.1.1 | awk '{print $7}')
roles:
- kubernetes-pool
- kubernetes-pool-vsphere
cloud: vsphere
cloud_config: $CLOUD_CONFIG
EOF
# Install Salt

View File

@@ -180,12 +180,17 @@ function kube-up-vm {
-debug \
-disk="${DISK}" \
-g="${GUEST_ID}" \
-on=false \
-link=true \
"$@" \
"${vm_name}"
govc vm.change -e="disk.enableUUID=${ENABLE_UUID}" -vm="${vm_name}"
govc vm.power -on=true "${vm_name}"
# Retrieve IP first, to confirm the guest operations agent is running.
govc vm.ip "${vm_name}" > /dev/null
CURRENT_NODE_IP=$(govc vm.ip "${vm_name}")
govc guest.mkdir \
-l "kube:kube" \
@@ -395,6 +400,13 @@ function kube-up {
echo "readonly MASTER_HTPASSWD='${htpasswd}'"
echo "readonly E2E_STORAGE_TEST_ENVIRONMENT='${E2E_STORAGE_TEST_ENVIRONMENT:-}'"
echo "readonly MASTER_EXTRA_SANS='${MASTER_EXTRA_SANS:-}'"
echo "readonly GOVC_USERNAME='${GOVC_USERNAME}'"
echo "readonly GOVC_PASSWORD='${GOVC_PASSWORD}'"
echo "readonly GOVC_URL='${GOVC_URL}'"
echo "readonly GOVC_PORT='${GOVC_PORT}'"
echo "readonly GOVC_INSECURE='${GOVC_INSECURE}'"
echo "readonly GOVC_DATACENTER='${GOVC_DATACENTER}'"
echo "readonly GOVC_DATASTORE='${GOVC_DATASTORE}'"
grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/create-dynamic-salt-files.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/install-release.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/salt-master.sh"
@@ -416,11 +428,23 @@ function kube-up {
echo "KUBE_MASTER=${KUBE_MASTER}"
echo "KUBE_MASTER_IP=${KUBE_MASTER_IP}"
echo "NODE_IP_RANGE=$NODE_IP_RANGES"
echo "readonly GOVC_USERNAME='${GOVC_USERNAME}'"
echo "readonly GOVC_PASSWORD='${GOVC_PASSWORD}'"
echo "readonly GOVC_URL='${GOVC_URL}'"
echo "readonly GOVC_PORT='${GOVC_PORT}'"
echo "readonly GOVC_INSECURE='${GOVC_INSECURE}'"
echo "readonly GOVC_DATACENTER='${GOVC_DATACENTER}'"
echo "readonly GOVC_DATASTORE='${GOVC_DATASTORE}'"
grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/salt-minion.sh"
) > "${KUBE_TEMP}/node-start-${i}.sh"
(
kube-up-vm "${NODE_NAMES[$i]}" -c ${NODE_CPU-1} -m ${NODE_MEMORY_MB-1024}
add_to_hosts="${NODE_NAMES[$i]} ${CURRENT_NODE_IP}"
node_ip_file=${NODE_NAMES[$i]}-ip
echo "sudo bash -c \"echo $add_to_hosts >> /etc/hosts\"" > ${KUBE_TEMP}/${node_ip_file}
kube-scp ${KUBE_MASTER_IP} ${KUBE_TEMP}/${node_ip_file} /tmp/
kube-ssh ${KUBE_MASTER_IP} "bash /tmp/${node_ip_file}"
kube-run "${NODE_NAMES[$i]}" "${KUBE_TEMP}/node-start-${i}.sh"
) &
done
@@ -441,18 +465,18 @@ function kube-up {
printf "Waiting for salt-master to be up on ${KUBE_MASTER} ...\n"
remote-pgrep ${KUBE_MASTER_IP} "salt-master"
printf "Waiting for all packages to be installed on ${KUBE_MASTER} ...\n"
kube-check ${KUBE_MASTER_IP} 'sudo salt "kubernetes-master" state.highstate -t 30 | grep -E "Failed:[[:space:]]+0"'
local i
for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
printf "Waiting for salt-minion to be up on ${NODE_NAMES[$i]} ....\n"
remote-pgrep ${KUBE_NODE_IP_ADDRESSES[$i]} "salt-minion"
printf "Waiting for all salt packages to be installed on ${NODE_NAMES[$i]} .... \n"
kube-check ${KUBE_MASTER_IP} 'sudo salt '"${NODE_NAMES[$i]}"' state.highstate -t 30 | grep -E "Failed:[[:space:]]+0"'
printf " OK\n"
done
printf "Waiting for init highstate to be done on all nodes (this can take a few minutes) ...\n"
kube-check ${KUBE_MASTER_IP} 'sudo salt '\''*'\'' state.show_highstate -t 50'
printf "Waiting for all packages to be installed on all nodes (this can take a few minutes) ...\n"
kube-check ${KUBE_MASTER_IP} 'sudo salt '\''*'\'' state.highstate -t 50 | grep -E "Failed:[[:space:]]+0"'
echo
echo "Waiting for master and node initialization."

View File

@@ -68,6 +68,8 @@ cluster/ubuntu/util.sh: local node_ip=${1}
cluster/vagrant/provision-utils.sh: api_servers: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
cluster/vagrant/provision-utils.sh: node_ip: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
cluster/vagrant/provision-utils.sh: runtime_config: '$(echo "$RUNTIME_CONFIG" | sed -e "s/'/''/g")'
cluster/vsphere/templates/salt-master.sh: cloud_config: $CLOUD_CONFIG
cluster/vsphere/templates/salt-minion.sh: cloud_config: $CLOUD_CONFIG
cluster/vsphere/templates/salt-minion.sh: hostname_override: $(ip route get 1.1.1.1 | awk '{print $7}')
examples/cluster-dns/images/frontend/client.py: service_address = socket.gethostbyname(hostname)
examples/storage/cassandra/image/files/run.sh: cluster_name \